
CIS GKE 1.0.0 benchmark (#570)

* Add initial commit for CIS GKE 1.0 benchmark

* Update README with GKE instructions

* Fix YAML linter issues

* Set GKE benchmark k8s version to gke-1.0

* Add tests for gke-1.0

Co-authored-by: Roberto Rojas <robertojrojas@gmail.com>
Abubakr-Sadik Nii Nai Davis, 2020-03-03 14:51:48 +00:00, committed by GitHub
parent 237f8cf818
commit d988b81540
14 changed files with 1910 additions and 23 deletions

README.md

@@ -54,8 +54,9 @@ kube-bench supports the tests for Kubernetes as defined in the CIS Benchmarks 1.
By default, kube-bench will determine the test set to run based on the Kubernetes version running on the machine.
There is also preliminary support for Red Hat's OpenShift Hardening Guide for 3.10 and 3.11. Please note that kube-bench does not automatically detect OpenShift - see below.
kube-bench has preliminary support for Red Hat's OpenShift Hardening Guide for 3.10 and 3.11.
There is also preliminary support for Google's Kubernetes Engine starting from Kubernetes 1.15.
Please note that kube-bench does not automatically detect OpenShift or GKE - see below.
## Installation
@@ -115,6 +116,7 @@ The following table shows the valid targets based on the CIS Benchmark version.
| cis-1.3| master, node |
| cis-1.4| master, node |
| cis-1.5| master, controlplane, node, etcd, policies |
| gke-1.0| master, controlplane, node, etcd, policies, managedservices |
If no targets are specified, `kube-bench` will determine the appropriate targets based on the CIS Benchmark version.
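Targets can also be passed explicitly. For example, based on the table above, a GKE run could be limited to just the policies and managed services checks (a usage sketch; the same flags appear in `job-gke.yaml` below):

```
./kube-bench --benchmark gke-1.0 run --targets policies,managedservices
```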
@@ -262,6 +264,18 @@ kube-bench includes a set of test files for Red Hat's OpenShift hardening guide
when you run the `kube-bench` command (either directly or through YAML).
### Running in a GKE cluster
| CIS Benchmark | Targets |
|---|---|
| gke-1.0| master, controlplane, node, etcd, policies, managedservices |
kube-bench includes benchmarks for GKE. To run these checks, specify `--benchmark gke-1.0` when you run the `kube-bench` command.
To run the benchmark as a job in your GKE cluster, apply the included `job-gke.yaml`:
```
kubectl apply -f job-gke.yaml
```
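Once the job completes, the results are available in the pod's logs. A minimal sketch for retrieving them (the generated pod name will differ on your cluster):

```
kubectl get pods --selector job-name=kube-bench
kubectl logs job/kube-bench
```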
## Output

cfg/config.yaml

@@ -162,6 +162,8 @@ controlplane:
policies:
components: []
managedservices:
components: []
version_mapping:
"1.11": "cis-1.3"
@@ -171,5 +173,6 @@ version_mapping:
"1.15": "cis-1.5"
"1.16": "cis-1.5"
"1.17": "cis-1.5"
"gke-1.0": "gke-1.0"
"ocp-3.10": "rh-0.7"
"ocp-3.11": "rh-0.7"

cfg/gke-1.0/config.yaml (new file)

@@ -0,0 +1,2 @@
---
## Version-specific settings that override the values in cfg/config.yaml

cfg/gke-1.0/controlplane.yaml (new file)

@@ -0,0 +1,32 @@
---
controls:
version: "gke-1.0"
id: 3
text: "Control Plane Configuration"
type: "controlplane"
groups:
- id: 3.1
text: "Authentication and Authorization"
checks:
- id: 3.1.1
text: "Client certificate authentication should not be used for users (Not Scored)"
type: "manual"
remediation: |
Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
implemented in place of client certificates.
You can remediate the availability of client certificates in your GKE cluster. See
Recommendation 6.8.2.
scored: false
- id: 3.2
text: "Logging"
checks:
- id: 3.2.1
text: "Ensure that a minimal audit policy is created (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 3.2.2
text: "Ensure that the audit policy covers key security concerns (Not Scored) "
remediation: "This control cannot be modified in GKE."
scored: false

cfg/gke-1.0/etcd.yaml (new file)

@@ -0,0 +1,45 @@
---
controls:
version: "gke-1.0"
id: 2
text: "Etcd Node Configuration"
type: "etcd"
groups:
- id: 2
text: "Etcd Node Configuration Files"
checks:
- id: 2.1
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 2.2
text: "Ensure that the --client-cert-auth argument is set to true (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 2.3
text: "Ensure that the --auto-tls argument is not set to true (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 2.4
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
set as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 2.5
text: "Ensure that the --peer-client-cert-auth argument is set to true (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 2.6
text: "Ensure that the --peer-auto-tls argument is not set to true (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 2.7
text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false

cfg/gke-1.0/managedservices.yaml (new file)

@@ -0,0 +1,703 @@
---
controls:
version: "gke-1.0"
id: 6
text: "Managed Services"
type: "managedservices"
groups:
- id: 6.1
text: "Image Registry and Image Scanning"
checks:
- id: 6.1.1
text: "Ensure Image Vulnerability Scanning using GCR Container Analysis
or a third-party provider (Scored)"
type: "manual"
remediation: |
Using Command Line:
gcloud services enable containerscanning.googleapis.com
scored: true
- id: 6.1.2
text: "Minimize user access to GCR (Scored)"
type: "manual"
remediation: |
Using Command Line:
To change roles at the GCR bucket level:
Firstly, run the following if read permissions are required:
gsutil iam ch [TYPE]:[EMAIL-ADDRESS]:objectViewer
gs://artifacts.[PROJECT_ID].appspot.com
Then remove the excessively privileged role (Storage Admin / Storage Object Admin /
Storage Object Creator) using:
gsutil iam ch -d [TYPE]:[EMAIL-ADDRESS]:[ROLE]
gs://artifacts.[PROJECT_ID].appspot.com
where:
[TYPE] can be one of the following:
o user, if the [EMAIL-ADDRESS] is a Google account
o serviceAccount, if [EMAIL-ADDRESS] specifies a Service account
[EMAIL-ADDRESS] can be one of the following:
o a Google account (for example, someone@example.com)
o a Cloud IAM service account
To modify roles defined at the project level and subsequently inherited within the GCR
bucket, or the Service Account User role, extract the IAM policy file, modify it accordingly
and apply it using:
gcloud projects set-iam-policy [PROJECT_ID] [POLICY_FILE]
scored: true
- id: 6.1.3
text: "Minimize cluster access to read-only for GCR (Scored)"
type: "manual"
remediation: |
Using Command Line:
For an account explicitly granted to the bucket. First, add read access to the Kubernetes
Service Account
gsutil iam ch [TYPE]:[EMAIL-ADDRESS]:objectViewer
gs://artifacts.[PROJECT_ID].appspot.com
where:
[TYPE] can be one of the following:
o user, if the [EMAIL-ADDRESS] is a Google account
o serviceAccount, if [EMAIL-ADDRESS] specifies a Service account
[EMAIL-ADDRESS] can be one of the following:
o a Google account (for example, someone@example.com)
o a Cloud IAM service account
Then remove the excessively privileged role (Storage Admin / Storage Object Admin /
Storage Object Creator) using:
gsutil iam ch -d [TYPE]:[EMAIL-ADDRESS]:[ROLE]
gs://artifacts.[PROJECT_ID].appspot.com
For an account that inherits access to the GCR Bucket through Project level permissions,
modify the Projects IAM policy file accordingly, then upload it using:
gcloud projects set-iam-policy [PROJECT_ID] [POLICY_FILE]
scored: true
- id: 6.1.4
text: "Minimize Container Registries to only those approved (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
First, update the cluster to enable Binary Authorization:
gcloud container clusters update [CLUSTER_NAME] \
--enable-binauthz
Create a Binary Authorization Policy using the Binary Authorization Policy Reference
(https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for guidance.
Import the policy file into Binary Authorization:
gcloud container binauthz policy import [YAML_POLICY]
scored: false
- id: 6.2
text: "Identity and Access Management (IAM)"
checks:
- id: 6.2.1
text: "Ensure GKE clusters are not running using the Compute Engine
default service account (Scored)"
type: "manual"
remediation: |
Using Command Line:
Firstly, create a minimally privileged service account:
gcloud iam service-accounts create [SA_NAME] \
--display-name "GKE Node Service Account"
export NODE_SA_EMAIL=`gcloud iam service-accounts list \
--format='value(email)' \
--filter='displayName:GKE Node Service Account'`
Grant the following roles to the service account:
export PROJECT_ID=`gcloud config get-value project`
gcloud projects add-iam-policy-binding $PROJECT_ID \
--member serviceAccount:$NODE_SA_EMAIL \
--role roles/monitoring.metricWriter
gcloud projects add-iam-policy-binding $PROJECT_ID \
--member serviceAccount:$NODE_SA_EMAIL \
--role roles/monitoring.viewer
gcloud projects add-iam-policy-binding $PROJECT_ID \
--member serviceAccount:$NODE_SA_EMAIL \
--role roles/logging.logWriter
To create a new Node pool using the Service account, run the following command:
gcloud container node-pools create [NODE_POOL] \
--service-account=[SA_NAME]@[PROJECT_ID].iam.gserviceaccount.com \
--cluster=[CLUSTER_NAME] --zone [COMPUTE_ZONE]
You will need to migrate your workloads to the new Node pool, and delete Node pools that
use the default service account to complete the remediation.
scored: true
- id: 6.2.2
text: "Prefer using dedicated GCP Service Accounts and Workload Identity (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
gcloud beta container clusters update [CLUSTER_NAME] --zone [CLUSTER_ZONE] \
--identity-namespace=[PROJECT_ID].svc.id.goog
Note that existing Node pools are unaffected. New Node pools default to
--workload-metadata-from-node=GKE_METADATA_SERVER.
Then, modify existing Node pools to enable GKE_METADATA_SERVER:
gcloud beta container node-pools update [NODEPOOL_NAME] \
--cluster=[CLUSTER_NAME] --zone [CLUSTER_ZONE] \
--workload-metadata-from-node=GKE_METADATA_SERVER
You may also need to modify workloads in order for them to use Workload Identity as
described within https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity.
Also consider the effects on the availability of your hosted workloads as Node
pools are updated; it may be more appropriate to create new Node Pools.
scored: false
- id: 6.3
text: "Cloud Key Management Service (Cloud KMS)"
checks:
- id: 6.3.1
text: "Ensure Kubernetes Secrets are encrypted using keys managed in Cloud KMS (Scored)"
type: "manual"
remediation: |
Using Command Line:
To create a key
Create a key ring:
gcloud kms keyrings create [RING_NAME] \
--location [LOCATION] \
--project [KEY_PROJECT_ID]
Create a key:
gcloud kms keys create [KEY_NAME] \
--location [LOCATION] \
--keyring [RING_NAME] \
--purpose encryption \
--project [KEY_PROJECT_ID]
Grant the Kubernetes Engine Service Agent service account the Cloud KMS CryptoKey
Encrypter/Decrypter role:
gcloud kms keys add-iam-policy-binding [KEY_NAME] \
--location [LOCATION] \
--keyring [RING_NAME] \
--member serviceAccount:[SERVICE_ACCOUNT_NAME] \
--role roles/cloudkms.cryptoKeyEncrypterDecrypter \
--project [KEY_PROJECT_ID]
To create a new cluster with Application-layer Secrets Encryption:
gcloud container clusters create [CLUSTER_NAME] \
--cluster-version=latest \
--zone [ZONE] \
--database-encryption-key projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] \
--project [CLUSTER_PROJECT_ID]
To enable on an existing cluster:
gcloud container clusters update [CLUSTER_NAME] \
--zone [ZONE] \
--database-encryption-key projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] \
--project [CLUSTER_PROJECT_ID]
scored: true
- id: 6.4
text: "Node Metadata"
checks:
- id: 6.4.1
text: "Ensure legacy Compute Engine instance metadata APIs are Disabled (Scored)"
type: "manual"
remediation: |
Using Command Line:
To update an existing cluster, create a new Node pool with the legacy GCE metadata
endpoint disabled:
gcloud container node-pools create [POOL_NAME] \
--metadata disable-legacy-endpoints=true \
--cluster [CLUSTER_NAME] \
--zone [COMPUTE_ZONE]
You will need to migrate workloads from any existing non-conforming Node pools, to the
new Node pool, then delete non-conforming Node pools to complete the remediation.
scored: true
- id: 6.4.2
text: "Ensure the GKE Metadata Server is Enabled (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
gcloud beta container clusters update [CLUSTER_NAME] \
--identity-namespace=[PROJECT_ID].svc.id.goog
Note that existing Node pools are unaffected. New Node pools default to
--workload-metadata-from-node=GKE_METADATA_SERVER.
To modify an existing Node pool to enable GKE Metadata Server:
gcloud beta container node-pools update [NODEPOOL_NAME] \
--cluster=[CLUSTER_NAME] \
--workload-metadata-from-node=GKE_METADATA_SERVER
You may also need to modify workloads in order for them to use Workload Identity as
described within https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity.
scored: false
- id: 6.5
text: "Node Configuration and Maintenance"
checks:
- id: 6.5.1
text: "Ensure Container-Optimized OS (COS) is used for GKE node images (Scored)"
type: "manual"
remediation: |
Using Command Line:
To set the node image to cos for an existing cluster's Node pool:
gcloud container clusters upgrade [CLUSTER_NAME] \
--image-type cos \
--zone [COMPUTE_ZONE] --node-pool [POOL_NAME]
scored: true
- id: 6.5.2
text: "Ensure Node Auto-Repair is enabled for GKE nodes (Scored)"
type: "manual"
remediation: |
Using Command Line:
To enable node auto-repair for an existing cluster with Node pool, run the following
command:
gcloud container node-pools update [POOL_NAME] \
--cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
--enable-autorepair
scored: true
- id: 6.5.3
text: "Ensure Node Auto-Upgrade is enabled for GKE nodes (Scored)"
type: "manual"
remediation: |
Using Command Line:
To enable node auto-upgrade for an existing cluster's Node pool, run the following
command:
gcloud container node-pools update [NODE_POOL] \
--cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
--enable-autoupgrade
scored: true
- id: 6.5.4
text: "Automate GKE version management using Release Channels (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
Create a new cluster by running the following command:
gcloud beta container clusters create [CLUSTER_NAME] \
--zone [COMPUTE_ZONE] \
--release-channel [RELEASE_CHANNEL]
where [RELEASE_CHANNEL] is stable or regular according to your needs.
scored: false
- id: 6.5.5
text: "Ensure Shielded GKE Nodes are Enabled (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
To migrate an existing cluster, you will need to specify the --enable-shielded-nodes flag
on a cluster update command:
gcloud beta container clusters update [CLUSTER_NAME] \
--zone [CLUSTER_ZONE] \
--enable-shielded-nodes
scored: false
- id: 6.5.6
text: "Ensure Integrity Monitoring for Shielded GKE Nodes is Enabled (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
To create a Node pool within the cluster with Integrity Monitoring enabled, run the
following command:
gcloud beta container node-pools create [NODEPOOL_NAME] \
--cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
--shielded-integrity-monitoring
You will also need to migrate workloads from existing non-conforming Node pools to the
newly created Node pool, then delete the non-conforming pools.
scored: false
- id: 6.5.7
text: "Ensure Secure Boot for Shielded GKE Nodes is Enabled (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
To create a Node pool within the cluster with Secure Boot enabled, run the following
command:
gcloud beta container node-pools create [NODEPOOL_NAME] \
--cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
--shielded-secure-boot
You will also need to migrate workloads from existing non-conforming Node pools to the
newly created Node pool, then delete the non-conforming pools.
scored: false
- id: 6.6
text: "Cluster Networking"
checks:
- id: 6.6.1
text: "Enable VPC Flow Logs and Intranode Visibility (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
To enable intranode visibility on an existing cluster, run the following command:
gcloud beta container clusters update [CLUSTER_NAME] \
--enable-intra-node-visibility
scored: false
- id: 6.6.2
text: "Ensure use of VPC-native clusters (Scored)"
type: "manual"
remediation: |
Using Command Line:
To enable Alias IP on a new cluster, run the following command:
gcloud container clusters create [CLUSTER_NAME] \
--zone [COMPUTE_ZONE] \
--enable-ip-alias
scored: true
- id: 6.6.3
text: "Ensure Master Authorized Networks is Enabled (Scored)"
type: "manual"
remediation: |
Using Command Line:
To check Master Authorized Networks status for an existing cluster, run the following
command:
gcloud container clusters describe [CLUSTER_NAME] \
--zone [COMPUTE_ZONE] \
--format json | jq '.masterAuthorizedNetworksConfig'
The output should return
{
"enabled": true
}
if Master Authorized Networks is enabled.
If Master Authorized Networks is disabled, the
above command will return null ({}).
scored: true
- id: 6.6.4
text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Scored)"
type: "manual"
remediation: |
Using Command Line:
Create a cluster with a Private Endpoint enabled and Public Access disabled by including
the --enable-private-endpoint flag within the cluster create command:
gcloud container clusters create [CLUSTER_NAME] \
--enable-private-endpoint
Setting this flag also requires the setting of --enable-private-nodes, --enable-ip-alias
and --master-ipv4-cidr=[MASTER_CIDR_RANGE].
scored: true
- id: 6.6.5
text: "Ensure clusters are created with Private Nodes (Scored)"
type: "manual"
remediation: |
Using Command Line:
To create a cluster with Private Nodes enabled, include the --enable-private-nodes flag
within the cluster create command:
gcloud container clusters create [CLUSTER_NAME] \
--enable-private-nodes
Setting this flag also requires the setting of --enable-ip-alias and
--master-ipv4-cidr=[MASTER_CIDR_RANGE].
scored: true
- id: 6.6.6
text: "Consider firewalling GKE worker nodes (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
Use the following command to generate firewall rules, setting the variables as appropriate.
You may want to use the target [TAG] and [SERVICE_ACCOUNT] previously identified.
gcloud compute firewall-rules create FIREWALL_RULE_NAME \
--network [NETWORK] \
--priority [PRIORITY] \
--direction [DIRECTION] \
--action [ACTION] \
--target-tags [TAG] \
--target-service-accounts [SERVICE_ACCOUNT] \
--source-ranges [SOURCE_CIDR-RANGE] \
--source-tags [SOURCE_TAGS] \
--source-service-accounts=[SOURCE_SERVICE_ACCOUNT] \
--destination-ranges [DESTINATION_CIDR_RANGE] \
--rules [RULES]
scored: false
- id: 6.6.7
text: "Ensure Network Policy is Enabled and set as appropriate (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
To enable Network Policy for an existing cluster, firstly enable the Network Policy add-on:
gcloud container clusters update [CLUSTER_NAME] \
--zone [COMPUTE_ZONE] \
--update-addons NetworkPolicy=ENABLED
Then, enable Network Policy:
gcloud container clusters update [CLUSTER_NAME] \
--zone [COMPUTE_ZONE] \
--enable-network-policy
scored: false
- id: 6.6.8
text: "Ensure use of Google-managed SSL Certificates (Not Scored)"
type: "manual"
remediation: |
If services of type:LoadBalancer are discovered, consider replacing the Service with an
Ingress.
To configure the Ingress and use Google-managed SSL certificates, follow the instructions
as listed at https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs.
scored: false
- id: 6.7
text: "Logging"
checks:
- id: 6.7.1
text: "Ensure Stackdriver Kubernetes Logging and Monitoring is Enabled (Scored)"
type: "manual"
remediation: |
Using Command Line:
STACKDRIVER KUBERNETES ENGINE MONITORING SUPPORT (PREFERRED):
To enable Stackdriver Kubernetes Engine Monitoring for an existing cluster, run the
following command:
gcloud container clusters update [CLUSTER_NAME] \
--zone [COMPUTE_ZONE] \
--enable-stackdriver-kubernetes
LEGACY STACKDRIVER SUPPORT:
Both Logging and Monitoring support must be enabled.
To enable Legacy Stackdriver Logging for an existing cluster, run the following command:
gcloud container clusters update [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
--logging-service logging.googleapis.com
To enable Legacy Stackdriver Monitoring for an existing cluster, run the following
command:
gcloud container clusters update [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
--monitoring-service monitoring.googleapis.com
scored: true
- id: 6.7.2
text: "Enable Linux auditd logging (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
Download the example manifests:
curl https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-node-tools/master/os-audit/cos-auditd-logging.yaml \
> cos-auditd-logging.yaml
Edit the example manifests if needed. Then, deploy them:
kubectl apply -f cos-auditd-logging.yaml
Verify that the logging Pods have started. If you defined a different Namespace in your
manifests, replace cos-auditd with the name of the namespace you're using:
kubectl get pods --namespace=cos-auditd
scored: false
- id: 6.8
text: "Authentication and Authorization"
checks:
- id: 6.8.1
text: "Ensure Basic Authentication using static passwords is Disabled (Scored)"
type: "manual"
remediation: |
Using Command Line:
To update an existing cluster and disable Basic Authentication by removing the static
password:
gcloud container clusters update [CLUSTER_NAME] \
--no-enable-basic-auth
scored: true
- id: 6.8.2
text: "Ensure authentication using Client Certificates is Disabled (Scored)"
type: "manual"
remediation: |
Using Command Line:
Create a new cluster without a Client Certificate:
gcloud container clusters create [CLUSTER_NAME] \
--no-issue-client-certificate
scored: true
- id: 6.8.3
text: "Manage Kubernetes RBAC users with Google Groups for GKE (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
Follow the G Suite Groups instructions at
https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#google-groups-for-gke.
Then, create a cluster with:
gcloud beta container clusters create my-cluster \
--security-group="gke-security-groups@[yourdomain.com]"
Finally create Roles, ClusterRoles, RoleBindings, and ClusterRoleBindings that
reference your G Suite Groups.
scored: false
- id: 6.8.4
text: "Ensure Legacy Authorization (ABAC) is Disabled (Scored)"
type: "manual"
remediation: |
Using Command Line:
To disable Legacy Authorization for an existing cluster, run the following command:
gcloud container clusters update [CLUSTER_NAME] \
--zone [COMPUTE_ZONE] \
--no-enable-legacy-authorization
scored: true
- id: 6.9
text: "Storage"
checks:
- id: 6.9.1
text: "Enable Customer-Managed Encryption Keys (CMEK) for GKE Persistent Disks (PD) (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
FOR NODE BOOT DISKS:
Create a new node pool using customer-managed encryption keys for the node boot disk, of
[DISK_TYPE] either pd-standard or pd-ssd:
gcloud beta container node-pools create [NODE_POOL_NAME] \
--cluster [CLUSTER_NAME] \
--disk-type [DISK_TYPE] \
--boot-disk-kms-key \
projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]
Create a cluster using customer-managed encryption keys for the node boot disk, of
[DISK_TYPE] either pd-standard or pd-ssd:
gcloud beta container clusters create [CLUSTER_NAME] \
--disk-type [DISK_TYPE] \
--boot-disk-kms-key \
projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]
FOR ATTACHED DISKS:
Follow the instructions detailed at https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek.
scored: false
- id: 6.10
text: "Other Cluster Configurations"
checks:
- id: 6.10.1
text: "Ensure Kubernetes Web UI is Disabled (Scored)"
type: "manual"
remediation: |
Using Command Line:
To disable the Kubernetes Dashboard on an existing cluster, run the following command:
gcloud container clusters update [CLUSTER_NAME] \
--zone [ZONE] \
--update-addons=KubernetesDashboard=DISABLED
scored: true
- id: 6.10.2
text: "Ensure that Alpha clusters are not used for production workloads (Scored)"
type: "manual"
remediation: |
Using Command Line:
Upon creating a new cluster
gcloud container clusters create [CLUSTER_NAME] \
--zone [COMPUTE_ZONE]
Do not use the --enable-kubernetes-alpha argument.
scored: true
- id: 6.10.3
text: "Ensure Pod Security Policy is Enabled and set as appropriate (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
To enable Pod Security Policy for an existing cluster, run the following command:
gcloud beta container clusters update [CLUSTER_NAME] \
--zone [COMPUTE_ZONE] \
--enable-pod-security-policy
scored: false
- id: 6.10.4
text: "Consider GKE Sandbox for running untrusted workloads (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
To enable GKE Sandbox on an existing cluster, a new Node pool must be created.
gcloud container node-pools create [NODE_POOL_NAME] \
--zone=[COMPUTE-ZONE] \
--cluster=[CLUSTER_NAME] \
--image-type=cos_containerd \
--sandbox type=gvisor
scored: false
- id: 6.10.5
text: "Ensure use of Binary Authorization (Scored)"
type: "manual"
remediation: |
Using Command Line:
Firstly, update the cluster to enable Binary Authorization:
gcloud container clusters update [CLUSTER_NAME] \
--zone [COMPUTE-ZONE] \
--enable-binauthz
Create a Binary Authorization Policy using the Binary Authorization Policy Reference
(https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for
guidance.
Import the policy file into Binary Authorization:
gcloud container binauthz policy import [YAML_POLICY]
scored: true
- id: 6.10.6
text: "Enable Cloud Security Command Center (Cloud SCC) (Not Scored)"
type: "manual"
remediation: |
Using Command Line:
Follow the instructions at https://cloud.google.com/security-command-center/docs/quickstart-scc-setup.
scored: false

cfg/gke-1.0/master.yaml (new file)

@@ -0,0 +1,348 @@
---
controls:
version: "gke-1.0"
id: 1
text: "Control Plane Components"
type: "master"
groups:
- id: 1.1
text: "Master Node Configuration Files "
checks:
- id: 1.1.1
text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.2
text: "Ensure that the API server pod specification file ownership is set to root:root (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.3
text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.4
text: "Ensure that the controller manager pod specification file ownership is set to root:root (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.5
text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.6
text: "Ensure that the scheduler pod specification file ownership is set to root:root (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.7
text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.8
text: "Ensure that the etcd pod specification file ownership is set to root:root (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.9
text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.10
text: "Ensure that the Container Network Interface file ownership is set to root:root (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.11
text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.12
text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.13
text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.14
text: "Ensure that the admin.conf file ownership is set to root:root (Not Scored) "
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.15
text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.16
text: "Ensure that the scheduler.conf file ownership is set to root:root (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.17
text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.18
text: "Ensure that the controller-manager.conf file ownership is set to root:root (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.19
text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.20
text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.1.21
text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2
text: "API Server"
checks:
- id: 1.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.2
text: "Ensure that the --basic-auth-file argument is not set (Not Scored)"
remediation: |
Although the use of the --basic-auth-file argument cannot be audited on GKE, you can
remediate the use of basic authentication. See Recommendation 6.8.1.
scored: false
- id: 1.2.3
text: "Ensure that the --token-auth-file parameter is not set (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.4
text: "Ensure that the --kubelet-https argument is set to true (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.5
text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.6
text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.7
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.8
text: "Ensure that the --authorization-mode argument includes Node (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.9
text: "Ensure that the --authorization-mode argument includes RBAC (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.10
text: "Ensure that the admission control plugin EventRateLimit is set (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.11
text: "Ensure that the admission control plugin AlwaysAdmit is not set (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.12
text: "Ensure that the admission control plugin AlwaysPullImages is set (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.13
text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.14
text: "Ensure that the admission control plugin ServiceAccount is set (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.15
text: "Ensure that the admission control plugin NamespaceLifecycle is set (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.16
text: "Ensure that the admission control plugin PodSecurityPolicy is set (Not Scored)"
remediation: |
To verify and remediate the use of Pod Security Policy on GKE, see Recommendation 6.10.3.
scored: false
- id: 1.2.17
text: "Ensure that the admission control plugin NodeRestriction is set (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.18
text: "Ensure that the --insecure-bind-address argument is not set (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.19
text: "Ensure that the --insecure-port argument is set to 0 (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.20
text: "Ensure that the --secure-port argument is not set to 0 (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.21
text: "Ensure that the --profiling argument is set to false (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.22
text: "Ensure that the --audit-log-path argument is set (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.23
text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.24
text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.25
text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.26
text: "Ensure that the --request-timeout argument is set as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.27
text: "Ensure that the --service-account-lookup argument is set to true (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.28
text: "Ensure that the --service-account-key-file argument is set as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.29
text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.30
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.31
text: "Ensure that the --client-ca-file argument is set as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.32
text: "Ensure that the --etcd-cafile argument is set as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.2.33
text: "Ensure that the --encryption-provider-config argument is set as appropriate (Not Scored)"
remediation: |
To verify and remediate the use of secret encryption on GKE, see Recommendation 6.3.1.
scored: false
- id: 1.2.34
text: "Ensure that encryption providers are appropriately configured (Not Scored)"
remediation: |
To verify and remediate the use of secret encryption on GKE, see Recommendation 6.3.1.
scored: false
- id: 1.2.35
text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.3
text: "Controller Manager"
checks:
- id: 1.3.1
text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.3.2
text: "Ensure that the --profiling argument is set to false (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.3.3
text: "Ensure that the --use-service-account-credentials argument is set to true (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.3.4
text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.3.5
text: "Ensure that the --root-ca-file argument is set as appropriate (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.3.6
text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.3.7
text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.4
text: "Scheduler"
checks:
- id: 1.4.1
text: "Ensure that the --profiling argument is set to false (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 1.4.2
text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Not Scored) "
remediation: "This control cannot be modified in GKE."
scored: false

cfg/gke-1.0/node.yaml (new file)

@@ -0,0 +1,449 @@
---
controls:
version: "gke-1.0"
id: 4
text: "Worker Node Security Configuration"
type: "node"
groups:
- id: 4.1
text: "Worker Node Configuration Files"
checks:
- id: 4.1.1
text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 4.1.2
text: "Ensure that the kubelet service file ownership is set to root:root (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 4.1.3
text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)"
audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'' '
tests:
test_items:
- flag: "644"
compare:
op: eq
value: "644"
set: true
- flag: "640"
compare:
op: eq
value: "640"
set: true
- flag: "600"
compare:
op: eq
value: "600"
set: true
- flag: "444"
compare:
op: eq
value: "444"
set: true
- flag: "440"
compare:
op: eq
value: "440"
set: true
- flag: "400"
compare:
op: eq
value: "400"
set: true
- flag: "000"
compare:
op: eq
value: "000"
set: true
bin_op: or
remediation: |
Run the below command (based on the file location on your system) on each worker node.
For example,
chmod 644 $proxykubeconfig
scored: true
- id: 4.1.4
text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)"
audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
tests:
test_items:
- flag: root:root
set: true
remediation: |
Run the below command (based on the file location on your system) on each worker node.
For example, chown root:root $proxykubeconfig
scored: true
- id: 4.1.5
text: "Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 4.1.6
text: "Ensure that the kubelet.conf file ownership is set to root:root (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 4.1.7
text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 4.1.8
text: "Ensure that the client certificate authorities file ownership is set to root:root (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false
- id: 4.1.9
text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'' '
tests:
test_items:
- flag: "644"
set: true
compare:
op: eq
value: "644"
- flag: "640"
set: true
compare:
op: eq
value: "640"
- flag: "600"
set: true
compare:
op: eq
value: "600"
- flag: "444"
compare:
op: eq
value: "444"
set: true
- flag: "440"
compare:
op: eq
value: "440"
set: true
- flag: "400"
compare:
op: eq
value: "400"
set: true
- flag: "000"
compare:
op: eq
value: "000"
set: true
bin_op: or
remediation: |
Run the following command (using the config file location identified in the Audit step)
chmod 644 $kubeletconf
scored: true
- id: 4.1.10
text: "Ensure that the kubelet configuration file ownership is set to root:root (Scored)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
tests:
test_items:
- flag: root:root
set: true
remediation: |
Run the following command (using the config file location identified in the Audit step)
chown root:root $kubeletconf
scored: true
- id: 4.2
text: "Kubelet"
checks:
- id: 4.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: "--anonymous-auth"
path: '{.authentication.anonymous.enabled}'
set: true
compare:
op: eq
value: false
remediation: |
If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
false.
If using executable arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--anonymous-auth=false
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.2
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --authorization-mode
path: '{.authorization.mode}'
set: true
compare:
op: nothave
value: AlwaysAllow
remediation: |
If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If
using executable arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_AUTHZ_ARGS variable.
--authorization-mode=Webhook
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.3
text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --client-ca-file
path: '{.authentication.x509.clientCAFile}'
set: true
remediation: |
If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
the location of the client CA file.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_AUTHZ_ARGS variable.
--client-ca-file=<path/to/client-ca-file>
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.4
text: "Ensure that the --read-only-port argument is set to 0 (Scored)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: "--read-only-port"
path: '{.readOnlyPort}'
set: true
compare:
op: eq
value: 0
remediation: |
If using a Kubelet config file, edit the file to set readOnlyPort to 0.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--read-only-port=0
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.5
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --streaming-connection-idle-timeout
path: '{.streamingConnectionIdleTimeout}'
set: true
compare:
op: noteq
value: 0
- flag: --streaming-connection-idle-timeout
path: '{.streamingConnectionIdleTimeout}'
set: false
bin_op: or
remediation: |
If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
value other than 0.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--streaming-connection-idle-timeout=5m
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.6
text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --protect-kernel-defaults
path: '{.protectKernelDefaults}'
set: true
compare:
op: eq
value: true
remediation: |
If using a Kubelet config file, edit the file to set protectKernelDefaults: true.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--protect-kernel-defaults=true
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.7
text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored) "
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --make-iptables-util-chains
path: '{.makeIPTablesUtilChains}'
set: true
compare:
op: eq
value: true
- flag: --make-iptables-util-chains
path: '{.makeIPTablesUtilChains}'
set: false
bin_op: or
remediation: |
If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
remove the --make-iptables-util-chains argument from the
KUBELET_SYSTEM_PODS_ARGS variable.
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.8
text: "Ensure that the --hostname-override argument is not set (Scored)"
# This is one of those properties that can only be set as a command line argument.
# To check if the property is set as expected, we need to parse the kubelet command
# instead of reading the Kubelet Configuration file.
audit: "/bin/ps -fC $kubeletbin "
tests:
test_items:
- flag: --hostname-override
set: false
remediation: |
Edit the kubelet service file $kubeletsvc
on each worker node and remove the --hostname-override argument from the
KUBELET_SYSTEM_PODS_ARGS variable.
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.9
text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Scored)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --event-qps
path: '{.eventRecordQPS}'
set: true
compare:
op: eq
value: 0
remediation: |
If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.10
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --tls-cert-file
path: '{.tlsCertFile}'
set: true
- flag: --tls-private-key-file
path: '{.tlsPrivateKeyFile}'
set: true
remediation: |
If using a Kubelet config file, edit the file to set tlsCertFile to the location
of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile
to the location of the corresponding private key file.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
--tls-cert-file=<path/to/tls-certificate-file>
--tls-private-key-file=<path/to/tls-key-file>
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.11
text: "Ensure that the --rotate-certificates argument is not set to false (Scored)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --rotate-certificates
path: '{.rotateCertificates}'
set: true
compare:
op: eq
value: true
- flag: --rotate-certificates
path: '{.rotateCertificates}'
set: false
bin_op: or
remediation: |
If using a Kubelet config file, edit the file to add the line rotateCertificates: true or
remove it altogether to use the default value.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
variable.
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.12
text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: RotateKubeletServerCertificate
path: '{.featureGates.RotateKubeletServerCertificate}'
set: true
compare:
op: eq
value: true
remediation: |
Edit the kubelet service file $kubeletsvc
on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
--feature-gates=RotateKubeletServerCertificate=true
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.13
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)"
remediation: "This control cannot be modified in GKE."
scored: false

cfg/gke-1.0/policies.yaml (new file)

@@ -0,0 +1,239 @@
---
controls:
version: "gke-1.0"
id: 5
text: "Kubernetes Policies"
type: "policies"
groups:
- id: 5.1
text: "RBAC and Service Accounts"
checks:
- id: 5.1.1
text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
type: "manual"
remediation: |
Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
if they need this role or if they could use a role with fewer privileges.
Where possible, first bind users to a lower privileged role and then remove the
clusterrolebinding to the cluster-admin role :
kubectl delete clusterrolebinding [name]
scored: false
- id: 5.1.2
text: "Minimize access to secrets (Not Scored)"
type: "manual"
remediation: |
Where possible, remove get, list and watch access to secret objects in the cluster.
scored: false
- id: 5.1.3
text: "Minimize wildcard use in Roles and ClusterRoles (Not Scored)"
type: "manual"
remediation: |
Where possible replace any use of wildcards in clusterroles and roles with specific
objects or actions.
scored: false
- id: 5.1.4
text: "Minimize access to create pods (Not Scored)"
type: "manual"
remediation: |
Where possible, remove create access to pod objects in the cluster.
scored: false
- id: 5.1.5
text: "Ensure that default service accounts are not actively used. (Scored)"
type: "manual"
remediation: |
Create explicit service accounts wherever a Kubernetes workload requires specific access
to the Kubernetes API server.
Modify the configuration of each default service account to include this value
automountServiceAccountToken: false
scored: true
- id: 5.1.6
text: "Ensure that Service Account Tokens are only mounted where necessary (Not Scored)"
type: "manual"
remediation: |
Modify the definition of pods and service accounts which do not need to mount service
account tokens to disable it.
scored: false
- id: 5.2
text: "Pod Security Policies"
checks:
- id: 5.2.1
text: "Minimize the admission of privileged containers (Not Scored)"
type: "manual"
remediation: |
Create a PSP as described in the Kubernetes documentation, ensuring that
the .spec.privileged field is omitted or set to false.
scored: false
- id: 5.2.2
text: "Minimize the admission of containers wishing to share the host process ID namespace (Scored)"
type: "manual"
remediation: |
Create a PSP as described in the Kubernetes documentation, ensuring that the
.spec.hostPID field is omitted or set to false.
scored: true
- id: 5.2.3
text: "Minimize the admission of containers wishing to share the host IPC namespace (Scored)"
type: "manual"
remediation: |
Create a PSP as described in the Kubernetes documentation, ensuring that the
.spec.hostIPC field is omitted or set to false.
scored: true
- id: 5.2.4
text: "Minimize the admission of containers wishing to share the host network namespace (Scored)"
type: "manual"
remediation: |
Create a PSP as described in the Kubernetes documentation, ensuring that the
.spec.hostNetwork field is omitted or set to false.
scored: true
- id: 5.2.5
text: "Minimize the admission of containers with allowPrivilegeEscalation (Scored)"
type: "manual"
remediation: |
Create a PSP as described in the Kubernetes documentation, ensuring that the
.spec.allowPrivilegeEscalation field is omitted or set to false.
scored: true
- id: 5.2.6
text: "Minimize the admission of root containers (Scored)"
type: "manual"
remediation: |
Create a PSP as described in the Kubernetes documentation, ensuring that the
.spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of
UIDs not including 0.
scored: true
- id: 5.2.7
text: "Minimize the admission of containers with the NET_RAW capability (Scored)"
type: "manual"
remediation: |
Create a PSP as described in the Kubernetes documentation, ensuring that the
.spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
scored: true
- id: 5.2.8
text: "Minimize the admission of containers with added capabilities (Scored)"
type: "manual"
remediation: |
Ensure that allowedCapabilities is not present in PSPs for the cluster unless
it is set to an empty array.
scored: true
- id: 5.2.9
text: "Minimize the admission of containers with capabilities assigned (Scored) "
type: "manual"
remediation: |
Review the use of capabilities in applications running on your cluster. Where a namespace
contains applications which do not require any Linux capabilities to operate, consider adding
a PSP which forbids the admission of containers which do not drop all capabilities.
scored: true
- id: 5.3
text: "Network Policies and CNI"
checks:
- id: 5.3.1
text: "Ensure that the CNI in use supports Network Policies (Not Scored)"
type: "manual"
remediation: |
To use a CNI plugin with Network Policy, enable Network Policy in GKE, and the CNI plugin
will be updated. See Recommendation 6.6.7.
scored: false
- id: 5.3.2
text: "Ensure that all Namespaces have Network Policies defined (Scored)"
type: "manual"
remediation: |
Follow the documentation and create NetworkPolicy objects as you need them.
scored: true
- id: 5.4
text: "Secrets Management"
checks:
- id: 5.4.1
text: "Prefer using secrets as files over secrets as environment variables (Not Scored)"
type: "manual"
remediation: |
If possible, rewrite application code to read secrets from mounted secret files, rather than
from environment variables.
scored: false
- id: 5.4.2
text: "Consider external secret storage (Not Scored)"
type: "manual"
remediation: |
Refer to the secrets management options offered by your cloud provider or a third-party
secrets management solution.
scored: false
- id: 5.5
text: "Extensible Admission Control"
checks:
- id: 5.5.1
text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
type: "manual"
remediation: |
Follow the Kubernetes documentation and setup image provenance.
See also Recommendation 6.10.5 for GKE specifically.
scored: false
- id: 5.6
text: "General Policies"
checks:
- id: 5.6.1
text: "Create administrative boundaries between resources using namespaces (Not Scored)"
type: "manual"
remediation: |
Follow the documentation and create namespaces for objects in your deployment as you need
them.
scored: false
- id: 5.6.2
text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Not Scored)"
type: "manual"
remediation: |
Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
would need to enable alpha features in the apiserver by passing the
"--feature-gates=AllAlpha=true" argument.
Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS
parameter to "--feature-gates=AllAlpha=true"
KUBE_API_ARGS="--feature-gates=AllAlpha=true"
Based on your system, restart the kube-apiserver service. For example:
systemctl restart kube-apiserver.service
Use annotations to enable the docker/default seccomp profile in your pod definitions. An
example is as below:
apiVersion: v1
kind: Pod
metadata:
  name: trustworthy-pod
  annotations:
    seccomp.security.alpha.kubernetes.io/pod: docker/default
spec:
  containers:
    - name: trustworthy-container
      image: sotrustworthy:latest
scored: false
- id: 5.6.3
text: "Apply Security Context to Your Pods and Containers (Not Scored)"
type: "manual"
remediation: |
Follow the Kubernetes documentation and apply security contexts to your pods. For a
suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
Containers.
scored: false
- id: 5.6.4
text: "The default namespace should not be used (Scored)"
type: "manual"
remediation: |
Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
resources and that all new resources are created in a specific namespace.
scored: true


@@ -55,6 +55,8 @@ const (
CONTROLPLANE NodeType = "controlplane"
// POLICIES a node to run policies from
POLICIES NodeType = "policies"
// MANAGEDSERVICES a node to run managedservices from
MANAGEDSERVICES NodeType = "managedservices"
// MANUAL Check Type
MANUAL string = "manual"


@@ -225,6 +225,8 @@ func loadConfig(nodetype check.NodeType) string {
file = etcdFile
case check.POLICIES:
file = policiesFile
case check.MANAGEDSERVICES:
file = managedservicesFile
}
benchmarkVersion, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
@@ -382,6 +384,7 @@ var benchmarkVersionToTargetsMap = map[string][]string{
"cis-1.3": []string{string(check.MASTER), string(check.NODE)},
"cis-1.4": []string{string(check.MASTER), string(check.NODE)},
"cis-1.5": []string{string(check.MASTER), string(check.NODE), string(check.CONTROLPLANE), string(check.ETCD), string(check.POLICIES)},
"gke-1.0": []string{string(check.MASTER), string(check.NODE), string(check.CONTROLPLANE), string(check.ETCD), string(check.POLICIES), string(check.MANAGEDSERVICES)},
}
// validTargets helps determine if the targets


@@ -210,6 +210,7 @@ func TestMapToCISVersion(t *testing.T) {
{kubeVersion: "1.15", succeed: true, exp: "cis-1.5"},
{kubeVersion: "1.16", succeed: true, exp: "cis-1.5"},
{kubeVersion: "1.17", succeed: true, exp: "cis-1.5"},
{kubeVersion: "gke-1.0", succeed: true, exp: "gke-1.0"},
{kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"},
{kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"},
{kubeVersion: "unknown", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: unknown"},
@@ -334,6 +335,7 @@ func TestGetBenchmarkVersion(t *testing.T) {
{n: "kubeVersion", kubeVersion: "1.11", benchmarkVersion: "", v: viperWithData, exp: "cis-1.3", callFn: withNoPath, succeed: true},
{n: "ocpVersion310", kubeVersion: "ocp-3.10", benchmarkVersion: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
{n: "ocpVersion311", kubeVersion: "ocp-3.11", benchmarkVersion: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
{n: "gke10", kubeVersion: "gke-1.0", benchmarkVersion: "", v: viperWithData, exp: "gke-1.0", callFn: withNoPath, succeed: true},
}
for _, c := range cases {
rv, err := c.callFn(c.kubeVersion, c.benchmarkVersion, c.v, getBenchmarkVersion)
@@ -388,6 +390,12 @@ func TestValidTargets(t *testing.T) {
targets: []string{"master", "node", "controlplane", "etcd", "policies"},
expected: true,
},
{
name: "gke-1.0 valid",
benchmark: "gke-1.0",
targets: []string{"master", "node", "controlplane", "etcd", "policies", "managedservices"},
expected: true,
},
}
for _, c := range cases {


@@ -33,27 +33,28 @@ type FilterOpts struct {
}
var (
envVarsPrefix = "KUBE_BENCH"
defaultKubeVersion = "1.11"
kubeVersion string
benchmarkVersion string
cfgFile string
cfgDir = "./cfg/"
jsonFmt bool
junitFmt bool
pgSQL bool
masterFile = "master.yaml"
nodeFile = "node.yaml"
etcdFile = "etcd.yaml"
controlplaneFile = "controlplane.yaml"
policiesFile = "policies.yaml"
noResults bool
noSummary bool
noRemediations bool
filterOpts FilterOpts
includeTestOutput bool
outputFile string
configFileError error
envVarsPrefix = "KUBE_BENCH"
defaultKubeVersion = "1.11"
kubeVersion string
benchmarkVersion string
cfgFile string
cfgDir = "./cfg/"
jsonFmt bool
junitFmt bool
pgSQL bool
masterFile = "master.yaml"
nodeFile = "node.yaml"
etcdFile = "etcd.yaml"
controlplaneFile = "controlplane.yaml"
policiesFile = "policies.yaml"
managedservicesFile = "managedservices.yaml"
noResults bool
noSummary bool
noRemediations bool
filterOpts FilterOpts
includeTestOutput bool
outputFile string
configFileError error
)
// RootCmd represents the base command when called without any subcommands
@@ -96,6 +97,13 @@ var RootCmd = &cobra.Command{
runChecks(check.POLICIES, loadConfig(check.POLICIES))
}
// Managedservices is only valid for GKE 1.0 and later,
// this is a gatekeeper for previous versions.
if validTargets(benchmarkVersion, []string{string(check.MANAGEDSERVICES)}) {
glog.V(1).Info("== Running managed services checks ==\n")
runChecks(check.MANAGEDSERVICES, loadConfig(check.MANAGEDSERVICES))
}
},
}

job-gke.yaml (new file)

@@ -0,0 +1,31 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: kube-bench
spec:
  template:
    spec:
      hostPID: true
      containers:
        - name: kube-bench
          image: aquasec/kube-bench:latest
          command: ["kube-bench", "--benchmark", "gke-1.0", "run", "--targets", "node,policies,managedservices"]
          volumeMounts:
            - name: var-lib-kubelet
              mountPath: /var/lib/kubelet
            - name: etc-systemd
              mountPath: /etc/systemd
            - name: etc-kubernetes
              mountPath: /etc/kubernetes
      restartPolicy: Never
      volumes:
        - name: var-lib-kubelet
          hostPath:
            path: "/var/lib/kubelet"
        - name: etc-systemd
          hostPath:
            path: "/etc/systemd"
        - name: etc-kubernetes
          hostPath:
            path: "/etc/kubernetes"