
Merge branch 'gke-stigs' into gcp-scc-publisher

Carter Williamson 2025-03-19 15:50:51 -07:00
commit 414f456de7
8 changed files with 1034 additions and 2 deletions


@@ -447,6 +447,11 @@ target_mapping:
- "controlplane"
- "policies"
- "managedservices"
"gke-stig-kubernetes-v2r2":
- "node"
- "controlplane"
- "policies"
- "managedservices"
"tkgi-1.2.53":
- "master"
- "etcd"


@@ -0,0 +1,7 @@
---
## Version-specific settings that override the values in cfg/config.yaml
node:
kubelet:
confs:
- "/home/kubernetes/kubelet-config.yaml"
- "/etc/kubernetes/kubelet-config.yaml"


@@ -0,0 +1,51 @@
---
controls:
version: "gke-stig-kubernetes-v1r6"
id: 2
text: "Control Plane Configuration"
type: "controlplane"
groups:
- id: 2.1
text: "DISA Category Code I - API Server Security"
checks:
- id: V-242400
text: "The Kubernetes API server must have Alpha APIs disabled"
type: "manual"
remediation: |
Check the release channel using the GCP gcloud CLI.
gcloud container clusters describe <ClusterName> --region <RegionName> --format json | jq -r '.releaseChannel.channel'
This should be set to "STABLE". Any "Alpha" clusters will need to be rebuilt on the STABLE release channel.
- id: 2.2
text: "DISA Category Code II - Controller Manager Security"
checks:
- id: V-242443
text: " Kubernetes must contain the latest updates as authorized by IAVMs, CTOs, DTMs, and STIGs. (Manual)"
type: "manual"
remediation: |
Upgrade Kubernetes to a supported version.
- id: V-242461
text: "Kubernetes API Server audit logs must be enabled. (Manual)"
type: "manual"
remediation: |
Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
Ref: https://cloud.google.com/kubernetes-engine/docs/how-to/view-logs#control-plane-access-logs
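# Illustrative gcloud sketch for the remediation above (assumed flag values; check the linked
# GKE documentation for the currently supported --logging components):
#   gcloud container clusters update <ClusterName> --region <RegionName> \
#     --logging=SYSTEM,WORKLOAD,API_SERVER,SCHEDULER,CONTROLLER_MANAGER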
- id: V-242462
text: "The Kubernetes API Server must be set to audit log max size | Component of GKE Control Plane"
type: "skip"
- id: V-242463
text: "The Kubernetes API Server must be set to audit log maximum backup | Component of GKE Control Plane"
type: "skip"
- id: V-242464
text: "The Kubernetes API Server audit log retention must be set | Component of GKE Control Plane"
type: "skip"
- id: V-242394
text: "The Kubernetes API Server audit log path must be set | Component of GKE Control Plane"
type: "skip"


@@ -0,0 +1,245 @@
---
controls:
version: "gke-stig-kubernetes-v2r2"
id: 5
text: "Managed Services"
type: "managedservices"
groups:
- id: 5.1
text: "DISA Category Code I"
checks:
- id: V-242386
text: "The Kubernetes API server must have the insecure port flag disabled | Component of GKE Control Plane"
type: "skip"
- id: V-242388
text: "The Kubernetes API server must have the insecure bind address not set | Component of GKE Control Plane"
type: "skip"
- id: V-242436
text: "The Kubernetes API server must have the ValidatingAdmissionWebhook enabled | Component of GKE Control Plane"
type: "skip"
- id: V-242437
text: "[Deprecated] Kubernetes must have a pod security policy set. policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+"
type: "skip"
- id: V-245542
text: "Kubernetes API Server must disable basic authentication to protect information in transit | Component of GKE Control Plane"
type: "skip"
- id: 5.2
text: "DISA Category Code II"
checks:
- id: V-242376
text: "The Kubernetes Controller Manager must use TLS 1.2, at a minimum | Component of GKE Control Plane"
type: "skip"
- id: V-242377
text: "The Kubernetes Scheduler must use TLS 1.2, at a minimum | Component of GKE Control Plane"
type: "skip"
- id: V-242378
text: "The Kubernetes API Server must use TLS 1.2, at a minimum | Component of GKE Control Plane"
type: "skip"
- id: V-242379
text: "The Kubernetes etcd must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of GKE Control Plane"
type: "skip"
- id: V-242380
text: "The Kubernetes API Server must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of GKE Control Plane"
type: "skip"
- id: V-242382
text: "The Kubernetes API Server must enable Node,RBAC as the authorization mode | Component of GKE Control Plane"
type: "skip"
- id: V-242384
text: "The Kubernetes Scheduler must have secure binding | Component of GKE Control Plane"
type: "skip"
- id: V-242385
text: "The Kubernetes Controller Manager must have secure binding | Component of GKE Control Plane"
type: "skip"
- id: V-242389
text: "The Kubernetes API server must have the secure port set | Component of GKE Control Plane"
type: "skip"
- id: V-242401
text: "The Kubernetes API Server must have an audit policy set | Component of GKE Control Plane"
type: "skip"
- id: V-242402
text: "The Kubernetes API Server must have an audit log path set | Component of GKE Control Plane"
type: "skip"
- id: V-242403
text: "Kubernetes API Server must generate audit records | Component of GKE Control Plane"
type: "skip"
- id: V-242405
text: "The Kubernetes manifests must be owned by root | Component of GKE Control Plane"
type: "skip"
- id: V-242408
text: "The Kubernetes manifests must have least privileges | Component of GKE Control Plane"
type: "skip"
- id: V-242409
text: "Kubernetes Controller Manager must disable profiling | Component of GKE Control Plane"
type: "skip"
- id: V-242410
text: "The Kubernetes API Server must enforce PPS that adhere to PPSM CAL | Component of GKE Control Plane"
type: "skip"
- id: V-242411
text: "The Kubernetes Scheduler must enforce PPS that adhere to PPSM CAL | Component of GKE Control Plane"
type: "skip"
- id: V-242412
text: "The Kubernetes Controllers must enforce PPS that adhere to PPSM CAL | Component of GKE Control Plane"
type: "skip"
- id: V-242413
text: "The Kubernetes etcd must enforce PPS that adhere to PPSM CAL | Component of GKE Control Plane"
type: "skip"
- id: V-242418
text: "The Kubernetes API server must use approved cipher suites | Component of GKE Control Plane"
type: "skip"
- id: V-242419
text: "Kubernetes API Server must have the SSL Certificate Authority set | Component of GKE Control Plane"
type: "skip"
- id: V-242421
text: "Kubernetes Controller Manager must have the SSL Certificate Authority set | Component of GKE Control Plane"
type: "skip"
- id: V-242422
text: "Kubernetes API Server must have a certificate for communication | Component of GKE Control Plane"
type: "skip"
- id: V-242423
text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane"
type: "skip"
- id: V-242424
text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane"
type: "skip"
- id: V-242425
text: "Kubernetes Kubelet must enable tls-cert-file for client authentication to secure service | Component of GKE Control Plane"
type: "skip"
- id: V-242426
text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane"
type: "skip"
- id: V-242427
text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane"
type: "skip"
- id: V-242428
text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane"
type: "skip"
- id: V-242429
text: "Kubernetes etcd must have the SSL Certificate Authority set | Component of GKE Control Plane"
type: "skip"
- id: V-242430
text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane"
type: "skip"
- id: V-242431
text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane"
type: "skip"
- id: V-242432
text: "Kubernetes etcd must have peer-cert-file set for secure communication | Component of GKE Control Plane"
type: "skip"
- id: V-242433
text: "Kubernetes etcd must have a peer-key-file set for secure communication | Component of GKE Control Plane"
type: "skip"
- id: V-242438
text: "Kubernetes API Server must configure timeouts to limit attack surface | Component of GKE Control Plane"
type: "skip"
- id: V-242444
text: "The Kubernetes component manifests must be owned by root | Component of GKE Control Plane"
type: "skip"
- id: V-242445
text: "The Kubernetes component etcd must be owned by etcd | Component of GKE Control Plane"
type: "skip"
- id: V-242446
text: "The Kubernetes conf files must be owned by root | Component of GKE Control Plane"
type: "skip"
- id: V-242447
text: "The Kubernetes Kube Proxy must have file permissions set to 644 or more restrictive | Component of GKE Control Plane"
type: "skip"
- id: V-242448
text: "The Kubernetes Kube Proxy must be owned by root | Component of GKE Control Plane"
type: "skip"
- id: V-242449
text: "The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive | Component of GKE Control Plane"
type: "skip"
- id: V-242450
text: "The Kubernetes Kubelet certificate authority must be owned by root | Component of GKE Control Plane"
type: "skip"
- id: V-242451
text: "The Kubernetes component PKI must be owned by root | Component of GKE Control Plane"
type: "skip"
- id: V-242459
text: "The Kubernetes etcd must have file permissions set to 644 or more restrictive | Component of GKE Control Plane"
type: "skip"
- id: V-242460
text: "The Kubernetes admin.conf must have file permissions set to 644 or more restrictive | Component of GKE Control Plane"
type: "skip"
- id: V-242466
text: "The Kubernetes PKI CRT must have file permissions set to 644 or more restrictive | Component of GKE Control Plane"
type: "skip"
- id: V-242467
text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive | Component of GKE Control Plane"
type: "skip"
- id: V-242468
text: "The Kubernetes API Server must prohibit communication using TLS version 1.0 and 1.1, and SSL 2.0 and 3.0 | Component of GKE Control Plane"
type: "skip"
- id: V-245543
text: "Kubernetes API Server must disable token authentication to protect information in transit | Component of GKE Control Plane"
type: "skip"
- id: V-245544
text: "Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit | Component of GKE Control Plane"
type: "skip"
- id: V-254800
text: "Kubernetes must have a Pod Security Admission control file configured. | Component of GKE Control Plane"
type: "skip"
- id: V-254801
text: "Kubernetes must enable PodSecurity admission controller on static pods and Kubelets. | Component of GKE Control Plane"
type: "skip"
- id: V-242394
text: "Kubernetes Worker Nodes must not have the sshd service enabled | Component of GKE Control Plane"
type: "skip"


@@ -0,0 +1,608 @@
---
controls:
version: "gke-stig-kubernetes-v1r6"
id: 3
text: "Node Configuration"
type: "node"
groups:
- id: 3.1
text: "DISA Category Code I"
checks:
- id: V-242387 # CIS 3.2.4
text: "The Kubernetes Kubelet must have the read-only port flag disabled"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: "--read-only-port"
path: '{.readOnlyPort}'
set: false
- path: '{.readOnlyPort}'
compare:
op: eq
value: 0
bin_op: or
remediation: |
If modifying the Kubelet config file, edit the kubelet-config.json file
$kubeletconf and set the below parameter to 0
"readOnlyPort": 0
If using executable arguments, edit the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
worker node and add the below parameter at the end of the KUBELET_ARGS variable
string.
--read-only-port=0
For each remediation:
Based on your system, restart the kubelet service and check status
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: V-242391 # CIS 3.2.1
text: "Ensure that the Anonymous Auth is Not Enabled (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: "--anonymous-auth"
path: '{.authentication.anonymous.enabled}'
compare:
op: eq
value: false
remediation: |
Remediation Method 1:
If configuring via the Kubelet config file, you first need to locate the file.
To do this, SSH to each node and execute the following command to find the kubelet
process:
ps -ef | grep kubelet
The output of the above command provides details of the active kubelet process, from
which we can see the location of the configuration file provided to the kubelet service
with the --config argument. The file can be viewed with a command such as more or
less, like so:
sudo less $kubeletconf
Disable Anonymous Authentication by setting the following parameter:
"authentication": { "anonymous": { "enabled": false } }
Remediation Method 2:
If using executable arguments, edit the kubelet service file on each worker node and
ensure the below parameters are part of the KUBELET_ARGS variable string.
For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
Bottlerocket AMIs, then this file can be found at
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
you may need to look up documentation for your chosen operating system to determine
which service manager is configured:
--anonymous-auth=false
For Both Remediation Steps:
Based on your system, restart the kubelet service and check the service status.
The following example is for operating systems using systemd, such as the Amazon
EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
command. If systemctl is not available then you will need to look up documentation for
your chosen operating system to determine which service manager is configured:
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: V-242392 # CIS 3.2.2
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --authorization-mode
path: '{.authorization.mode}'
compare:
op: nothave
value: AlwaysAllow
remediation: |
Remediation Method 1:
If configuring via the Kubelet config file, you first need to locate the file.
To do this, SSH to each node and execute the following command to find the kubelet
process:
ps -ef | grep kubelet
The output of the above command provides details of the active kubelet process, from
which we can see the location of the configuration file provided to the kubelet service
with the --config argument. The file can be viewed with a command such as more or
less, like so:
sudo less /path/to/kubelet-config.json
Enable Webhook Authentication by setting the following parameter:
"authentication": { "webhook": { "enabled": true } }
Next, set the Authorization Mode to Webhook by setting the following parameter:
"authorization": { "mode": "Webhook }
Finer detail of the authentication and authorization fields can be found in the
Kubelet Configuration documentation (https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).
Remediation Method 2:
If using executable arguments, edit the kubelet service file on each worker node and
ensure the below parameters are part of the KUBELET_ARGS variable string.
For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
Bottlerocket AMIs, then this file can be found at
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
you may need to look up documentation for your chosen operating system to determine
which service manager is configured:
--authentication-token-webhook
--authorization-mode=Webhook
For Both Remediation Steps:
Based on your system, restart the kubelet service and check the service status.
The following example is for operating systems using systemd, such as the Amazon
EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
command. If systemctl is not available then you will need to look up documentation for
your chosen operating system to determine which service manager is configured:
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: V-242395
text: "Kubernetes dashboard must not be enabled."
audit: "kubectl get pods --all-namespaces -l k8s-app=kubernetes-dashboard"
tests:
test_items:
- flag: "k8s-app=kubernetes-dashboard"
set: false
remediation: |
Delete the Kubernetes dashboard deployment with the following command:
kubectl delete deployment kubernetes-dashboard --namespace=kube-system
scored: true
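# Note: the audit above lists pods cluster-wide, which is why job-gke-stig.yaml (later in this
# commit) binds the kube-bench ServiceAccount to a ClusterRole with "list" on "pods"
# (see "Service account role required for 242395" in that manifest).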
- id: V-242396
text: "Kubernetes Kubectl cp command must give expected access and results. (Manual)"
type: "manual"
# audit: "kubectl version --client --output=yaml | grep 'gitVersion' | sed -E 's/.*v([0-9]+)\\.([0-9]+)\\.([0-9]+)/major=\\1\\nminor=\\2\\npatch=\\3/'"
# tests:
# bin_op: or
# test_items:
# - flag: "major="
# compare:
# op: gte
# value: 1
# - flag: "minor="
# compare:
# op: gte
# value: 12
# - flag: "patch="
# compare:
# op: gte
# value: 9
remediation: |
If any Worker nodes are not using kubectl version 1.12.9 or newer, this is a finding.
Upgrade the Master and Worker nodes to the latest version of kubectl.
scored: false
- id: V-242397
text: "The Kubernetes kubelet staticPodPath must not enable static pods."
audit: "ps -ef | grep $kubeletbin | grep -- --config"
tests:
bin_op: or
test_items:
- flag: "staticPodPath"
set: false
- path: '{.staticPodPath}'
set: false
remediation: |
Edit the Kubernetes kubelet configuration file.
Remove the setting "staticPodPath".
Restart the kubelet service using:
systemctl daemon-reload && systemctl restart kubelet
scored: true
- id: V-242398
text: "Kubernetes DynamicAuditing must not be enabled. (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
bin_op: or
test_items:
- flag: "--feature-gates"
compare:
op: nothave
value: "DynamicAuditing=true"
set: true
- flag: "--feature-gates"
set: false
remediation: |
Edit any manifest files or kubelet config files that contain the feature-gates
setting with DynamicAuditing set to "true".
Set the flag to "false" or remove the "DynamicAuditing" setting
completely. Restart the kubelet service if the kubelet config file is changed.
scored: true
- id: V-242399
text: "Kubernetes DynamicKubeletConfig must not be enabled. (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
bin_op: or
test_items:
- flag: "--feature-gates"
compare:
op: nothave
value: "DynamicKubeletConfig=true"
set: true
- flag: "--feature-gates"
set: false
remediation: |
Edit any manifest files or $kubeletconf that contain the feature-gates
setting with DynamicKubeletConfig set to "true".
Set the flag to "false" or remove the "DynamicKubeletConfig" setting
completely. Restart the kubelet service if the kubelet config file is changed.
scored: true
- id: V-242404 # CIS 3.2.8
text: "Ensure that the --rotate-certificates argument is not present or is set to true (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --rotate-certificates
path: '{.rotateCertificates}'
compare:
op: eq
value: true
- flag: --rotate-certificates
path: '{.rotateCertificates}'
set: false
bin_op: or
remediation: |
Remediation Method 1:
If modifying the Kubelet config file, edit the kubelet-config.yaml file
/etc/kubernetes/kubelet/kubelet-config.yaml and set the below parameter to
true
"RotateCertificate":true
Additionally, ensure that the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --rotate-certificates
executable argument to false because this would override the Kubelet
config file.
Remediation Method 2:
If using executable arguments, edit the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
worker node and add the below parameter at the end of the KUBELET_ARGS variable
string.
--rotate-certificates=true
scored: true
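# Illustrative kubelet config fragment for this check (field name taken from the JSONPath in the test):
#   rotateCertificates: true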
- id: V-242406
text: "The Kubernetes kubelet configuration file must be owned by root (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
tests:
test_items:
- flag: root:root
remediation: |
Run the below command (based on the file location on your system) on the each worker node.
For example,
chown root:root $kubeletkubeconfig
scored: true
- id: V-242407
text: "The Kubernetes kubelet configuration files must have file permissions set to 644 or more restrictive (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
Run the following command (using the config file location identified in the Audit step)
chmod 644 $kubeletconf
scored: true
- id: V-242414
text: "The Kubernetes cluster must use non-privileged host ports for user pods. (Manual)"
type: "manual"
remediation: |
For any of the pods that are using ports below 1024,
reconfigure the pod to use a service to map a host non-privileged
port to the pod port or reconfigure the image to use non-privileged ports.
kubectl get services -A -o json | jq '.items[].spec.ports'
Note: this should exempt non-configurable services from the GKE managed service, such as anthos, gatewaykeeper, kubelet, etc.
scored: false
- id: V-242415
text: "Secrets in Kubernetes must not be stored as environment variables.(Manual)"
type: "manual"
remediation: |
Run the following command:
kubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}' -A
If any of the values returned reference environment variables
rewrite application code to read secrets from mounted secret files, rather than
from environment variables.
scored: false
- id: V-242442
text: "Kubernetes must remove old components after updated versions have been installed. (Manual)"
type: "manual"
remediation: |
To view all pods and the images used to create the pods, from the Master node, run the following command:
kubectl get pods --all-namespaces -o jsonpath="{..image}" | \
tr -s '[[:space:]]' '\n' | \
sort | \
uniq -c
Review the images used for pods running within Kubernetes.
Remove any old pods that are using older images.
scored: false
- id: 3.2
text: "DISA Category Code II - Node Security"
checks:
# TODO Verify this.. seems to be failing but also not sure if this can be disabled with GKE
- id: V-242393
text: "Kubernetes Worker Nodes must not have sshd service running. (Automated)"
audit: 'ps aux | grep sshd'
tests:
test_items:
- flag: bin/sshd
set: false
remediation: |
To stop the sshd service, run the command: systemctl stop sshd
scored: true
# TODO Verify this, low confidence this will work
# Both of these are not working at the moment
# - id: V-242394
# text: "Kubernetes Worker Nodes must not have the sshd service enabled. (Automated)"
# audit: "/bin/sh -c 'systemctl list-unit-files | grep sshd'"
# tests:
# bin_op:
# test_items:
# - flag: "disabled"
# - flag: "sshd"
# set: false
# remediation: |
# To disable the sshd service, run the command:
# chkconfig sshd off
# scored: true
# - id: V-242394
# text: "Kubernetes Worker Nodes must not have the sshd service enabled."
# audit: "systemctl is-enabled sshd"
# tests:
# test_items:
# - flag: "sshd"
# compare:
# op: eq
# value: "disabled"
# remediation: |
# To disable the sshd service, run the command:
# systemctl disable sshd
# scored: true
- id: V-242434 # CIS 3.2.6
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --make-iptables-util-chains
path: '{.makeIPTablesUtilChains}'
compare:
op: eq
value: true
- flag: --make-iptables-util-chains
path: '{.makeIPTablesUtilChains}'
set: false
bin_op: or
remediation: |
Remediation Method 1:
If modifying the Kubelet config file, edit the kubelet-config.json file
/etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
true
"makeIPTablesUtilChains": true
Ensure that /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf
does not set the --make-iptables-util-chains argument because that would
override your Kubelet config file.
Remediation Method 2:
If using executable arguments, edit the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
worker node and add the below parameter at the end of the KUBELET_ARGS variable
string.
--make-iptables-util-chains=true
Remediation Method 3:
If using the api configz endpoint, consider searching for the status of
"makeIPTablesUtilChains": true by extracting the live configuration from the nodes
running kubelet.
**See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
Live Cluster (https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/),
and then rerun the curl statement from the audit process to check for kubelet
configuration changes.
kubectl proxy --port=8001 &
export HOSTNAME_PORT=localhost:8001 (example host and port number)
export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from
"kubectl get nodes")
curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"
For all three remediations:
Based on your system, restart the kubelet service and check status
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: V-242420
text: "Kubernetes Kubelet must have the SSL Certificate Authority set."
audit: "ps -ef | grep kubelet"
audit_config: "/bin/cat $kubeletconf"
tests:
bin_op: or
test_items:
- flag: "--client-ca-file"
set: true
- path: "{.authentication.x509.clientCAFile}"
set: true
remediation: |
On the Control Plane, run the command:
ps -ef | grep kubelet
If the "--client-ca-file" option exists, verify its value is correctly set.
Note the path to the config file (identified by --config).
Edit the Kubernetes Kubelet config file:
Set the value of "clientCAFile" to a path containing an Approved Organizational Certificate.
Restart the kubelet service using the following command:
systemctl daemon-reload && systemctl restart kubelet
scored: false
- id: V-242452
text: "The Kubernetes kubelet KubeConfig must have file permissions set to 644 or more restrictive."
audit: "stat -c %a $kubeletconf"
tests:
test_items:
- flag: "644"
compare:
op: lte
value: "644"
remediation: |
Change the permissions of the Kubelet KubeConfig file to 644 by executing the command:
chmod 644 $kubeletconf
scored: false
- id: V-242453
text: "The Kubernetes kubelet KubeConfig file must be owned by root."
audit: "stat -c %U:%G $kubeletconf"
tests:
test_items:
- flag: "root:root"
set: true
remediation: |
Change the ownership of the kubelet.conf file to root:root by executing the command:
chown root:root $kubeletconf
scored: false
- id: V-242454
text: "The Kubernetes kubeadm.conf must be owned by root."
audit: "stat -c %U:%G $kubeletsvc"
tests:
test_items:
- flag: "root:root"
set: true
remediation: |
Change the ownership of the kubeadm.conf to root:root by executing the command:
chown root:root $kubeletsvc
scored: false
- id: V-242455
text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive."
audit: "stat -c %a $kubeletsvc"
tests:
test_items:
- flag: "644"
compare:
op: lte
value: "644"
remediation: |
Change the permissions of the kubeadm.conf to 644 by executing the command:
chmod 644 $kubeletsvc
scored: false
- id: V-242456
text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive."
audit: "stat -c %a $kubeletconf"
tests:
test_items:
- flag: "644"
compare:
op: lte
value: "644"
remediation: |
Change the permissions of the config.yaml to 644 by executing the command:
chmod 644 $kubeletconf
scored: false
- id: V-242457
text: "The Kubernetes kubelet config must be owned by root."
audit: "stat -c %U:%G $kubeletconf"
tests:
test_items:
- flag: "root:root"
set: true
remediation: |
Change the ownership of the kubelet config file to root:root by executing the command:
chown root:root $kubeletconf
scored: false
- id: V-245541
text: "Kubernetes Kubelet must not disable timeouts."
audit: "ps -ef | grep kubelet"
audit_config: "/bin/cat $kubeletconf"
tests:
bin_op: and
test_items:
- flag: "--streaming-connection-idle-timeout"
set: false
- path: "{.streamingConnectionIdleTimeout}"
set: true
compare:
op: gte
value: "5m"
remediation: |
On the Control Plane, run the command:
ps -ef | grep kubelet
If the "--streaming-connection-idle-timeout" option exists, verify its value.
Edit the Kubernetes Kubelet config file:
Set the argument "streamingConnectionIdleTimeout" to a value of "5m" or greater.
Restart the kubelet service using the following command:
systemctl daemon-reload && systemctl restart kubelet
scored: true
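# Example kubelet config setting satisfying this check (sketch; field name from the test path):
#   streamingConnectionIdleTimeout: 5m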
- id: V-242390 # Similar to CIS 3.2.1
text: "The Kubernetes API server must have anonymous authentication disabled (Automated)"
# audit: "/bin/ps -fC kubelet"
audit: "/bin/ps -fC $kubeletbin"
# audit_config: "/bin/cat /etc/kubernetes/kubelet-config.yaml"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: "--anonymous-auth"
path: '{.authentication.anonymous.enabled}'
set: true
compare:
op: eq
value: false
remediation: |
If using a Kubelet config file, edit $kubeletconf to set authentication: anonymous: enabled to
false.
If using executable arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--anonymous-auth=false
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service


@@ -0,0 +1,34 @@
---
controls:
version: "gke-stig-kubernetes-v1r6"
id: 4
text: "Kubernetes Security Policies"
type: "policies"
groups:
- id: 4.1
text: "DISA Category Code I - Pod Security Policies"
checks:
- id: V-242381
text: "The Kubernetes Controller Manager must create unique service accounts for each work payload. (Manual)"
type: "manual"
remediation: |
Create explicit service accounts wherever a Kubernetes workload requires specific access
to the Kubernetes API server.
Modify the configuration of each default service account to include this value
automountServiceAccountToken: false
scored: false
- id: V-242383
text: "User-managed resources must be created in dedicated namespaces. (Manual)"
type: "manual"
remediation: |
Move any user-managed resources from the default, kube-public and kube-node-lease namespaces, to user namespaces.
scored: false
- id: V-242417
text: "Kubernetes must separate user functionality. (Manual)"
type: "manual"
remediation: |
Move any user pods that are present in the Kubernetes system namespaces to user-specific namespaces.
scored: false


@@ -67,8 +67,8 @@ var (
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: os.Args[0],
Short: "Run CIS Benchmarks checks against a Kubernetes deployment",
Long: `This tool runs the CIS Kubernetes Benchmark (https://www.cisecurity.org/benchmark/kubernetes/)`,
Short: "Run CIS and STIG Benchmarks checks against a Kubernetes deployment",
Long: `This tool runs the CIS and STIG Kubernetes Benchmark (https://www.cisecurity.org/benchmark/kubernetes/)`,
Run: func(cmd *cobra.Command, args []string) {
bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformInfo(), viper.GetViper())
if err != nil {

82
job-gke-stig.yaml Normal file
View File

@@ -0,0 +1,82 @@
# Service account role required for 242395
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-bench-sa
namespace: kube-bench
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kube-bench-list-pods
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list"]
resourceNames: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kube-bench-sa-binding
subjects:
- kind: ServiceAccount
name: kube-bench-sa
namespace: kube-bench
roleRef:
kind: ClusterRole
name: kube-bench-list-pods
apiGroup: rbac.authorization.k8s.io
---
apiVersion: batch/v1
kind: Job
metadata:
name: kube-bench
spec:
template:
spec:
serviceAccountName: kube-bench-sa
hostPID: true
containers:
- name: kube-bench
imagePullPolicy: Always
# Push the image to your GCP Artifact Registry and then refer to it here
# image: <region>-docker.pkg.dev/<registry>/<repository>/kube-bench:latest
image: docker.io/aquasec/kube-bench:latest
command:
[
"kube-bench",
"run",
"--benchmark",
"gke-stig-kubernetes-v2r2"
]
volumeMounts:
- name: var-lib-kubelet
mountPath: /var/lib/kubelet
readOnly: true
- name: etc-systemd
mountPath: /etc/systemd
readOnly: true
- name: etc-kubernetes
mountPath: /etc/kubernetes
readOnly: true
- name: home-kubernetes
mountPath: /home/kubernetes
readOnly: true
restartPolicy: Never
volumes:
- name: var-lib-kubelet
hostPath:
path: "/var/lib/kubelet"
- name: etc-systemd
hostPath:
path: "/etc/systemd"
- name: etc-kubernetes
hostPath:
path: "/etc/kubernetes"
- name: home-kubernetes
hostPath:
path: "/home/kubernetes"