mirror of https://github.com/aquasecurity/kube-bench.git synced 2024-11-28 19:08:15 +00:00

Canonical K8s configuration

Konstantinos Tsakalozos 2024-06-12 12:20:28 +03:00
parent d8fc37649a
commit dc963f5561
7 changed files with 1928 additions and 0 deletions


@@ -0,0 +1,64 @@
---
## Version-specific settings that override the values in cfg/config.yaml
master:
apiserver:
confs:
- /var/snap/k8s/common/args/kube-apiserver
bins:
- kube-apiserver
scheduler:
confs:
- /var/snap/k8s/common/args/kube-scheduler
kubeconfig:
- /etc/kubernetes/scheduler.conf
bins:
- kube-scheduler
controllermanager:
confs:
- /var/snap/k8s/common/args/kube-controller-manager
bins:
- kube-controller-manager
kubeconfig:
- /etc/kubernetes/controller.conf
etcd:
confs:
- /var/snap/k8s/common/args/k8s-dqlite
bins:
- k8s-dqlite
etcd:
components:
- etcd
etcd:
bins:
- "k8s-dqlite"
confs:
      - /var/snap/k8s/common/args/k8s-dqlite
    defaultconf: /var/snap/k8s/common/args/k8s-dqlite
defaultdatadir: /var/snap/k8s/common/var/lib/k8s-dqlite
node:
kubelet:
cafile:
- "/etc/kubernetes/pki/client-ca.crt"
svc:
- "/etc/systemd/system/snap.k8s.kubelet.service"
bins:
- "bin/kubelet"
confs:
- "/var/snap/k8s/common/args/kubelet"
kubeconfig:
- "/etc/kubernetes/kubelet.conf"
proxy:
confs:
- /var/snap/k8s/common/args/kube-proxy
kubeconfig:
- /etc/kubernetes/proxy.conf
svc:
- "/etc/systemd/system/snap.k8s.kube-proxy.service"
controlplane:
apiserver:
confs:
- /var/snap/k8s/common/args/kube-apiserver
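
These stanzas override the component defaults shipped in kube-bench's cfg/config.yaml; only the keys listed here change, and everything else is inherited. For comparison, the inherited API server defaults look roughly like the following (abbreviated, and the exact lists vary between kube-bench releases):

```
master:
  apiserver:
    bins:
      - "kube-apiserver"
      - "hyperkube apiserver"
      - "apiserver"
    confs:
      - /etc/kubernetes/manifests/kube-apiserver.yaml
    defaultconf: /etc/kubernetes/manifests/kube-apiserver.yaml
```

With the overrides above, variables such as $apiserverconf and $apiserverbin in the checks that follow resolve to the Canonical K8s snap paths rather than the kubeadm defaults.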


@@ -0,0 +1,48 @@
---
controls:
version: "cis-1.24"
id: 3
text: "Control Plane Configuration"
type: "controlplane"
groups:
- id: 3.1
text: "Authentication and Authorization"
checks:
- id: 3.1.1
text: "Client certificate authentication should not be used for users (Manual)"
type: "manual"
remediation: |
Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
implemented in place of client certificates.
scored: false
- id: 3.2
text: "Logging"
checks:
- id: 3.2.1
text: "Ensure that a minimal audit policy is created (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-policy-file"
set: true
remediation: |
Create an audit policy file for your cluster.
scored: false
- id: 3.2.2
text: "Ensure that the audit policy covers key security concerns (Manual)"
type: "manual"
remediation: |
Review the audit policy provided for the cluster and ensure that it covers
at least the following areas,
- Access to Secrets managed by the cluster. Care should be taken to only
log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
order to avoid risk of logging sensitive data.
- Modification of Pod and Deployment objects.
- Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
For most requests, minimally logging at the Metadata level is recommended
(the most basic level of logging).
scored: false
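
Checks 3.2.1 and 3.2.2 leave the audit policy itself to the operator. A minimal sketch of a policy covering the areas listed in 3.2.2 (the rule granularity and the file location are operator choices, not something this benchmark mandates):

```
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
  # Secrets, ConfigMaps and TokenReviews only at Metadata level,
  # to avoid logging sensitive payloads.
  - level: Metadata
    resources:
      - group: ""
        resources: ["secrets", "configmaps"]
      - group: "authentication.k8s.io"
        resources: ["tokenreviews"]
  # Record modifications of Pods and Deployments.
  - level: RequestResponse
    verbs: ["create", "update", "patch", "delete"]
    resources:
      - group: ""
        resources: ["pods"]
      - group: "apps"
        resources: ["deployments"]
  # Record use of exec, port-forward and proxy subresources.
  - level: Metadata
    resources:
      - group: ""
        resources: ["pods/exec", "pods/portforward", "pods/proxy", "services/proxy"]
  # Everything else at the most basic level.
  - level: Metadata
```

The policy file is then passed to the API server with --audit-policy-file, which is what 3.2.1 checks for.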


@@ -0,0 +1,93 @@
---
controls:
version: "cis-1.24"
id: 2
text: "Datastore Node Configuration"
type: "etcd"
groups:
- id: 2
text: "Datastore Node Configuration"
checks:
- id: 2.1
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
tests:
test_items:
- flag: "--not-applicable"
set: false
remediation: |
          Not applicable. Canonical K8s uses dqlite, and communication with this service happens over a
          local socket (/var/snap/k8s/common/var/lib/k8s-dqlite/k8s-dqlite.sock) accessible only to users with root permissions.
scored: true
- id: 2.2
text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
tests:
test_items:
- flag: "--not-applicable"
set: false
remediation: |
          Not applicable. Canonical K8s uses dqlite, and communication with this service happens over a
          local socket (/var/snap/k8s/common/var/lib/k8s-dqlite/k8s-dqlite.sock) accessible only to users with root permissions.
scored: true
- id: 2.3
text: "Ensure that the --auto-tls argument is not set to true (Automated)"
tests:
test_items:
- flag: "--not-applicable"
set: false
remediation: |
          Not applicable. Canonical K8s uses dqlite, and communication with this service happens over a
          local socket (/var/snap/k8s/common/var/lib/k8s-dqlite/k8s-dqlite.sock) accessible only to users with root permissions.
scored: true
- id: 2.4
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
audit: "if test -e /var/snap/k8s/common/var/lib/k8s-dqlite/cluster.crt && test -e /var/snap/k8s/common/var/lib/k8s-dqlite/cluster.key; then echo 'certs-found'; fi"
tests:
test_items:
- flag: "certs-found"
remediation: |
          The certificate pair used for dqlite TLS peer communication is
/var/snap/k8s/common/var/lib/k8s-dqlite/cluster.crt and
/var/snap/k8s/common/var/lib/k8s-dqlite/cluster.key.
scored: true
- id: 2.5
text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
audit: "/bin/cat $etcdconf | /bin/grep enable-tls || true; echo $?"
tests:
bin_op: or
test_items:
- flag: "--enable-tls"
compare:
op: eq
value: true
- flag: "--enable-tls"
set: false
remediation: |
          Dqlite peer communication uses TLS unless --enable-tls is set to false in
/var/snap/k8s/common/args/k8s-dqlite.
scored: true
- id: 2.6
text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
tests:
test_items:
- flag: "--not-applicable"
set: false
remediation: |
          Not applicable. Canonical K8s uses dqlite, and TLS peer communication uses the certificates
          created upon snap creation.
scored: false
- id: 2.7
text: "Ensure that a unique Certificate Authority is used for the datastore (Manual)"
tests:
test_items:
- flag: "--not-applicable"
set: false
remediation: |
          Not applicable. Canonical K8s uses dqlite, and TLS peer communication uses certificates
          created upon cluster setup.
scored: true
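
The rationale used throughout this file is that dqlite is only reachable over a root-owned local socket. As an illustration of the check schema used in these files, a hypothetical extra check that verified that assumption directly could look like this (the id, audit command and wording are illustrative only, not part of the benchmark):

```
- id: 2.8
  text: "Ensure that the k8s-dqlite socket is owned by root:root (Manual)"
  audit: "stat -c %U:%G /var/snap/k8s/common/var/lib/k8s-dqlite/k8s-dqlite.sock"
  tests:
    test_items:
      - flag: "root:root"
  remediation: |
    Restore ownership with
    `chown root:root /var/snap/k8s/common/var/lib/k8s-dqlite/k8s-dqlite.sock`.
  scored: false
```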


@@ -0,0 +1,975 @@
---
controls:
version: "cis-1.24"
id: 1
text: "Control Plane Security Configuration"
type: "master"
groups:
- id: 1.1
text: "Control Plane Node Configuration Files"
checks:
- id: 1.1.1
text: "Ensure that the API server configuration file permissions are set to 600 (Automated)"
audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command on the control plane node.
`chmod 600 $apiserverconf`
scored: true
- id: 1.1.2
text: "Ensure that the API server configuration file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the following command on the control plane node.
`chown root:root $apiserverconf`
scored: true
- id: 1.1.3
text: "Ensure that the controller manager configuration file permissions are set to 600 (Automated)"
audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command on the control plane node.
`chmod 600 $controllermanagerconf`
scored: true
- id: 1.1.4
text: "Ensure that the controller manager configuration file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the following command on the control plane node.
`chown root:root $controllermanagerconf`
scored: true
- id: 1.1.5
text: "Ensure that the scheduler configuration file permissions are set to 600 (Automated)"
audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command on the control plane node.
`chmod 600 $schedulerconf`
scored: true
- id: 1.1.6
text: "Ensure that the scheduler configuration file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the following command on the control plane node.
`chown root:root $schedulerconf`
scored: true
- id: 1.1.7
text: "Ensure that the dqlite configuration file permissions are set to 644 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c permissions=%a $etcdconf; fi'"
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command on the control plane node.
`chmod 600 $etcdconf`
scored: true
- id: 1.1.8
text: "Ensure that the dqlite configuration file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
Run the following command on the control plane node.
`chown root:root $etcdconf`
scored: true
- id: 1.1.9
text: "Ensure that the Container Network Interface file permissions are set to 600 (Manual)"
audit: |
ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a
find /etc/cni/net.d/05-cilium.conflist -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command on the control plane node.
`chmod 600 /etc/cni/net.d/05-cilium.conflist`
scored: false
- id: 1.1.10
text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
audit: |
find /etc/cni/net.d/05-cilium.conflist -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
Run the following command on the control plane node.
`chown root:root /etc/cni/net.d/05-cilium.conflist`
scored: false
- id: 1.1.11
text: "Ensure that the dqlite data directory permissions are set to 700 or more restrictive (Automated)"
audit: |
DATA_DIR='/var/snap/k8s/common/var/lib/k8s-dqlite'
stat -c permissions=%a "$DATA_DIR"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "700"
remediation: |
Dqlite data are kept by default under /var/snap/k8s/common/var/lib/k8s-dqlite.
To comply with the spirit of this CIS recommendation:
`chmod 700 /var/snap/k8s/common/var/lib/k8s-dqlite`
scored: true
- id: 1.1.12
text: "Ensure that the dqlite data directory ownership is set to root:root (Automated)"
audit: |
DATA_DIR='/var/snap/k8s/common/var/lib/k8s-dqlite'
stat -c %U:%G "$DATA_DIR"
tests:
test_items:
- flag: "root:root"
remediation: |
Dqlite data are kept by default under /var/snap/k8s/common/var/lib/k8s-dqlite.
To comply with the spirit of this CIS recommendation:
`chown root:root /var/snap/k8s/common/var/lib/k8s-dqlite`
scored: true
- id: 1.1.13
text: "Ensure that the admin.conf file permissions are set to 600 (Automated)"
audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command on the control plane node.
`chmod 600 /etc/kubernetes/admin.conf`
scored: true
- id: 1.1.14
text: "Ensure that the admin.conf file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the following command on the control plane node.
`chown root:root /etc/kubernetes/admin.conf`
scored: true
- id: 1.1.15
text: "Ensure that the scheduler.conf file permissions are set to 600 (Automated)"
audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command on the control plane node.
`chmod 600 $schedulerkubeconfig`
scored: true
- id: 1.1.16
text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the following command on the control plane node.
`chown root:root $schedulerkubeconfig`
scored: true
- id: 1.1.17
text: "Ensure that the controller-manager.conf file permissions are set to 600 (Automated)"
audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command on the control plane node.
`chmod 600 $controllermanagerkubeconfig`
scored: true
- id: 1.1.18
text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the following command on the control plane node.
`chown root:root $controllermanagerkubeconfig`
scored: true
- id: 1.1.19
text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G"
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
Run the following command on the control plane node.
`chown -R root:root /etc/kubernetes/pki/`
scored: true
- id: 1.1.20
text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 (Manual)"
audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a"
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command on the control plane node.
`chmod -R 600 /etc/kubernetes/pki/*.crt`
scored: false
- id: 1.1.21
text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)"
audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a"
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command on the control plane node.
`chmod -R 600 /etc/kubernetes/pki/*.key`
scored: false
- id: 1.2
text: "API Server"
checks:
- id: 1.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--anonymous-auth"
compare:
op: eq
value: false
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the following argument.
`--anonymous-auth=false`
scored: false
- id: 1.2.2
text: "Ensure that the --token-auth-file parameter is not set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--token-auth-file"
set: false
remediation: |
Follow the documentation and configure alternate mechanisms for authentication. Then,
edit the API server configuration file $apiserverconf
on the control plane node and remove the --token-auth-file argument.
scored: true
- id: 1.2.3
text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--enable-admission-plugins"
compare:
op: nothave
value: "DenyServiceExternalIPs"
- flag: "--enable-admission-plugins"
set: false
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and remove the `DenyServiceExternalIPs`
from enabled admission plugins.
scored: true
- id: 1.2.4
text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: and
test_items:
- flag: "--kubelet-client-certificate"
- flag: "--kubelet-client-key"
remediation: |
Follow the Kubernetes documentation and set up the TLS connection between the
apiserver and kubelets. Then, edit API server configuration file
$apiserverconf on the control plane node and set the
kubelet client certificate and key parameters as follows.
```
--kubelet-client-certificate=<path/to/client-certificate-file>
--kubelet-client-key=<path/to/client-key-file>
```
scored: true
- id: 1.2.5
text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--kubelet-certificate-authority"
remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between
the apiserver and kubelets. Then, edit the API server configuration file
$apiserverconf on the control plane node and set the
--kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
`--kubelet-certificate-authority=<ca-string>`
scored: true
- id: 1.2.6
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--authorization-mode"
compare:
op: nothave
value: "AlwaysAllow"
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
One such example could be as follows.
`--authorization-mode=Node,RBAC`
scored: true
- id: 1.2.7
text: "Ensure that the --authorization-mode argument includes Node (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--authorization-mode"
compare:
op: has
value: "Node"
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the --authorization-mode parameter to a value that includes Node.
`--authorization-mode=Node,RBAC`
scored: true
- id: 1.2.8
text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--authorization-mode"
compare:
op: has
value: "RBAC"
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the --authorization-mode parameter to a value that includes RBAC,
`--authorization-mode=Node,RBAC`
scored: true
- id: 1.2.9
text: "Ensure that the admission control plugin EventRateLimit is set (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--enable-admission-plugins"
compare:
op: has
value: "EventRateLimit"
remediation: |
Follow the Kubernetes documentation and set the desired limits in a configuration file.
Then, edit the API server configuration file $apiserverconf
and set the following arguments.
```
--enable-admission-plugins=...,EventRateLimit,...
--admission-control-config-file=<path/to/configuration/file>
```
scored: false
- id: 1.2.10
text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--enable-admission-plugins"
compare:
op: nothave
value: AlwaysAdmit
- flag: "--enable-admission-plugins"
set: false
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a
value that does not include AlwaysAdmit.
scored: true
- id: 1.2.11
text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--enable-admission-plugins"
compare:
op: has
value: "AlwaysPullImages"
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the --enable-admission-plugins parameter to include
AlwaysPullImages.
`--enable-admission-plugins=...,AlwaysPullImages,...`
scored: false
- id: 1.2.12
text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--enable-admission-plugins"
compare:
op: has
value: "SecurityContextDeny"
- flag: "--enable-admission-plugins"
compare:
op: has
value: "PodSecurityPolicy"
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the --enable-admission-plugins parameter to include
SecurityContextDeny, unless PodSecurityPolicy is already in place.
`--enable-admission-plugins=...,SecurityContextDeny,...`
scored: false
- id: 1.2.13
text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--disable-admission-plugins"
compare:
op: nothave
value: "ServiceAccount"
- flag: "--disable-admission-plugins"
set: false
remediation: |
Follow the documentation and create ServiceAccount objects as per your environment.
Then, edit the API server configuration file $apiserverconf
on the control plane node and ensure that the --disable-admission-plugins parameter is set to a
value that does not include ServiceAccount.
scored: true
- id: 1.2.14
text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--disable-admission-plugins"
compare:
op: nothave
value: "NamespaceLifecycle"
- flag: "--disable-admission-plugins"
set: false
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the --disable-admission-plugins parameter to
ensure it does not include NamespaceLifecycle.
scored: true
- id: 1.2.15
text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--enable-admission-plugins"
compare:
op: has
value: "NodeRestriction"
remediation: |
Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets.
Then, edit the API server configuration file $apiserverconf
on the control plane node and set the --enable-admission-plugins parameter to a
value that includes NodeRestriction.
`--enable-admission-plugins=...,NodeRestriction,...`
scored: true
- id: 1.2.16
text: "Ensure that the --secure-port argument is not set to 0 (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--secure-port"
compare:
op: gt
value: 0
- flag: "--secure-port"
set: false
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and either remove the --secure-port parameter or
set it to a different (non-zero) desired port.
scored: true
- id: 1.2.17
text: "Ensure that the --profiling argument is set to false (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--profiling"
compare:
op: eq
value: false
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the following argument.
`--profiling=false`
scored: true
- id: 1.2.18
text: "Ensure that the --audit-log-path argument is set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-log-path"
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the --audit-log-path parameter to a suitable path and
file where you would like audit logs to be written.
`--audit-log-path=/var/log/apiserver/audit.log`
scored: true
- id: 1.2.19
text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-log-maxage"
compare:
op: gte
value: 30
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the --audit-log-maxage parameter to 30
or as an appropriate number of days.
`--audit-log-maxage=30`
scored: true
- id: 1.2.20
text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-log-maxbackup"
compare:
op: gte
value: 10
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate
value.
`--audit-log-maxbackup=10`
scored: true
- id: 1.2.21
text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-log-maxsize"
compare:
op: gte
value: 100
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB.
`--audit-log-maxsize=100`
scored: true
- id: 1.2.22
text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
type: manual
remediation: |
Edit the API server configuration file $apiserverconf
and set the following argument as appropriate and if needed.
`--request-timeout=300s`
scored: false
- id: 1.2.23
text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--service-account-lookup"
set: false
- flag: "--service-account-lookup"
compare:
op: eq
value: true
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the following argument.
`--service-account-lookup=true`
Alternatively, you can delete the --service-account-lookup argument from this file so
that the default takes effect.
scored: true
- id: 1.2.24
text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--service-account-key-file"
remediation: |
Edit the API server configuration file $apiserverconf
on the control plane node and set the --service-account-key-file parameter
to the public key file for service accounts.
`--service-account-key-file=<filename>`
scored: true
- id: 1.2.25
text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
tests:
test_items:
- flag: "--not-applicable"
set: false
remediation: |
          Not applicable. Canonical K8s uses dqlite, and communication with this service happens over a
          local socket (/var/snap/k8s/common/var/lib/k8s-dqlite/k8s-dqlite.sock) accessible only to users with root permissions.
scored: false
- id: 1.2.26
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: and
test_items:
- flag: "--tls-cert-file"
- flag: "--tls-private-key-file"
remediation: |
Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
Then, edit the API server configuration file $apiserverconf
on the control plane node and set the TLS certificate and private key file parameters.
```
--tls-cert-file=<path/to/tls-certificate-file>
--tls-private-key-file=<path/to/tls-key-file>
```
scored: true
- id: 1.2.27
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--client-ca-file"
remediation: |
Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
Then, edit the API server configuration file $apiserverconf
on the control plane node and set the client certificate authority file.
`--client-ca-file=<path/to/client-ca-file>`
scored: true
- id: 1.2.28
text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
tests:
test_items:
- flag: "--not-applicable"
set: false
remediation: |
          Not applicable. Canonical K8s uses dqlite, and communication with this service happens over a
          local socket (/var/snap/k8s/common/var/lib/k8s-dqlite/k8s-dqlite.sock) accessible only to users with root permissions.
scored: false
- id: 1.2.29
text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--encryption-provider-config"
remediation: |
          Follow the Kubernetes documentation and configure an EncryptionConfig file.
Then, edit the API server configuration file $apiserverconf
on the control plane node and set the --encryption-provider-config parameter to the path of that file.
`--encryption-provider-config=</path/to/EncryptionConfig/File>`
scored: false
- id: 1.2.30
text: "Ensure that encryption providers are appropriately configured (Manual)"
audit: |
ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
tests:
test_items:
- flag: "provider"
compare:
op: valid_elements
value: "aescbc,kms,secretbox"
remediation: |
          Follow the Kubernetes documentation and configure an EncryptionConfig file.
In this file, choose aescbc, kms or secretbox as the encryption provider.
scored: false
- id: 1.2.31
text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--tls-cipher-suites"
compare:
op: valid_elements
value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
remediation: |
Edit the API server configuration file /etc/kubernetes/manifests/kube-apiserver.yaml
on the control plane node and set the following argument.
```
--tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,
TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384
```
scored: false
- id: 1.3
text: "Controller Manager"
checks:
- id: 1.3.1
text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
test_items:
- flag: "--terminated-pod-gc-threshold"
remediation: |
Edit the Controller Manager configuration file $controllermanagerconf
on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold.
`--terminated-pod-gc-threshold=10`
scored: false
- id: 1.3.2
text: "Ensure that the --profiling argument is set to false (Automated)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
test_items:
- flag: "--profiling"
compare:
op: eq
value: false
remediation: |
Edit the Controller Manager configuration file $controllermanagerconf
on the control plane node and set the following argument.
`--profiling=false`
scored: true
- id: 1.3.3
text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
test_items:
- flag: "--use-service-account-credentials"
compare:
op: noteq
value: false
remediation: |
Edit the Controller Manager configuration file $controllermanagerconf
on the control plane node to set the following argument.
`--use-service-account-credentials=true`
scored: true
- id: 1.3.4
text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
test_items:
- flag: "--service-account-private-key-file"
remediation: |
Edit the Controller Manager configuration file $controllermanagerconf
on the control plane node and set the --service-account-private-key-file parameter
to the private key file for service accounts.
`--service-account-private-key-file=<filename>`
scored: true
- id: 1.3.5
text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
test_items:
- flag: "--root-ca-file"
remediation: |
Edit the Controller Manager configuration file $controllermanagerconf
on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
`--root-ca-file=<path/to/file>`
scored: true
- id: 1.3.6
text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--feature-gates"
compare:
op: nothave
value: "RotateKubeletServerCertificate=false"
set: true
- flag: "--feature-gates"
set: false
remediation: |
Edit the Controller Manager configuration file $controllermanagerconf
on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
`--feature-gates=RotateKubeletServerCertificate=true`
scored: true
- id: 1.3.7
text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
test_items:
- flag: "--bind-address"
compare:
op: eq
value: "127.0.0.1"
remediation: |
Edit the Controller Manager configuration file $controllermanagerconf
on the control plane node and ensure the correct value for the --bind-address parameter
and restart the controller manager service
scored: true
- id: 1.4
text: "Scheduler"
checks:
- id: 1.4.1
text: "Ensure that the --profiling argument is set to false (Automated)"
audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
tests:
test_items:
- flag: "--profiling"
compare:
op: eq
value: false
remediation: |
Edit the Scheduler configuration file $schedulerconf file
on the control plane node and set the following argument.
`--profiling=false`
scored: true
- id: 1.4.2
text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
tests:
test_items:
- flag: "--bind-address"
compare:
op: eq
value: "127.0.0.1"
remediation: |
Edit the Scheduler configuration file $schedulerconf
on the control plane node and ensure the correct value for the --bind-address parameter
          and restart the kube-scheduler service.
scored: true
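
Check 1.2.9 asks for the EventRateLimit admission plugin together with an --admission-control-config-file. A minimal sketch of the two configuration pieces involved (the file names, limit types and numbers are illustrative):

```
# admission-config.yaml, passed via --admission-control-config-file
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
  - name: EventRateLimit
    path: eventratelimit.yaml
---
# eventratelimit.yaml, referenced above
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
kind: Configuration
limits:
  - type: Namespace
    qps: 50
    burst: 100
    cacheSize: 2000
```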

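Similarly, 1.2.29 and 1.2.30 expect an EncryptionConfig file whose listed provider is aescbc, kms or secretbox. A minimal aescbc sketch (the key name is arbitrary and the key material is a placeholder the operator has to generate):

```
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: <base64-encoded 32-byte key>
      - identity: {}
```

The file's path is what --encryption-provider-config (1.2.29) should point at; the audit for 1.2.30 then reads the first entry under providers:.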
cfg/ck8s-cis-1.24/node.yaml

@@ -0,0 +1,472 @@
---
controls:
version: "cis-1.24"
id: 4
text: "Worker Node Security Configuration"
type: "node"
groups:
- id: 4.1
text: "Worker Node Configuration Files"
checks:
- id: 4.1.1
text: "Ensure that the kubelet service file permissions are set to 600 (Automated)"
audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi" '
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command on each worker node.
`chmod 600 $kubeletsvc`
scored: true
- id: 4.1.2
text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"'
tests:
bin_op: or
test_items:
- flag: root:root
- flag: "File not found"
remediation: |
Run the following command on each worker node.
`chown root:root $kubeletsvc`
scored: true
- id: 4.1.3
text: "If proxy kubeconfig file exists ensure permissions are set to 600 (Manual)"
audit: '/bin/sh -c "if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi" '
tests:
bin_op: or
test_items:
- flag: "permissions"
set: true
compare:
op: bitmask
value: "600"
remediation: |
Run the following command on each worker node.
`chmod 600 $proxykubeconfig`
scored: false
- id: 4.1.4
text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
audit: '/bin/sh -c "if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi" '
tests:
bin_op: or
test_items:
- flag: root:root
remediation: |
Run the following command on each worker node.
`chown root:root $proxykubeconfig`
scored: false
- id: 4.1.5
text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 (Automated)"
audit: '/bin/sh -c "if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi" '
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command on each worker node.
`chmod 600 $kubeletkubeconfig`
scored: true
- id: 4.1.6
text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
audit: '/bin/sh -c "if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi" '
tests:
test_items:
- flag: root:root
remediation: |
Run the following command on each worker node.
`chown root:root $kubeletkubeconfig`
scored: true
- id: 4.1.7
text: "Ensure that the certificate authorities file permissions are set to 600 (Manual)"
audit: |
CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command to modify the file permissions of the
--client-ca-file.
`chmod 600 /etc/kubernetes/pki/client-ca.crt`
scored: false
- id: 4.1.8
text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
audit: |
CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
tests:
test_items:
- flag: root:root
compare:
op: eq
value: root:root
remediation: |
Run the following command to modify the ownership of the --client-ca-file.
`chown root:root /etc/kubernetes/pki/client-ca.crt`
scored: false
- id: 4.1.9
text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 (Manual)"
audit: '/bin/sh -c "if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi" '
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command (using the config file location identified in the Audit step)
`chmod 600 $kubeletconf`
scored: false
- id: 4.1.10
text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)"
audit: '/bin/sh -c "if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi" '
tests:
test_items:
- flag: root:root
remediation: |
Run the following command (using the config file location identified in the Audit step)
`chown root:root $kubeletconf`
scored: false
- id: 4.2
text: "Kubelet"
checks:
- id: 4.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
audit: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: "--anonymous-auth"
compare:
op: eq
value: false
remediation: |
Edit the kubelet configuration file
$kubeletconf on each worker node and set the following argument.
`--anonymous-auth=false`
Restart the kubelet service.
`snap restart k8s.kubelet`
scored: true
- id: 4.2.2
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: "/bin/cat $kubeletconf"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --authorization-mode
path: '{.authorization.mode}'
compare:
op: nothave
value: AlwaysAllow
remediation: |
Edit the kubelet configuration file
$kubeletconf on each worker node and set the following argument.
`--authorization-mode=Webhook`
Restart the kubelet service:
`snap restart k8s.kubelet`
scored: true
- id: 4.2.3
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: "/bin/cat $kubeletconf"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --client-ca-file
path: '{.authentication.x509.clientCAFile}'
remediation: |
Edit the kubelet configuration file
$kubeletconf on each worker node and set the following argument.
`--client-ca-file=/etc/kubernetes/pki/client-ca.crt`
Restart the kubelet service:
`snap restart k8s.kubelet`
scored: true
- id: 4.2.4
text: "Verify that the --read-only-port argument is set to 0 (Manual)"
audit: "/bin/cat $kubeletconf"
audit_config: "/bin/cat $kubeletconf"
tests:
bin_op: or
test_items:
- flag: "--read-only-port"
path: '{.readOnlyPort}'
compare:
op: eq
value: 0
- flag: "--read-only-port"
path: '{.readOnlyPort}'
set: false
remediation: |
Edit the kubelet configuration file
$kubeletconf on each worker node and set the following argument.
`--read-only-port=0`
Restart the kubelet service:
`snap restart k8s.kubelet`
scored: false
- id: 4.2.5
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
audit: "/bin/cat $kubeletconf"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --streaming-connection-idle-timeout
path: '{.streamingConnectionIdleTimeout}'
compare:
op: noteq
value: 0
- flag: --streaming-connection-idle-timeout
path: '{.streamingConnectionIdleTimeout}'
set: false
bin_op: or
remediation: |
Edit the kubelet configuration file
$kubeletconf on each worker node and set the following argument.
`--streaming-connection-idle-timeout=5m`
Restart the kubelet service:
`snap restart k8s.kubelet`
scored: false
- id: 4.2.6
text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)"
audit: "/bin/cat $kubeletconf"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --protect-kernel-defaults
path: '{.protectKernelDefaults}'
compare:
op: eq
value: true
remediation: |
Edit the kubelet configuration file
$kubeletconf on each worker node and set the following argument:
`--protect-kernel-defaults=true`
Restart the kubelet service:
`snap restart k8s.kubelet`
scored: true
- id: 4.2.7
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
audit: "/bin/cat $kubeletconf"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --make-iptables-util-chains
path: '{.makeIPTablesUtilChains}'
compare:
op: eq
value: true
- flag: --make-iptables-util-chains
path: '{.makeIPTablesUtilChains}'
set: false
bin_op: or
remediation: |
Edit the kubelet configuration file
$kubeletconf on each worker node and
set the following argument:
`--make-iptables-util-chains=true`
Restart the kubelet service.
For example: `snap restart k8s.kubelet`
scored: true
- id: 4.2.8
text: "Ensure that the --hostname-override argument is not set (Manual)"
# This is one of those properties that can only be set as a command line argument.
# To check if the property is set as expected, we need to parse the kubelet command
        # instead of reading the Kubelet Configuration file.
audit: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --hostname-override
set: false
remediation: |
Edit the kubelet configuration file $kubeletconf
on each worker node and remove the --hostname-override argument.
Restart the kubelet service.
`snap restart k8s.kubelet`
scored: false
- id: 4.2.9
text: "Ensure that the --event-qps argument is set to a level which ensures appropriate event capture (Manual)"
audit: "/bin/cat $kubeletconf"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --event-qps
path: '{.eventRecordQPS}'
compare:
op: gte
value: 0
- flag: --event-qps
path: '{.eventRecordQPS}'
set: false
bin_op: or
remediation: |
Edit the kubelet configuration file $kubeletconf on each worker node and
set the --event-qps parameter as appropriate.
Restart the kubelet service.
`snap restart k8s.kubelet`
scored: false
- id: 4.2.10
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
audit: "/bin/cat $kubeletconf"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --tls-cert-file
path: '{.tlsCertFile}'
- flag: --tls-private-key-file
path: '{.tlsPrivateKeyFile}'
remediation: |
Edit the kubelet configuration file $kubeletconf on each worker node and
set the following arguments:
```
--tls-private-key-file=<path/to/private-key-file>
--tls-cert-file=<path/to/tls-certificate-file>
```
Restart the kubelet service.
`snap restart k8s.kubelet`
scored: false
- id: 4.2.11
text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
audit: "/bin/cat $kubeletconf"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --rotate-certificates
path: '{.rotateCertificates}'
compare:
op: eq
value: true
- flag: --rotate-certificates
path: '{.rotateCertificates}'
set: false
bin_op: or
remediation: |
Edit the kubelet configuration file $kubeletconf on each worker node and
remove the --rotate-certificates=false argument.
Restart the kubelet service.
`snap restart k8s.kubelet`
scored: true
- id: 4.2.12
text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
audit: "/bin/cat $kubeletconf"
audit_config: "/bin/cat $kubeletconf"
tests:
bin_op: or
test_items:
- flag: RotateKubeletServerCertificate
path: '{.featureGates.RotateKubeletServerCertificate}'
compare:
op: nothave
value: false
- flag: RotateKubeletServerCertificate
path: '{.featureGates.RotateKubeletServerCertificate}'
set: false
remediation: |
Edit the kubelet configuration file $kubeletconf on each worker node and
          set the argument --feature-gates=RotateKubeletServerCertificate=true.
Restart the kubelet service.
`snap restart k8s.kubelet`
scored: false
- id: 4.2.13
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
audit: "/bin/cat $kubeletconf"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --tls-cipher-suites
path: '{range .tlsCipherSuites[:]}{}{","}{end}'
compare:
op: valid_elements
value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
remediation: |
Edit the kubelet configuration file $kubeletconf on each worker node and
set the --tls-cipher-suites parameter as follows, or to a subset of these values.
```
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
```
Restart the kubelet service.
`snap restart k8s.kubelet`
scored: false
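
The path: expressions in section 4.2 are JSONPath lookups into a kubelet configuration document, while the flags cover the command-line form used by the Canonical K8s kubelet args file. For reference, if the same settings were expressed as a KubeletConfiguration, the fields those paths point at would look roughly as follows (the values shown are ones that satisfy the checks, not a complete configuration):

```
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  anonymous:
    enabled: false                       # 4.2.1
  x509:
    clientCAFile: /etc/kubernetes/pki/client-ca.crt   # 4.2.3
authorization:
  mode: Webhook                          # 4.2.2
readOnlyPort: 0                          # 4.2.4
streamingConnectionIdleTimeout: 5m       # 4.2.5
protectKernelDefaults: true              # 4.2.6
makeIPTablesUtilChains: true             # 4.2.7
eventRecordQPS: 5                        # 4.2.9
rotateCertificates: true                 # 4.2.11
featureGates:
  RotateKubeletServerCertificate: true   # 4.2.12
tlsCipherSuites:                         # 4.2.13
  - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
  - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
```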


@@ -0,0 +1,269 @@
---
controls:
version: "cis-1.24"
id: 5
text: "Kubernetes Policies"
type: "policies"
groups:
- id: 5.1
text: "RBAC and Service Accounts"
checks:
- id: 5.1.1
text: "Ensure that the cluster-admin role is only used where required (Manual)"
type: "manual"
remediation: |
Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
if they need this role or if they could use a role with fewer privileges.
Where possible, first bind users to a lower privileged role and then remove the
          clusterrolebinding to the cluster-admin role:
          `kubectl delete clusterrolebinding [name]`
scored: false
- id: 5.1.2
text: "Minimize access to secrets (Manual)"
type: "manual"
remediation: |
Where possible, remove get, list and watch access to Secret objects in the cluster.
scored: false
- id: 5.1.3
text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
type: "manual"
remediation: |
Where possible replace any use of wildcards in clusterroles and roles with specific
objects or actions.
scored: false
- id: 5.1.4
text: "Minimize access to create pods (Manual)"
type: "manual"
remediation: |
Where possible, remove create access to pod objects in the cluster.
scored: false
- id: 5.1.5
text: "Ensure that default service accounts are not actively used. (Manual)"
type: "manual"
remediation: |
Create explicit service accounts wherever a Kubernetes workload requires specific access
to the Kubernetes API server.
Modify the configuration of each default service account to include this value
automountServiceAccountToken: false
scored: false
- id: 5.1.6
text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
type: "manual"
remediation: |
Modify the definition of pods and service accounts which do not need to mount service
account tokens to disable it.
scored: false
- id: 5.1.7
text: "Avoid use of system:masters group (Manual)"
type: "manual"
remediation: |
Remove the system:masters group from all users in the cluster.
scored: false
- id: 5.1.8
text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
type: "manual"
remediation: |
Where possible, remove the impersonate, bind and escalate rights from subjects.
scored: false
- id: 5.2
text: "Pod Security Standards"
checks:
- id: 5.2.1
text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
type: "manual"
remediation: |
Ensure that either Pod Security Admission or an external policy control system is in place
for every namespace which contains user workloads.
scored: false
- id: 5.2.2
text: "Minimize the admission of privileged containers (Manual)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of privileged containers.
scored: false
- id: 5.2.3
text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of `hostPID` containers.
scored: false
- id: 5.2.4
text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of `hostIPC` containers.
scored: false
- id: 5.2.5
text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of `hostNetwork` containers.
scored: false
- id: 5.2.6
text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
scored: false
- id: 5.2.7
text: "Minimize the admission of root containers (Automated)"
type: "manual"
remediation: |
Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
or `MustRunAs` with the range of UIDs not including 0, is set.
scored: false
- id: 5.2.8
text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers with the `NET_RAW` capability.
scored: false
- id: 5.2.9
text: "Minimize the admission of containers with added capabilities (Automated)"
type: "manual"
remediation: |
Ensure that `allowedCapabilities` is not present in policies for the cluster unless
it is set to an empty array.
scored: false
- id: 5.2.10
text: "Minimize the admission of containers with capabilities assigned (Manual)"
type: "manual"
remediation: |
          Review the use of capabilities in applications running on your cluster. Where a namespace
          contains applications which do not require any Linux capabilities to operate, consider adding
          a PSP which forbids the admission of containers which do not drop all capabilities.
scored: false
- id: 5.2.11
text: "Minimize the admission of Windows HostProcess containers (Manual)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
scored: false
- id: 5.2.12
text: "Minimize the admission of HostPath volumes (Manual)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers with `hostPath` volumes.
scored: false
- id: 5.2.13
text: "Minimize the admission of containers which use HostPorts (Manual)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers which use `hostPort` sections.
scored: false
- id: 5.3
text: "Network Policies and CNI"
checks:
- id: 5.3.1
text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
type: "manual"
remediation: |
If the CNI plugin in use does not support network policies, consideration should be given to
making use of a different plugin, or finding an alternate mechanism for restricting traffic
in the Kubernetes cluster.
scored: false
- id: 5.3.2
text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
type: "manual"
remediation: |
Follow the documentation and create NetworkPolicy objects as you need them.
scored: false
- id: 5.4
text: "Secrets Management"
checks:
- id: 5.4.1
text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
type: "manual"
remediation: |
If possible, rewrite application code to read Secrets from mounted secret files, rather than
from environment variables.
scored: false
- id: 5.4.2
text: "Consider external secret storage (Manual)"
type: "manual"
remediation: |
Refer to the Secrets management options offered by your cloud provider or a third-party
secrets management solution.
scored: false
- id: 5.5
text: "Extensible Admission Control"
checks:
- id: 5.5.1
text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
type: "manual"
remediation: |
          Follow the Kubernetes documentation and set up image provenance.
scored: false
- id: 5.7
text: "General Policies"
checks:
- id: 5.7.1
text: "Create administrative boundaries between resources using namespaces (Manual)"
type: "manual"
remediation: |
Follow the documentation and create namespaces for objects in your deployment as you need
them.
scored: false
- id: 5.7.2
text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
type: "manual"
remediation: |
Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
An example is as follows:
securityContext:
seccompProfile:
type: RuntimeDefault
scored: false
- id: 5.7.3
text: "Apply SecurityContext to your Pods and Containers (Manual)"
type: "manual"
remediation: |
Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
Containers.
scored: false
- id: 5.7.4
text: "The default namespace should not be used (Manual)"
type: "manual"
remediation: |
Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
resources and that all new resources are created in a specific namespace.
scored: false
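
Section 5.2 relies on a policy control mechanism being active in every namespace with user workloads. With Pod Security Admission, the built-in mechanism in Kubernetes 1.24, those per-namespace policies are expressed as labels; a sketch for a workload namespace (the namespace name and the chosen level are examples):

```
apiVersion: v1
kind: Namespace
metadata:
  name: my-app
  labels:
    pod-security.kubernetes.io/enforce: restricted
    pod-security.kubernetes.io/enforce-version: v1.24
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/audit: restricted
```

An enforced profile such as restricted is one way to satisfy 5.2.1's requirement for at least one active policy control mechanism per namespace.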

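For 5.3.2, a common starting point is a default-deny NetworkPolicy in each namespace, with explicit allow rules added per workload (the namespace name is an example):

```
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: my-app
spec:
  podSelector: {}
  policyTypes:
    - Ingress
    - Egress
```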

@@ -293,6 +293,7 @@ version_mapping:
"ack-1.0": "ack-1.0"
"cis-1.6-k3s": "cis-1.6-k3s"
"cis-1.24-microk8s": "cis-1.24-microk8s"
"ck8s-cis-1.24": "ck8s-cis-1.24"
"tkgi-1.2.53": "tkgi-1.2.53"
"k3s-cis-1.7": "k3s-cis-1.7"
"k3s-cis-1.23": "k3s-cis-1.23"
@@ -347,6 +348,12 @@ target_mapping:
- "node"
- "controlplane"
- "policies"
"ck8s-cis-1.24":
- "master"
- "node"
- "controlplane"
- "etcd"
- "policies"
"cis-1.7":
- "master"
- "node"