diff --git a/cfg/cis-1.6-k3s/config.yaml b/cfg/cis-1.6-k3s/config.yaml new file mode 100644 index 0000000..afc6ade --- /dev/null +++ b/cfg/cis-1.6-k3s/config.yaml @@ -0,0 +1,42 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml + +master: + components: + - scheduler + - controllermanager + - node + + scheduler: + kubeconfig: + - /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig + defaultkubeconfig: /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig + + controllermanager: + kubeconfig: + - /var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig + defaultkubeconfig: /var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig + +etcd: + components: + - etcd + etcd: + confs: + - /var/lib/rancher/k3s/server/db/etcd/config + defaultconf: /var/lib/rancher/k3s/server/db/etcd/config + +node: + components: + - proxy + - kubelet + proxy: + kubeconfig: + - "/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig" + defaultkubeconfig: "/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig" + kubelet: + kubeconfig: + - "/var/lib/rancher/k3s/agent/kubelet.kubeconfig" + defaultkubeconfig: "/var/lib/rancher/k3s/agent/kubelet.kubeconfig" + cafile: + - "/var/lib/rancher/k3s/server/tls/server-ca.crt" + defaultcafile: "/var/lib/rancher/k3s/server/tls/server-ca.crt" diff --git a/cfg/cis-1.6-k3s/controlplane.yaml b/cfg/cis-1.6-k3s/controlplane.yaml new file mode 100644 index 0000000..23b639c --- /dev/null +++ b/cfg/cis-1.6-k3s/controlplane.yaml @@ -0,0 +1,40 @@ +--- +controls: +version: "cis-1.6-k3s" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-policy-file" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster and pass it to k3s. + e.g. --kube-apiserver-arg='audit-log-path=/var/lib/rancher/k3s/server/logs/audit-log' + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Consider modification of the audit policy in use on the cluster to include these items, at a + minimum. + scored: false diff --git a/cfg/cis-1.6-k3s/etcd.yaml b/cfg/cis-1.6-k3s/etcd.yaml new file mode 100644 index 0000000..227f4f0 --- /dev/null +++ b/cfg/cis-1.6-k3s/etcd.yaml @@ -0,0 +1,129 @@ +--- +controls: +version: "cis-1.6-k3s" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration Files" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate if use etcd as database (Automated)" + audit: grep -E 'cert-file|key-file' $etcdconf + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + By default, K3s uses a config file for etcd that can be found at $etcdconf. + Server and peer cert and key files are specified. No manual remediation needed. 
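+        # Illustrative only: the filenames below are assumptions based on a typical K3s-managed
+        # etcd setup, not values mandated by this check. The generated config usually carries
+        # entries along these lines:
+        #   client-transport-security:
+        #     cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt
+        #     key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key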
+ scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: grep 'client-cert-auth' $etcdconf + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + By default, K3s uses a config file for etcd that can be found at $etcdconf. + client-cert-auth is set to true. No manual remediation needed. + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: grep 'auto-tls' $etcdconf + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + By default, K3s starts Etcd without this flag. It is set to false by default. + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: grep -A 5 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file' + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + remediation: | + By default, K3s starts Etcd with a config file found here, $etcdconf. + The config file contains peer-transport-security: which has fields that have the peer cert and peer key files. + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: grep 'client-cert-auth' $etcdconf + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + By default, K3s uses a config file for etcd that can be found at $etcdconf. + Within the file, the client-cert-auth field is set. No manual remediation needed. + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: grep 'peer-auto-tls' $etcdconf + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + By default, K3s uses a config file for etcd that can be found at $etcdconf. + Within the file, it does not contain the peer-auto-tls field. No manual remediation needed. + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: | + if [ -f "$etcdconf" ];then + etcd_ca=$(grep 'trusted-ca-file' $etcdconf | awk -F ":|: *" '{print $NF}'); + apiserver_ca=$(journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "trusted-ca-file" | awk -F "=" '{print $NF}') + if [ "$etcd_ca" == "$apiserver_ca" ]; then + echo 'etcd_and_apiserver_have_same_ca'; + else + echo 'etcd_and_apiserver_ca_not_same1' ; + fi + else + echo 'etcd_and_apiserver_ca_not_same'; return ; + fi + tests: + test_items: + - flag: "etcd_and_apiserver_ca_not_same" + remediation: | + By default, K3s uses a config file for etcd that can be found at $etcdconf + and the trusted-ca-file parameters in it are set to unique values specific to etcd. No manual remediation needed. 
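+        # Outside of kube-bench, the same comparison can be made by hand (illustrative commands
+        # mirroring the audit above):
+        #   grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config
+        #   journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -o 'etcd-cafile=[^ ]*'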
+ scored: false diff --git a/cfg/cis-1.6-k3s/master.yaml b/cfg/cis-1.6-k3s/master.yaml new file mode 100644 index 0000000..f73979b --- /dev/null +++ b/cfg/cis-1.6-k3s/master.yaml @@ -0,0 +1,783 @@ +--- +controls: +version: "cis-1.6-k3s" +id: 1 +text: "Master Node Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Master Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Not Applicable)" + scored: false + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Not Applicable)" + scored: false + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Not Applicable)" + scored: false + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Not Applicable)" + scored: false + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Not Applicable)" + scored: false + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Not Applicable)" + scored: false + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Not Applicable)" + scored: false + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Not Applicable)" + scored: false + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Not Applicable)" + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Not Applicable)" + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive if etcd is used (Automated)" + audit: stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the below command: + journalctl -u k3s | grep 'Managed etcd' | grep -v grep + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/rancher/k3s/server/db/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd if etcd is used (Not Applicable)" + scored: false + + - id: 1.1.13 + text: "Ensure that the admin.kubeconfig file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the k3s node. 
+ For example, + chmod 644 /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.kubeconfig file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the k3s node. + For example, + chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.kubeconfig file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the k3s node. + For example, + chmod 644 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.kubeconfig file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the k3s node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the cloud-controller.kubeconfig file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the $controllermanagerkubeconfig file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the k3s node. + For example, + chown -R root:root /var/lib/rancher/k3s/server/tls + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Automated)" + audit: "find /var/lib/rancher/k3s/server/tls/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. 
+          For example,
+          chmod -R 644 /var/lib/rancher/k3s/server/tls/*.crt
+        scored: true
+
+      - id: 1.1.21
+        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)"
+        audit: "find /var/lib/rancher/k3s/server/tls/ -name '*.key' | xargs stat -c permissions=%a"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key
+        scored: true
+
+  - id: 1.2
+    text: "API Server"
+    checks:
+      - id: 1.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, K3s kube-apiserver is configured to run with the --anonymous-auth=false flag and value.
+        scored: false
+
+      - id: 1.2.2
+        text: "Ensure that the --basic-auth-file argument is not set (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "basic-auth-file"
+        tests:
+          test_items:
+            - flag: "--basic-auth-file"
+              set: false
+        remediation: |
+          By default, K3s does not run with basic authentication enabled. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.3
+        text: "Ensure that the --token-auth-file parameter is not set (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "token-auth-file"
+        tests:
+          test_items:
+            - flag: "--token-auth-file"
+              set: false
+        remediation: |
+          By default, K3s does not run with token authentication enabled. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.4
+        text: "Ensure that the --kubelet-https argument is set to true (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "kubelet-https"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--kubelet-https"
+              compare:
+                op: eq
+                value: true
+            - flag: "--kubelet-https"
+              set: false
+        remediation: |
+          By default, K3s kube-apiserver doesn't run with the --kubelet-https parameter as it runs with TLS. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.5
+        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E 'kubelet-client-certificate|kubelet-client-key'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--kubelet-client-certificate"
+            - flag: "--kubelet-client-key"
+        remediation: |
+          By default, K3s kube-apiserver is run with these arguments for secure communication with kubelet. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.6
+        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "kubelet-certificate-authority"
+        tests:
+          test_items:
+            - flag: "--kubelet-certificate-authority"
+        remediation: |
+          By default, K3s kube-apiserver is run with this argument for secure communication with kubelet. No manual remediation is needed.
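+        # If the flag ever needs to be supplied explicitly, one illustrative (assumed, not
+        # K3s-documented) invocation would be:
+        #   k3s server --kube-apiserver-arg='kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt'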
+ scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + By default, K3s sets Node,RBAC as the parameter to the --authorization-mode argument. No manual remediation is needed. + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + By default, K3s sets Node,RBAC as the parameter to the --authorization-mode argument. No manual remediation is needed. + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + By default, K3s sets Node,RBAC as the parameter to the --authorization-mode argument. No manual remediation is needed. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + By default, K3s only sets NodeRestriction,PodSecurityPolicy as the parameter to the --enable-admission-plugins argument. + To configure this, follow the Kubernetes documentation and set the desired limits in a configuration file. + Then refer to K3s's documentation to see how to supply additional api server configuration via the kube-apiserver-arg parameter. + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + By default, K3s only sets NodeRestriction,PodSecurityPolicy as the parameter to the --enable-admission-plugins argument. + No manual remediation needed. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + By default, K3s only sets NodeRestriction,PodSecurityPolicy as the parameter to the --enable-admission-plugins argument. + To configure this, follow the Kubernetes documentation and set the desired limits in a configuration file. + Then refer to K3s's documentation to see how to supply additional api server configuration via the kube-apiserver-arg parameter. 
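+        # Illustrative example of enabling the plugin (the exact plugin list is an assumption;
+        # keep whichever plugins the cluster already relies on):
+        #   k3s server --kube-apiserver-arg='enable-admission-plugins=NodeRestriction,PodSecurityPolicy,AlwaysPullImages'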
+ scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + K3s would need to have the SecurityContextDeny admission plugin enabled by passing it as an argument to K3s. + --kube-apiserver-arg='enable-admission-plugins=SecurityContextDeny + scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "ServiceAccount" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + By default, K3s does not use this argument. + If there's a desire to use this argument, follow the documentation and create ServiceAccount objects as per your environment. + Then refer to K3s's documentation to see how to supply additional api server configuration via the kube-apiserver-arg parameter. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "disable-admission-plugins" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + By default, K3s does not use this argument. No manual remediation needed. + scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin PodSecurityPolicy is set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + K3s would need to have the PodSecurityPolicy admission plugin enabled by passing it as an argument to K3s. + --kube-apiserver-arg='enable-admission-plugins=PodSecurityPolicy. + scored: true + + - id: 1.2.17 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + K3s would need to have the NodeRestriction admission plugin enabled by passing it as an argument to K3s. + --kube-apiserver-arg='enable-admission-plugins=NodeRestriction. + scored: true + + - id: 1.2.18 + text: "Ensure that the --insecure-bind-address argument is not set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + test_items: + - flag: "--insecure-bind-address" + set: false + remediation: | + By default, K3s explicitly excludes the use of the --insecure-bind-address parameter. No manual remediation is needed. 
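+        # To double-check by hand (illustrative command), confirm the flag is absent from the
+        # running arguments; the count below should be 0:
+        #   journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -c 'insecure-bind-address'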
+ scored: true + + - id: 1.2.19 + text: "Ensure that the --insecure-port argument is set to 0 (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "insecure-port" + tests: + test_items: + - flag: "--insecure-port" + compare: + op: eq + value: 0 + remediation: | + By default, K3s starts the kube-apiserver process with this argument's parameter set to 0. No manual remediation is needed. + scored: true + + - id: 1.2.20 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "secure-port" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + By default, K3s sets the parameter of 6444 for the --secure-port argument. No manual remediation is needed. + scored: true + + - id: 1.2.21 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "profiling" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling flag parameter to false. No manual remediation needed. + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-log-path" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + K3s server needs to be run with the following argument, --kube-apiserver-arg='audit-log-path=/path/to/log/file' + scored: true + + - id: 1.2.23 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-log-maxage" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + K3s server needs to be run with the following argument, --kube-apiserver-arg='audit-log-maxage=30' + scored: true + + - id: 1.2.24 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-log-maxbackup" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + K3s server needs to be run with the following argument, --kube-apiserver-arg='audit-log-maxbackup=10' + scored: true + + - id: 1.2.25 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-log-maxsize" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + K3s server needs to be run with the following argument, --kube-apiserver-arg='audit-log-maxsize=100' + scored: true + + - id: 1.2.26 + text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "request-timeout" + tests: + test_items: + - flag: "--request-timeout" + compare: + op: lte + value: 60 + remediation: | + By default, K3s does not set the --request-timeout argument. No manual remediation needed. 
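+        # If a stricter timeout is desired anyway, an illustrative (assumed) way to set it is:
+        #   k3s server --kube-apiserver-arg='request-timeout=60s'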
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "service-account-lookup"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--service-account-lookup"
+              set: false
+            - flag: "--service-account-lookup"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          K3s server needs to be run with the following argument, --kube-apiserver-arg='service-account-lookup=true'
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "service-account-key-file"
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+        remediation: |
+          By default, K3s sets the --service-account-key-file explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.29
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E 'etcd-certfile|etcd-keyfile'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+            - flag: "--etcd-keyfile"
+        remediation: |
+          By default, K3s sets the --etcd-certfile and --etcd-keyfile arguments explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.30
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E 'tls-cert-file|tls-private-key-file'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+            - flag: "--tls-private-key-file"
+        remediation: |
+          By default, K3s sets the --tls-cert-file and --tls-private-key-file arguments explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.31
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+        remediation: |
+          By default, K3s sets the --client-ca-file argument explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.32
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E 'etcd-cafile'
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+        remediation: |
+          By default, K3s sets the --etcd-cafile argument explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.33
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E "encryption-provider-config"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          K3s server needs to be run with the following, --kube-apiserver-arg='encryption-provider-config=/path/to/encryption_config'.
+          This can be done by running k3s with the --secrets-encryption argument, which will configure the encryption provider.
+        scored: false
+
+      - id: 1.2.34
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        type: manual
+        remediation: |
+          K3s server needs to be run with the following, --secrets-encryption=true, and verify that one of the allowed encryption providers is present.
+          Run the below command on the master node.
+ grep aescbc /path/to/encryption-config.json + Verify that aescbc/kms/secretbox is set as the encryption provider for all the desired resources. + scored: true + + - id: 1.2.35 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "tls-cipher-suites" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + remediation: | + By default, K3s explicitly doesn't set this flag. No manual remediation needed. + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: | + journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "terminated-pod-gc-threshold" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + K3s server needs to be run with the following, --kube-controller-manager-arg='terminated-pod-gc-threshold=10. + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "profiling" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling flag parameter to false. No manual remediation needed. + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "use-service-account-credentials" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + K3s server needs to be run with the following, --kube-controller-manager-arg='use-service-account-credentials=true' + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "service-account-private-key-file" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + By default, K3s sets the --service-account-private-key-file argument with the service account key file. No manual remediation needed. + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "root-ca-file" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + By default, K3s sets the --root-ca-file argument with the root ca file. No manual remediation needed. 
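+        # Illustrative manual verification (mirrors the audit above):
+        #   journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep -o 'root-ca-file=[^ ]*'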
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "RotateKubeletServerCertificate"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          By default, K3s implements its own logic for certificate generation and rotation.
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "bind-address"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          By default, K3s sets the --bind-address argument to 127.0.0.1. No manual remediation needed.
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-scheduler" | tail -n1 | grep "profiling"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, K3s sets the --profiling flag parameter to false. No manual remediation needed.
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-scheduler" | tail -n1 | grep "bind-address"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          By default, K3s sets the --bind-address argument to 127.0.0.1. No manual remediation needed.
+        scored: true
diff --git a/cfg/cis-1.6-k3s/node.yaml b/cfg/cis-1.6-k3s/node.yaml
new file mode 100644
index 0000000..1557b5a
--- /dev/null
+++ b/cfg/cis-1.6-k3s/node.yaml
@@ -0,0 +1,253 @@
+---
+controls:
+version: "cis-1.6-k3s"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Not Applicable)"
+        scored: false
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Not Applicable)"
+        scored: false
+
+      - id: 4.1.3
+        text: "If proxy kubeproxy.kubeconfig file exists ensure permissions are set to 644 or more restrictive (Automated)"
+        audit: stat -c permissions=%a $proxykubeconfig
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "644"
+            - flag: "$proxykubeconfig"
+              set: false
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $proxykubeconfig
+        scored: true
+
+      - id: 4.1.4
+        text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+            - flag: "$proxykubeconfig"
+              set: false
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+ For example, chown root:root $proxykubeconfig + scored: true + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + By default, K3s creates $kubeletkubeconfig with 644 permissions. No manual remediation needed. + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + By default, K3s creates $kubeletkubeconfig with root:root ownership. No manual remediation needed. + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)" + audit: stat -c permissions=%a $kubeletcafile + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + By default, K3s creates $kubeletcafile with 644 permissions. + scored: true + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" + audit: stat -c %U:%G $kubeletcafile + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + By default, K3s creates $kubeletcafile with root:root ownership. + scored: true + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Not Applicable)" + scored: false + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Not Applicable)" + scored: false + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the anonymous-auth argument is set to false (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + By default, K3s starts kubelet with --anonymous-auth set to false. No manual remediation needed. + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" + tests: + test_items: + - flag: --authorization-mode + compare: + op: nothave + value: AlwaysAllow + remediation: | + K3s starts kubelet with Webhook as the value for the --authorization-mode argument. No manual remediation needed. + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" + tests: + test_items: + - flag: --client-ca-file + remediation: | + By default, K3s starts the kubelet process with the --client-ca-file. No manual remediation needed. 
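+        # For reference, the CA file K3s points the kubelet at can be confirmed with an
+        # illustrative command like:
+        #   journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep -o 'client-ca-file=[^ ]*'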
+ scored: true + + - id: 4.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Automated)" + audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "read-only-port" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + compare: + op: eq + value: 0 + - flag: "--read-only-port" + set: false + remediation: | + By default, K3s starts the kubelet process with the --read-only-port argument set to 0. + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "streaming-connection-idle-timeout" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + set: false + bin_op: or + remediation: | + By default, K3s does not set --streaming-connection-idle-timeout when starting kubelet. + scored: true + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "protect-kernel-defaults" + tests: + test_items: + - flag: --protect-kernel-defaults + compare: + op: eq + value: true + remediation: | + K3s server needs to be started with the following, --protect-kernel-defaults=true. + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "make-iptables-util-chains" + tests: + test_items: + - flag: --make-iptables-util-chains + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + set: false + bin_op: or + remediation: | + K3s server needs to be run with the following, --kube-apiserver-arg='make-iptables-util-chains=true'. + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Not Applicable)" + scored: false + + - id: 4.2.9 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual)" + audit: /bin/ps -fC containerd + tests: + test_items: + - flag: --event-qps + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep -E 'tls-cert-file|tls-private-key-file' + tests: + test_items: + - flag: --tls-cert-file + - flag: --tls-private-key-file + remediation: | + By default, K3s sets the --tls-cert-file and --tls-private-key-file arguments when executing the kubelet process. 
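+        # The paths K3s supplies for these flags typically live under /var/lib/rancher/k3s/agent/
+        # (illustrative example, confirm on your node):
+        #   tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt
+        #   tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key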
+ scored: false + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Not Applicable)" + scored: false + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Not Applicable)" + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Applicable)" + scored: false diff --git a/cfg/cis-1.6-k3s/policies.yaml b/cfg/cis-1.6-k3s/policies.yaml new file mode 100644 index 0000000..f6658ce --- /dev/null +++ b/cfg/cis-1.6-k3s/policies.yaml @@ -0,0 +1,260 @@ +--- +controls: +version: "cis-1.6-k3s" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + kubectl get roles --all-namespaces -o yaml + kubectl get clusterroles -o yaml + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.2 + text: "Pod Security Policies" + checks: + - id: 5.2.1 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + kubectl describe psp | grep MustRunAsNonRoot + An operator should apply a PodSecurityPolicy that sets the Rule value to MustRunAsNonRoot. An example of this can be found in the Hardening Guide + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + type: "manual" + remediation: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + An operator should apply a PodSecurityPolicy that sets the hostPID value to false explicitly for the PSP it creates. An example of this can be found in the Hardening Guide. 
+ https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + type: "manual" + remediation: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + An operator should apply a PodSecurityPolicy that sets the HostIPC value to false explicitly for the PSP it creates. An example of this can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + type: "manual" + remediation: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + An operator should apply a PodSecurityPolicy that sets the HostNetwork value to false explicitly for the PSP it creates. An example of this can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + An operator should apply a PodSecurityPolicy that sets the allowPrivilegeEscalation value to false explicitly for the PSP it creates. An example of this can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.6 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + An operator should apply a PodSecurityPolicy that sets the runAsUser.Rule value to MustRunAsNonRoot. An example of this can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.7 + text: "Minimize the admission of containers with the NET_RAW capability (Manual)" + type: "manual" + remediation: | + kubectl get psp -o json | jq .spec.requiredDropCapabilities[] + An operator should apply a PodSecurityPolicy that sets .spec.requiredDropCapabilities[] to a value of All. An example of this can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + kubectl get psp + An operator should apply a PodSecurityPolicy that sets allowedCapabilities to anything other than an empty array. An example of this can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + kubectl get psp + An operator should apply a PodSecurityPolicy that sets requiredDropCapabilities to ALL. 
An example of this can be found in the Hardening Guide.
+          https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports Network Policies (Manual)"
+        type: "manual"
+        remediation: |
+          By default, K3s uses Canal (Calico and Flannel), which fully supports network policies.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have Network Policies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Run the below command on the master node.
+          for i in kube-system kube-public default; do
+            kubectl get networkpolicies -n $i;
+          done
+          Verify that there are network policies applied to each of the namespaces.
+          An operator should apply NetworkPolicies that prevent unneeded traffic from traversing networks unnecessarily. An example of applying a NetworkPolicy can be found in the Hardening Guide.
+          https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using secrets as files over secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          Run the following command to find references to objects which use environment variables defined from secrets.
+          kubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}' -A
+
+          If possible, rewrite application code to read secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and set up image provenance.
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        audit: kubectl get namespaces
+        type: "manual"
+        remediation: |
+          Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
+          would need to enable alpha features in the apiserver by passing the "--feature-gates=AllAlpha=true" argument.
+          Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS
+          parameter to "--feature-gates=AllAlpha=true"
+          KUBE_API_ARGS="--feature-gates=AllAlpha=true"
+          Based on your system, restart the kube-apiserver service. For example:
+          systemctl restart kube-apiserver.service
+          Use annotations to enable the docker/default seccomp profile in your pod definitions.
An + example is as below: + apiVersion: v1 + kind: Pod + metadata: + name: trustworthy-pod + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default + spec: + containers: + - name: trustworthy-container + image: sotrustworthy:latest + scored: false + + - id: 5.7.3 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Run the below command on the master node. + kubectl get all -n default + The only entries there should be system-managed resources such as the kubernetes service. + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/config.yaml b/cfg/config.yaml index 6d880ef..93ae514 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -258,6 +258,7 @@ version_mapping: "ocp-4.0": "rh-1.0" "aks-1.0": "aks-1.0" "ack-1.0": "ack-1.0" + "cis-1.6-k3s": "cis-1.6-k3s" target_mapping: "cis-1.5": @@ -272,6 +273,12 @@ target_mapping: - "controlplane" - "etcd" - "policies" + "cis-1.6-k3s": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" "cis-1.20": - "master" - "node" diff --git a/docs/architecture.md b/docs/architecture.md index 705ee2f..101c5e7 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -27,6 +27,7 @@ The following table shows the valid targets based on the CIS Benchmark version. | aks-1.0 | controlplane, node, policies, managedservices | | rh-0.7 | master,node| | rh-1.0 | master, controlplane, node, etcd, policies | +| cis-1.6-k3s | master, controlplane, node, etcd, policies | The following table shows the valid DISA STIG versions diff --git a/docs/platforms.md b/docs/platforms.md index a6527d9..98cde62 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -22,4 +22,5 @@ Some defined by other hardenening guides. | CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS | | RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | | CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- | +| CIS | [1.6.0-k3s](https://docs.rancher.cn/docs/k3s/security/self-assessment/_index) | cis-1.6-k3s | k3s v1.16-v1.24 | | DISA | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS |