From 82150fdc639724fc2e6ec62f007c72f75894ff77 Mon Sep 17 00:00:00 2001 From: yoavrotems Date: Wed, 27 Feb 2019 10:39:32 +0000 Subject: [PATCH 01/10] add new config files from the new CIS Kubernetes Benchmark there is a new update at CIS_Kubernetes_Benchmark_v1.4.0 for Kubernetes 1.13 --- cfg/1.13/config.yaml | 29 + cfg/1.13/master.yaml | 1500 ++++++++++++++++++++++++++++++++++++++++++ cfg/1.13/node.yaml | 480 ++++++++++++++ 3 files changed, 2009 insertions(+) create mode 100644 cfg/1.13/config.yaml create mode 100644 cfg/1.13/master.yaml create mode 100644 cfg/1.13/node.yaml diff --git a/cfg/1.13/config.yaml b/cfg/1.13/config.yaml new file mode 100644 index 0000000..3a63b4d --- /dev/null +++ b/cfg/1.13/config.yaml @@ -0,0 +1,29 @@ +--- +## Controls Files. +# These are YAML files that hold all the details for running checks. +# +## Uncomment to use different control file paths. +# masterControls: ./cfg/master.yaml +# nodeControls: ./cfg/node.yaml +# federatedControls: ./cfg/federated.yaml + +master: + apiserver: + defaultconf: /etc/kubernetes/manifests/kube-apiserver.yaml + + scheduler: + defaultconf: /etc/kubernetes/manifests/kube-scheduler.yaml + + controllermanager: + defaultconf: /etc/kubernetes/manifests/kube-controller-manager.yaml + + etcd: + defaultconf: /etc/kubernetes/manifests/etcd.yaml + +node: + kubelet: + defaultconf: /etc/kubernetes/kubelet.conf + defaultsvc: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + + proxy: + defaultconf: /etc/kubernetes/addons/kube-proxy-daemonset.yaml diff --git a/cfg/1.13/master.yaml b/cfg/1.13/master.yaml new file mode 100644 index 0000000..7a9f6bc --- /dev/null +++ b/cfg/1.13/master.yaml @@ -0,0 +1,1500 @@ +--- +controls: +version: 1.11 +id: 1 +text: "Master Node Security Configuration" +type: "master" +groups: +- id: 1.1 + text: "API Server" + checks: + - id: 1.1.1 + text: "Ensure that the --anonymous-auth argument is set to false (Not Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.1.2 + text: "Ensure that the --basic-auth-file argument is not set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--basic-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the master node and remove the --basic-auth-file= + parameter. + scored: true + + - id: 1.1.3 + text: "Ensure that the --insecure-allow-any-token argument is not set (Not Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--insecure-allow-any-token" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and remove the --insecure-allow-any-token + parameter. + scored: true + + - id: 1.1.4 + text: "Ensure that the --kubelet-https argument is set to true (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + set: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and remove the --kubelet-https parameter. 
+ scored: true + + - id: 1.1.5 + text: "Ensure that the --insecure-bind-address argument is not set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--insecure-bind-address" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and remove the --insecure-bind-address + parameter. + scored: true + + - id: 1.1.6 + text: "Ensure that the --insecure-port argument is set to 0 (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--insecure-port" + compare: + op: eq + value: 0 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + apiserver.yaml on the master node and set the below parameter. + --insecure-port=0 + scored: true + + - id: 1.1.7 + text: "Ensure that the --secure-port argument is not set to 0 (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + set: true + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.1.8 + text: "Ensure that the --profiling argument is set to false (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.1.9 + text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--repair-malformed-updates" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the below parameter. + --repair-malformed-updates=false + scored: true + + - id: 1.1.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.1.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins to + include AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... 
+ scored: true + + - id: 1.1.12 + text: "Ensure that the admission control plugin DenyEscalatingExec is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "DenyEscalatingExec" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to a + value that includes DenyEscalatingExec. + --enable-admission-plugins=...,DenyEscalatingExec,... + scored: true + + - id: 1.1.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to + include SecurityContextDeny. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.1.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + --disable-admission-plugins=...,NamespaceLifecycle,... + scored: true + + - id: 1.1.15 + text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --audit-log-path parameter to a suitable + path and file where you would like audit logs to be written, for example: + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.1.16 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --audit-log-maxage parameter to 30 or + as an appropriate number of days: --audit-log-maxage=30 + scored: true + + - id: 1.1.17 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --audit-log-maxbackup parameter to 10 + or to an appropriate value. 
+ --audit-log-maxbackup=10 + scored: true + + - id: 1.1.18 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --audit-log-maxsize parameter to an + appropriate size in MB. For example, to set it as 100 MB: + --audit-log-maxsize=100 + scored: true + + - id: 1.1.19 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --authorization-mode parameter to + values other than AlwaysAllow. One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.1.20 + text: "Ensure that the --token-auth-file parameter is not set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the master node and remove the --token-auth-file= + parameter. + scored: true + + - id: 1.1.21 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + set: true + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between the + apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the master node and set the --kubelet-certificate-authority + parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.1.22 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-client-key" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the master node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.1.23 + text: "Ensure that the --service-account-lookup argument is set to true (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-lookup" + compare: + op: eq + value: true + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the below parameter. 
+ --service-account-lookup=true + scored: true + + - id: 1.1.24 + text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + set: true + remediation: | + Follow the documentation and create Pod Security Policy objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to a + value that includes PodSecurityPolicy : + --enable-admission-plugins=...,PodSecurityPolicy,... + Then restart the API Server. + scored: true + + - id: 1.1.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --service-account-key-file parameter + to the public key file for service accounts: + --service-account-key-file= + scored: true + + - id: 1.1.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as + appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and etcd. Then, edit the API server pod specification file + $apiserverconf on the master node and set the etcd + certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.1.27 + text: "Ensure that the admission control plugin ServiceAccount is set(Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "ServiceAccount" + set: true + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to a + value that includes ServiceAccount. + --enable-admission-plugins=...,ServiceAccount,... + scored: true + + - id: 1.1.28 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set + as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the master node and set the TLS certificate and private key file + parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.1.29 + text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the master node and set the client certificate authority file. 
+ --client-ca-file= + scored: true + + - id: 1.1.30 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: has + value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the below parameter. + --tls-cipher- suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM _SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM _SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM _SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + scored: false + + - id: 1.1.31 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and etcd. Then, edit the API server pod specification file + $apiserverconf on the master node and set the etcd + certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.1.32 + text: "Ensure that the --authorization-mode argument is set to Node (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --authorization-mode parameter to a + value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.1.33 + text: "Ensure that the admission control plugin NodeRestriction is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + set: true + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on + kubelets. Then, edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.1.34 + text: "Ensure that the --experimental-encryption-provider-config argument is + set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--experimental-encryption-provider-config" + set: true + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf on the + master node and set the --experimental-encryption-provider-config parameter + to the path of that file: + --experimental-encryption-provider-config= + scored: true + + - id: 1.1.35 + text: "Ensure that the encryption provider is set to aescbc (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + type: "manual" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. 
In this file, + choose aescbc as the encryption provider. + For example, + kind: EncryptionConfig + apiVersion: v1 + resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: <32-byte base64-encoded secret> + scored: true + + - id: 1.1.36 + text: "Ensure that the admission control plugin EventRateLimit is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + set: true + remediation: | + Follow the Kubernetes documentation and set the desired limits in a + configuration file. Then, edit the API server pod specification file + $apiserverconf and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: true + + - id: 1.1.37 + text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "AdvancedAuditing=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Follow the Kubernetes documentation and set the desired audit policy in the + /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --audit-policy-file=/etc/kubernetes/audit-policy.yaml + scored: true + + - id: 1.1.38 + text: "Ensure that the --request-timeout argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--request-timeout" + set: false + - flag: "--request-timeout" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. For example, + --request-timeout=300s + scored: true + + - id: 1.1.39 + text: "Ensure that the --authorization-mode argument includes RBAC (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + set: true + remediation: | + Edit the API server pod specification file $apiserverbin on the master node and set the --authorization-mode parameter to a value that includes RBAC, for example: --authorization-mode=Node,RBAC + scored: true + +- id: 1.2 + text: "Scheduler" + checks: + - id: 1.2.1 + text: "Ensure that the --profiling argument is set to false (Scored)" + audit: "ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Scheduler pod specification file $schedulerconf + file on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.2 + text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)" + audit: "ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + file on the master node and ensure the correct value for the + --address parameter. 
+ scored: true + +- id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, for example: + --terminated-pod-gc-threshold=10 + scored: true + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: eq + value: true + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the master node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the master node and set the --service-account-private- + key-file parameter to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the master node and set the --root-ca-file parameter to + the certificate bundle file. + --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--feature-gates" + compare: + op: eq + value: "RotateKubeletServerCertificate=true" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + controller-manager.yaml on the master node and set the --feature-gates parameter to + include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + + - id: 1.3.7 + text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + controller-manager.yaml on the master node and ensure the correct value + for the --address parameter. 
+ scored: true + +- id: 1.4 + text: "Configuration Files" + checks: + - id: 1.4.1 + text: "Ensure that the API server pod specification file permissions are + set to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %a $apiserverconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 $apiserverconf + scored: true + + - id: 1.4.2 + text: "Ensure that the API server pod specification file ownership is set to + root:root (Scored)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root $apiserverconf + scored: true + + - id: 1.4.3 + text: "Ensure that the controller manager pod specification file + permissions are set to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %a $controllermanagerconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 $controllermanagerconf + scored: true + + - id: 1.4.4 + text: "Ensure that the controller manager pod specification file + ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root $controllermanagerconf + scored: true + + - id: 1.4.5 + text: "Ensure that the scheduler pod specification file permissions are set + to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %a $schedulerconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 $schedulerconf + scored: true + + - id: 1.4.6 + text: "Ensure that the scheduler pod specification file ownership is set to + root:root (Scored)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. 
+ For example, + chown root:root $schedulerconf + scored: true + + - id: 1.4.7 + text: "Ensure that the etcd pod specification file permissions are set to + 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %a $etcdconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 $etcdconf + scored: true + + - id: 1.4.8 + text: "Ensure that the etcd pod specification file ownership is set to + root:root (Scored)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.4.9 + text: "Ensure that the Container Network Interface file permissions are + set to 644 or more restrictive (Not Scored)" + audit: "stat -c %a " + type: manual + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 + scored: true + + - id: 1.4.10 + text: "Ensure that the Container Network Interface file ownership is set + to root:root (Not Scored)" + audit: "stat -c %U:%G " + type: manual + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root + scored: true + + - id: 1.4.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %a + tests: + test_items: + - flag: "700" + compare: + op: eq + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir , + from the below command: + ps -ef | grep $etcdbin + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.4.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + tests: + test_items: + - flag: "etcd:etcd" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir , + from the below command: + ps -ef | grep $etcdbin + Run the below command (based on the etcd data directory found above). For example, + chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.4.13 + text: "Ensure that the admin.conf file permissions are set to 644 or + more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. 
+ For example, + chmod 644 /etc/kubernetes/admin.conf + scored: true + + - id: 1.4.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.4.15 + text: "Ensure that the scheduler.conf file permissions are set to 644 or + more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %a /etc/kubernetes/scheduler.conf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the + master node. For example, chmod 644 /etc/kubernetes/scheduler.conf + scored: true + + - id: 1.4.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the + master node. For example, chown root:root /etc/kubernetes/scheduler.conf + scored: true + + - id: 1.4.17 + text: "Ensure that the controller-manager.conf file permissions are set + to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %a /etc/kubernetes/controller-manager.conf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the + master node. For example, chmod 644 /etc/kubernetes/controller-manager.conf + scored: true + + - id: 1.4.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the + master node. For example, chown root:root /etc/kubernetes/controller-manager.conf + scored: true + + - id: 1.4.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored)" + audit: "ls -laR /etc/kubernetes/pki/" + type: "manual" + tests: + test_items: + - flag: "root root" + compare: + op: eq + value: "root root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. 
+ For example, chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.4.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Scored)" + audit: "stat -c %n\ %a /etc/kubernetes/pki/*.crt" + type: "manual" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, chmod -R 644 /etc/kubernetes/pki/*.crt + scored: true + + - id: 1.4.21 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Scored)" + audit: "stat -c %n\ %a /etc/kubernetes/pki/*.key" + type: "manual" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, chmod -R 600 /etc/kubernetes/pki/*.key + scored: true + +- id: 1.5 + text: "etcd" + checks: + - id: 1.5.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + tests: + test_items: + - flag: "--cert-file" + set: true + - flag: "--key-file" + set: true + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --ca-file= + --key-file= + scored: true + + - id: 1.5.2 + text: "Ensure that the --client-cert-auth argument is set to true (Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 1.5.3 + text: "Ensure that the --auto-tls argument is not set to true (Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + set: false + - flag: "--auto-tls" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 1.5.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + type: "manual" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + set: true + - flag: "--peer-key-file" + set: true + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-client-file= + --peer-key-file= + scored: true + + - id: 1.5.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + type: "manual" + tests: + test_items: + - flag: "--peer-client-cert-auth" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. 
+ --peer-client-cert-auth=true + scored: true + + - id: 1.5.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + type: "manual" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + set: false + - flag: "--peer-auto-tls" + compare: + op: eq + value: false + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 1.5.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + type: "manual" + tests: + test_items: + - flag: "--trusted-ca-file" + set: true + remediation: | + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false + +- id: 1.6 + text: "General Security Primitives" + checks: + - id: 1.6.1 + text: "Ensure that the cluster-admin role is only used where required (Not Scored)" + type: "manual" + remediation: | + Remove any unneeded clusterrolebindings : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 1.6.2 + text: "Create administrative boundaries between resources using namespaces (Not Scored)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you + need them. + scored: false + + - id: 1.6.3 + text: "Create network segmentation using Network Policies (Not Scored)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 1.6.4 + text: "Ensure that the seccomp profile is set to docker/default in your pod + definitions (Not Scored)" + type: "manual" + remediation: | + Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you + would need to enable alpha features in the apiserver by passing "--feature- + gates=AllAlpha=true" argument. + Edit the $apiserverconf file on the master node and set the KUBE_API_ARGS + parameter to "--feature-gates=AllAlpha=true" + KUBE_API_ARGS="--feature-gates=AllAlpha=true" + Based on your system, restart the kube-apiserver service. For example: + systemctl restart kube-apiserver.service + Use annotations to enable the docker/default seccomp profile in your pod definitions. An + example is as below: + apiVersion: v1 + kind: Pod + metadata: + name: trustworthy-pod + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default + spec: + containers: + - name: trustworthy-container + image: sotrustworthy:latest + scored: false + + - id: 1.6.5 + text: "Apply Security Context to Your Pods and Containers (Not Scored)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 1.6.6 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. 
+ scored: false + + - id: 1.6.7 + text: "Configure Network policies as appropriate (Not Scored)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup network policies as appropriate. + For example, you could create a "default" isolation policy for a Namespace by creating a + NetworkPolicy that selects all pods but does not allow any traffic: + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: default-deny + spec: + podSelector: + scored: false + + - id: 1.6.8 + text: "Place compensating controls in the form of PSP and RBAC for + privileged containers usage (Not Scored)" + type: "manual" + remediation: | + Follow Kubernetes documentation and setup PSP and RBAC authorization for your cluster. + scored: false + +- id: 1.7 + text: "PodSecurityPolicies" + checks: + - id: 1.7.1 + text: "Do not admit privileged containers (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.privileged field is omitted or set to false. + scored: false + + - id: 1.7.2 + text: "Do not admit containers wishing to share the host process ID namespace (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostPID field is omitted or set to false. + scored: false + + - id: 1.7.3 + text: "Do not admit containers wishing to share the host IPC namespace (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostIPC field is omitted or set to false. + scored: false + + - id: 1.7.4 + text: "Do not admit containers wishing to share the host network namespace (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostNetwork field is omitted or set to false. + scored: false + + - id: 1.7.5 + text: " Do not admit containers with allowPrivilegeEscalation (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.allowPrivilegeEscalation field is omitted or set to false. + scored: false + + - id: 1.7.6 + text: "Do not admit root containers (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0. + scored: false + + - id: 1.7.7 + text: "Do not admit containers with dangerous capabilities (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.requiredDropCapabilities is set to include either NET_RAW or ALL. + scored: false diff --git a/cfg/1.13/node.yaml b/cfg/1.13/node.yaml new file mode 100644 index 0000000..0a6553c --- /dev/null +++ b/cfg/1.13/node.yaml @@ -0,0 +1,480 @@ +--- +controls: +version: 1.11 +id: 2 +text: "Worker Node Security Configuration" +type: "node" +groups: +- id: 2.1 + text: "Kubelet" + checks: + - id: 2.1.1 + text: "Ensure that the --anonymous-auth argument is set to false (Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to + false . 
+ If using executable arguments, edit the kubelet service file + $kubeletconf on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + set: true + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. + If using executable arguments, edit the kubelet service file + $kubeletconf on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletconf on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.4 + text: "Ensure that the --read-only-port argument is set to 0 (Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + test_items: + - flag: "--read-only-port" + compare: + op: eq + value: 0 + set: true + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0 . + If using command line arguments, edit the kubelet service file + $kubeletconf on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + test_items: + - flag: "--streaming-connection-idle-timeout" + compare: + op: noteq + value: 0 + set: true + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletconf on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + test_items: + - flag: "--protect-kernel-defaults" + compare: + op: eq + value: true + set: true + remediation: | + If using a Kubelet config file, edit the file to set protectKernelDefaults: true . 
+ If using command line arguments, edit the kubelet service file + $kubeletconf on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--make-iptables-util-chains" + compare: + op: eq + value: true + set: true + - flag: "--make-iptables-util-chains" + set: false + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true . + If using command line arguments, edit the kubelet service file + $kubeletconf on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.8 + text: "Ensure that the --hostname-override argument is not set (Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + test_items: + - flag: "--hostname-override" + set: false + remediation: | + Edit the kubelet service file $kubeletconf + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.9 + text: "Ensure that the --event-qps argument is set to 0 (Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + test_items: + - flag: "--event-qps" + compare: + op: eq + value: 0 + set: true + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: 0 . + If using command line arguments, edit the kubelet service file + $kubeletconf on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --event-qps=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate + file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the + corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletconf on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + file= + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.11 + text: "Ensure that the --cadvisor-port argument is set to 0 (Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--cadvisor-port" + compare: + op: eq + value: 0 + set: true + - flag: "--cadvisor-port" + set: false + remediation: | + Edit the kubelet service file $kubeletconf + on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable. + --cadvisor-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.12 + text: "Ensure that the --rotate-certificates argument is not set to false (Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + test_items: + - flag: "--rotate-certificates" + compare: + op: eq + value: true + set: true + remediation: | + If using a Kubelet config file, edit the file to add the line rotateCertificates: true. + If using command line arguments, edit the kubelet service file $kubeletconf + on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.13 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + test_items: + - flag: "RotateKubeletServerCertificate" + compare: + op: eq + value: true + set: true + remediation: | + Edit the kubelet service file $kubeletconf + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.14 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)" + audit: "ps -ef | grep $kubeletbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: eq + value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + set: true + remediation: | + If using a Kubelet config file, edit the file to set TLSCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + If using executable arguments, edit the kubelet service file $kubeletconf on each worker node and set the below parameter. 
+ --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + scored: false + +- id: 2.2 + text: "Configuration Files" + checks: + - id: 2.2.1 + text: "Ensure that the kubelet.conf file permissions are set to 644 or + more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chmod 644 $kubeletconf + scored: true + + - id: 2.2.2 + text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: root:root + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chown root:root $kubeletconf + scored: true + + - id: 2.2.3 + text: "Ensure that the kubelet service file permissions are set to 644 or + more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: 644 + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chmod 755 $kubeletsvc + scored: true + + - id: 2.2.4 + text: "Ensure that the kubelet service file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chown root:root $kubeletsvc + scored: true + + - id: 2.2.5 + text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $proxyconf; then stat -c %a $proxyconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chmod 644 $proxyconf + scored: true + + - id: 2.2.6 + text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $proxyconf; then stat -c %U:%G $proxyconf; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. 
For example, + chown root:root $proxyconf + scored: true + + - id: 2.2.7 + text: "Ensure that the certificate authorities file permissions are set to + 644 or more restrictive (Scored)" + type: manual + remediation: | + Run the following command to modify the file permissions of the --client-ca-file + chmod 644 + scored: true + + - id: 2.2.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $ca-file; then stat -c %U:%G $ca-file; fi'" + type: manual + remediation: | + Run the following command to modify the ownership of the --client-ca-file . + chown root:root + scored: true + + - id: 2.2.9 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then stat -c %U:%G /var/lib/kubelet/config.yaml; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the following command (using the config file location identied in the Audit step) + chown root:root /etc/kubernetes/kubelet.conf + scored: true + + - id: 2.2.10 + text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then stat -c %a /var/lib/kubelet/config.yaml; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the following command (using the config file location identied in the Audit step) + chmod 644 /var/lib/kubelet/config.yaml + scored: true From 3f98c1def2ce9108b4184d57ea69c66bfaa7ebd7 Mon Sep 17 00:00:00 2001 From: Abubakr-Sadik Nii Nai Davis Date: Wed, 27 Feb 2019 21:28:02 +0000 Subject: [PATCH 02/10] Fix wrong reference to kubelet.config in node checks. This fix applies to only checks for kubernetes versions 1.8 and 1.11. See https://github.com/aquasecurity/kube-bench/pull/208. --- cfg/1.11/node.yaml | 28 ++++++++++++++-------------- cfg/1.8/node.yaml | 41 ++++++++++++++++++++--------------------- cfg/config.yaml | 1 + 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/cfg/1.11/node.yaml b/cfg/1.11/node.yaml index 713ba47..7b808ba 100644 --- a/cfg/1.11/node.yaml +++ b/cfg/1.11/node.yaml @@ -19,7 +19,7 @@ groups: value: false set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. --allow-privileged=false Based on your system, restart the kubelet service. For example: @@ -41,7 +41,7 @@ groups: If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to false . If using executable arguments, edit the kubelet service file - $kubeletconf on each worker node and + $kubeletsvc on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. --anonymous-auth=false Based on your system, restart the kubelet service. For example: @@ -62,7 +62,7 @@ groups: remediation: | If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If using executable arguments, edit the kubelet service file - $kubeletconf on each worker node and + $kubeletsvc on each worker node and set the below parameter in KUBELET_AUTHZ_ARGS variable. --authorization-mode=Webhook Based on your system, restart the kubelet service. 
For example: @@ -81,7 +81,7 @@ groups: If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to the location of the client CA file. If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and + $kubeletsvc on each worker node and set the below parameter in KUBELET_AUTHZ_ARGS variable. --client-ca-file= Based on your system, restart the kubelet service. For example: @@ -102,7 +102,7 @@ groups: remediation: | If using a Kubelet config file, edit the file to set readOnlyPort to 0 . If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and + $kubeletsvc on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. --read-only-port=0 Based on your system, restart the kubelet service. For example: @@ -124,7 +124,7 @@ groups: If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0. If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and + $kubeletsvc on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. --streaming-connection-idle-timeout=5m Based on your system, restart the kubelet service. For example: @@ -145,7 +145,7 @@ groups: remediation: | If using a Kubelet config file, edit the file to set protectKernelDefaults: true . If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and + $kubeletsvc on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. --protect-kernel-defaults=true Based on your system, restart the kubelet service. For example: @@ -169,7 +169,7 @@ groups: remediation: | If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true . If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and + $kubeletsvc on each worker node and remove the --make-iptables-util-chains argument from the KUBELET_SYSTEM_PODS_ARGS variable. Based on your system, restart the kubelet service. For example: @@ -185,7 +185,7 @@ groups: - flag: "--hostname-override" set: false remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and remove the --hostname-override argument from the KUBELET_SYSTEM_PODS_ARGS variable. Based on your system, restart the kubelet service. For example: @@ -206,7 +206,7 @@ groups: remediation: | If using a Kubelet config file, edit the file to set eventRecordQPS: 0 . If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and + $kubeletsvc on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. --event-qps=0 Based on your system, restart the kubelet service. For example: @@ -229,7 +229,7 @@ groups: file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file. If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and + $kubeletsvc on each worker node and set the below parameters in KUBELET_CERTIFICATE_ARGS variable. --tls-cert-file= file= @@ -252,7 +252,7 @@ groups: - flag: "--cadvisor-port" set: false remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable. 
--cadvisor-port=0 Based on your system, restart the kubelet service. For example: @@ -272,7 +272,7 @@ groups: set: true remediation: | If using a Kubelet config file, edit the file to add the line rotateCertificates: true. - If using command line arguments, edit the kubelet service file $kubeletconf + If using command line arguments, edit the kubelet service file $kubeletsvc on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable. Based on your system, restart the kubelet service. For example: systemctl daemon-reload @@ -290,7 +290,7 @@ groups: value: true set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. --feature-gates=RotateKubeletServerCertificate=true Based on your system, restart the kubelet service. For example: diff --git a/cfg/1.8/node.yaml b/cfg/1.8/node.yaml index d279c2e..db47007 100644 --- a/cfg/1.8/node.yaml +++ b/cfg/1.8/node.yaml @@ -19,7 +19,7 @@ groups: value: false set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. --allow-privileged=false Based on your system, restart the kubelet service. For example: @@ -38,7 +38,7 @@ groups: value: false set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. --anonymous-auth=false Based on your system, restart the kubelet service. For example: @@ -57,7 +57,7 @@ groups: value: "AlwaysAllow" set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_AUTHZ_ARGS variable. --authorization-mode=Webhook Based on your system, restart the kubelet service. For example: @@ -73,7 +73,7 @@ groups: - flag: "--client-ca-file" set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_AUTHZ_ARGS variable. --client-ca-file= Based on your system, restart the kubelet service. For example: @@ -92,7 +92,7 @@ groups: value: 0 set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. --read-only-port=0 Based on your system, restart the kubelet service. For example: @@ -111,7 +111,7 @@ groups: value: 0 set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. --streaming-connection-idle-timeout=5m Based on your system, restart the kubelet service. For example: @@ -130,7 +130,7 @@ groups: value: true set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. --protect-kernel-defaults=true Based on your system, restart the kubelet service. 
For example: @@ -150,7 +150,7 @@ groups: value: true set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and remove the --make-iptables-util-chains argument from the KUBELET_SYSTEM_PODS_ARGS variable. Based on your system, restart the kubelet service. For example: @@ -169,7 +169,7 @@ groups: value: false set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. --keep-terminated-pod-volumes=false Based on your system, restart the kubelet service. For example: @@ -185,7 +185,7 @@ groups: - flag: "--hostname-override" set: false remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and remove the --hostname-override argument from the KUBELET_SYSTEM_PODS_ARGS variable. Based on your system, restart the kubelet service. For example: @@ -204,7 +204,7 @@ groups: value: 0 set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. --event-qps=0 Based on your system, restart the kubelet service. For example: @@ -223,8 +223,7 @@ groups: set: true remediation: | Follow the Kubernetes documentation and set up the TLS connection on the Kubelet. - Then edit the kubelet service file /etc/systemd/system/kubelet.service.d/10- - kubeadm.conf on each worker node and set the below parameters in + Then edit the kubelet service file $kubeletsvc on each worker node and set the below parameters in KUBELET_CERTIFICATE_ARGS variable. --tls-cert-file= file= @@ -245,7 +244,7 @@ groups: value: 0 set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable. --cadvisor-port=0 Based on your system, restart the kubelet service. For example: @@ -264,7 +263,7 @@ groups: value: true set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and remove the --feature- gates=RotateKubeletClientCertificate=false argument from the KUBELET_CERTIFICATE_ARGS variable. @@ -284,7 +283,7 @@ groups: value: true set: true remediation: | - Edit the kubelet service file $kubeletconf + Edit the kubelet service file $kubeletsvc on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. --feature-gates=RotateKubeletServerCertificate=true Based on your system, restart the kubelet service. For example: @@ -336,13 +335,13 @@ groups: remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, - chown root:root /etc/kubernetes/kubelet.conf + chown root:root $kubeletconf scored: true - id: 2.2.3 text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'" + audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'" tests: bin_op: or test_items: @@ -364,12 +363,12 @@ groups: remediation: | Run the below command (based on the file location on your system) on the each worker node. 
For example, - chmod 755 $kubeletconf + chmod 755 $kubeletsvc scored: true - id: 2.2.4 text: "2.2.4 Ensure that the kubelet service file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'" + audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'" tests: test_items: - flag: "root:root" @@ -377,7 +376,7 @@ groups: remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, - chown root:root $kubeletconf + chown root:root $kubeletsvc scored: true - id: 2.2.5 diff --git a/cfg/config.yaml b/cfg/config.yaml index f25ebc0..563bb5e 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -82,6 +82,7 @@ node: - /etc/kubernetes/kubelet.conf - /etc/kubernetes/kubelet defaultconf: "/etc/kubernetes/kubelet.conf" + defaultsvc: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf" proxy: bins: From a88b0703d86e7b26752c7cd4695704b08d7c5f60 Mon Sep 17 00:00:00 2001 From: Abubakr-Sadik Nii Nai Davis Date: Wed, 27 Feb 2019 22:08:57 +0000 Subject: [PATCH 03/10] Add kubeconfig variable substitution for kubelet and proxy. There are checks for the kubeconfig for both kubelet and proxy which the current kube-bench implementation does not check for properly. kube-bench checks the wrong files. This PR adds support for variable substitution for all the config file types are that should be checked in the CIS benchmarks. This PR also fixes a buggy in CIS 1.3.0 check 2.2.9, which checks for ownership of the kubelet config file /var/lib/kubelet/config.yaml but recommends changing ownership of kubelet kubeconfig file /etc/kubernetes/kubelet.conf as remediation. --- cfg/1.11/node.yaml | 24 ++++++++++++------------ cfg/1.8/config.yaml | 23 ----------------------- cfg/1.8/node.yaml | 16 ++++++++-------- cfg/config.yaml | 8 +++----- cmd/common.go | 2 ++ cmd/util.go | 31 +++++++++++++++++++++++++++++++ 6 files changed, 56 insertions(+), 48 deletions(-) diff --git a/cfg/1.11/node.yaml b/cfg/1.11/node.yaml index 7b808ba..82a4d7c 100644 --- a/cfg/1.11/node.yaml +++ b/cfg/1.11/node.yaml @@ -320,7 +320,7 @@ groups: - id: 2.2.1 text: "Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'" + audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'" tests: bin_op: or test_items: @@ -342,12 +342,12 @@ groups: remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, - chmod 644 $kubeletconf + chmod 644 $kubeletkubeconfig scored: true - id: 2.2.2 text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'" + audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'" tests: test_items: - flag: "root:root" @@ -358,7 +358,7 @@ groups: remediation: | Run the below command (based on the file location on your system) on the each worker node. 
For example, - chown root:root $kubeletconf + chown root:root $kubeletkubeconfig scored: true - id: 2.2.3 @@ -404,7 +404,7 @@ groups: - id: 2.2.5 text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $proxyconf; then stat -c %a $proxyconf; fi'" + audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'" tests: bin_op: or test_items: @@ -426,12 +426,12 @@ groups: remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, - chmod 644 $proxyconf + chmod 644 $proxykubeconfig scored: true - id: 2.2.6 text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e $proxyconf; then stat -c %U:%G $proxyconf; fi'" + audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'" tests: test_items: - flag: "root:root" @@ -439,7 +439,7 @@ groups: remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, - chown root:root $proxyconf + chown root:root $proxykubeconfig scored: true - id: 2.2.7 @@ -462,19 +462,19 @@ groups: - id: 2.2.9 text: "Ensure that the kubelet configuration file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then stat -c %U:%G /var/lib/kubelet/config.yaml; fi'" + audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'" tests: test_items: - flag: "root:root" set: true remediation: | Run the following command (using the config file location identied in the Audit step) - chown root:root /etc/kubernetes/kubelet.conf + chown root:root $kubeletconf scored: true - id: 2.2.10 text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then stat -c %a /var/lib/kubelet/config.yaml; fi'" + audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'" tests: bin_op: or test_items: @@ -495,5 +495,5 @@ groups: set: true remediation: | Run the following command (using the config file location identied in the Audit step) - chmod 644 /var/lib/kubelet/config.yaml + chmod 644 $kubeletconf scored: true diff --git a/cfg/1.8/config.yaml b/cfg/1.8/config.yaml index 1fec47e..3caaa5d 100644 --- a/cfg/1.8/config.yaml +++ b/cfg/1.8/config.yaml @@ -9,36 +9,13 @@ master: apiserver: - confs: - - /etc/kubernetes/manifests/kube-apiserver.yaml - - /etc/kubernetes/manifests/kube-apiserver.manifest defaultconf: /etc/kubernetes/manifests/kube-apiserver.yaml scheduler: - confs: - - /etc/kubernetes/manifests/kube-scheduler.yaml - - /etc/kubernetes/manifests/kube-scheduler.manifest defaultconf: /etc/kubernetes/manifests/kube-scheduler.yaml controllermanager: - confs: - - /etc/kubernetes/manifests/kube-controller-manager.yaml - - /etc/kubernetes/manifests/kube-controller-manager.manifest defaultconf: /etc/kubernetes/manifests/kube-controller-manager.yaml etcd: - confs: - - /etc/kubernetes/manifests/etcd.yaml - - /etc/kubernetes/manifests/etcd.manifest defaultconf: /etc/kubernetes/manifests/etcd.yaml - -node: - kubelet: - confs: - - /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - defaultconf: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - - proxy: - confs: - - /etc/kubernetes/addons/kube-proxy-daemonset.yaml - defaultconf: /etc/kubernetes/addons/kube-proxy-daemonset.yaml diff --git 
a/cfg/1.8/node.yaml b/cfg/1.8/node.yaml index db47007..014ced4 100644 --- a/cfg/1.8/node.yaml +++ b/cfg/1.8/node.yaml @@ -297,7 +297,7 @@ groups: - id: 2.2.1 text: "Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'" + audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'" tests: bin_op: or test_items: @@ -319,12 +319,12 @@ groups: remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, - chmod 644 $kubeletconf + chmod 644 $kubeletkubeconfig scored: true - id: 2.2.2 text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'" + audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'" tests: test_items: - flag: "root:root" @@ -335,7 +335,7 @@ groups: remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, - chown root:root $kubeletconf + chown root:root $kubeletkubeconfig scored: true - id: 2.2.3 @@ -382,7 +382,7 @@ groups: - id: 2.2.5 text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $proxyconf; then stat -c %a $proxyconf; fi'" + audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'" tests: bin_op: or test_items: @@ -404,12 +404,12 @@ groups: remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, - chmod 644 $proxyconf + chmod 644 $proxykubeconfig scored: true - id: 2.2.6 text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e $proxyconf; then stat -c %U:%G $proxyconf; fi'" + audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'" tests: test_items: - flag: "root:root" @@ -417,7 +417,7 @@ groups: remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, - chown root:root $proxyconf + chown root:root $proxykubeconfig scored: true - id: 2.2.7 diff --git a/cfg/config.yaml b/cfg/config.yaml index 563bb5e..82ed1a1 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -78,11 +78,9 @@ node: bins: - "hyperkube kubelet" - "kubelet" - confs: - - /etc/kubernetes/kubelet.conf - - /etc/kubernetes/kubelet - defaultconf: "/etc/kubernetes/kubelet.conf" + defaultconf: "/var/lib/kubelet/config.yaml" defaultsvc: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf" + defaultkubeconfig: "/etc/kubernetes/kubelet.conf" proxy: bins: @@ -90,9 +88,9 @@ node: - "hyperkube proxy" - "proxy" confs: - - /etc/kubernetes/proxy.conf - /etc/kubernetes/proxy - /etc/kubernetes/addons/kube-proxy-daemonset.yaml + defaultkubeconfig: "/etc/kubernetes/proxy.conf" federated: components: diff --git a/cmd/common.go b/cmd/common.go index 2cfa310..b4d46c2 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -83,12 +83,14 @@ func runChecks(nodetype check.NodeType) { binmap := getBinaries(typeConf) confmap := getConfigFiles(typeConf) svcmap := getServiceFiles(typeConf) + kubeconfmap := getKubeConfigFiles(typeConf) // Variable substitutions. Replace all occurrences of variables in controls files. 
s := string(in) s = makeSubstitutions(s, "bin", binmap) s = makeSubstitutions(s, "conf", confmap) s = makeSubstitutions(s, "svc", svcmap) + s = makeSubstitutions(s, "kubeconfig", kubeconfmap) controls, err := check.NewControls(nodetype, []byte(s)) if err != nil { diff --git a/cmd/util.go b/cmd/util.go index 97cd94a..c3ae950 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -219,6 +219,37 @@ func getServiceFiles(v *viper.Viper) map[string]string { return svcmap } +// getKubeConfigFiles finds which of the set of candidate kubeconfig files exist +func getKubeConfigFiles(v *viper.Viper) map[string]string { + kubeconfigmap := make(map[string]string) + + for _, component := range v.GetStringSlice("components") { + s := v.Sub(component) + if s == nil { + continue + } + + // See if any of the candidate config files exist + kubeconfig := findConfigFile(s.GetStringSlice("kubeconfig")) + if kubeconfig == "" { + if s.IsSet("defaultkubeconfig") { + kubeconfig = s.GetString("defaultkubeconfig") + glog.V(2).Info(fmt.Sprintf("Using default kubeconfig file name '%s' for component %s", kubeconfig, component)) + } else { + // Default the service file name that we'll substitute to the name of the component + glog.V(2).Info(fmt.Sprintf("Missing service file for %s", component)) + kubeconfig = component + } + } else { + glog.V(2).Info(fmt.Sprintf("Component %s uses service file '%s'", component, kubeconfig)) + } + + kubeconfigmap[component] = kubeconfig + } + + return kubeconfigmap +} + // verifyBin checks that the binary specified is running func verifyBin(bin string) bool { From d255b49d4bec626f4d65c3540060bcbaf483d6e6 Mon Sep 17 00:00:00 2001 From: Abubakr-Sadik Nii Nai Davis Date: Sat, 2 Mar 2019 17:13:10 +0000 Subject: [PATCH 04/10] Revert 1.8 config file. --- cfg/1.8/config.yaml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/cfg/1.8/config.yaml b/cfg/1.8/config.yaml index 3caaa5d..284ff84 100644 --- a/cfg/1.8/config.yaml +++ b/cfg/1.8/config.yaml @@ -9,13 +9,34 @@ master: apiserver: + confs: + - /etc/kubernetes/manifests/kube-apiserver.yaml + - /etc/kubernetes/manifests/kube-apiserver.manifest defaultconf: /etc/kubernetes/manifests/kube-apiserver.yaml scheduler: + confs: + - /etc/kubernetes/manifests/kube-scheduler.yaml + - /etc/kubernetes/manifests/kube-scheduler.manifest defaultconf: /etc/kubernetes/manifests/kube-scheduler.yaml controllermanager: + confs: + - /etc/kubernetes/manifests/kube-controller-manager.yaml + - /etc/kubernetes/manifests/kube-controller-manager.manifest defaultconf: /etc/kubernetes/manifests/kube-controller-manager.yaml etcd: + confs: + - /etc/kubernetes/manifests/etcd.yaml + - /etc/kubernetes/manifests/etcd.manifest defaultconf: /etc/kubernetes/manifests/etcd.yaml + +node: + kubelet: + defaultconf: /var/lib/kubelet/config.yaml + defaultsvc: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + defaultkubeconfig: /etc/kubernetes/kubelet.conf + + proxy: + defaultconf: /etc/kubernetes/addons/kube-proxy-daemonset.yaml From a7d9e06c1b3884c9af9ba1a7e6f0669a07ba2519 Mon Sep 17 00:00:00 2001 From: yoavrotems Date: Wed, 6 Mar 2019 13:23:18 +0200 Subject: [PATCH 05/10] Delete config.yaml replace with the new config.yaml file --- cfg/1.13/config.yaml | 29 ----------------------------- 1 file changed, 29 deletions(-) delete mode 100644 cfg/1.13/config.yaml diff --git a/cfg/1.13/config.yaml b/cfg/1.13/config.yaml deleted file mode 100644 index 3a63b4d..0000000 --- a/cfg/1.13/config.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- -## Controls Files. 
-# These are YAML files that hold all the details for running checks. -# -## Uncomment to use different control file paths. -# masterControls: ./cfg/master.yaml -# nodeControls: ./cfg/node.yaml -# federatedControls: ./cfg/federated.yaml - -master: - apiserver: - defaultconf: /etc/kubernetes/manifests/kube-apiserver.yaml - - scheduler: - defaultconf: /etc/kubernetes/manifests/kube-scheduler.yaml - - controllermanager: - defaultconf: /etc/kubernetes/manifests/kube-controller-manager.yaml - - etcd: - defaultconf: /etc/kubernetes/manifests/etcd.yaml - -node: - kubelet: - defaultconf: /etc/kubernetes/kubelet.conf - defaultsvc: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - - proxy: - defaultconf: /etc/kubernetes/addons/kube-proxy-daemonset.yaml From 5f09ecef44160503fd3a4fdb392010bfaa7841e9 Mon Sep 17 00:00:00 2001 From: yoavrotems Date: Wed, 6 Mar 2019 13:23:49 +0200 Subject: [PATCH 06/10] Delete master.yaml replace with the new master.yaml file --- cfg/1.13/master.yaml | 1500 ------------------------------------------ 1 file changed, 1500 deletions(-) delete mode 100644 cfg/1.13/master.yaml diff --git a/cfg/1.13/master.yaml b/cfg/1.13/master.yaml deleted file mode 100644 index 7a9f6bc..0000000 --- a/cfg/1.13/master.yaml +++ /dev/null @@ -1,1500 +0,0 @@ ---- -controls: -version: 1.11 -id: 1 -text: "Master Node Security Configuration" -type: "master" -groups: -- id: 1.1 - text: "API Server" - checks: - - id: 1.1.1 - text: "Ensure that the --anonymous-auth argument is set to false (Not Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--anonymous-auth" - compare: - op: eq - value: false - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the below parameter. - --anonymous-auth=false - scored: false - - - id: 1.1.2 - text: "Ensure that the --basic-auth-file argument is not set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--basic-auth-file" - set: false - remediation: | - Follow the documentation and configure alternate mechanisms for authentication. Then, - edit the API server pod specification file $apiserverconf - on the master node and remove the --basic-auth-file= - parameter. - scored: true - - - id: 1.1.3 - text: "Ensure that the --insecure-allow-any-token argument is not set (Not Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--insecure-allow-any-token" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and remove the --insecure-allow-any-token - parameter. - scored: true - - - id: 1.1.4 - text: "Ensure that the --kubelet-https argument is set to true (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--kubelet-https" - compare: - op: eq - value: true - set: true - - flag: "--kubelet-https" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and remove the --kubelet-https parameter. - scored: true - - - id: 1.1.5 - text: "Ensure that the --insecure-bind-address argument is not set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--insecure-bind-address" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and remove the --insecure-bind-address - parameter. 
- scored: true - - - id: 1.1.6 - text: "Ensure that the --insecure-port argument is set to 0 (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--insecure-port" - compare: - op: eq - value: 0 - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - apiserver.yaml on the master node and set the below parameter. - --insecure-port=0 - scored: true - - - id: 1.1.7 - text: "Ensure that the --secure-port argument is not set to 0 (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--secure-port" - compare: - op: gt - value: 0 - set: true - - flag: "--secure-port" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and either remove the --secure-port parameter or - set it to a different (non-zero) desired port. - scored: true - - - id: 1.1.8 - text: "Ensure that the --profiling argument is set to false (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the below parameter. - --profiling=false - scored: true - - - id: 1.1.9 - text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--repair-malformed-updates" - compare: - op: eq - value: false - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the below parameter. - --repair-malformed-updates=false - scored: true - - - id: 1.1.10 - text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: nothave - value: AlwaysAdmit - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins parameter to a - value that does not include AlwaysAdmit. - scored: true - - - id: 1.1.11 - text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "AlwaysPullImages" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins to - include AlwaysPullImages. - --enable-admission-plugins=...,AlwaysPullImages,... - scored: true - - - id: 1.1.12 - text: "Ensure that the admission control plugin DenyEscalatingExec is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "DenyEscalatingExec" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins parameter to a - value that includes DenyEscalatingExec. - --enable-admission-plugins=...,DenyEscalatingExec,... 
- scored: true - - - id: 1.1.13 - text: "Ensure that the admission control plugin SecurityContextDeny is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "SecurityContextDeny" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins parameter to - include SecurityContextDeny. - --enable-admission-plugins=...,SecurityContextDeny,... - scored: false - - - id: 1.1.14 - text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--disable-admission-plugins" - compare: - op: nothave - value: "NamespaceLifecycle" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --disable-admission-plugins parameter to - ensure it does not include NamespaceLifecycle. - --disable-admission-plugins=...,NamespaceLifecycle,... - scored: true - - - id: 1.1.15 - text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-path" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --audit-log-path parameter to a suitable - path and file where you would like audit logs to be written, for example: - --audit-log-path=/var/log/apiserver/audit.log - scored: true - - - id: 1.1.16 - text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxage" - compare: - op: gte - value: 30 - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --audit-log-maxage parameter to 30 or - as an appropriate number of days: --audit-log-maxage=30 - scored: true - - - id: 1.1.17 - text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxbackup" - compare: - op: gte - value: 10 - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --audit-log-maxbackup parameter to 10 - or to an appropriate value. - --audit-log-maxbackup=10 - scored: true - - - id: 1.1.18 - text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxsize" - compare: - op: gte - value: 100 - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --audit-log-maxsize parameter to an - appropriate size in MB. 
For example, to set it as 100 MB: - --audit-log-maxsize=100 - scored: true - - - id: 1.1.19 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: nothave - value: "AlwaysAllow" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --authorization-mode parameter to - values other than AlwaysAllow. One such example could be as below. - --authorization-mode=RBAC - scored: true - - - id: 1.1.20 - text: "Ensure that the --token-auth-file parameter is not set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--token-auth-file" - set: false - remediation: | - Follow the documentation and configure alternate mechanisms for authentication. Then, - edit the API server pod specification file $apiserverconf - on the master node and remove the --token-auth-file= - parameter. - scored: true - - - id: 1.1.21 - text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--kubelet-certificate-authority" - set: true - remediation: | - Follow the Kubernetes documentation and setup the TLS connection between the - apiserver and kubelets. Then, edit the API server pod specification file - $apiserverconf on the master node and set the --kubelet-certificate-authority - parameter to the path to the cert file for the certificate authority. - --kubelet-certificate-authority= - scored: true - - - id: 1.1.22 - text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--kubelet-client-certificate" - set: true - - flag: "--kubelet-client-key" - set: true - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and kubelets. Then, edit API server pod specification file - $apiserverconf on the master node and set the - kubelet client certificate and key parameters as below. - --kubelet-client-certificate= - --kubelet-client-key= - scored: true - - - id: 1.1.23 - text: "Ensure that the --service-account-lookup argument is set to true (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-lookup" - compare: - op: eq - value: true - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the below parameter. - --service-account-lookup=true - scored: true - - - id: 1.1.24 - text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "PodSecurityPolicy" - set: true - remediation: | - Follow the documentation and create Pod Security Policy objects as per your environment. - Then, edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins parameter to a - value that includes PodSecurityPolicy : - --enable-admission-plugins=...,PodSecurityPolicy,... - Then restart the API Server. 
- scored: true - - - id: 1.1.25 - text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-key-file" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --service-account-key-file parameter - to the public key file for service accounts: - --service-account-key-file= - scored: true - - - id: 1.1.26 - text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as - appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--etcd-certfile" - set: true - - flag: "--etcd-keyfile" - set: true - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and etcd. Then, edit the API server pod specification file - $apiserverconf on the master node and set the etcd - certificate and key file parameters. - --etcd-certfile= - --etcd-keyfile= - scored: true - - - id: 1.1.27 - text: "Ensure that the admission control plugin ServiceAccount is set(Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "ServiceAccount" - set: true - remediation: | - Follow the documentation and create ServiceAccount objects as per your environment. - Then, edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins parameter to a - value that includes ServiceAccount. - --enable-admission-plugins=...,ServiceAccount,... - scored: true - - - id: 1.1.28 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set - as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--tls-cert-file" - set: true - - flag: "--tls-private-key-file" - set: true - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the master node and set the TLS certificate and private key file - parameters. - --tls-cert-file= - --tls-private-key-file= - scored: true - - - id: 1.1.29 - text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--client-ca-file" - set: true - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the master node and set the client certificate authority file. - --client-ca-file= - scored: true - - - id: 1.1.30 - text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--tls-cipher-suites" - compare: - op: has - value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the below parameter. 
- --tls-cipher- suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM _SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM _SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM _SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - scored: false - - - id: 1.1.31 - text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--etcd-cafile" - set: true - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and etcd. Then, edit the API server pod specification file - $apiserverconf on the master node and set the etcd - certificate authority file parameter. - --etcd-cafile= - scored: true - - - id: 1.1.32 - text: "Ensure that the --authorization-mode argument is set to Node (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: has - value: "Node" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --authorization-mode parameter to a - value that includes Node. - --authorization-mode=Node,RBAC - scored: true - - - id: 1.1.33 - text: "Ensure that the admission control plugin NodeRestriction is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "NodeRestriction" - set: true - remediation: | - Follow the Kubernetes documentation and configure NodeRestriction plug-in on - kubelets. Then, edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins parameter to a - value that includes NodeRestriction. - --enable-admission-plugins=...,NodeRestriction,... - scored: true - - - id: 1.1.34 - text: "Ensure that the --experimental-encryption-provider-config argument is - set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--experimental-encryption-provider-config" - set: true - remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - Then, edit the API server pod specification file $apiserverconf on the - master node and set the --experimental-encryption-provider-config parameter - to the path of that file: - --experimental-encryption-provider-config= - scored: true - - - id: 1.1.35 - text: "Ensure that the encryption provider is set to aescbc (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - type: "manual" - remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. In this file, - choose aescbc as the encryption provider. - For example, - kind: EncryptionConfig - apiVersion: v1 - resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64-encoded secret> - scored: true - - - id: 1.1.36 - text: "Ensure that the admission control plugin EventRateLimit is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "EventRateLimit" - set: true - remediation: | - Follow the Kubernetes documentation and set the desired limits in a - configuration file. Then, edit the API server pod specification file - $apiserverconf and set the below parameters. 
- --enable-admission-plugins=...,EventRateLimit,... - --admission-control-config-file= - scored: true - - - id: 1.1.37 - text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--feature-gates" - compare: - op: nothave - value: "AdvancedAuditing=false" - set: true - - flag: "--feature-gates" - set: false - remediation: | - Follow the Kubernetes documentation and set the desired audit policy in the - /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf - and set the below parameters. - --audit-policy-file=/etc/kubernetes/audit-policy.yaml - scored: true - - - id: 1.1.38 - text: "Ensure that the --request-timeout argument is set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--request-timeout" - set: false - - flag: "--request-timeout" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - and set the below parameter as appropriate and if needed. For example, - --request-timeout=300s - scored: true - - - id: 1.1.39 - text: "Ensure that the --authorization-mode argument includes RBAC (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: has - value: "RBAC" - set: true - remediation: | - Edit the API server pod specification file $apiserverbin on the master node and set the --authorization-mode parameter to a value that includes RBAC, for example: --authorization-mode=Node,RBAC - scored: true - -- id: 1.2 - text: "Scheduler" - checks: - - id: 1.2.1 - text: "Ensure that the --profiling argument is set to false (Scored)" - audit: "ps -ef | grep $schedulerbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - set: true - remediation: | - Edit the Scheduler pod specification file $schedulerconf - file on the master node and set the below parameter. - --profiling=false - scored: true - - - id: 1.2.2 - text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)" - audit: "ps -ef | grep $schedulerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--address" - compare: - op: eq - value: "127.0.0.1" - set: true - - flag: "--address" - set: false - remediation: | - Edit the Scheduler pod specification file $schedulerconf - file on the master node and ensure the correct value for the - --address parameter. - scored: true - -- id: 1.3 - text: "Controller Manager" - checks: - - id: 1.3.1 - text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--terminated-pod-gc-threshold" - set: true - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, for example: - --terminated-pod-gc-threshold=10 - scored: true - - - id: 1.3.2 - text: "Ensure that the --profiling argument is set to false (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - set: true - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the master node and set the below parameter. 
- --profiling=false - scored: true - - - id: 1.3.3 - text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--use-service-account-credentials" - compare: - op: eq - value: true - set: true - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the master node to set the below parameter. - --use-service-account-credentials=true - scored: true - - - id: 1.3.4 - text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-private-key-file" - set: true - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the master node and set the --service-account-private- - key-file parameter to the private key file for service accounts. - --service-account-private-key-file= - scored: true - - - id: 1.3.5 - text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--root-ca-file" - set: true - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the master node and set the --root-ca-file parameter to - the certificate bundle file. - --root-ca-file= - scored: true - - - id: 1.3.6 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--feature-gates" - compare: - op: eq - value: "RotateKubeletServerCertificate=true" - set: true - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - controller-manager.yaml on the master node and set the --feature-gates parameter to - include RotateKubeletServerCertificate=true. - --feature-gates=RotateKubeletServerCertificate=true - scored: true - - - id: 1.3.7 - text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--address" - compare: - op: eq - value: "127.0.0.1" - set: true - - flag: "--address" - set: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - controller-manager.yaml on the master node and ensure the correct value - for the --address parameter. - scored: true - -- id: 1.4 - text: "Configuration Files" - checks: - - id: 1.4.1 - text: "Ensure that the API server pod specification file permissions are - set to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %a $apiserverconf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. 
- For example, - chmod 644 $apiserverconf - scored: true - - - id: 1.4.2 - text: "Ensure that the API server pod specification file ownership is set to - root:root (Scored)" - audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chown root:root $apiserverconf - scored: true - - - id: 1.4.3 - text: "Ensure that the controller manager pod specification file - permissions are set to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %a $controllermanagerconf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chmod 644 $controllermanagerconf - scored: true - - - id: 1.4.4 - text: "Ensure that the controller manager pod specification file - ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chown root:root $controllermanagerconf - scored: true - - - id: 1.4.5 - text: "Ensure that the scheduler pod specification file permissions are set - to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %a $schedulerconf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chmod 644 $schedulerconf - scored: true - - - id: 1.4.6 - text: "Ensure that the scheduler pod specification file ownership is set to - root:root (Scored)" - audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chown root:root $schedulerconf - scored: true - - - id: 1.4.7 - text: "Ensure that the etcd pod specification file permissions are set to - 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %a $etcdconf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. 
- For example, - chmod 644 $etcdconf - scored: true - - - id: 1.4.8 - text: "Ensure that the etcd pod specification file ownership is set to - root:root (Scored)" - audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chown root:root $etcdconf - scored: true - - - id: 1.4.9 - text: "Ensure that the Container Network Interface file permissions are - set to 644 or more restrictive (Not Scored)" - audit: "stat -c %a " - type: manual - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chmod 644 - scored: true - - - id: 1.4.10 - text: "Ensure that the Container Network Interface file ownership is set - to root:root (Not Scored)" - audit: "stat -c %U:%G " - type: manual - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chown root:root - scored: true - - - id: 1.4.11 - text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %a - tests: - test_items: - - flag: "700" - compare: - op: eq - value: "700" - set: true - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir , - from the below command: - ps -ef | grep $etcdbin - Run the below command (based on the etcd data directory found above). For example, - chmod 700 /var/lib/etcd - scored: true - - - id: 1.4.12 - text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G - tests: - test_items: - - flag: "etcd:etcd" - set: true - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir , - from the below command: - ps -ef | grep $etcdbin - Run the below command (based on the etcd data directory found above). For example, - chown etcd:etcd /var/lib/etcd - scored: true - - - id: 1.4.13 - text: "Ensure that the admin.conf file permissions are set to 644 or - more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chmod 644 /etc/kubernetes/admin.conf - scored: true - - - id: 1.4.14 - text: "Ensure that the admin.conf file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. 
- For example, - chown root:root /etc/kubernetes/admin.conf - scored: true - - - id: 1.4.15 - text: "Ensure that the scheduler.conf file permissions are set to 644 or - more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %a /etc/kubernetes/scheduler.conf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the - master node. For example, chmod 644 /etc/kubernetes/scheduler.conf - scored: true - - - id: 1.4.16 - text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the - master node. For example, chown root:root /etc/kubernetes/scheduler.conf - scored: true - - - id: 1.4.17 - text: "Ensure that the controller-manager.conf file permissions are set - to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %a /etc/kubernetes/controller-manager.conf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the - master node. For example, chmod 644 /etc/kubernetes/controller-manager.conf - scored: true - - - id: 1.4.18 - text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the - master node. For example, chown root:root /etc/kubernetes/controller-manager.conf - scored: true - - - id: 1.4.19 - text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored)" - audit: "ls -laR /etc/kubernetes/pki/" - type: "manual" - tests: - test_items: - - flag: "root root" - compare: - op: eq - value: "root root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, chown -R root:root /etc/kubernetes/pki/ - scored: true - - - id: 1.4.20 - text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %n\ %a /etc/kubernetes/pki/*.crt" - type: "manual" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. 
- For example, chmod -R 644 /etc/kubernetes/pki/*.crt - scored: true - - - id: 1.4.21 - text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %n\ %a /etc/kubernetes/pki/*.key" - type: "manual" - tests: - test_items: - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, chmod -R 600 /etc/kubernetes/pki/*.key - scored: true - -- id: 1.5 - text: "etcd" - checks: - - id: 1.5.1 - text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)" - audit: "ps -ef | grep $etcdbin | grep -v grep" - tests: - test_items: - - flag: "--cert-file" - set: true - - flag: "--key-file" - set: true - remediation: | - Follow the etcd service documentation and configure TLS encryption. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameters. - --ca-file= - --key-file= - scored: true - - - id: 1.5.2 - text: "Ensure that the --client-cert-auth argument is set to true (Scored)" - audit: "ps -ef | grep $etcdbin | grep -v grep" - tests: - test_items: - - flag: "--client-cert-auth" - compare: - op: eq - value: true - set: true - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --client-cert-auth="true" - scored: true - - - id: 1.5.3 - text: "Ensure that the --auto-tls argument is not set to true (Scored)" - audit: "ps -ef | grep $etcdbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--auto-tls" - set: false - - flag: "--auto-tls" - compare: - op: eq - value: false - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and either remove the --auto-tls parameter or set it to false. - --auto-tls=false - scored: true - - - id: 1.5.4 - text: "Ensure that the --peer-cert-file and --peer-key-file arguments are - set as appropriate (Scored)" - audit: "ps -ef | grep $etcdbin | grep -v grep" - type: "manual" - tests: - bin_op: and - test_items: - - flag: "--peer-cert-file" - set: true - - flag: "--peer-key-file" - set: true - remediation: | - Follow the etcd service documentation and configure peer TLS encryption as appropriate - for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameters. - --peer-client-file= - --peer-key-file= - scored: true - - - id: 1.5.5 - text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)" - audit: "ps -ef | grep $etcdbin | grep -v grep" - type: "manual" - tests: - test_items: - - flag: "--peer-client-cert-auth" - compare: - op: eq - value: true - set: true - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --peer-client-cert-auth=true - scored: true - - - id: 1.5.6 - text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)" - audit: "ps -ef | grep $etcdbin | grep -v grep" - type: "manual" - tests: - bin_op: or - test_items: - - flag: "--peer-auto-tls" - set: false - - flag: "--peer-auto-tls" - compare: - op: eq - value: false - set: true - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and either remove the --peer-auto-tls parameter or set it to false. 
- --peer-auto-tls=false - scored: true - - - id: 1.5.7 - text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)" - audit: "ps -ef | grep $etcdbin | grep -v grep" - type: "manual" - tests: - test_items: - - flag: "--trusted-ca-file" - set: true - remediation: | - Follow the etcd documentation and create a dedicated certificate authority setup for the - etcd service. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameter. - --trusted-ca-file= - scored: false - -- id: 1.6 - text: "General Security Primitives" - checks: - - id: 1.6.1 - text: "Ensure that the cluster-admin role is only used where required (Not Scored)" - type: "manual" - remediation: | - Remove any unneeded clusterrolebindings : - kubectl delete clusterrolebinding [name] - scored: false - - - id: 1.6.2 - text: "Create administrative boundaries between resources using namespaces (Not Scored)" - type: "manual" - remediation: | - Follow the documentation and create namespaces for objects in your deployment as you - need them. - scored: false - - - id: 1.6.3 - text: "Create network segmentation using Network Policies (Not Scored)" - type: "manual" - remediation: | - Follow the documentation and create NetworkPolicy objects as you need them. - scored: false - - - id: 1.6.4 - text: "Ensure that the seccomp profile is set to docker/default in your pod - definitions (Not Scored)" - type: "manual" - remediation: | - Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you - would need to enable alpha features in the apiserver by passing "--feature- - gates=AllAlpha=true" argument. - Edit the $apiserverconf file on the master node and set the KUBE_API_ARGS - parameter to "--feature-gates=AllAlpha=true" - KUBE_API_ARGS="--feature-gates=AllAlpha=true" - Based on your system, restart the kube-apiserver service. For example: - systemctl restart kube-apiserver.service - Use annotations to enable the docker/default seccomp profile in your pod definitions. An - example is as below: - apiVersion: v1 - kind: Pod - metadata: - name: trustworthy-pod - annotations: - seccomp.security.alpha.kubernetes.io/pod: docker/default - spec: - containers: - - name: trustworthy-container - image: sotrustworthy:latest - scored: false - - - id: 1.6.5 - text: "Apply Security Context to Your Pods and Containers (Not Scored)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and apply security contexts to your pods. For a - suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker - Containers. - scored: false - - - id: 1.6.6 - text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and setup image provenance. - scored: false - - - id: 1.6.7 - text: "Configure Network policies as appropriate (Not Scored)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and setup network policies as appropriate. 
-      For example, you could create a "default" isolation policy for a Namespace by creating a
-      NetworkPolicy that selects all pods but does not allow any traffic:
-      apiVersion: networking.k8s.io/v1
-      kind: NetworkPolicy
-      metadata:
-        name: default-deny
-      spec:
-        podSelector: {}
-    scored: false
-
-  - id: 1.6.8
-    text: "Place compensating controls in the form of PSP and RBAC for
-    privileged containers usage (Not Scored)"
-    type: "manual"
-    remediation: |
-      Follow Kubernetes documentation and set up PSP and RBAC authorization for your cluster.
-    scored: false
-
-- id: 1.7
-  text: "PodSecurityPolicies"
-  checks:
-  - id: 1.7.1
-    text: "Do not admit privileged containers (Not Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.privileged field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.2
-    text: "Do not admit containers wishing to share the host process ID namespace (Not Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostPID field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.3
-    text: "Do not admit containers wishing to share the host IPC namespace (Not Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostIPC field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.4
-    text: "Do not admit containers wishing to share the host network namespace (Not Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostNetwork field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.5
-    text: "Do not admit containers with allowPrivilegeEscalation (Not Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.allowPrivilegeEscalation field is omitted or set to false.
-    scored: false
-
-  - id: 1.7.6
-    text: "Do not admit root containers (Not Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0.
-    scored: false
-
-  - id: 1.7.7
-    text: "Do not admit containers with dangerous capabilities (Not Scored)"
-    type: "manual"
-    remediation: |
-      Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
- scored: false From e534392525da3da22f88fecc3801a92d2e0cb3cc Mon Sep 17 00:00:00 2001 From: yoavrotems Date: Wed, 6 Mar 2019 13:24:14 +0200 Subject: [PATCH 07/10] Delete node.yaml replace with the new node.yaml file --- cfg/1.13/node.yaml | 480 --------------------------------------------- 1 file changed, 480 deletions(-) delete mode 100644 cfg/1.13/node.yaml diff --git a/cfg/1.13/node.yaml b/cfg/1.13/node.yaml deleted file mode 100644 index 0a6553c..0000000 --- a/cfg/1.13/node.yaml +++ /dev/null @@ -1,480 +0,0 @@ ---- -controls: -version: 1.11 -id: 2 -text: "Worker Node Security Configuration" -type: "node" -groups: -- id: 2.1 - text: "Kubelet" - checks: - - id: 2.1.1 - text: "Ensure that the --anonymous-auth argument is set to false (Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - test_items: - - flag: "--anonymous-auth" - compare: - op: eq - value: false - set: true - remediation: | - If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to - false . - If using executable arguments, edit the kubelet service file - $kubeletconf on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --anonymous-auth=false - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 2.1.2 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: nothave - value: "AlwaysAllow" - set: true - remediation: | - If using a Kubelet config file, edit the file to set authorization: mode to Webhook. - If using executable arguments, edit the kubelet service file - $kubeletconf on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --authorization-mode=Webhook - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 2.1.3 - text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - test_items: - - flag: "--client-ca-file" - set: true - remediation: | - If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to - the location of the client CA file. - If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --client-ca-file= - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 2.1.4 - text: "Ensure that the --read-only-port argument is set to 0 (Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - test_items: - - flag: "--read-only-port" - compare: - op: eq - value: 0 - set: true - remediation: | - If using a Kubelet config file, edit the file to set readOnlyPort to 0 . - If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --read-only-port=0 - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 2.1.5 - text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - test_items: - - flag: "--streaming-connection-idle-timeout" - compare: - op: noteq - value: 0 - set: true - remediation: | - If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a - value other than 0. - If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --streaming-connection-idle-timeout=5m - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 2.1.6 - text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - test_items: - - flag: "--protect-kernel-defaults" - compare: - op: eq - value: true - set: true - remediation: | - If using a Kubelet config file, edit the file to set protectKernelDefaults: true . - If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --protect-kernel-defaults=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 2.1.7 - text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--make-iptables-util-chains" - compare: - op: eq - value: true - set: true - - flag: "--make-iptables-util-chains" - set: false - remediation: | - If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true . - If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and - remove the --make-iptables-util-chains argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 2.1.8 - text: "Ensure that the --hostname-override argument is not set (Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - test_items: - - flag: "--hostname-override" - set: false - remediation: | - Edit the kubelet service file $kubeletconf - on each worker node and remove the --hostname-override argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 2.1.9 - text: "Ensure that the --event-qps argument is set to 0 (Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - test_items: - - flag: "--event-qps" - compare: - op: eq - value: 0 - set: true - remediation: | - If using a Kubelet config file, edit the file to set eventRecordQPS: 0 . - If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --event-qps=0 - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 2.1.10 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--tls-cert-file" - set: true - - flag: "--tls-private-key-file" - set: true - remediation: | - If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate - file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the - corresponding private key file. - If using command line arguments, edit the kubelet service file - $kubeletconf on each worker node and - set the below parameters in KUBELET_CERTIFICATE_ARGS variable. - --tls-cert-file= - file= - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 2.1.11 - text: "Ensure that the --cadvisor-port argument is set to 0 (Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--cadvisor-port" - compare: - op: eq - value: 0 - set: true - - flag: "--cadvisor-port" - set: false - remediation: | - Edit the kubelet service file $kubeletconf - on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable. - --cadvisor-port=0 - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 2.1.12 - text: "Ensure that the --rotate-certificates argument is not set to false (Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - test_items: - - flag: "--rotate-certificates" - compare: - op: eq - value: true - set: true - remediation: | - If using a Kubelet config file, edit the file to add the line rotateCertificates: true. - If using command line arguments, edit the kubelet service file $kubeletconf - on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 2.1.13 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - test_items: - - flag: "RotateKubeletServerCertificate" - compare: - op: eq - value: true - set: true - remediation: | - Edit the kubelet service file $kubeletconf - on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. - --feature-gates=RotateKubeletServerCertificate=true - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 2.1.14 - text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)" - audit: "ps -ef | grep $kubeletbin | grep -v grep" - tests: - test_items: - - flag: "--tls-cipher-suites" - compare: - op: eq - value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - set: true - remediation: | - If using a Kubelet config file, edit the file to set TLSCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - If using executable arguments, edit the kubelet service file $kubeletconf on each worker node and set the below parameter. - --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - scored: false - -- id: 2.2 - text: "Configuration Files" - checks: - - id: 2.2.1 - text: "Ensure that the kubelet.conf file permissions are set to 644 or - more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the each worker - node. For example, - chmod 644 $kubeletconf - scored: true - - - id: 2.2.2 - text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: root:root - set: true - remediation: | - Run the below command (based on the file location on your system) on the each worker - node. For example, - chown root:root $kubeletconf - scored: true - - - id: 2.2.3 - text: "Ensure that the kubelet service file permissions are set to 644 or - more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: 644 - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the each worker - node. For example, - chmod 755 $kubeletsvc - scored: true - - - id: 2.2.4 - text: "Ensure that the kubelet service file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the each worker - node. 
For example, - chown root:root $kubeletsvc - scored: true - - - id: 2.2.5 - text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $proxyconf; then stat -c %a $proxyconf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the each worker - node. For example, - chmod 644 $proxyconf - scored: true - - - id: 2.2.6 - text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e $proxyconf; then stat -c %U:%G $proxyconf; fi'" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the each worker - node. For example, - chown root:root $proxyconf - scored: true - - - id: 2.2.7 - text: "Ensure that the certificate authorities file permissions are set to - 644 or more restrictive (Scored)" - type: manual - remediation: | - Run the following command to modify the file permissions of the --client-ca-file - chmod 644 - scored: true - - - id: 2.2.8 - text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e $ca-file; then stat -c %U:%G $ca-file; fi'" - type: manual - remediation: | - Run the following command to modify the ownership of the --client-ca-file . - chown root:root - scored: true - - - id: 2.2.9 - text: "Ensure that the kubelet configuration file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then stat -c %U:%G /var/lib/kubelet/config.yaml; fi'" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Run the following command (using the config file location identied in the Audit step) - chown root:root /etc/kubernetes/kubelet.conf - scored: true - - - id: 2.2.10 - text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then stat -c %a /var/lib/kubelet/config.yaml; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the following command (using the config file location identied in the Audit step) - chmod 644 /var/lib/kubelet/config.yaml - scored: true From c6102f0a1bdb56cf3a534957cd46c67ad393d155 Mon Sep 17 00:00:00 2001 From: yoavrotems Date: Wed, 6 Mar 2019 11:26:36 +0000 Subject: [PATCH 08/10] Fix the files Fix the start from 1.11 to 1.13 and adding changes from pull #227, and pull #228. --- cfg/1.13/config.yaml | 29 + cfg/1.13/master.yaml | 1500 ++++++++++++++++++++++++++++++++++++++++++ cfg/1.13/node.yaml | 480 ++++++++++++++ 3 files changed, 2009 insertions(+) create mode 100644 cfg/1.13/config.yaml create mode 100644 cfg/1.13/master.yaml create mode 100644 cfg/1.13/node.yaml diff --git a/cfg/1.13/config.yaml b/cfg/1.13/config.yaml new file mode 100644 index 0000000..3a63b4d --- /dev/null +++ b/cfg/1.13/config.yaml @@ -0,0 +1,29 @@ +--- +## Controls Files. +# These are YAML files that hold all the details for running checks. 
+# +## Uncomment to use different control file paths. +# masterControls: ./cfg/master.yaml +# nodeControls: ./cfg/node.yaml +# federatedControls: ./cfg/federated.yaml + +master: + apiserver: + defaultconf: /etc/kubernetes/manifests/kube-apiserver.yaml + + scheduler: + defaultconf: /etc/kubernetes/manifests/kube-scheduler.yaml + + controllermanager: + defaultconf: /etc/kubernetes/manifests/kube-controller-manager.yaml + + etcd: + defaultconf: /etc/kubernetes/manifests/etcd.yaml + +node: + kubelet: + defaultconf: /etc/kubernetes/kubelet.conf + defaultsvc: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + + proxy: + defaultconf: /etc/kubernetes/addons/kube-proxy-daemonset.yaml diff --git a/cfg/1.13/master.yaml b/cfg/1.13/master.yaml new file mode 100644 index 0000000..9518b35 --- /dev/null +++ b/cfg/1.13/master.yaml @@ -0,0 +1,1500 @@ +--- +controls: +version: 1.13 +id: 1 +text: "Master Node Security Configuration" +type: "master" +groups: +- id: 1.1 + text: "API Server" + checks: + - id: 1.1.1 + text: "Ensure that the --anonymous-auth argument is set to false (Not Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.1.2 + text: "Ensure that the --basic-auth-file argument is not set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--basic-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the master node and remove the --basic-auth-file= + parameter. + scored: true + + - id: 1.1.3 + text: "Ensure that the --insecure-allow-any-token argument is not set (Not Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--insecure-allow-any-token" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and remove the --insecure-allow-any-token + parameter. + scored: true + + - id: 1.1.4 + text: "Ensure that the --kubelet-https argument is set to true (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + set: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and remove the --kubelet-https parameter. + scored: true + + - id: 1.1.5 + text: "Ensure that the --insecure-bind-address argument is not set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--insecure-bind-address" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and remove the --insecure-bind-address + parameter. + scored: true + + - id: 1.1.6 + text: "Ensure that the --insecure-port argument is set to 0 (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--insecure-port" + compare: + op: eq + value: 0 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + apiserver.yaml on the master node and set the below parameter. 
+ --insecure-port=0 + scored: true + + - id: 1.1.7 + text: "Ensure that the --secure-port argument is not set to 0 (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + set: true + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.1.8 + text: "Ensure that the --profiling argument is set to false (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.1.9 + text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--repair-malformed-updates" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the below parameter. + --repair-malformed-updates=false + scored: true + + - id: 1.1.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.1.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins to + include AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: true + + - id: 1.1.12 + text: "Ensure that the admission control plugin DenyEscalatingExec is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "DenyEscalatingExec" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to a + value that includes DenyEscalatingExec. + --enable-admission-plugins=...,DenyEscalatingExec,... + scored: true + + - id: 1.1.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to + include SecurityContextDeny. + --enable-admission-plugins=...,SecurityContextDeny,... 
+ scored: false + + - id: 1.1.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + --disable-admission-plugins=...,NamespaceLifecycle,... + scored: true + + - id: 1.1.15 + text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --audit-log-path parameter to a suitable + path and file where you would like audit logs to be written, for example: + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.1.16 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --audit-log-maxage parameter to 30 or + as an appropriate number of days: --audit-log-maxage=30 + scored: true + + - id: 1.1.17 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --audit-log-maxbackup parameter to 10 + or to an appropriate value. + --audit-log-maxbackup=10 + scored: true + + - id: 1.1.18 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --audit-log-maxsize parameter to an + appropriate size in MB. For example, to set it as 100 MB: + --audit-log-maxsize=100 + scored: true + + - id: 1.1.19 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --authorization-mode parameter to + values other than AlwaysAllow. One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.1.20 + text: "Ensure that the --token-auth-file parameter is not set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the master node and remove the --token-auth-file= + parameter. 
+ scored: true + + - id: 1.1.21 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + set: true + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between the + apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the master node and set the --kubelet-certificate-authority + parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.1.22 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-client-key" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the master node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.1.23 + text: "Ensure that the --service-account-lookup argument is set to true (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-lookup" + compare: + op: eq + value: true + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the below parameter. + --service-account-lookup=true + scored: true + + - id: 1.1.24 + text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + set: true + remediation: | + Follow the documentation and create Pod Security Policy objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to a + value that includes PodSecurityPolicy : + --enable-admission-plugins=...,PodSecurityPolicy,... + Then restart the API Server. + scored: true + + - id: 1.1.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --service-account-key-file parameter + to the public key file for service accounts: + --service-account-key-file= + scored: true + + - id: 1.1.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as + appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and etcd. Then, edit the API server pod specification file + $apiserverconf on the master node and set the etcd + certificate and key file parameters. 
+      --etcd-certfile=
+      --etcd-keyfile=
+    scored: true
+
+  - id: 1.1.27
+    text: "Ensure that the admission control plugin ServiceAccount is set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "ServiceAccount"
+        set: true
+    remediation: |
+      Follow the documentation and create ServiceAccount objects as per your environment.
+      Then, edit the API server pod specification file $apiserverconf
+      on the master node and set the --enable-admission-plugins parameter to a
+      value that includes ServiceAccount.
+      --enable-admission-plugins=...,ServiceAccount,...
+    scored: true
+
+  - id: 1.1.28
+    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set
+    as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "--tls-cert-file"
+        set: true
+      - flag: "--tls-private-key-file"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+      Then, edit the API server pod specification file $apiserverconf
+      on the master node and set the TLS certificate and private key file
+      parameters.
+      --tls-cert-file=
+      --tls-private-key-file=
+    scored: true
+
+  - id: 1.1.29
+    text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--client-ca-file"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+      Then, edit the API server pod specification file $apiserverconf
+      on the master node and set the client certificate authority file.
+      --client-ca-file=
+    scored: true
+
+  - id: 1.1.30
+    text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--tls-cipher-suites"
+        compare:
+          op: has
+          value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the below parameter.
+      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+    scored: false
+
+  - id: 1.1.31
+    text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--etcd-cafile"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set up the TLS connection between the
+      apiserver and etcd. Then, edit the API server pod specification file
+      $apiserverconf on the master node and set the etcd
+      certificate authority file parameter.
+      --etcd-cafile=
+    scored: true
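+
+  # Illustrative sketch, not CIS benchmark text: the etcd TLS flags from checks
+  # 1.1.26 and 1.1.31 typically appear together in the kube-apiserver manifest.
+  # The paths below assume a default kubeadm PKI layout and are placeholders,
+  # not required values:
+  #
+  #   - kube-apiserver
+  #   - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
+  #   - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
+  #   - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key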
+
+  - id: 1.1.32
+    text: "Ensure that the --authorization-mode argument is set to Node (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--authorization-mode"
+        compare:
+          op: has
+          value: "Node"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      on the master node and set the --authorization-mode parameter to a
+      value that includes Node.
+      --authorization-mode=Node,RBAC
+    scored: true
+
+  - id: 1.1.33
+    text: "Ensure that the admission control plugin NodeRestriction is set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "NodeRestriction"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and configure NodeRestriction plug-in on
+      kubelets. Then, edit the API server pod specification file $apiserverconf
+      on the master node and set the --enable-admission-plugins parameter to a
+      value that includes NodeRestriction.
+      --enable-admission-plugins=...,NodeRestriction,...
+    scored: true
+
+  - id: 1.1.34
+    text: "Ensure that the --experimental-encryption-provider-config argument is
+    set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--experimental-encryption-provider-config"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and configure an EncryptionConfig file.
+      Then, edit the API server pod specification file $apiserverconf on the
+      master node and set the --experimental-encryption-provider-config parameter
+      to the path of that file:
+      --experimental-encryption-provider-config=
+    scored: true
+
+  - id: 1.1.35
+    text: "Ensure that the encryption provider is set to aescbc (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    type: "manual"
+    remediation: |
+      Follow the Kubernetes documentation and configure an EncryptionConfig file. In this file,
+      choose aescbc as the encryption provider.
+      For example,
+      kind: EncryptionConfig
+      apiVersion: v1
+      resources:
+        - resources:
+          - secrets
+          providers:
+            - aescbc:
+                keys:
+                  - name: key1
+                    secret: <32-byte base64-encoded secret>
+    scored: true
+
+  - id: 1.1.36
+    text: "Ensure that the admission control plugin EventRateLimit is set (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--enable-admission-plugins"
+        compare:
+          op: has
+          value: "EventRateLimit"
+        set: true
+    remediation: |
+      Follow the Kubernetes documentation and set the desired limits in a
+      configuration file. Then, edit the API server pod specification file
+      $apiserverconf and set the below parameters.
+      --enable-admission-plugins=...,EventRateLimit,...
+      --admission-control-config-file=
+    scored: true
+
+  - id: 1.1.37
+    text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--feature-gates"
+        compare:
+          op: nothave
+          value: "AdvancedAuditing=false"
+        set: true
+      - flag: "--feature-gates"
+        set: false
+    remediation: |
+      Follow the Kubernetes documentation and set the desired audit policy in the
+      /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf
+      and set the below parameters.
+      --audit-policy-file=/etc/kubernetes/audit-policy.yaml
+    scored: true
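+
+  # Illustrative sketch, not CIS benchmark text: check 1.1.37 above references an
+  # audit policy at /etc/kubernetes/audit-policy.yaml but does not show one. A
+  # minimal policy, assuming the audit.k8s.io/v1 API shipped with Kubernetes
+  # 1.13, that records every request at the Metadata level could look like:
+  #
+  #   apiVersion: audit.k8s.io/v1
+  #   kind: Policy
+  #   rules:
+  #   - level: Metadata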
+
+  - id: 1.1.38
+    text: "Ensure that the --request-timeout argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--request-timeout"
+        set: false
+      - flag: "--request-timeout"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf
+      and set the below parameter as appropriate, if needed. For example,
+      --request-timeout=300s
+    scored: true
+
+  - id: 1.1.39
+    text: "Ensure that the --authorization-mode argument includes RBAC (Scored)"
+    audit: "ps -ef | grep $apiserverbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--authorization-mode"
+        compare:
+          op: has
+          value: "RBAC"
+        set: true
+    remediation: |
+      Edit the API server pod specification file $apiserverconf on the master node and set the --authorization-mode parameter to a value that includes RBAC, for example: --authorization-mode=Node,RBAC
+    scored: true
+
+- id: 1.2
+  text: "Scheduler"
+  checks:
+  - id: 1.2.1
+    text: "Ensure that the --profiling argument is set to false (Scored)"
+    audit: "ps -ef | grep $schedulerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--profiling"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the Scheduler pod specification file $schedulerconf
+      on the master node and set the below parameter.
+      --profiling=false
+    scored: true
+
+  - id: 1.2.2
+    text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
+    audit: "ps -ef | grep $schedulerbin | grep -v grep"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "--address"
+        compare:
+          op: eq
+          value: "127.0.0.1"
+        set: true
+      - flag: "--address"
+        set: false
+    remediation: |
+      Edit the Scheduler pod specification file $schedulerconf
+      on the master node and ensure the correct value for the
+      --address parameter.
+    scored: true
+
+- id: 1.3
+  text: "Controller Manager"
+  checks:
+  - id: 1.3.1
+    text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--terminated-pod-gc-threshold"
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf
+      on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, for example:
+      --terminated-pod-gc-threshold=10
+    scored: true
+
+  - id: 1.3.2
+    text: "Ensure that the --profiling argument is set to false (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--profiling"
+        compare:
+          op: eq
+          value: false
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf
+      on the master node and set the below parameter.
+      --profiling=false
+    scored: true
+
+  - id: 1.3.3
+    text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)"
+    audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+    tests:
+      test_items:
+      - flag: "--use-service-account-credentials"
+        compare:
+          op: eq
+          value: true
+        set: true
+    remediation: |
+      Edit the Controller Manager pod specification file $controllermanagerconf
+      on the master node to set the below parameter.
+      --use-service-account-credentials=true
+    scored: true
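+
+  # Illustrative sketch, not CIS benchmark text: a fragment of a remediated
+  # kube-controller-manager manifest combining the flags from checks 1.3.1-1.3.3.
+  # The threshold value of 10 is the example used above, not a mandated setting:
+  #
+  #   spec:
+  #     containers:
+  #     - command:
+  #       - kube-controller-manager
+  #       - --terminated-pod-gc-threshold=10
+  #       - --profiling=false
+  #       - --use-service-account-credentials=true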
+ --use-service-account-credentials=true
+ scored: true
+
+ - id: 1.3.4
+ text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)"
+ audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--service-account-private-key-file"
+ set: true
+ remediation: |
+ Edit the Controller Manager pod specification file $controllermanagerconf
+ on the master node and set the --service-account-private-key-file
+ parameter to the private key file for service accounts.
+ --service-account-private-key-file=
+ scored: true
+
+ - id: 1.3.5
+ text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)"
+ audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--root-ca-file"
+ set: true
+ remediation: |
+ Edit the Controller Manager pod specification file $controllermanagerconf
+ on the master node and set the --root-ca-file parameter to
+ the certificate bundle file.
+ --root-ca-file=
+ scored: true
+
+ - id: 1.3.6
+ text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
+ audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--feature-gates"
+ compare:
+ op: eq
+ value: "RotateKubeletServerCertificate=true"
+ set: true
+ remediation: |
+ Edit the Controller Manager pod specification file $controllermanagerconf
+ on the master node and set the --feature-gates parameter to
+ include RotateKubeletServerCertificate=true.
+ --feature-gates=RotateKubeletServerCertificate=true
+ scored: true
+
+ - id: 1.3.7
+ text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)"
+ audit: "ps -ef | grep $controllermanagerbin | grep -v grep"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--address"
+ compare:
+ op: eq
+ value: "127.0.0.1"
+ set: true
+ - flag: "--address"
+ set: false
+ remediation: |
+ Edit the Controller Manager pod specification file $controllermanagerconf
+ on the master node and ensure the correct value
+ for the --address parameter.
+ scored: true
+
+- id: 1.4
+ text: "Configuration Files"
+ checks:
+ - id: 1.4.1
+ text: "Ensure that the API server pod specification file permissions are
+ set to 644 or more restrictive (Scored)"
+ audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %a $apiserverconf; fi'"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "644"
+ compare:
+ op: eq
+ value: "644"
+ set: true
+ - flag: "640"
+ compare:
+ op: eq
+ value: "640"
+ set: true
+ - flag: "600"
+ compare:
+ op: eq
+ value: "600"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on the master node.
+ For example,
+ chmod 644 $apiserverconf
+ scored: true
+
+ - id: 1.4.2
+ text: "Ensure that the API server pod specification file ownership is set to
+ root:root (Scored)"
+ audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+ tests:
+ test_items:
+ - flag: "root:root"
+ compare:
+ op: eq
+ value: "root:root"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on the master node.
+ For example, + chown root:root $apiserverconf + scored: true + + - id: 1.4.3 + text: "Ensure that the controller manager pod specification file + permissions are set to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %a $controllermanagerconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 $controllermanagerconf + scored: true + + - id: 1.4.4 + text: "Ensure that the controller manager pod specification file + ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root $controllermanagerconf + scored: true + + - id: 1.4.5 + text: "Ensure that the scheduler pod specification file permissions are set + to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %a $schedulerconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 $schedulerconf + scored: true + + - id: 1.4.6 + text: "Ensure that the scheduler pod specification file ownership is set to + root:root (Scored)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root $schedulerconf + scored: true + + - id: 1.4.7 + text: "Ensure that the etcd pod specification file permissions are set to + 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %a $etcdconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 $etcdconf + scored: true + + - id: 1.4.8 + text: "Ensure that the etcd pod specification file ownership is set to + root:root (Scored)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. 
+ For example,
+ chown root:root $etcdconf
+ scored: true
+
+ - id: 1.4.9
+ text: "Ensure that the Container Network Interface file permissions are
+ set to 644 or more restrictive (Not Scored)"
+ audit: "stat -c %a "
+ type: manual
+ remediation: |
+ Run the below command (based on the file location on your system) on the master node.
+ For example,
+ chmod 644
+ scored: false
+
+ - id: 1.4.10
+ text: "Ensure that the Container Network Interface file ownership is set
+ to root:root (Not Scored)"
+ audit: "stat -c %U:%G "
+ type: manual
+ remediation: |
+ Run the below command (based on the file location on your system) on the master node.
+ For example,
+ chown root:root
+ scored: false
+
+ - id: 1.4.11
+ text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
+ audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %a
+ tests:
+ test_items:
+ - flag: "700"
+ compare:
+ op: eq
+ value: "700"
+ set: true
+ remediation: |
+ On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+ from the below command:
+ ps -ef | grep $etcdbin
+ Run the below command (based on the etcd data directory found above). For example,
+ chmod 700 /var/lib/etcd
+ scored: true
+
+ - id: 1.4.12
+ text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
+ audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
+ tests:
+ test_items:
+ - flag: "etcd:etcd"
+ set: true
+ remediation: |
+ On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+ from the below command:
+ ps -ef | grep $etcdbin
+ Run the below command (based on the etcd data directory found above). For example,
+ chown etcd:etcd /var/lib/etcd
+ scored: true
+
+ - id: 1.4.13
+ text: "Ensure that the admin.conf file permissions are set to 644 or
+ more restrictive (Scored)"
+ audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "644"
+ compare:
+ op: eq
+ value: "644"
+ set: true
+ - flag: "640"
+ compare:
+ op: eq
+ value: "640"
+ set: true
+ - flag: "600"
+ compare:
+ op: eq
+ value: "600"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on the master node.
+ For example,
+ chmod 644 /etc/kubernetes/admin.conf
+ scored: true
+
+ - id: 1.4.14
+ text: "Ensure that the admin.conf file ownership is set to root:root (Scored)"
+ audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
+ tests:
+ test_items:
+ - flag: "root:root"
+ compare:
+ op: eq
+ value: "root:root"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on the master node.
+ For example,
+ chown root:root /etc/kubernetes/admin.conf
+ scored: true
+
+ - id: 1.4.15
+ text: "Ensure that the scheduler.conf file permissions are set to 644 or
+ more restrictive (Scored)"
+ audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %a /etc/kubernetes/scheduler.conf; fi'"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "644"
+ compare:
+ op: eq
+ value: "644"
+ set: true
+ - flag: "640"
+ compare:
+ op: eq
+ value: "640"
+ set: true
+ - flag: "600"
+ compare:
+ op: eq
+ value: "600"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on the
+ master node. For example, chmod 644 /etc/kubernetes/scheduler.conf
+ scored: true
+
+ - id: 1.4.16
+ text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)"
+ audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'"
+ tests:
+ test_items:
+ - flag: "root:root"
+ compare:
+ op: eq
+ value: "root:root"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on the
+ master node. For example, chown root:root /etc/kubernetes/scheduler.conf
+ scored: true
+
+ - id: 1.4.17
+ text: "Ensure that the controller-manager.conf file permissions are set
+ to 644 or more restrictive (Scored)"
+ audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %a /etc/kubernetes/controller-manager.conf; fi'"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "644"
+ compare:
+ op: eq
+ value: "644"
+ set: true
+ - flag: "640"
+ compare:
+ op: eq
+ value: "640"
+ set: true
+ - flag: "600"
+ compare:
+ op: eq
+ value: "600"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on the
+ master node. For example, chmod 644 /etc/kubernetes/controller-manager.conf
+ scored: true
+
+ - id: 1.4.18
+ text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
+ audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'"
+ tests:
+ test_items:
+ - flag: "root:root"
+ compare:
+ op: eq
+ value: "root:root"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on the
+ master node. For example, chown root:root /etc/kubernetes/controller-manager.conf
+ scored: true
+
+ - id: 1.4.19
+ text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored)"
+ audit: "ls -laR /etc/kubernetes/pki/"
+ type: "manual"
+ tests:
+ test_items:
+ - flag: "root root"
+ compare:
+ op: eq
+ value: "root root"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on the master node.
+ For example, chown -R root:root /etc/kubernetes/pki/
+ scored: true
+
+ - id: 1.4.20
+ text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Scored)"
+ audit: "stat -c %n\ %a /etc/kubernetes/pki/*.crt"
+ type: "manual"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "644"
+ compare:
+ op: eq
+ value: "644"
+ set: true
+ - flag: "640"
+ compare:
+ op: eq
+ value: "640"
+ set: true
+ - flag: "600"
+ compare:
+ op: eq
+ value: "600"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on the master node.
+ For example, chmod -R 644 /etc/kubernetes/pki/*.crt
+ scored: true
+
+ - id: 1.4.21
+ text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Scored)"
+ audit: "stat -c %n\ %a /etc/kubernetes/pki/*.key"
+ type: "manual"
+ tests:
+ test_items:
+ - flag: "600"
+ compare:
+ op: eq
+ value: "600"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on the master node.
+ For example, chmod -R 600 /etc/kubernetes/pki/*.key
+ scored: true
+
+- id: 1.5
+ text: "etcd"
+ checks:
+ - id: 1.5.1
+ text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
+ audit: "ps -ef | grep $etcdbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--cert-file"
+ set: true
+ - flag: "--key-file"
+ set: true
+ remediation: |
+ Follow the etcd service documentation and configure TLS encryption.
+ Then, edit the etcd pod specification file $etcdconf on the
+ master node and set the below parameters.
+ --cert-file=
+ --key-file=
+ scored: true
+
+ - id: 1.5.2
+ text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
+ audit: "ps -ef | grep $etcdbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--client-cert-auth"
+ compare:
+ op: eq
+ value: true
+ set: true
+ remediation: |
+ Edit the etcd pod specification file $etcdconf on the master
+ node and set the below parameter.
+ --client-cert-auth="true"
+ scored: true
+
+ - id: 1.5.3
+ text: "Ensure that the --auto-tls argument is not set to true (Scored)"
+ audit: "ps -ef | grep $etcdbin | grep -v grep"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--auto-tls"
+ set: false
+ - flag: "--auto-tls"
+ compare:
+ op: eq
+ value: false
+ set: true
+ remediation: |
+ Edit the etcd pod specification file $etcdconf on the master
+ node and either remove the --auto-tls parameter or set it to false.
+ --auto-tls=false
+ scored: true
+
+ - id: 1.5.4
+ text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+ set as appropriate (Scored)"
+ audit: "ps -ef | grep $etcdbin | grep -v grep"
+ type: "manual"
+ tests:
+ bin_op: and
+ test_items:
+ - flag: "--peer-cert-file"
+ set: true
+ - flag: "--peer-key-file"
+ set: true
+ remediation: |
+ Follow the etcd service documentation and configure peer TLS encryption as appropriate
+ for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the
+ master node and set the below parameters.
+ --peer-cert-file=
+ --peer-key-file=
+ scored: true
+
+ - id: 1.5.5
+ text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
+ audit: "ps -ef | grep $etcdbin | grep -v grep"
+ type: "manual"
+ tests:
+ test_items:
+ - flag: "--peer-client-cert-auth"
+ compare:
+ op: eq
+ value: true
+ set: true
+ remediation: |
+ Edit the etcd pod specification file $etcdconf on the master
+ node and set the below parameter.
+ --peer-client-cert-auth=true
+ scored: true
+
+ - id: 1.5.6
+ text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
+ audit: "ps -ef | grep $etcdbin | grep -v grep"
+ type: "manual"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--peer-auto-tls"
+ set: false
+ - flag: "--peer-auto-tls"
+ compare:
+ op: eq
+ value: false
+ set: true
+ remediation: |
+ Edit the etcd pod specification file $etcdconf on the master
+ node and either remove the --peer-auto-tls parameter or set it to false.
+ --peer-auto-tls=false
+ scored: true
+
+ - id: 1.5.7
+ text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
+ audit: "ps -ef | grep $etcdbin | grep -v grep"
+ type: "manual"
+ tests:
+ test_items:
+ - flag: "--trusted-ca-file"
+ set: true
+ remediation: |
+ Follow the etcd documentation and create a dedicated certificate authority setup for the
+ etcd service.
+ Then, edit the etcd pod specification file $etcdconf on the
+ master node and set the below parameter.
+ --trusted-ca-file=
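+ For example, a dedicated self-signed CA for etcd could be generated with
+ openssl; the file names and subject below are illustrative placeholders:
+ openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
+ -subj "/CN=etcd-ca" -keyout etcd-ca.key -out etcd-ca.crt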
+ scored: false
+
+- id: 1.6
+ text: "General Security Primitives"
+ checks:
+ - id: 1.6.1
+ text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
+ type: "manual"
+ remediation: |
+ Remove any unneeded clusterrolebindings:
+ kubectl delete clusterrolebinding [name]
+ scored: false
+
+ - id: 1.6.2
+ text: "Create administrative boundaries between resources using namespaces (Not Scored)"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create namespaces for objects in your deployment as you
+ need them.
+ scored: false
+
+ - id: 1.6.3
+ text: "Create network segmentation using Network Policies (Not Scored)"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create NetworkPolicy objects as you need them.
+ scored: false
+
+ - id: 1.6.4
+ text: "Ensure that the seccomp profile is set to docker/default in your pod
+ definitions (Not Scored)"
+ type: "manual"
+ remediation: |
+ Seccomp is currently an alpha feature. By default, all alpha features are disabled. So, you
+ would need to enable alpha features in the apiserver by passing the
+ "--feature-gates=AllAlpha=true" argument.
+ Edit the $apiserverconf file on the master node and set the KUBE_API_ARGS
+ parameter to "--feature-gates=AllAlpha=true"
+ KUBE_API_ARGS="--feature-gates=AllAlpha=true"
+ Based on your system, restart the kube-apiserver service. For example:
+ systemctl restart kube-apiserver.service
+ Use annotations to enable the docker/default seccomp profile in your pod definitions. An
+ example is as below:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: trustworthy-pod
+ annotations:
+ seccomp.security.alpha.kubernetes.io/pod: docker/default
+ spec:
+ containers:
+ - name: trustworthy-container
+ image: sotrustworthy:latest
+ scored: false
+
+ - id: 1.6.5
+ text: "Apply Security Context to Your Pods and Containers (Not Scored)"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and apply security contexts to your pods. For a
+ suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+ Containers.
+ scored: false
+
+ - id: 1.6.6
+ text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and set up image provenance.
+ scored: false
+
+ - id: 1.6.7
+ text: "Configure Network policies as appropriate (Not Scored)"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and set up network policies as appropriate.
+ For example, you could create a "default" isolation policy for a Namespace by creating a
+ NetworkPolicy that selects all pods but does not allow any traffic:
+ apiVersion: networking.k8s.io/v1
+ kind: NetworkPolicy
+ metadata:
+ name: default-deny
+ spec:
+ podSelector: {}
+ policyTypes:
+ - Ingress
+ scored: false
+
+ - id: 1.6.8
+ text: "Place compensating controls in the form of PSP and RBAC for
+ privileged containers usage (Not Scored)"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and set up PSP and RBAC authorization for your cluster.
+ scored: false
+
+- id: 1.7
+ text: "PodSecurityPolicies"
+ checks:
+ - id: 1.7.1
+ text: "Do not admit privileged containers (Not Scored)"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.privileged field is omitted or set to false.
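+ For example, a minimal illustrative PodSecurityPolicy is shown below; the
+ name and the rule values other than privileged: false are placeholders used
+ only to satisfy the required PSP fields:
+ apiVersion: policy/v1beta1
+ kind: PodSecurityPolicy
+ metadata:
+ name: restricted
+ spec:
+ privileged: false
+ seLinux:
+ rule: RunAsAny
+ runAsUser:
+ rule: MustRunAsNonRoot
+ supplementalGroups:
+ rule: RunAsAny
+ fsGroup:
+ rule: RunAsAny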
+ scored: false
+
+ - id: 1.7.2
+ text: "Do not admit containers wishing to share the host process ID namespace (Not Scored)"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostPID field is omitted or set to false.
+ scored: false
+
+ - id: 1.7.3
+ text: "Do not admit containers wishing to share the host IPC namespace (Not Scored)"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostIPC field is omitted or set to false.
+ scored: false
+
+ - id: 1.7.4
+ text: "Do not admit containers wishing to share the host network namespace (Not Scored)"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostNetwork field is omitted or set to false.
+ scored: false
+
+ - id: 1.7.5
+ text: "Do not admit containers with allowPrivilegeEscalation (Not Scored)"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.allowPrivilegeEscalation field is omitted or set to false.
+ scored: false
+
+ - id: 1.7.6
+ text: "Do not admit root containers (Not Scored)"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0.
+ scored: false
+
+ - id: 1.7.7
+ text: "Do not admit containers with dangerous capabilities (Not Scored)"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
+ scored: false
diff --git a/cfg/1.13/node.yaml b/cfg/1.13/node.yaml
new file mode 100644
index 0000000..daa8aa1
--- /dev/null
+++ b/cfg/1.13/node.yaml
@@ -0,0 +1,480 @@
+---
+controls:
+version: 1.13
+id: 2
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+- id: 2.1
+ text: "Kubelet"
+ checks:
+ - id: 2.1.1
+ text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
+ audit: "ps -fC $kubeletbin"
+ tests:
+ test_items:
+ - flag: "--anonymous-auth"
+ compare:
+ op: eq
+ value: false
+ set: true
+ remediation: |
+ If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+ false.
+ If using executable arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ --anonymous-auth=false
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 2.1.2
+ text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
+ audit: "ps -fC $kubeletbin"
+ tests:
+ test_items:
+ - flag: "--authorization-mode"
+ compare:
+ op: nothave
+ value: "AlwaysAllow"
+ set: true
+ remediation: |
+ If using a Kubelet config file, edit the file to set authorization: mode to Webhook.
+ If using executable arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_AUTHZ_ARGS variable.
+ --authorization-mode=Webhook
+ Based on your system, restart the kubelet service.
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)" + audit: "ps -fC $kubeletbin" + tests: + test_items: + - flag: "--client-ca-file" + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.4 + text: "Ensure that the --read-only-port argument is set to 0 (Scored)" + audit: "ps -fC $kubeletbin" + tests: + test_items: + - flag: "--read-only-port" + compare: + op: eq + value: 0 + set: true + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0 . + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)" + audit: "ps -fC $kubeletbin" + tests: + test_items: + - flag: "--streaming-connection-idle-timeout" + compare: + op: noteq + value: 0 + set: true + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)" + audit: "ps -fC $kubeletbin" + tests: + test_items: + - flag: "--protect-kernel-defaults" + compare: + op: eq + value: true + set: true + remediation: | + If using a Kubelet config file, edit the file to set protectKernelDefaults: true . + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored)" + audit: "ps -fC $kubeletbin" + tests: + bin_op: or + test_items: + - flag: "--make-iptables-util-chains" + compare: + op: eq + value: true + set: true + - flag: "--make-iptables-util-chains" + set: false + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true . + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 2.1.8
+ text: "Ensure that the --hostname-override argument is not set (Scored)"
+ audit: "ps -fC $kubeletbin"
+ tests:
+ test_items:
+ - flag: "--hostname-override"
+ set: false
+ remediation: |
+ Edit the kubelet service file $kubeletsvc
+ on each worker node and remove the --hostname-override argument from the
+ KUBELET_SYSTEM_PODS_ARGS variable.
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 2.1.9
+ text: "Ensure that the --event-qps argument is set to 0 (Scored)"
+ audit: "ps -fC $kubeletbin"
+ tests:
+ test_items:
+ - flag: "--event-qps"
+ compare:
+ op: eq
+ value: 0
+ set: true
+ remediation: |
+ If using a Kubelet config file, edit the file to set eventRecordQPS: 0.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ --event-qps=0
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 2.1.10
+ text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
+ audit: "ps -fC $kubeletbin"
+ tests:
+ bin_op: and
+ test_items:
+ - flag: "--tls-cert-file"
+ set: true
+ - flag: "--tls-private-key-file"
+ set: true
+ remediation: |
+ If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate
+ file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the
+ corresponding private key file.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+ --tls-cert-file=
+ --tls-private-key-file=
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 2.1.11
+ text: "Ensure that the --cadvisor-port argument is set to 0 (Scored)"
+ audit: "ps -fC $kubeletbin"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--cadvisor-port"
+ compare:
+ op: eq
+ value: 0
+ set: true
+ - flag: "--cadvisor-port"
+ set: false
+ remediation: |
+ Edit the kubelet service file $kubeletsvc
+ on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable.
+ --cadvisor-port=0
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 2.1.12
+ text: "Ensure that the --rotate-certificates argument is not set to false (Scored)"
+ audit: "ps -fC $kubeletbin"
+ tests:
+ test_items:
+ - flag: "--rotate-certificates"
+ compare:
+ op: eq
+ value: true
+ set: true
+ remediation: |
+ If using a Kubelet config file, edit the file to add the line rotateCertificates: true.
+ If using command line arguments, edit the kubelet service file $kubeletsvc
+ on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable.
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 2.1.13
+ text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
+ audit: "ps -fC $kubeletbin"
+ tests:
+ test_items:
+ - flag: "RotateKubeletServerCertificate"
+ compare:
+ op: eq
+ value: true
+ set: true
+ remediation: |
+ Edit the kubelet service file $kubeletsvc
+ on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+ --feature-gates=RotateKubeletServerCertificate=true
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 2.1.14
+ text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)"
+ audit: "ps -fC $kubeletbin"
+ tests:
+ test_items:
+ - flag: "--tls-cipher-suites"
+ compare:
+ op: eq
+ value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+ set: true
+ remediation: |
+ If using a Kubelet config file, edit the file to set TLSCipherSuites: to
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+ If using executable arguments, edit the kubelet service file $kubeletsvc on each worker node and set the below parameter.
+ --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
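+ For example, the equivalent Kubelet config file snippet could look as below;
+ this uses the tlsCipherSuites field of the KubeletConfiguration type, and the
+ exact apiVersion shown is an assumption to verify against your Kubernetes
+ version:
+ apiVersion: kubelet.config.k8s.io/v1beta1
+ kind: KubeletConfiguration
+ tlsCipherSuites:
+ - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
+ - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256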
+ scored: false
+
+- id: 2.2
+ text: "Configuration Files"
+ checks:
+ - id: 2.2.1
+ text: "Ensure that the kubelet.conf file permissions are set to 644 or
+ more restrictive (Scored)"
+ audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "644"
+ compare:
+ op: eq
+ value: "644"
+ set: true
+ - flag: "640"
+ compare:
+ op: eq
+ value: "640"
+ set: true
+ - flag: "600"
+ compare:
+ op: eq
+ value: "600"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker
+ node. For example,
+ chmod 644 $kubeletkubeconfig
+ scored: true
+
+ - id: 2.2.2
+ text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)"
+ audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'"
+ tests:
+ test_items:
+ - flag: "root:root"
+ compare:
+ op: eq
+ value: root:root
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker
+ node. For example,
+ chown root:root $kubeletkubeconfig
+ scored: true
+
+ - id: 2.2.3
+ text: "Ensure that the kubelet service file permissions are set to 644 or
+ more restrictive (Scored)"
+ audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "644"
+ compare:
+ op: eq
+ value: "644"
+ set: true
+ - flag: "640"
+ compare:
+ op: eq
+ value: "640"
+ set: true
+ - flag: "600"
+ compare:
+ op: eq
+ value: "600"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker
+ node. For example,
+ chmod 644 $kubeletsvc
+ scored: true
+
+ - id: 2.2.4
+ text: "Ensure that the kubelet service file ownership is set to root:root (Scored)"
+ audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'"
+ tests:
+ test_items:
+ - flag: "root:root"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker
+ node. For example,
+ chown root:root $kubeletsvc
+ scored: true
+
+ - id: 2.2.5
+ text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)"
+ audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "644"
+ compare:
+ op: eq
+ value: "644"
+ set: true
+ - flag: "640"
+ compare:
+ op: eq
+ value: "640"
+ set: true
+ - flag: "600"
+ compare:
+ op: eq
+ value: "600"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker
+ node. For example,
+ chmod 644 $proxykubeconfig
+ scored: true
+
+ - id: 2.2.6
+ text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)"
+ audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'"
+ tests:
+ test_items:
+ - flag: "root:root"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker
+ node. For example,
+ chown root:root $proxykubeconfig
+ scored: true
+
+ - id: 2.2.7
+ text: "Ensure that the certificate authorities file permissions are set to
+ 644 or more restrictive (Scored)"
+ type: manual
+ remediation: |
+ Run the following command to modify the file permissions of the --client-ca-file
+ chmod 644
+ scored: true
+
+ - id: 2.2.8
+ text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)"
+ audit: "/bin/sh -c 'if test -e $ca-file; then stat -c %U:%G $ca-file; fi'"
+ type: manual
+ remediation: |
+ Run the following command to modify the ownership of the --client-ca-file.
+ chown root:root
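+ For example, if the kubelet was started with
+ --client-ca-file=/etc/kubernetes/pki/ca.crt (an illustrative path), run:
+ chown root:root /etc/kubernetes/pki/ca.crt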
+ scored: true
+
+ - id: 2.2.9
+ text: "Ensure that the kubelet configuration file ownership is set to root:root (Scored)"
+ audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'"
+ tests:
+ test_items:
+ - flag: "root:root"
+ set: true
+ remediation: |
+ Run the following command (using the config file location identified in the Audit step)
+ chown root:root $kubeletconf
+ scored: true
+
+ - id: 2.2.10
+ text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)"
+ audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "644"
+ compare:
+ op: eq
+ value: "644"
+ set: true
+ - flag: "640"
+ compare:
+ op: eq
+ value: "640"
+ set: true
+ - flag: "600"
+ compare:
+ op: eq
+ value: "600"
+ set: true
+ remediation: |
+ Run the following command (using the config file location identified in the Audit step)
+ chmod 644 $kubeletconf
+ scored: true

From 53ed68a0b2c9aaf40413bfe741b3badc81a32137 Mon Sep 17 00:00:00 2001
From: Abubakr-Sadik Nii Nai Davis
Date: Wed, 6 Mar 2019 11:52:13 +0000
Subject: [PATCH 09/10] Clean up OCP benchmark config.

The OCP benchmark uses configs only for binary component variable names.
This commit cleans up the OCP config by removing all configuration except
the component binaries required to run kube-bench on OCP installations,
and adds missing ones.
---
 cfg/ocp-3.10/config.yaml | 13 ++++---------
 cfg/ocp-3.10/master.yaml | 2 +-
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/cfg/ocp-3.10/config.yaml b/cfg/ocp-3.10/config.yaml
index b3057bf..cfca6b2 100644
--- a/cfg/ocp-3.10/config.yaml
+++ b/cfg/ocp-3.10/config.yaml
@@ -11,25 +11,20 @@ master:
 apiserver:
 bins:
 - openshift start master api
- defaultconf: /etc/origin/master/master-config.yaml
 scheduler:
 bins:
 - openshift start master controllers
- defaultconf: /etc/origin/master/master-config.yaml
 controllermanager:
 bins:
 - openshift start master controllers
- defaultconf: /etc/origin/master/master-config.yaml
-
- etcd:
- defaultconf: /etc/kubernetes/manifests/etcd.yaml
 node:
 kubelet:
- defaultconf: /etc/kubernetes/kubelet.conf
- defaultsvc: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+ bins:
+ - openshift start network
 proxy:
- defaultconf: /etc/kubernetes/addons/kube-proxy-daemonset.yaml
+ bins:
+ - openshift start network
diff --git a/cfg/ocp-3.10/master.yaml b/cfg/ocp-3.10/master.yaml
index 9dd4b57..3cb07bf 100644
--- a/cfg/ocp-3.10/master.yaml
+++ b/cfg/ocp-3.10/master.yaml
@@ -1043,7 +1043,7 @@ groups:
 remediation: |
 On the etcd server node, get the etcd data directory, passed as an argument --data-dir ,
 from the below command:
- ps -ef | grep $etcdbin
+ ps -ef | grep etcd
 Run the below command (based on the etcd data directory found above).
For example, chmod 700 /var/lib/etcd scored: true From 9b3628e76acb319d4c98be90ccda518c8ec6c442 Mon Sep 17 00:00:00 2001 From: Liz Rice Date: Thu, 7 Mar 2019 11:18:06 +0000 Subject: [PATCH 10/10] Update openshift executable config for #236 --- cfg/config.yaml | 3 +++ cfg/ocp-3.10/config.yaml | 14 +++----------- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/cfg/config.yaml b/cfg/config.yaml index 82ed1a1..09fdc09 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -24,6 +24,7 @@ master: bins: - "kube-apiserver" - "hyperkube apiserver" + - "hyperkube kube-apiserver" - "apiserver" confs: - /etc/kubernetes/apiserver.conf @@ -34,6 +35,7 @@ master: bins: - "kube-scheduler" - "hyperkube scheduler" + - "hyperkube kube-scheduler" - "scheduler" confs: - /etc/kubernetes/scheduler.conf @@ -44,6 +46,7 @@ master: bins: - "kube-controller-manager" - "hyperkube controller-manager" + - "hyperkube kube-controller-manager" - "controller-manager" confs: - /etc/kubernetes/controller-manager.conf diff --git a/cfg/ocp-3.10/config.yaml b/cfg/ocp-3.10/config.yaml index cfca6b2..4b78264 100644 --- a/cfg/ocp-3.10/config.yaml +++ b/cfg/ocp-3.10/config.yaml @@ -10,21 +10,13 @@ master: apiserver: bins: - - openshift start master api + - hypershift openshift-kube-apiserver - scheduler: + etcd: bins: - - openshift start master controllers - - controllermanager: - bins: - - openshift start master controllers + - openshift start etcd node: - kubelet: - bins: - - openshift start network - proxy: bins: - openshift start network