From 4d3144ca21c0a57fe8bfe02da14907f456a3eb9c Mon Sep 17 00:00:00 2001 From: Florent Delannoy Date: Mon, 11 Mar 2019 18:05:33 +0000 Subject: [PATCH 01/15] Support JSON and YAML configuration Support new configuration options besides --flags: - JSON file through `jsonpath` - YAML file through `yamlpath` These new options are fully backwards-compatible with the existing tests. Added a new profile, 1.11-json, that expects a JSON kubelet configuration file and scores accordingly. This profile is compatible with EKS. --- Gopkg.lock | 11 + README.md | 25 +- cfg/1.11-json/config.yaml | 30 + cfg/1.11-json/master.yaml | 1446 +++++++++++++++++++++++++++++++++++++ cfg/1.11-json/node.yaml | 508 +++++++++++++ check/controls_test.go | 33 +- check/data | 129 ++++ check/test.go | 103 ++- check/test_test.go | 60 ++ 9 files changed, 2305 insertions(+), 40 deletions(-) create mode 100644 cfg/1.11-json/config.yaml create mode 100644 cfg/1.11-json/master.yaml create mode 100644 cfg/1.11-json/node.yaml diff --git a/Gopkg.lock b/Gopkg.lock index 4f431e3..e74b52e 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -189,6 +189,17 @@ pruneopts = "UT" revision = "c95af922eae69f190717a0b7148960af8c55a072" +[[projects]] + digest = "1:e8e3acc03397f71fad44385631e665c639a8d55bd187bcfa6e70b695e3705edd" + name = "k8s.io/client-go" + packages = [ + "third_party/forked/golang/template", + "util/jsonpath", + ] + pruneopts = "UT" + revision = "e64494209f554a6723674bd494d69445fb76a1d4" + version = "v10.0.0" + [solve-meta] analyzer-name = "dep" analyzer-version = 1 diff --git a/README.md b/README.md index 1bfe745..230a550 100644 --- a/README.md +++ b/README.md @@ -149,7 +149,7 @@ These groups are further organized under `controls` which can be of the type `ma ## Tests Tests are the items we actually look for to determine if a check is successful or not. Checks can have multiple tests, which must all be successful for the check to pass. 
-The syntax for tests: +The syntax for tests operating on a flag: ``` tests: - flag: @@ -159,6 +159,29 @@ tests: value: ... ``` + +If using a JSON config file, the syntax is: +``` +tests: +- jsonpath: + set: + compare: + op: + value: +... +``` + +And for a YAML config file: +``` +tests: +- yamlpath: + set: + compare: + op: + value: +... +``` + Tests have various `operations` which are used to compare the output of audit commands for success. These operations are: diff --git a/cfg/1.11-json/config.yaml b/cfg/1.11-json/config.yaml new file mode 100644 index 0000000..9e923de --- /dev/null +++ b/cfg/1.11-json/config.yaml @@ -0,0 +1,30 @@ +--- +## Controls Files. +# These are YAML files that hold all the details for running checks. +# +## Uncomment to use different control file paths. +# masterControls: ./cfg/master.yaml +# nodeControls: ./cfg/node.yaml +# federatedControls: ./cfg/federated.yaml + +# Master nodes are controlled by EKS and not user-accessible +master: + components: [] + +node: + kubernetes: + confs: + - "/var/lib/kubelet/kubeconfig" + kubeconfig: + - "/var/lib/kubelet/kubeconfig" + + kubelet: + bins: + - "hyperkube kubelet" + - "kubelet" + defaultconf: "/etc/kubernetes/kubelet/kubelet-config.json" + defaultsvc: "/etc/systemd/system/kubelet.service" + defaultkubeconfig: "/var/lib/kubelet/kubeconfig" + + proxy: + defaultkubeconfig: "/var/lib/kubelet/kubeconfig" diff --git a/cfg/1.11-json/master.yaml b/cfg/1.11-json/master.yaml new file mode 100644 index 0000000..0456578 --- /dev/null +++ b/cfg/1.11-json/master.yaml @@ -0,0 +1,1446 @@ +--- +controls: +version: 1.11 +id: 1 +text: "Master Node Security Configuration" +type: "master" +groups: +- id: 1.1 + text: "API Server" + checks: + - id: 1.1.1 + text: "Ensure that the --anonymous-auth argument is set to false (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API 
server pod specification file $apiserverconf + on the master node and set the below parameter. + --anonymous-auth=false + scored: true + + - id: 1.1.2 + text: "Ensure that the --basic-auth-file argument is not set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--basic-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the master node and remove the --basic-auth-file= + parameter. + scored: true + + - id: 1.1.3 + text: "Ensure that the --insecure-allow-any-token argument is not set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--insecure-allow-any-token" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and remove the --insecure-allow-any-token + parameter. + scored: true + + - id: 1.1.4 + text: "Ensure that the --kubelet-https argument is set to true (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + set: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and remove the --kubelet-https parameter. + scored: true + + - id: 1.1.5 + text: "Ensure that the --insecure-bind-address argument is not set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--insecure-bind-address" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and remove the --insecure-bind-address + parameter. 
+ scored: true + + - id: 1.1.6 + text: "Ensure that the --insecure-port argument is set to 0 (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--insecure-port" + compare: + op: eq + value: 0 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + apiserver.yaml on the master node and set the below parameter. + --insecure-port=0 + scored: true + + - id: 1.1.7 + text: "Ensure that the --secure-port argument is not set to 0 (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + set: true + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.1.8 + text: "Ensure that the --profiling argument is set to false (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.1.9 + text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--repair-malformed-updates" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the below parameter. 
+ --repair-malformed-updates=false + scored: true + + - id: 1.1.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.1.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins to + include AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: true + + - id: 1.1.12 + text: "Ensure that the admission control plugin DenyEscalatingExec is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "DenyEscalatingExec" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to a + value that includes DenyEscalatingExec. + --enable-admission-plugins=...,DenyEscalatingExec,... 
+ scored: true + + - id: 1.1.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to + include SecurityContextDeny. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: true + + - id: 1.1.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + set: true + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + --disable-admission-plugins=...,NamespaceLifecycle,... 
+ scored: true + + - id: 1.1.15 + text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --audit-log-path parameter to a suitable + path and file where you would like audit logs to be written, for example: + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.1.16 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --audit-log-maxage parameter to 30 or + as an appropriate number of days: --audit-log-maxage=30 + scored: true + + - id: 1.1.17 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --audit-log-maxbackup parameter to 10 + or to an appropriate value. + --audit-log-maxbackup=10 + scored: true + + - id: 1.1.18 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --audit-log-maxsize parameter to an + appropriate size in MB. 
For example, to set it as 100 MB: + --audit-log-maxsize=100 + scored: true + + - id: 1.1.19 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --authorization-mode parameter to + values other than AlwaysAllow. One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.1.20 + text: "Ensure that the --token-auth-file parameter is not set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the master node and remove the --token-auth-file= + parameter. + scored: true + + - id: 1.1.21 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + set: true + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between the + apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the master node and set the --kubelet-certificate-authority + parameter to the path to the cert file for the certificate authority. 
+ --kubelet-certificate-authority= + scored: true + + - id: 1.1.22 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-client-key" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the master node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.1.23 + text: "Ensure that the --service-account-lookup argument is set to true (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-lookup" + compare: + op: eq + value: true + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the below parameter. + --service-account-lookup=true + scored: true + + - id: 1.1.24 + text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + set: true + remediation: | + Follow the documentation and create Pod Security Policy objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to a + value that includes PodSecurityPolicy : + --enable-admission-plugins=...,PodSecurityPolicy,... + Then restart the API Server. 
+ scored: true + + - id: 1.1.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --service-account-key-file parameter + to the public key file for service accounts: + --service-account-key-file= + scored: true + + - id: 1.1.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as + appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and etcd. Then, edit the API server pod specification file + $apiserverconf on the master node and set the etcd + certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.1.27 + text: "Ensure that the admission control plugin ServiceAccount is set(Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "ServiceAccount" + set: true + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to a + value that includes ServiceAccount. + --enable-admission-plugins=...,ServiceAccount,... 
+ scored: true + + - id: 1.1.28 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set + as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the master node and set the TLS certificate and private key file + parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.1.29 + text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the master node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.1.30 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: has + value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the below parameter. 
+ --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + scored: false + + - id: 1.1.31 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and etcd. Then, edit the API server pod specification file + $apiserverconf on the master node and set the etcd + certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.1.32 + text: "Ensure that the --authorization-mode argument is set to Node (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the master node and set the --authorization-mode parameter to a + value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.1.33 + text: "Ensure that the admission control plugin NodeRestriction is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + set: true + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on + kubelets. Then, edit the API server pod specification file $apiserverconf + on the master node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... 
+ scored: true + + - id: 1.1.34 + text: "Ensure that the --experimental-encryption-provider-config argument is + set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--experimental-encryption-provider-config" + set: true + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf on the + master node and set the --experimental-encryption-provider-config parameter + to the path of that file: + --experimental-encryption-provider-config= + scored: true + + - id: 1.1.35 + text: "Ensure that the encryption provider is set to aescbc (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + type: "manual" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. In this file, + choose aescbc as the encryption provider. + For example, + kind: EncryptionConfig + apiVersion: v1 + resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: <32-byte base64-encoded secret> + scored: true + + - id: 1.1.36 + text: "Ensure that the admission control plugin EventRateLimit is set (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + set: true + remediation: | + Follow the Kubernetes documentation and set the desired limits in a + configuration file. Then, edit the API server pod specification file + $apiserverconf and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... 
+ --admission-control-config-file= + scored: true + + - id: 1.1.37 + text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "AdvancedAuditing=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Follow the Kubernetes documentation and set the desired audit policy in the + /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --audit-policy-file=/etc/kubernetes/audit-policy.yaml + scored: true + + - id: 1.1.38 + text: "Ensure that the --request-timeout argument is set as appropriate (Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--request-timeout" + set: false + - flag: "--request-timeout" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. For example, + --request-timeout=300s + scored: true + + - id: 1.1.39 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers ( Not Scored)" + audit: "ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: eq + value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + set: true + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests + kube-apiserver.yaml on the master node and set the below parameter. 
+ --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + scored: false + +- id: 1.2 + text: "Scheduler" + checks: + - id: 1.2.1 + text: "Ensure that the --profiling argument is set to false (Scored)" + audit: "ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Scheduler pod specification file $schedulerconf + file on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.2 + text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)" + audit: "ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + file on the master node and ensure the correct value for the + --address parameter. 
+ scored: true + +- id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, for example: + --terminated-pod-gc-threshold=10 + scored: true + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: eq + value: true + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the master node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the master node and set the --service-account-private- + key-file parameter to the private key file for service accounts. 
+ --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the master node and set the --root-ca-file parameter to + the certificate bundle file. + --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--feature-gates" + compare: + op: eq + value: "RotateKubeletServerCertificate=true" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + controller-manager.yaml on the master node and set the --feature-gates parameter to + include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + + - id: 1.3.7 + text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)" + audit: "ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + controller-manager.yaml on the master node and ensure the correct value + for the --address parameter. 
+ scored: true + +- id: 1.4 + text: "Configuration Files" + checks: + - id: 1.4.1 + text: "Ensure that the API server pod specification file permissions are + set to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %a $apiserverconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 $apiserverconf + scored: true + + - id: 1.4.2 + text: "Ensure that the API server pod specification file ownership is set to + root:root (Scored)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root $apiserverconf + scored: true + + - id: 1.4.3 + text: "Ensure that the controller manager pod specification file + permissions are set to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %a $controllermanagerconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. 
+ For example, + chmod 644 $controllermanagerconf + scored: true + + - id: 1.4.4 + text: "Ensure that the controller manager pod specification file + ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root $controllermanagerconf + scored: true + + - id: 1.4.5 + text: "Ensure that the scheduler pod specification file permissions are set + to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %a $schedulerconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 $schedulerconf + scored: true + + - id: 1.4.6 + text: "Ensure that the scheduler pod specification file ownership is set to + root:root (Scored)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. 
+ For example, + chown root:root $schedulerconf + scored: true + + - id: 1.4.7 + text: "Ensure that the etcd pod specification file permissions are set to + 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %a $etcdconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 $etcdconf + scored: true + + - id: 1.4.8 + text: "Ensure that the etcd pod specification file ownership is set to + root:root (Scored)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.4.9 + text: "Ensure that the Container Network Interface file permissions are + set to 644 or more restrictive (Not Scored)" + audit: "stat -c %a " + type: manual + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 + scored: true + + - id: 1.4.10 + text: "Ensure that the Container Network Interface file ownership is set + to root:root (Not Scored)" + audit: "stat -c %U:%G " + type: manual + remediation: | + Run the below command (based on the file location on your system) on the master node. 
+ For example, + chown root:root + scored: true + + - id: 1.4.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %a + tests: + test_items: + - flag: "700" + compare: + op: eq + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir , + from the below command: + ps -ef | grep $etcdbin + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.4.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + tests: + test_items: + - flag: "etcd:etcd" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir , + from the below command: + ps -ef | grep $etcdbin + Run the below command (based on the etcd data directory found above). For example, + chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.4.13 + text: "Ensure that the admin.conf file permissions are set to 644 or + more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. 
+ For example, + chmod 644 /etc/kubernetes/admin.conf + scored: true + + - id: 1.4.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.4.15 + text: "Ensure that the scheduler.conf file permissions are set to 644 or + more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %a /etc/kubernetes/scheduler.conf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the + master node. For example, chmod 644 /etc/kubernetes/scheduler.conf + scored: true + + - id: 1.4.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the + master node. 
For example, chown root:root /etc/kubernetes/scheduler.conf + scored: true + + - id: 1.4.17 + text: "Ensure that the controller-manager.conf file permissions are set + to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %a /etc/kubernetes/controller-manager.conf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the + master node. For example, chmod 644 /etc/kubernetes/controller-manager.conf + scored: true + + - id: 1.4.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the + master node. For example, chown root:root /etc/kubernetes/controller-manager.conf + scored: true + +- id: 1.5 + text: "etcd" + checks: + - id: 1.5.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + tests: + test_items: + - flag: "--cert-file" + set: true + - flag: "--key-file" + set: true + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. 
+ --cert-file= + --key-file= + scored: true + + - id: 1.5.2 + text: "Ensure that the --client-cert-auth argument is set to true (Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 1.5.3 + text: "Ensure that the --auto-tls argument is not set to true (Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + set: false + - flag: "--auto-tls" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 1.5.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + type: "manual" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + set: true + - flag: "--peer-key-file" + set: true + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-cert-file= + --peer-key-file= + scored: true + + - id: 1.5.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + type: "manual" + tests: + test_items: + - flag: "--peer-client-cert-auth" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. 
+ --peer-client-cert-auth=true + scored: true + + - id: 1.5.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + type: "manual" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + set: false + - flag: "--peer-auto-tls" + compare: + op: eq + value: false + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 1.5.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)" + audit: "ps -ef | grep $etcdbin | grep -v grep" + type: "manual" + tests: + test_items: + - flag: "--trusted-ca-file" + set: true + remediation: | + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false + +- id: 1.6 + text: "General Security Primitives" + checks: + - id: 1.6.1 + text: "Ensure that the cluster-admin role is only used where required (Not Scored)" + type: "manual" + remediation: | + Remove any unneeded clusterrolebindings : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 1.6.2 + text: "Create administrative boundaries between resources using namespaces (Not Scored)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you + need them. + scored: false + + - id: 1.6.3 + text: "Create network segmentation using Network Policies (Not Scored)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. 
+ scored: false + + - id: 1.6.4 + text: "Ensure that the seccomp profile is set to docker/default in your pod + definitions (Not Scored)" + type: "manual" + remediation: | + Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you + would need to enable alpha features in the apiserver by passing "--feature- + gates=AllAlpha=true" argument. + Edit the $apiserverconf file on the master node and set the KUBE_API_ARGS + parameter to "--feature-gates=AllAlpha=true" + KUBE_API_ARGS="--feature-gates=AllAlpha=true" + Based on your system, restart the kube-apiserver service. For example: + systemctl restart kube-apiserver.service + Use annotations to enable the docker/default seccomp profile in your pod definitions. An + example is as below: + apiVersion: v1 + kind: Pod + metadata: + name: trustworthy-pod + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default + spec: + containers: + - name: trustworthy-container + image: sotrustworthy:latest + scored: false + + - id: 1.6.5 + text: "Apply Security Context to Your Pods and Containers (Not Scored)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 1.6.6 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 1.6.7 + text: "Configure Network policies as appropriate (Not Scored)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup network policies as appropriate. 
+ For example, you could create a "default" isolation policy for a Namespace by creating a + NetworkPolicy that selects all pods but does not allow any traffic: + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: default-deny + spec: + podSelector: + scored: false + + - id: 1.6.8 + text: "Place compensating controls in the form of PSP and RBAC for + privileged containers usage (Not Scored)" + type: "manual" + remediation: | + Follow Kubernetes documentation and setup PSP and RBAC authorization for your cluster. + scored: false + +- id: 1.7 + text: "PodSecurityPolicies" + checks: + - id: 1.7.1 + text: "Do not admit privileged containers (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.privileged field is omitted or set to false. + scored: false + + - id: 1.7.2 + text: "Do not admit containers wishing to share the host process ID namespace (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostPID field is omitted or set to false. + scored: false + + - id: 1.7.3 + text: "Do not admit containers wishing to share the host IPC namespace (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostIPC field is omitted or set to false. + scored: false + + - id: 1.7.4 + text: "Do not admit containers wishing to share the host network namespace (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostNetwork field is omitted or set to false. + scored: false + + - id: 1.7.5 + text: "Do not admit containers with allowPrivilegeEscalation (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.allowPrivilegeEscalation field is omitted or set to false. 
+ scored: false + + - id: 1.7.6 + text: "Do not admit root containers (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0. + scored: false + + - id: 1.7.7 + text: "Do not admit containers with dangerous capabilities (Not Scored)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.requiredDropCapabilities is set to include either NET_RAW or ALL. + scored: false diff --git a/cfg/1.11-json/node.yaml b/cfg/1.11-json/node.yaml new file mode 100644 index 0000000..3bd277b --- /dev/null +++ b/cfg/1.11-json/node.yaml @@ -0,0 +1,508 @@ +--- +controls: +version: 1.11 +id: 2 +text: "Worker Node Security Configuration" +type: "node" +groups: +- id: 2.1 + text: "Kubelet" + checks: + - id: 2.1.1 + text: "Ensure that the --allow-privileged argument is set to false (Scored)" + audit: "ps -fC $kubeletbin" + tests: + test_items: + - flag: "--allow-privileged" + compare: + op: eq + value: false + set: true + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --allow-privileged=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.2 + text: "Ensure that the --anonymous-auth argument is set to false (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - jsonpath: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: false + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to + false . + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. 
+ --anonymous-auth=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.3 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - jsonpath: "{.authorization.mode}" + compare: + op: noteq + value: "AlwaysAllow" + set: true + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.4 + text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - jsonpath: "{.authentication.x509.clientCAFile}" + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.5 + text: "Ensure that the --read-only-port argument is set to 0 (Scored)" + audit: "cat $kubeletconf" + tests: + bin_op: or + test_items: + - jsonpath: "{.readOnlyPort}" + set: false + - jsonpath: "{.readOnlyPort}" + compare: + op: eq + value: "0" + set: true + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0 . 
+ If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.6 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)" + audit: "cat $kubeletconf" + tests: + bin_op: or + test_items: + - jsonpath: "{.streamingConnectionIdleTimeout}" + set: false + - jsonpath: "{.streamingConnectionIdleTimeout}" + compare: + op: noteq + value: 0 + set: true + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.7 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - jsonpath: "{.protectKernelDefaults}" + compare: + op: eq + value: true + set: true + remediation: | + If using a Kubelet config file, edit the file to set protectKernelDefaults: true . + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.8 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored)" + audit: "cat $kubeletconf" + tests: + bin_op: or + test_items: + - jsonpath: "{.makeIPTablesUtilChains}" + set: false + - jsonpath: "{.makeIPTablesUtilChains}" + compare: + op: eq + value: true + set: true + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true . + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.9 + text: "Ensure that the --hostname-override argument is not set (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - jsonpath: "{.hostnameOverride}" + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.10 + text: "Ensure that the --event-qps argument is set to 0 (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - jsonpath: "{.eventRecordQPS}" + compare: + op: eq + value: 0 + set: true + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: 0 . + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --event-qps=0 + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.11 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)" + audit: "cat $kubeletconf" + tests: + bin_op: and + test_items: + - jsonpath: "{.tlsCertFile}" + set: true + - jsonpath: "{.tlsPrivateKeyFile}" + set: true + remediation: | + If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate + file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the + corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.12 + text: "Ensure that the --cadvisor-port argument is set to 0 (Scored)" + audit: "cat $kubeletconf" + tests: + bin_op: or + test_items: + - jsonpath: "{.cadvisorPort}" + compare: + op: eq + value: 0 + set: true + - jsonpath: "{.cadvisorPort}" + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable. + --cadvisor-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.13 + text: "Ensure that the --rotate-certificates argument is not set to false (Scored)" + audit: "cat $kubeletconf" + tests: + bin_op: or + test_items: + - jsonpath: "{.rotateCertificates}" + set: false + - jsonpath: "{.rotateCertificates}" + compare: + op: noteq + value: "false" + set: true + remediation: | + If using a Kubelet config file, edit the file to add the line rotateCertificates: true. 
+ If using command line arguments, edit the kubelet service file $kubeletsvc + on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.14 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - jsonpath: "{.featureGates.RotateKubeletServerCertificate}" + compare: + op: eq + value: true + set: true + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.15 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - jsonpath: "{.tlsCipherSuites}" + compare: + op: eq + value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + set: true + remediation: | + If using a Kubelet config file, edit the file to set TLSCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + If using executable arguments, edit the kubelet service file $kubeletconf on each worker node and set the below parameter. 
+ --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + scored: false + +- id: 2.2 + text: "Configuration Files" + checks: + - id: 2.2.1 + text: "Ensure that the kubelet.conf file permissions are set to 644 or + more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chmod 644 $kubeletkubeconfig + scored: true + + - id: 2.2.2 + text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: root:root + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 2.2.3 + text: "Ensure that the kubelet service file permissions are set to 644 or + more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: 644 + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. 
For example, + chmod 755 $kubeletsvc + scored: true + + - id: 2.2.4 + text: "Ensure that the kubelet service file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chown root:root $kubeletsvc + scored: true + + - id: 2.2.5 + text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chmod 644 $proxykubeconfig + scored: true + + - id: 2.2.6 + text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. 
For example, + chown root:root $proxykubeconfig + scored: true + + - id: 2.2.7 + text: "Ensure that the certificate authorities file permissions are set to + 644 or more restrictive (Scored)" + type: manual + remediation: | + Run the following command to modify the file permissions of the --client-ca-file + chmod 644 + scored: true + + - id: 2.2.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $ca-file; then stat -c %U:%G $ca-file; fi'" + type: manual + remediation: | + Run the following command to modify the ownership of the --client-ca-file . + chown root:root + scored: true + + - id: 2.2.9 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 2.2.10 + text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: true diff --git a/check/controls_test.go b/check/controls_test.go index 3cf9b60..17c62e5 100644 --- a/check/controls_test.go +++ b/check/controls_test.go @@ -2,6 +2,8 @@ package check import ( "io/ioutil" + "os" + "path/filepath" "testing" yaml "gopkg.in/yaml.v2" @@ -11,31 +13,28 @@ const cfgDir = "../cfg/" // validate that the files we're shipping are 
valid YAML func TestYamlFiles(t *testing.T) { - // TODO: make this list dynamic - dirs := []string{"1.6/", "1.7/"} - - for _, dir := range dirs { - dir = cfgDir + dir - - files, err := ioutil.ReadDir(dir) + err := filepath.Walk(cfgDir, func(path string, info os.FileInfo, err error) error { if err != nil { - t.Fatalf("error reading %s directory: %v", dir, err) + t.Fatalf("failure accessing path %q: %v\n", path, err) } - - for _, file := range files { - - fileName := file.Name() - in, err := ioutil.ReadFile(dir + fileName) + if !info.IsDir() { + t.Logf("reading file: %s", path) + in, err := ioutil.ReadFile(path) if err != nil { - t.Fatalf("error opening file %s: %v", fileName, err) + t.Fatalf("error opening file %s: %v", path, err) } c := new(Controls) - err = yaml.Unmarshal(in, c) - if err != nil { - t.Fatalf("failed to load YAML from %s: %v", fileName, err) + if err == nil { + t.Logf("YAML file successfully unmarshalled: %s", path) + } else { + t.Fatalf("failed to load YAML from %s: %v", path, err) } } + return nil + }) + if err != nil { + t.Fatalf("failure walking cfg dir: %v\n", err) } } diff --git a/check/data b/check/data index 88bdc85..b3a4cfe 100644 --- a/check/data +++ b/check/data @@ -157,4 +157,133 @@ groups: value: Something set: true + - id: 14 + text: "jsonpath correct value on field" + tests: + test_items: + - jsonpath: "{.readOnlyPort}" + compare: + op: eq + value: 15000 + set: true + - jsonpath: "{.readOnlyPort}" + compare: + op: gte + value: 15000 + set: true + - jsonpath: "{.readOnlyPort}" + compare: + op: lte + value: 15000 + set: true + - id: 15 + text: "jsonpath correct case-sensitive value on string field" + tests: + test_items: + - jsonpath: "{.stringValue}" + compare: + op: noteq + value: "None" + set: true + - jsonpath: "{.stringValue}" + compare: + op: noteq + value: "webhook,Something,RBAC" + set: true + - jsonpath: "{.stringValue}" + compare: + op: eq + value: "WebHook,Something,RBAC" + set: true + + - id: 16 + text: "jsonpath correct 
value on boolean field" + tests: + test_items: + - jsonpath: "{.trueValue}" + compare: + op: noteq + value: somethingElse + set: true + - jsonpath: "{.trueValue}" + compare: + op: noteq + value: false + set: true + - jsonpath: "{.trueValue}" + compare: + op: eq + value: true + set: true + + - id: 17 + text: "jsonpath field absent" + tests: + test_items: + - jsonpath: "{.notARealField}" + set: false + + - id: 18 + text: "jsonpath correct value on nested field" + tests: + test_items: + - jsonpath: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: "false" + set: true + + - id: 19 + text: "yamlpath correct value on field" + tests: + test_items: + - yamlpath: "{.readOnlyPort}" + compare: + op: gt + value: 14999 + set: true + + - id: 20 + text: "yamlpath field absent" + tests: + test_items: + - yamlpath: "{.fieldThatIsUnset}" + set: false + + - id: 21 + text: "yamlpath correct value on nested field" + tests: + test_items: + - yamlpath: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: "false" + set: true + + - id: 22 + text: "jsonpath on invalid json" + tests: + test_items: + - jsonpath: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: "false" + set: true + + - id: 23 + text: "jsonpath with broken expression" + tests: + test_items: + - jsonpath: "{.missingClosingBrace" + set: true + + - id: 24 + text: "yamlpath on invalid yaml" + tests: + test_items: + - yamlpath: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: "false" + set: true diff --git a/check/test.go b/check/test.go index 7a74634..6ac8d0a 100644 --- a/check/test.go +++ b/check/test.go @@ -15,11 +15,16 @@ package check import ( + "bytes" + "encoding/json" "fmt" "os" "regexp" "strconv" "strings" + + yaml "gopkg.in/yaml.v2" + "k8s.io/client-go/util/jsonpath" ) // test: @@ -37,11 +42,13 @@ const ( ) type testItem struct { - Flag string - Output string - Value string - Set bool - Compare compare + Flag string + Jsonpath string + Yamlpath string + 
Output string + Value string + Set bool + Compare compare } type compare struct { @@ -54,33 +61,85 @@ type testOutput struct { actualResult string } +func failTestItem(s string) *testOutput { + return &testOutput{testResult: false, actualResult: s} +} + func (t *testItem) execute(s string) *testOutput { result := &testOutput{} - match := strings.Contains(s, t.Flag) + var match bool + var flagVal string + + if t.Flag != "" { + // Flag comparison: check if the flag is present in the input + match = strings.Contains(s, t.Flag) + } else { + // Means either t.Jsonpath != "" or t.Yamlpath != "" + // Find out and convert the input as needed + buf := new(bytes.Buffer) + var jsonInterface interface{} + var pathExpression string + + if t.Yamlpath != "" { + pathExpression = t.Yamlpath + err := yaml.Unmarshal([]byte(s), &jsonInterface) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to load YAML from provided input \"%s\": %v\n", s, err) + return failTestItem("failed to load YAML") + } + } else if t.Jsonpath != "" { + pathExpression = t.Jsonpath + err := json.Unmarshal([]byte(s), &jsonInterface) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to load JSON from provided input: \"%s\": %v\n", s, err) + return failTestItem("failed to load JSON") + } + } + + // Parse the jsonpath/yamlpath expression... 
+ j := jsonpath.New("jsonpath") + j.AllowMissingKeys(true) + err := j.Parse(pathExpression) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to parse path expression \"%s\": %v\n", pathExpression, err) + return failTestItem("unable to parse path expression") + } + + err = j.Execute(buf, jsonInterface) + if err != nil { + fmt.Fprintf(os.Stderr, "error executing path expression \"%s\": %v\n", pathExpression, err) + return failTestItem("error executing path expression") + } + + jsonpathResult := fmt.Sprintf("%s", buf) + match = (jsonpathResult != "") + flagVal = jsonpathResult + } if t.Set { - var flagVal string isset := match if isset && t.Compare.Op != "" { - // Expects flags in the form; - // --flag=somevalue - // --flag - // somevalue - //pttn := `(` + t.Flag + `)(=)*([^\s,]*) *` - pttn := `(` + t.Flag + `)(=)*([^\s]*) *` - flagRe := regexp.MustCompile(pttn) - vals := flagRe.FindStringSubmatch(s) + if t.Flag != "" { + // Expects flags in the form; + // --flag=somevalue + // --flag + // somevalue + //pttn := `(` + t.Flag + `)(=)*([^\s,]*) *` + pttn := `(` + t.Flag + `)(=)*([^\s]*) *` + flagRe := regexp.MustCompile(pttn) + vals := flagRe.FindStringSubmatch(s) - if len(vals) > 0 { - if vals[3] != "" { - flagVal = vals[3] + if len(vals) > 0 { + if vals[3] != "" { + flagVal = vals[3] + } else { + flagVal = vals[1] + } } else { - flagVal = vals[1] + fmt.Fprintf(os.Stderr, "invalid flag in testitem definition") + os.Exit(1) } - } else { - fmt.Fprintf(os.Stderr, "invalid flag in testitem definition") - os.Exit(1) } result.actualResult = strings.ToLower(flagVal) diff --git a/check/test_test.go b/check/test_test.go index 4b96e07..24ba757 100644 --- a/check/test_test.go +++ b/check/test_test.go @@ -110,6 +110,38 @@ func TestTestExecute(t *testing.T) { controls.Groups[0].Checks[13], "2:45 ../kubernetes/kube-apiserver --option --admission-control=Something ---audit-log-maxage=40", }, + { + controls.Groups[0].Checks[14], + "{\"readOnlyPort\": 15000}", + }, + { + 
controls.Groups[0].Checks[15], + "{\"stringValue\": \"WebHook,Something,RBAC\"}", + }, + { + controls.Groups[0].Checks[16], + "{\"trueValue\": true}", + }, + { + controls.Groups[0].Checks[17], + "{\"readOnlyPort\": 15000}", + }, + { + controls.Groups[0].Checks[18], + "{\"authentication\": { \"anonymous\": {\"enabled\": false}}}", + }, + { + controls.Groups[0].Checks[19], + "readOnlyPort: 15000", + }, + { + controls.Groups[0].Checks[20], + "readOnlyPort: 15000", + }, + { + controls.Groups[0].Checks[21], + "authentication:\n anonymous:\n enabled: false", + }, } for _, c := range cases { @@ -119,3 +151,31 @@ func TestTestExecute(t *testing.T) { } } } + +func TestTestExecuteExceptions(t *testing.T) { + + cases := []struct { + *Check + str string + }{ + { + controls.Groups[0].Checks[22], + "this is not valid json {} at all", + }, + { + controls.Groups[0].Checks[23], + "{\"key\": \"value\"}", + }, + { + controls.Groups[0].Checks[24], + "broken } yaml\nenabled: true", + }, + } + + for _, c := range cases { + res := c.Tests.execute(c.str).testResult + if res { + t.Errorf("%s, expected:%v, got:%v\n", c.Text, false, res) + } + } +} From abfc38d6725f62df7ae4e9c8df562913e9864a35 Mon Sep 17 00:00:00 2001 From: Florent Delannoy Date: Thu, 21 Mar 2019 15:05:20 +0000 Subject: [PATCH 02/15] Update documentation after review --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 230a550..a687e0e 100644 --- a/README.md +++ b/README.md @@ -149,7 +149,7 @@ These groups are further organized under `controls` which can be of the type `ma ## Tests Tests are the items we actually look for to determine if a check is successful or not. Checks can have multiple tests, which must all be successful for the check to pass. -The syntax for tests operating on a flag: +The syntax for tests: ``` tests: - flag: @@ -160,7 +160,8 @@ tests: ... 
``` -If using a JSON config file, the syntax is: +You can also define jsonpath and yamlpath tests using the following syntax: + ``` tests: - jsonpath: @@ -171,7 +172,6 @@ tests: ... ``` -And for a YAML config file: ``` tests: - yamlpath: @@ -211,4 +211,4 @@ Next you'll have to build the kube-bench docker image using `make build-docker`, Finally we can use the `make kind-run` target to run the current version of kube-bench in the cluster and follow the logs of pods created. (Ctrl+C to exit) -Everytime you want to test a change, you'll need to rebuild the docker image and push it to cluster before running it again. ( `make build-docker kind-push kind-run` ) \ No newline at end of file +Everytime you want to test a change, you'll need to rebuild the docker image and push it to cluster before running it again. ( `make build-docker kind-push kind-run` ) From 9b034024a76c7af85e56836c3b773fd51af7b05f Mon Sep 17 00:00:00 2001 From: Liz Rice Date: Thu, 11 Apr 2019 10:21:19 +0100 Subject: [PATCH 03/15] Complete merge where test numbers changes --- check/test_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/check/test_test.go b/check/test_test.go index 2c15b15..308dcad 100644 --- a/check/test_test.go +++ b/check/test_test.go @@ -119,8 +119,8 @@ func TestTestExecute(t *testing.T) { // check for ':' as argument-value separator, with no space between arg and val controls.Groups[0].Checks[14], "2:45 kube-apiserver some-arg:some-val --admission-control=Something ---audit-log-maxage=40", - }, - { + }, + { controls.Groups[0].Checks[15], "{\"readOnlyPort\": 15000}", }, @@ -169,15 +169,15 @@ func TestTestExecuteExceptions(t *testing.T) { str string }{ { - controls.Groups[0].Checks[22], + controls.Groups[0].Checks[23], "this is not valid json {} at all", }, { - controls.Groups[0].Checks[23], + controls.Groups[0].Checks[24], "{\"key\": \"value\"}", }, { - controls.Groups[0].Checks[24], + controls.Groups[0].Checks[25], "broken } yaml\nenabled: true", 
}, } From 902a10f1c791197474aafa1da519f3473dc8e35f Mon Sep 17 00:00:00 2001 From: Liz Rice Date: Thu, 11 Apr 2019 17:09:33 +0100 Subject: [PATCH 04/15] Just have one path for both json and yaml --- README.md | 12 +---------- cfg/1.11-json/config.yaml | 3 --- cfg/1.11-json/node.yaml | 40 ++++++++++++++++++------------------ check/data | 38 +++++++++++++++++----------------- check/test.go | 43 ++++++++++++++++----------------------- 5 files changed, 58 insertions(+), 78 deletions(-) diff --git a/README.md b/README.md index c9ae166..06a9fae 100644 --- a/README.md +++ b/README.md @@ -195,17 +195,7 @@ You can also define jsonpath and yamlpath tests using the following syntax: ``` tests: -- jsonpath: - set: - compare: - op: - value: -... -``` - -``` -tests: -- yamlpath: +- path: set: compare: op: diff --git a/cfg/1.11-json/config.yaml b/cfg/1.11-json/config.yaml index 9e923de..b34336d 100644 --- a/cfg/1.11-json/config.yaml +++ b/cfg/1.11-json/config.yaml @@ -8,9 +8,6 @@ # federatedControls: ./cfg/federated.yaml # Master nodes are controlled by EKS and not user-accessible -master: - components: [] - node: kubernetes: confs: diff --git a/cfg/1.11-json/node.yaml b/cfg/1.11-json/node.yaml index 3bd277b..88ae739 100644 --- a/cfg/1.11-json/node.yaml +++ b/cfg/1.11-json/node.yaml @@ -32,7 +32,7 @@ groups: audit: "cat $kubeletconf" tests: test_items: - - jsonpath: "{.authentication.anonymous.enabled}" + - path: "{.authentication.anonymous.enabled}" compare: op: eq value: false @@ -54,7 +54,7 @@ groups: audit: "cat $kubeletconf" tests: test_items: - - jsonpath: "{.authorization.mode}" + - path: "{.authorization.mode}" compare: op: noteq value: "AlwaysAllow" @@ -75,7 +75,7 @@ groups: audit: "cat $kubeletconf" tests: test_items: - - jsonpath: "{.authentication.x509.clientCAFile}" + - path: "{.authentication.x509.clientCAFile}" set: true remediation: | If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to @@ -95,9 +95,9 @@ groups: tests: bin_op: 
or test_items: - - jsonpath: "{.readOnlyPort}" + - path: "{.readOnlyPort}" set: false - - jsonpath: "{.readOnlyPort}" + - path: "{.readOnlyPort}" compare: op: eq value: "0" @@ -119,9 +119,9 @@ groups: tests: bin_op: or test_items: - - jsonpath: "{.streamingConnectionIdleTimeout}" + - path: "{.streamingConnectionIdleTimeout}" set: false - - jsonpath: "{.streamingConnectionIdleTimeout}" + - path: "{.streamingConnectionIdleTimeout}" compare: op: noteq value: 0 @@ -143,7 +143,7 @@ groups: audit: "cat $kubeletconf" tests: test_items: - - jsonpath: "{.protectKernelDefaults}" + - path: "{.protectKernelDefaults}" compare: op: eq value: true @@ -165,9 +165,9 @@ groups: tests: bin_op: or test_items: - - jsonpath: "{.makeIPTablesUtilChains}" + - path: "{.makeIPTablesUtilChains}" set: false - - jsonpath: "{.makeIPTablesUtilChains}" + - path: "{.makeIPTablesUtilChains}" compare: op: eq value: true @@ -188,7 +188,7 @@ groups: audit: "cat $kubeletconf" tests: test_items: - - jsonpath: "{.hostnameOverride}" + - path: "{.hostnameOverride}" set: false remediation: | Edit the kubelet service file $kubeletsvc @@ -204,7 +204,7 @@ groups: audit: "cat $kubeletconf" tests: test_items: - - jsonpath: "{.eventRecordQPS}" + - path: "{.eventRecordQPS}" compare: op: eq value: 0 @@ -226,9 +226,9 @@ groups: tests: bin_op: and test_items: - - jsonpath: "{.tlsCertFile}" + - path: "{.tlsCertFile}" set: true - - jsonpath: "{.tlsPrivateKeyFile}" + - path: "{.tlsPrivateKeyFile}" set: true remediation: | If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate @@ -250,12 +250,12 @@ groups: tests: bin_op: or test_items: - - jsonpath: "{.cadvisorPort}" + - path: "{.cadvisorPort}" compare: op: eq value: 0 set: true - - jsonpath: "{.cadvisorPort}" + - path: "{.cadvisorPort}" set: false remediation: | Edit the kubelet service file $kubeletsvc @@ -272,9 +272,9 @@ groups: tests: bin_op: or test_items: - - jsonpath: "{.rotateCertificates}" + - path: 
"{.rotateCertificates}" set: false - - jsonpath: "{.rotateCertificates}" + - path: "{.rotateCertificates}" compare: op: noteq value: "false" @@ -293,7 +293,7 @@ groups: audit: "cat $kubeletconf" tests: test_items: - - jsonpath: "{.featureGates.RotateKubeletServerCertificate}" + - path: "{.featureGates.RotateKubeletServerCertificate}" compare: op: eq value: true @@ -312,7 +312,7 @@ groups: audit: "cat $kubeletconf" tests: test_items: - - jsonpath: "{.tlsCipherSuites}" + - path: "{.tlsCipherSuites}" compare: op: eq value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" diff --git a/check/data b/check/data index c474e6a..116a5f9 100644 --- a/check/data +++ b/check/data @@ -171,17 +171,17 @@ groups: text: "jsonpath correct value on field" tests: test_items: - - jsonpath: "{.readOnlyPort}" + - path: "{.readOnlyPort}" compare: op: eq value: 15000 set: true - - jsonpath: "{.readOnlyPort}" + - path: "{.readOnlyPort}" compare: op: gte value: 15000 set: true - - jsonpath: "{.readOnlyPort}" + - path: "{.readOnlyPort}" compare: op: lte value: 15000 @@ -191,17 +191,17 @@ groups: text: "jsonpath correct case-sensitive value on string field" tests: test_items: - - jsonpath: "{.stringValue}" + - path: "{.stringValue}" compare: op: noteq value: "None" set: true - - jsonpath: "{.stringValue}" + - path: "{.stringValue}" compare: op: noteq value: "webhook,Something,RBAC" set: true - - jsonpath: "{.stringValue}" + - path: "{.stringValue}" compare: op: eq value: "WebHook,Something,RBAC" @@ -211,17 +211,17 @@ groups: text: "jsonpath correct value on boolean field" tests: test_items: - - jsonpath: "{.trueValue}" + - path: "{.trueValue}" compare: op: noteq value: somethingElse set: true - - jsonpath: "{.trueValue}" + - path: "{.trueValue}" 
compare: op: noteq value: false set: true - - jsonpath: "{.trueValue}" + - path: "{.trueValue}" compare: op: eq value: true @@ -231,14 +231,14 @@ groups: text: "jsonpath field absent" tests: test_items: - - jsonpath: "{.notARealField}" + - path: "{.notARealField}" set: false - id: 19 text: "jsonpath correct value on nested field" tests: test_items: - - jsonpath: "{.authentication.anonymous.enabled}" + - path: "{.authentication.anonymous.enabled}" compare: op: eq value: "false" @@ -248,7 +248,7 @@ groups: text: "yamlpath correct value on field" tests: test_items: - - yamlpath: "{.readOnlyPort}" + - path: "{.readOnlyPort}" compare: op: gt value: 14999 @@ -258,41 +258,41 @@ groups: text: "yamlpath field absent" tests: test_items: - - yamlpath: "{.fieldThatIsUnset}" + - path: "{.fieldThatIsUnset}" set: false - id: 22 text: "yamlpath correct value on nested field" tests: test_items: - - yamlpath: "{.authentication.anonymous.enabled}" + - path: "{.authentication.anonymous.enabled}" compare: op: eq value: "false" set: true - id: 23 - text: "jsonpath on invalid json" + text: "path on invalid json" tests: test_items: - - jsonpath: "{.authentication.anonymous.enabled}" + - path: "{.authentication.anonymous.enabled}" compare: op: eq value: "false" set: true - id: 24 - text: "jsonpath with broken expression" + text: "path with broken expression" tests: test_items: - - jsonpath: "{.missingClosingBrace" + - path: "{.missingClosingBrace" set: true - id: 25 text: "yamlpath on invalid yaml" tests: test_items: - - yamlpath: "{.authentication.anonymous.enabled}" + - path: "{.authentication.anonymous.enabled}" compare: op: eq value: "false" diff --git a/check/test.go b/check/test.go index 924e1c4..9ddb469 100644 --- a/check/test.go +++ b/check/test.go @@ -42,13 +42,12 @@ const ( ) type testItem struct { - Flag string - Jsonpath string - Yamlpath string - Output string - Value string - Set bool - Compare compare + Flag string + Path string + Output string + Value string + Set bool + 
Compare compare } type compare struct { @@ -74,40 +73,34 @@ func (t *testItem) execute(s string) *testOutput { // Flag comparison: check if the flag is present in the input match = strings.Contains(s, t.Flag) } else { - // Means either t.Jsonpath != "" or t.Yamlpath != "" - // Find out and convert the input as needed + // Path != "" - we don't know whether it's YAML or JSON but + // we can just try one then the other buf := new(bytes.Buffer) var jsonInterface interface{} - var pathExpression string - if t.Yamlpath != "" { - pathExpression = t.Yamlpath - err := yaml.Unmarshal([]byte(s), &jsonInterface) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to load YAML from provided input \"%s\": %v\n", s, err) - return failTestItem("failed to load YAML") - } - } else if t.Jsonpath != "" { - pathExpression = t.Jsonpath + if t.Path != "" { err := json.Unmarshal([]byte(s), &jsonInterface) if err != nil { - fmt.Fprintf(os.Stderr, "failed to load JSON from provided input: \"%s\": %v\n", s, err) - return failTestItem("failed to load JSON") + err := yaml.Unmarshal([]byte(s), &jsonInterface) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to load YAML or JSON from provided input \"%s\": %v\n", s, err) + return failTestItem("failed to load YAML or JSON") + } } } // Parse the jsonpath/yamlpath expression... 
j := jsonpath.New("jsonpath") j.AllowMissingKeys(true) - err := j.Parse(pathExpression) + err := j.Parse(t.Path) if err != nil { - fmt.Fprintf(os.Stderr, "unable to parse path expression \"%s\": %v\n", pathExpression, err) + fmt.Fprintf(os.Stderr, "unable to parse path expression \"%s\": %v\n", t.Path, err) return failTestItem("unable to parse path expression") } err = j.Execute(buf, jsonInterface) if err != nil { - fmt.Fprintf(os.Stderr, "error executing path expression \"%s\": %v\n", pathExpression, err) + fmt.Fprintf(os.Stderr, "error executing path expression \"%s\": %v\n", t.Path, err) return failTestItem("error executing path expression") } @@ -123,7 +116,7 @@ func (t *testItem) execute(s string) *testOutput { if t.Flag != "" { // Expects flags in the form; // --flag=somevalue - // flag: somevalue + // flag: somevalue // --flag // somevalue pttn := `(` + t.Flag + `)(=|: *)*([^\s]*) *` From 596dae03d9ea22dd8dd129967bf65028de56a3cd Mon Sep 17 00:00:00 2001 From: Liz Rice Date: Thu, 11 Apr 2019 17:05:57 +0100 Subject: [PATCH 05/15] Don't assume master if 0 master binaries specified --- cmd/common.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/common.go b/cmd/common.go index 9e29ede..2d6463c 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -202,9 +202,15 @@ func isMaster() bool { _ = loadConfig(check.MASTER) glog.V(2).Info("Checking if the current node is running master components") masterConf := viper.Sub(string(check.MASTER)) - if _, err := getBinaries(masterConf); err != nil { + components, err := getBinaries(masterConf) + + if err != nil { glog.V(2).Info(err) return false } + if len(components) == 0 { + glog.V(2).Info("No master binaries specified") + return false + } return true } From de623220e1a94dc6fdba123cfd5f133e508bec0d Mon Sep 17 00:00:00 2001 From: Liz Rice Date: Thu, 11 Apr 2019 18:28:08 +0100 Subject: [PATCH 06/15] No need to load config just to check if components are running. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This also allows for there to be no master.yaml file, for environments where such a thing doesn’t need to exist --- cmd/common.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/common.go b/cmd/common.go index 2d6463c..ed6e9b5 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -199,7 +199,6 @@ func loadConfig(nodetype check.NodeType) string { // isMaster verify if master components are running on the node. func isMaster() bool { - _ = loadConfig(check.MASTER) glog.V(2).Info("Checking if the current node is running master components") masterConf := viper.Sub(string(check.MASTER)) components, err := getBinaries(masterConf) From 27dc75fefa873226cac71a007af4847d03ebd75e Mon Sep 17 00:00:00 2001 From: Liz Rice Date: Thu, 11 Apr 2019 18:36:30 +0100 Subject: [PATCH 07/15] No need for unused master config file. Better comments in config file --- cfg/1.11-json/config.yaml | 9 +- cfg/1.11-json/master.yaml | 1446 ------------------------------------- 2 files changed, 1 insertion(+), 1454 deletions(-) delete mode 100644 cfg/1.11-json/master.yaml diff --git a/cfg/1.11-json/config.yaml b/cfg/1.11-json/config.yaml index b34336d..ce3c054 100644 --- a/cfg/1.11-json/config.yaml +++ b/cfg/1.11-json/config.yaml @@ -1,12 +1,5 @@ --- -## Controls Files. -# These are YAML files that hold all the details for running checks. -# -## Uncomment to use different control file paths. 
-# masterControls: ./cfg/master.yaml -# nodeControls: ./cfg/node.yaml -# federatedControls: ./cfg/federated.yaml - +# Config file for systems such as EKS where config is in JSON files # Master nodes are controlled by EKS and not user-accessible node: kubernetes: diff --git a/cfg/1.11-json/master.yaml b/cfg/1.11-json/master.yaml deleted file mode 100644 index 0456578..0000000 --- a/cfg/1.11-json/master.yaml +++ /dev/null @@ -1,1446 +0,0 @@ ---- -controls: -version: 1.11 -id: 1 -text: "Master Node Security Configuration" -type: "master" -groups: -- id: 1.1 - text: "API Server" - checks: - - id: 1.1.1 - text: "Ensure that the --anonymous-auth argument is set to false (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--anonymous-auth" - compare: - op: eq - value: false - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the below parameter. - --anonymous-auth=false - scored: true - - - id: 1.1.2 - text: "Ensure that the --basic-auth-file argument is not set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--basic-auth-file" - set: false - remediation: | - Follow the documentation and configure alternate mechanisms for authentication. Then, - edit the API server pod specification file $apiserverconf - on the master node and remove the --basic-auth-file= - parameter. - scored: true - - - id: 1.1.3 - text: "Ensure that the --insecure-allow-any-token argument is not set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--insecure-allow-any-token" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and remove the --insecure-allow-any-token - parameter. 
- scored: true - - - id: 1.1.4 - text: "Ensure that the --kubelet-https argument is set to true (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--kubelet-https" - compare: - op: eq - value: true - set: true - - flag: "--kubelet-https" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and remove the --kubelet-https parameter. - scored: true - - - id: 1.1.5 - text: "Ensure that the --insecure-bind-address argument is not set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--insecure-bind-address" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and remove the --insecure-bind-address - parameter. - scored: true - - - id: 1.1.6 - text: "Ensure that the --insecure-port argument is set to 0 (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--insecure-port" - compare: - op: eq - value: 0 - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - apiserver.yaml on the master node and set the below parameter. - --insecure-port=0 - scored: true - - - id: 1.1.7 - text: "Ensure that the --secure-port argument is not set to 0 (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--secure-port" - compare: - op: gt - value: 0 - set: true - - flag: "--secure-port" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and either remove the --secure-port parameter or - set it to a different (non-zero) desired port. 
- scored: true - - - id: 1.1.8 - text: "Ensure that the --profiling argument is set to false (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the below parameter. - --profiling=false - scored: true - - - id: 1.1.9 - text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--repair-malformed-updates" - compare: - op: eq - value: false - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the below parameter. - --repair-malformed-updates=false - scored: true - - - id: 1.1.10 - text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: nothave - value: AlwaysAdmit - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins parameter to a - value that does not include AlwaysAdmit. - scored: true - - - id: 1.1.11 - text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "AlwaysPullImages" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins to - include AlwaysPullImages. - --enable-admission-plugins=...,AlwaysPullImages,... 
- scored: true - - - id: 1.1.12 - text: "Ensure that the admission control plugin DenyEscalatingExec is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "DenyEscalatingExec" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins parameter to a - value that includes DenyEscalatingExec. - --enable-admission-plugins=...,DenyEscalatingExec,... - scored: true - - - id: 1.1.13 - text: "Ensure that the admission control plugin SecurityContextDeny is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "SecurityContextDeny" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins parameter to - include SecurityContextDeny. - --enable-admission-plugins=...,SecurityContextDeny,... - scored: true - - - id: 1.1.14 - text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--disable-admission-plugins" - compare: - op: nothave - value: "NamespaceLifecycle" - set: true - - flag: "--disable-admission-plugins" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --disable-admission-plugins parameter to - ensure it does not include NamespaceLifecycle. - --disable-admission-plugins=...,NamespaceLifecycle,... 
- scored: true - - - id: 1.1.15 - text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-path" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --audit-log-path parameter to a suitable - path and file where you would like audit logs to be written, for example: - --audit-log-path=/var/log/apiserver/audit.log - scored: true - - - id: 1.1.16 - text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxage" - compare: - op: gte - value: 30 - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --audit-log-maxage parameter to 30 or - as an appropriate number of days: --audit-log-maxage=30 - scored: true - - - id: 1.1.17 - text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxbackup" - compare: - op: gte - value: 10 - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --audit-log-maxbackup parameter to 10 - or to an appropriate value. - --audit-log-maxbackup=10 - scored: true - - - id: 1.1.18 - text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxsize" - compare: - op: gte - value: 100 - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --audit-log-maxsize parameter to an - appropriate size in MB. 
For example, to set it as 100 MB: - --audit-log-maxsize=100 - scored: true - - - id: 1.1.19 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: nothave - value: "AlwaysAllow" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --authorization-mode parameter to - values other than AlwaysAllow. One such example could be as below. - --authorization-mode=RBAC - scored: true - - - id: 1.1.20 - text: "Ensure that the --token-auth-file parameter is not set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--token-auth-file" - set: false - remediation: | - Follow the documentation and configure alternate mechanisms for authentication. Then, - edit the API server pod specification file $apiserverconf - on the master node and remove the --token-auth-file= - parameter. - scored: true - - - id: 1.1.21 - text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--kubelet-certificate-authority" - set: true - remediation: | - Follow the Kubernetes documentation and setup the TLS connection between the - apiserver and kubelets. Then, edit the API server pod specification file - $apiserverconf on the master node and set the --kubelet-certificate-authority - parameter to the path to the cert file for the certificate authority. 
- --kubelet-certificate-authority= - scored: true - - - id: 1.1.22 - text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--kubelet-client-certificate" - set: true - - flag: "--kubelet-client-key" - set: true - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and kubelets. Then, edit API server pod specification file - $apiserverconf on the master node and set the - kubelet client certificate and key parameters as below. - --kubelet-client-certificate= - --kubelet-client-key= - scored: true - - - id: 1.1.23 - text: "Ensure that the --service-account-lookup argument is set to true (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-lookup" - compare: - op: eq - value: true - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the below parameter. - --service-account-lookup=true - scored: true - - - id: 1.1.24 - text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "PodSecurityPolicy" - set: true - remediation: | - Follow the documentation and create Pod Security Policy objects as per your environment. - Then, edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins parameter to a - value that includes PodSecurityPolicy : - --enable-admission-plugins=...,PodSecurityPolicy,... - Then restart the API Server. 
- scored: true - - - id: 1.1.25 - text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-key-file" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --service-account-key-file parameter - to the public key file for service accounts: - --service-account-key-file= - scored: true - - - id: 1.1.26 - text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as - appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--etcd-certfile" - set: true - - flag: "--etcd-keyfile" - set: true - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and etcd. Then, edit the API server pod specification file - $apiserverconf on the master node and set the etcd - certificate and key file parameters. - --etcd-certfile= - --etcd-keyfile= - scored: true - - - id: 1.1.27 - text: "Ensure that the admission control plugin ServiceAccount is set(Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "ServiceAccount" - set: true - remediation: | - Follow the documentation and create ServiceAccount objects as per your environment. - Then, edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins parameter to a - value that includes ServiceAccount. - --enable-admission-plugins=...,ServiceAccount,... 
- scored: true - - - id: 1.1.28 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set - as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--tls-cert-file" - set: true - - flag: "--tls-private-key-file" - set: true - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the master node and set the TLS certificate and private key file - parameters. - --tls-cert-file= - --tls-private-key-file= - scored: true - - - id: 1.1.29 - text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--client-ca-file" - set: true - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the master node and set the client certificate authority file. - --client-ca-file= - scored: true - - - id: 1.1.30 - text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--tls-cipher-suites" - compare: - op: has - value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the below parameter. 
- --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - scored: false - - - id: 1.1.31 - text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--etcd-cafile" - set: true - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and etcd. Then, edit the API server pod specification file - $apiserverconf on the master node and set the etcd - certificate authority file parameter. - --etcd-cafile= - scored: true - - - id: 1.1.32 - text: "Ensure that the --authorization-mode argument is set to Node (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: has - value: "Node" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the master node and set the --authorization-mode parameter to a - value that includes Node. - --authorization-mode=Node,RBAC - scored: true - - - id: 1.1.33 - text: "Ensure that the admission control plugin NodeRestriction is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "NodeRestriction" - set: true - remediation: | - Follow the Kubernetes documentation and configure NodeRestriction plug-in on - kubelets. Then, edit the API server pod specification file $apiserverconf - on the master node and set the --enable-admission-plugins parameter to a - value that includes NodeRestriction. - --enable-admission-plugins=...,NodeRestriction,... 
- scored: true - - - id: 1.1.34 - text: "Ensure that the --experimental-encryption-provider-config argument is - set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--experimental-encryption-provider-config" - set: true - remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - Then, edit the API server pod specification file $apiserverconf on the - master node and set the --experimental-encryption-provider-config parameter - to the path of that file: - --experimental-encryption-provider-config= - scored: true - - - id: 1.1.35 - text: "Ensure that the encryption provider is set to aescbc (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - type: "manual" - remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. In this file, - choose aescbc as the encryption provider. - For example, - kind: EncryptionConfig - apiVersion: v1 - resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64-encoded secret> - scored: true - - - id: 1.1.36 - text: "Ensure that the admission control plugin EventRateLimit is set (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "EventRateLimit" - set: true - remediation: | - Follow the Kubernetes documentation and set the desired limits in a - configuration file. Then, edit the API server pod specification file - $apiserverconf and set the below parameters. - --enable-admission-plugins=...,EventRateLimit,... 
- --admission-control-config-file= - scored: true - - - id: 1.1.37 - text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--feature-gates" - compare: - op: nothave - value: "AdvancedAuditing=false" - set: true - - flag: "--feature-gates" - set: false - remediation: | - Follow the Kubernetes documentation and set the desired audit policy in the - /etc/kubernetes/audit-policy.yaml file. Then, edit the API server pod specification file $apiserverconf - and set the below parameters. - --audit-policy-file=/etc/kubernetes/audit-policy.yaml - scored: true - - - id: 1.1.38 - text: "Ensure that the --request-timeout argument is set as appropriate (Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--request-timeout" - set: false - - flag: "--request-timeout" - set: true - remediation: | - Edit the API server pod specification file $apiserverconf - and set the below parameter as appropriate and if needed. For example, - --request-timeout=300s - scored: true - - - id: 1.1.39 - text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers ( Not Scored)" - audit: "ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--tls-cipher-suites" - compare: - op: eq - value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - set: true - remediation: | - Edit the API server pod specification file /etc/kubernetes/manifests - kube-apiserver.yaml on the master node and set the below parameter. 
- --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - scored: false - -- id: 1.2 - text: "Scheduler" - checks: - - id: 1.2.1 - text: "Ensure that the --profiling argument is set to false (Scored)" - audit: "ps -ef | grep $schedulerbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - set: true - remediation: | - Edit the Scheduler pod specification file $schedulerconf - file on the master node and set the below parameter. - --profiling=false - scored: true - - - id: 1.2.2 - text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)" - audit: "ps -ef | grep $schedulerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--address" - compare: - op: eq - value: "127.0.0.1" - set: true - - flag: "--address" - set: false - remediation: | - Edit the Scheduler pod specification file $schedulerconf - file on the master node and ensure the correct value for the - --address parameter. 
- scored: true - -- id: 1.3 - text: "Controller Manager" - checks: - - id: 1.3.1 - text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--terminated-pod-gc-threshold" - set: true - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, for example: - --terminated-pod-gc-threshold=10 - scored: true - - - id: 1.3.2 - text: "Ensure that the --profiling argument is set to false (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - set: true - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the master node and set the below parameter. - --profiling=false - scored: true - - - id: 1.3.3 - text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--use-service-account-credentials" - compare: - op: eq - value: true - set: true - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the master node to set the below parameter. - --use-service-account-credentials=true - scored: true - - - id: 1.3.4 - text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-private-key-file" - set: true - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the master node and set the --service-account-private- - key-file parameter to the private key file for service accounts. 
- --service-account-private-key-file= - scored: true - - - id: 1.3.5 - text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--root-ca-file" - set: true - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the master node and set the --root-ca-file parameter to - the certificate bundle file. - --root-ca-file= - scored: true - - - id: 1.3.6 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--feature-gates" - compare: - op: eq - value: "RotateKubeletServerCertificate=true" - set: true - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - controller-manager.yaml on the master node and set the --feature-gates parameter to - include RotateKubeletServerCertificate=true. - --feature-gates=RotateKubeletServerCertificate=true - scored: true - - - id: 1.3.7 - text: "Ensure that the --address argument is set to 127.0.0.1 (Scored)" - audit: "ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--address" - compare: - op: eq - value: "127.0.0.1" - set: true - - flag: "--address" - set: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - controller-manager.yaml on the master node and ensure the correct value - for the --address parameter. 
- scored: true - -- id: 1.4 - text: "Configuration Files" - checks: - - id: 1.4.1 - text: "Ensure that the API server pod specification file permissions are - set to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %a $apiserverconf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chmod 644 $apiserverconf - scored: true - - - id: 1.4.2 - text: "Ensure that the API server pod specification file ownership is set to - root:root (Scored)" - audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chown root:root $apiserverconf - scored: true - - - id: 1.4.3 - text: "Ensure that the controller manager pod specification file - permissions are set to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %a $controllermanagerconf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. 
- For example, - chmod 644 $controllermanagerconf - scored: true - - - id: 1.4.4 - text: "Ensure that the controller manager pod specification file - ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chown root:root $controllermanagerconf - scored: true - - - id: 1.4.5 - text: "Ensure that the scheduler pod specification file permissions are set - to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %a $schedulerconf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chmod 644 $schedulerconf - scored: true - - - id: 1.4.6 - text: "Ensure that the scheduler pod specification file ownership is set to - root:root (Scored)" - audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. 
- For example, - chown root:root $schedulerconf - scored: true - - - id: 1.4.7 - text: "Ensure that the etcd pod specification file permissions are set to - 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %a $etcdconf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chmod 644 $etcdconf - scored: true - - - id: 1.4.8 - text: "Ensure that the etcd pod specification file ownership is set to - root:root (Scored)" - audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chown root:root $etcdconf - scored: true - - - id: 1.4.9 - text: "Ensure that the Container Network Interface file permissions are - set to 644 or more restrictive (Not Scored)" - audit: "stat -c %a " - type: manual - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chmod 644 - scored: true - - - id: 1.4.10 - text: "Ensure that the Container Network Interface file ownership is set - to root:root (Not Scored)" - audit: "stat -c %U:%G " - type: manual - remediation: | - Run the below command (based on the file location on your system) on the master node. 
- For example, - chown root:root - scored: true - - - id: 1.4.11 - text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %a - tests: - test_items: - - flag: "700" - compare: - op: eq - value: "700" - set: true - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir , - from the below command: - ps -ef | grep $etcdbin - Run the below command (based on the etcd data directory found above). For example, - chmod 700 /var/lib/etcd - scored: true - - - id: 1.4.12 - text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G - tests: - test_items: - - flag: "etcd:etcd" - set: true - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir , - from the below command: - ps -ef | grep $etcdbin - Run the below command (based on the etcd data directory found above). For example, - chown etcd:etcd /var/lib/etcd - scored: true - - - id: 1.4.13 - text: "Ensure that the admin.conf file permissions are set to 644 or - more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %a /etc/kubernetes/admin.conf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. 
- For example, - chmod 644 /etc/kubernetes/admin.conf - scored: true - - - id: 1.4.14 - text: "Ensure that the admin.conf file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the master node. - For example, - chown root:root /etc/kubernetes/admin.conf - scored: true - - - id: 1.4.15 - text: "Ensure that the scheduler.conf file permissions are set to 644 or - more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %a /etc/kubernetes/scheduler.conf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the - master node. For example, chmod 644 /etc/kubernetes/scheduler.conf - scored: true - - - id: 1.4.16 - text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/scheduler.conf; then stat -c %U:%G /etc/kubernetes/scheduler.conf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the - master node. 
For example, chown root:root /etc/kubernetes/scheduler.conf - scored: true - - - id: 1.4.17 - text: "Ensure that the controller-manager.conf file permissions are set - to 644 or more restrictive (Scored)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %a /etc/kubernetes/controller-manager.conf; fi'" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command (based on the file location on your system) on the - master node. For example, chmod 644 /etc/kubernetes/controller-manager.conf - scored: true - - - id: 1.4.18 - text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/controller-manager.conf; then stat -c %U:%G /etc/kubernetes/controller-manager.conf; fi'" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command (based on the file location on your system) on the - master node. For example, chown root:root /etc/kubernetes/controller-manager.conf - scored: true - -- id: 1.5 - text: "etcd" - checks: - - id: 1.5.1 - text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)" - audit: "ps -ef | grep $etcdbin | grep -v grep" - tests: - test_items: - - flag: "--cert-file" - set: true - - flag: "--key-file" - set: true - remediation: | - Follow the etcd service documentation and configure TLS encryption. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameters. 
      --cert-file=
      --key-file=
    scored: true

  - id: 1.5.2
    text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
    audit: "ps -ef | grep $etcdbin | grep -v grep"
    tests:
      test_items:
      - flag: "--client-cert-auth"
        compare:
          op: eq
          value: true
        set: true
    remediation: |
      Edit the etcd pod specification file $etcdconf on the master
      node and set the below parameter.
      --client-cert-auth="true"
    scored: true

  - id: 1.5.3
    text: "Ensure that the --auto-tls argument is not set to true (Scored)"
    audit: "ps -ef | grep $etcdbin | grep -v grep"
    tests:
      bin_op: or
      test_items:
      - flag: "--auto-tls"
        set: false
      - flag: "--auto-tls"
        compare:
          op: eq
          value: false
    remediation: |
      Edit the etcd pod specification file $etcdconf on the master
      node and either remove the --auto-tls parameter or set it to false.
      --auto-tls=false
    scored: true

  - id: 1.5.4
    text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
    set as appropriate (Scored)"
    audit: "ps -ef | grep $etcdbin | grep -v grep"
    type: "manual"
    tests:
      bin_op: and
      test_items:
      - flag: "--peer-cert-file"
        set: true
      - flag: "--peer-key-file"
        set: true
    remediation: |
      Follow the etcd service documentation and configure peer TLS encryption as appropriate
      for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the
      master node and set the below parameters.
      --peer-cert-file=
      --peer-key-file=
    scored: true

  - id: 1.5.5
    text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
    audit: "ps -ef | grep $etcdbin | grep -v grep"
    type: "manual"
    tests:
      test_items:
      - flag: "--peer-client-cert-auth"
        compare:
          op: eq
          value: true
        set: true
    remediation: |
      Edit the etcd pod specification file $etcdconf on the master
      node and set the below parameter.
- --peer-client-cert-auth=true - scored: true - - - id: 1.5.6 - text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)" - audit: "ps -ef | grep $etcdbin | grep -v grep" - type: "manual" - tests: - bin_op: or - test_items: - - flag: "--peer-auto-tls" - set: false - - flag: "--peer-auto-tls" - compare: - op: eq - value: false - set: true - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and either remove the --peer-auto-tls parameter or set it to false. - --peer-auto-tls=false - scored: true - - - id: 1.5.7 - text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)" - audit: "ps -ef | grep $etcdbin | grep -v grep" - type: "manual" - tests: - test_items: - - flag: "--trusted-ca-file" - set: true - remediation: | - Follow the etcd documentation and create a dedicated certificate authority setup for the - etcd service. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameter. - --trusted-ca-file= - scored: false - -- id: 1.6 - text: "General Security Primitives" - checks: - - id: 1.6.1 - text: "Ensure that the cluster-admin role is only used where required (Not Scored)" - type: "manual" - remediation: | - Remove any unneeded clusterrolebindings : - kubectl delete clusterrolebinding [name] - scored: false - - - id: 1.6.2 - text: "Create administrative boundaries between resources using namespaces (Not Scored)" - type: "manual" - remediation: | - Follow the documentation and create namespaces for objects in your deployment as you - need them. - scored: false - - - id: 1.6.3 - text: "Create network segmentation using Network Policies (Not Scored)" - type: "manual" - remediation: | - Follow the documentation and create NetworkPolicy objects as you need them. 
    scored: false

  - id: 1.6.4
    text: "Ensure that the seccomp profile is set to docker/default in your pod
    definitions (Not Scored)"
    type: "manual"
    remediation: |
      Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
      would need to enable alpha features in the apiserver by passing
      "--feature-gates=AllAlpha=true" argument.
      Edit the $apiserverconf file on the master node and set the KUBE_API_ARGS
      parameter to "--feature-gates=AllAlpha=true"
      KUBE_API_ARGS="--feature-gates=AllAlpha=true"
      Based on your system, restart the kube-apiserver service. For example:
      systemctl restart kube-apiserver.service
      Use annotations to enable the docker/default seccomp profile in your pod definitions. An
      example is as below:
      apiVersion: v1
      kind: Pod
      metadata:
        name: trustworthy-pod
        annotations:
          seccomp.security.alpha.kubernetes.io/pod: docker/default
      spec:
        containers:
        - name: trustworthy-container
          image: sotrustworthy:latest
    scored: false

  - id: 1.6.5
    text: "Apply Security Context to Your Pods and Containers (Not Scored)"
    type: "manual"
    remediation: |
      Follow the Kubernetes documentation and apply security contexts to your pods. For a
      suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
      Containers.
    scored: false

  - id: 1.6.6
    text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
    type: "manual"
    remediation: |
      Follow the Kubernetes documentation and setup image provenance.
    scored: false

  - id: 1.6.7
    text: "Configure Network policies as appropriate (Not Scored)"
    type: "manual"
    remediation: |
      Follow the Kubernetes documentation and setup network policies as appropriate.
- For example, you could create a "default" isolation policy for a Namespace by creating a - NetworkPolicy that selects all pods but does not allow any traffic: - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: default-deny - spec: - podSelector: - scored: false - - - id: 1.6.8 - text: "Place compensating controls in the form of PSP and RBAC for - privileged containers usage (Not Scored)" - type: "manual" - remediation: | - Follow Kubernetes documentation and setup PSP and RBAC authorization for your cluster. - scored: false - -- id: 1.7 - text: "PodSecurityPolicies" - checks: - - id: 1.7.1 - text: "Do not admit privileged containers (Not Scored)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.privileged field is omitted or set to false. - scored: false - - - id: 1.7.2 - text: "Do not admit containers wishing to share the host process ID namespace (Not Scored)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostPID field is omitted or set to false. - scored: false - - - id: 1.7.3 - text: "Do not admit containers wishing to share the host IPC namespace (Not Scored)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostIPC field is omitted or set to false. - scored: false - - - id: 1.7.4 - text: "Do not admit containers wishing to share the host network namespace (Not Scored)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.hostNetwork field is omitted or set to false. - scored: false - - - id: 1.7.5 - text: "Do not admit containers with allowPrivilegeEscalation (Not Scored)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.allowPrivilegeEscalation field is omitted or set to false. 
- scored: false - - - id: 1.7.6 - text: "Do not admit root containers (Not Scored)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0. - scored: false - - - id: 1.7.7 - text: "Do not admit containers with dangerous capabilities (Not Scored)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the .spec.requiredDropCapabilities is set to include either NET_RAW or ALL. - scored: false From fa60fb68fd5b2386bab6b37a477d8c320f5c18b0 Mon Sep 17 00:00:00 2001 From: Liz Rice Date: Thu, 11 Apr 2019 18:45:16 +0100 Subject: [PATCH 08/15] Add job for EKS --- job-eks.yaml | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 job-eks.yaml diff --git a/job-eks.yaml b/job-eks.yaml new file mode 100644 index 0000000..d51909f --- /dev/null +++ b/job-eks.yaml @@ -0,0 +1,34 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + spec: + hostPID: true + containers: + - name: kube-bench + # Push the image to your ECR and then refer to it here + image: + command: ["kube-bench", "--version", "1.11-json"] + volumeMounts: + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + - name: etc-systemd + mountPath: /etc/systemd + - name: etc-kubernetes + mountPath: /etc/kubernetes + restartPolicy: Never + volumes: + - name: var-lib-kubelet + hostPath: + path: "/var/lib/kubelet" + - name: etc-systemd + hostPath: + path: "/etc/systemd" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" + - name: usr-bin + hostPath: + path: "/usr/bin" From a613f6f0284b1164312ad19f7bd88bd46da10fdd Mon Sep 17 00:00:00 2001 From: Liz Rice Date: Thu, 11 Apr 2019 19:00:17 +0100 Subject: [PATCH 09/15] Document job for EKS --- README.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git 
a/README.md b/README.md index 06a9fae..76c7d8f 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ NAME READY STATUS RESTARTS AGE kube-bench-j76s9 0/1 Completed 0 11s # The results are held in the pod's logs -k logs kube-bench-j76s9 +kubectl logs kube-bench-j76s9 [INFO] 1 Master Node Security Configuration [INFO] 1.1 API Server ... @@ -84,6 +84,15 @@ To run the tests on the master node, the pod needs to be scheduled on that node. The default labels applied to master nodes has changed since Kubernetes 1.11, so if you are using an older version you may need to modify the nodeSelector and tolerations to run the job on the master node. +### Running in an EKS cluster + +There is a `job-eks.yaml` file for running the kube-bench node checks on an EKS cluster. **Note that you must update the image reference in `job-eks.yaml`.** Typically you will push the container image for kube-bench to ECR and refer to it there in the YAML file. + +There are two significant differences on EKS: + +* It uses [config files in JSON format](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/) +* It's not possible to schedule jobs onto the master node, so master checks can't be performed + ### Installing from a container This command copies the kube-bench binary and configuration files to your host from the Docker container: From e70f50b2b5a1064b1a72460da7feb7cbf38d0d58 Mon Sep 17 00:00:00 2001 From: yoavrotems Date: Tue, 16 Apr 2019 06:01:51 +0000 Subject: [PATCH 10/15] update files --- cfg/ocp-3.10/master.yaml | 2954 +++++++++++++++++++------------------- cfg/ocp-3.10/node.yaml | 752 +++++----- 2 files changed, 1830 insertions(+), 1876 deletions(-) diff --git a/cfg/ocp-3.10/master.yaml b/cfg/ocp-3.10/master.yaml index 3cb07bf..4c44044 100644 --- a/cfg/ocp-3.10/master.yaml +++ b/cfg/ocp-3.10/master.yaml @@ -1,1500 +1,1454 @@ ---- -controls: -version: 1.6 -id: 1 -text: "Master Node Security Configuration" -type: "master" -groups: -- id: 1.1 - text: "API Server" - 
checks: - - id: 1.1.1 - text: "Ensure that the --anonymous-auth argument is set to false (Scored)" - type: "skip" - scored: true - - - id: 1.1.2 - text: "Ensure that the --basic-auth-file argument is not set (Scored)" - audit: "grep -A2 basic-auth-file /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "--basic-auth-file" - compare: - op: eq - value: "" - set: false - remediation: | - Edit the kubernetes master config file /etc/origin/master/master-config.yaml and - remove the basic-auth-file entry. - - kubernetesMasterConfig: -  apiServerArguments: -    basic-auth-file: -    - /path/to/any/file - scored: true - - - id: 1.1.3 - text: "Ensure that the --insecure-allow-any-token argument is not set (Scored)" - type: "skip" - scored: true - - - id: 1.1.4 - text: "Ensure that the --kubelet-https argument is set to true (Scored)" - audit: "grep -A4 kubeletClientInfo /etc/origin/master/master-config.yaml" - tests: - bin_op: and - test_items: - - flag: "kubeletClientInfo:" - compare: - op: eq - value: "kubeletClientInfo:" - set: true - - flag: "ca: ca-bundle.crt" - compare: - op: has - value: "ca-bundle.crt" - set: true - - flag: "certFile: master.kubelet-client.crt" - compare: - op: has - value: "master.kubelet-client.crt" - set: true - - flag: "keyFile: master.kubelet-client.key" - compare: - op: has - value: "master.kubelet-client.key" - set: true - - flag: "port: 10250" - compare: - op: eq - value: "port: 10250" - set: true - remediation: | - Edit the kubernetes master config file /etc/origin/master/master-config.yaml - and change it to match the below. 
- - kubeletClientInfo: -  ca: ca-bundle.crt -  certFile: master.kubelet-client.crt -  keyFile: master.kubelet-client.key -  port: 10250 - scored: true - - - id: 1.1.5 - text: "Ensure that the --insecure-bind-address argument is not set (Scored)" - audit: "grep -A2 insecure-bind-address /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "insecure-bind-address" - set: false - remediation: | - Edit the kubernetes master config file /etc/origin/master/master-config.yaml - and remove the insecure-bind-address entry. - - kubernetesMasterConfig: -  apiServerArguments: -    insecure-bind-address: -    - 127.0.0.1 - scored: true - - - id: 1.1.6 - text: "Ensure that the --insecure-port argument is set to 0 (Scored)" - audit: "grep -A2 insecure-port /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "insecure-port" - set: false - remediation: | - Edit the kubernetes master config file /etc/origin/master/master-config.yaml - and remove the insecure-port entry. - - kubernetesMasterConfig: -  apiServerArguments: -   insecure-port: -  - 0 - scored: true - - - id: 1.1.7 - text: "Ensure that the --secure-port argument is not set to 0 (Scored)" - audit: "grep -A2 secure-port /etc/origin/master/master-config.yaml" - tests: - bin_op: or - test_items: - - flag: "secure-port" - set: false - - flag: "secure-port" - compare: - op: nothave - value: "0" - set: true - remediation: | - Edit the kubernetes master config file /etc/origin/master/master-config.yaml - and either remove the secure-port parameter or set it to a different (non-zero) - desired port. 
- - kubernetesMasterConfig: -  apiServerArguments: -   secure-port: -  - 8443 - scored: true - - - id: 1.1.8 - text: "Ensure that the --profiling argument is set to false (Scored)" - type: "skip" - scored: true - - - id: 1.1.9 - text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)" - audit: "grep -A2 repair-malformed-updates /etc/origin/master/master-config.yaml" - tests: - bin_op: or - test_items: - - flag: "repair-malformed-updates" - set: false - - flag: "repair-malformed-updates" - compare: - op: has - value: "true" - set: true - remediation: | - Edit the kubernetes master config file /etc/origin/master/master-config.yaml - and remove the repair-malformed-updates entry or set repair-malformed-updates=true. - scored: true - - - id: 1.1.10 - text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)" - audit: "grep -A4 AlwaysAdmit /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "AlwaysAdmit" - set: false - remediation: | - Edit the kubernetes master config file /etc/origin/master/master-config.yaml - and remove the the entry below. - - AlwaysAdmit: - configuration: - kind: DefaultAdmissionConfig - apiVersion: v1 - disable: false - scored: true - - - id: 1.1.11 - text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)" - audit: "grep -A4 AlwaysPullImages /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "disable: false" - compare: - op: has - value: "false" - set: true - remediation: | - Edit the kubernetes master config file /etc/origin/master/master-config.yaml - and add the the entry below. 
- - admissionConfig: - pluginConfig: - AlwaysPullImages: - configuration: - kind: DefaultAdmissionConfig - apiVersion: v1 - disable: false - scored: true - - - id: 1.1.12 - text: "Ensure that the admission control plugin DenyEscalatingExec is set (Scored)" - type: "skip" - scored: true - - - id: 1.1.13 - text: "Ensure that the admission control plugin SecurityContextDeny is set (Scored)" - type: "skip" - scored: true - - - id: 1.1.14 - text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)" - audit: "grep -A4 NamespaceLifecycle /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "NamespaceLifecycle" - set: false - remediation: | - Edit the kubernetes master config file /etc/origin/master/master-config.yaml - and remove the following entry. - - NamespaceLifecycle: - configuration: - kind: DefaultAdmissionConfig - apiVersion: v1 - disable: true - scored: true - - - id: 1.1.15 - text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)" - audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "enabled: true" - compare: - op: has - value: "true" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml, update the following entry and restart the API server. - - auditConfig: - auditFilePath: "/var/log/audit-ocp.log" - enabled: true - maximumFileRetentionDays: 10 - maximumFileSizeMegabytes: 100 - maximumRetainedFiles: 10 - - Make the same changes in the inventory/ansible variables so the changes are not - lost when an upgrade occurs. 
- scored: true - - - id: 1.1.16 - text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)" - audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "maximumFileRetentionDays: 10" - compare: - op: has - value: "maximumFileRetentionDays" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml, - update the maximumFileRetentionDays entry and restart the API server. - - auditConfig: - auditFilePath: "/var/log/audit-ocp.log" - enabled: true - maximumFileRetentionDays: 10 - maximumFileSizeMegabytes: 100 - maximumRetainedFiles: 10 - - Make the same changes in the inventory/ansible variables so the changes are not - lost when an upgrade occurs. - scored: true - - - id: 1.1.17 - text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)" - audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "maximumRetainedFiles: 10" - compare: - op: has - value: "maximumRetainedFiles" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml, update the maximumRetainedFiles entry, - set enabled to true and restart the API server. - - auditConfig: - auditFilePath: "/var/log/audit-ocp.log" - enabled: true - maximumFileRetentionDays: 10 - maximumFileSizeMegabytes: 100 - maximumRetainedFiles: 10 - - Make the same changes in the inventory/ansible variables so the changes are not - lost when an upgrade occurs. 
- scored: true - - - id: 1.1.18 - text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)" - audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "maximumFileSizeMegabytes: 100" - compare: - op: has - value: "maximumFileSizeMegabytes" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml, update the maximumFileSizeMegabytes entry, - set enabled to true and restart the API server. - - auditConfig: - auditFilePath: "/var/log/audit-ocp.log" - enabled: true - maximumFileRetentionDays: 10 - maximumFileSizeMegabytes: 100 - maximumRetainedFiles: 10 - - Make the same changes in the inventory/ansible variables so the changes are not - lost when an upgrade occurs. - scored: true - - - id: 1.1.19 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)" - audit: "grep -A1 authorization-mode /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "authorization-mode" - set: false - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove the authorization-mode - entry. - - kubernetesMasterConfig: -  apiServerArguments: -    authorization-mode: -    - AllowAll - scored: true - - - id: 1.1.20 - text: "Ensure that the --token-auth-file parameter is not set (Scored)" - audit: "grep token-auth-file /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "token-auth-file" - set: false - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove the token-auth-file - entry under apiserverArguments section. 
- - kubernetesMasterConfig: -  apiServerArguments: -    token-auth-file: -    - /path/to/file - scored: true - - - id: 1.1.21 - text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)" - audit: "grep -A1 kubelet-certificate-authority /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "kubelet-certificate-authority" - set: false - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove the following - configuration under apiserverArguments section. - - kubernetesMasterConfig: -  apiServerArguments: -    kubelet-certificat-authority: -    - /path/to/ca - scored: true - - - id: 1.1.22 - text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)" - audit: "grep -A4 kubeletClientInfo /etc/origin/master/master-config.yaml" - tests: - bin_op: and - test_items: - - flag: "keyFile: master.kubelet-client.key" - compare: - op: has - value: "keyFile: master.kubelet-client.key" - set: true - - flag: "certFile: master.kubelet-client.crt" - compare: - op: has - value: "certFile: master.kubelet-client.crt" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and add the following - configuration under kubeletClientInfo - - kubeletClientInfo: -  ca: ca-bundle.crt -  certFile: master.kubelet-client.crt -  keyFile: master.kubelet-client.key - port: 10250 - scored: true - - - id: 1.1.23 - text: "Ensure that the --service-account-lookup argument is set to true" - type: skip - scored: true - - - id: 1.1.24 - text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)" - type: "skip" - scored: true - - - id: 1.1.25 - text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)" - audit: "grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml" - tests: - bin_op: and - test_items: - - flag: "privateKeyFile: 
serviceaccounts.private.key" - compare: - op: has - value: "privateKeyFile: serviceaccounts.private.key" - set: true - - flag: "serviceaccounts.public.key" - compare: - op: has - value: "serviceaccounts.public.key" - set: true - remediation: | - OpenShift API server does not use the service-account-key-file argument. - Even if value is set in master-config.yaml, it will not be used to verify - service account tokens, as it is in upstream Kubernetes. The ServiceAccount - token authenticator is configured with serviceAccountConfig.publicKeyFiles in - the master-config.yaml. OpenShift does not reuse the apiserver TLS key. - - Edit the Openshift master config file /etc/origin/master/master-config.yaml and set the privateKeyFile - and publicKeyFile configuration under serviceAccountConfig. - - serviceAccountConfig: -  limitSecretReferences: false -  managedNames: - - default -  - builder -  - deployer -  masterCA: ca-bundle.crt -   privateKeyFile: serviceaccounts.private.key -  publicKeyFiles: -  - serviceaccounts.public.key - - Verify that privateKeyFile and publicKeyFile exist and set. - scored: true - - - id: 1.1.26 - text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Scored)" - audit: "grep -A3 etcdClientInfo /etc/origin/master/master-config.yaml" - tests: - bin_op: and - test_items: - - flag: "certFile: master.etcd-client.crt" - compare: - op: has - value: "certFile: master.etcd-client.crt" - set: true - - flag: "keyFile: master.etcd-client.key" - compare: - op: has - value: "keyFile: master.etcd-client.key" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and set keyFile and certFile - under etcdClientInfo like below. 
- - etcdClientInfo: -  ca: master.etcd-ca.crt - certFile: master.etcd-client.crt - keyFile: master.etcd-client.key - scored: true - - - id: 1.1.27 - text: "Ensure that the admission control plugin ServiceAccount is set (Scored)" - audit: "grep -A4 ServiceAccount /etc/origin/master/master-config.yaml" - tests: - bin_op: or - test_items: - - flag: "ServiceAccount" - set: false - - flag: "disable: false" - compare: - op: has - value: "disable: false" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable ServiceAccount - admission control policy. - - ServiceAccount: - configuration: - kind: DefaultAdmissionConfig - apiVersion: v1 - disable: false - scored: true - - - id: 1.1.28 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)" - audit: "grep -A7 servingInfo /etc/origin/master/master-config.yaml" - tests: - bin_op: and - test_items: - - flag: "certFile: master.server.crt" - compare: - op: has - value: "certFile: master.server.crt" - set: true - - flag: "keyFile: master.server.key" - compare: - op: has - value: "keyFile: master.server.key" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and set keyFile and certFile under servingInfo. - - servingInfo: -  bindAddress: 0.0.0.0:8443 -   bindNetwork: tcp4 - certFile: master.server.crt - clientCA: ca.crt - keyFile: master.server.key - maxRequestsInFlight: 500 - requestTimeoutSeconds: 3600 - scored: true - - - id: 1.1.29 - text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)" - audit: "grep -A7 servingInfo /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "clientCA: ca.crt" - compare: - op: has - value: "clientCA: ca.crt" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and set clientCA under servingInfo. 
- - servingInfo: -  bindAddress: 0.0.0.0:8443 -   bindNetwork: tcp4 - certFile: master.server.crt - clientCA: ca.crt - keyFile: master.server.key - maxRequestsInFlight: 500 - requestTimeoutSeconds: 3600 - scored: true - - - id: 1.1.30 - text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)" - audit: "grep -A3 etcdClientInfo /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "ca: master.etcd-ca.crt" - compare: - op: has - value: "ca: master.etcd-ca.crt" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and set ca under etcdClientInfo. - - etcdClientInfo: -   ca: master.etcd-ca.crt - certFile: master.etcd-client.crt - keyFile: master.etcd-client.key - scored: true - - - id: 1.1.31 - text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)" - type: "skip" - scored: true - - - id: 1.1.32 - text: "Ensure that the --authorization-mode argument is set to Node (Scored)" - audit: "grep -A4 NodeRestriction /etc/origin/master/master-config.yaml" - tests: - bin_op: or - test_items: - - flag: "NodeRestriction" - set: false - - flag: "disable: false" - compare: - op: has - value: "disable: false" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable NodeRestriction ca under etcdClientInfo. - - NodeRestriction: - configuration: - kind: DefaultAdmissionConfig - apiVersion: v1 - disable: false - scored: true - - - id: 1.1.33 - text: "Ensure that the --experimental-encryption-provider-config argument is set as appropriate (Scored)" - audit: "grep -A1 experimental-encryption-provider-config /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "experimental-encryption-provider-config:" - compare: - op: has - value: "experimental-encryption-provider-config:" - set: true - remediation: | - Follow the instructions in the documentation to configure encryption. 
- https://docs.openshift.com/container-platform/3.10/admin_guide/encrypting_data.html - scored: true - - - id: 1.1.34 - text: "Ensure that the encryption provider is set to aescbc (Scored)" - audit: "grep -A1 experimental-encryption-provider-config /etc/origin/master/master-config.yaml | sed -n '2p' | awk '{ print $2 }' | xargs grep -A1 providers" - tests: - test_items: - - flag: "aescbc:" - compare: - op: has - value: "aescbc:" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and set aescbc as the first provider in encryption provider config. - See https://docs.openshift.com/container-platform/3.10/admin_guide/encrypting_data.html. - scored: true - - - id: 1.1.35 - text: "Ensure that the admission control policy is set to EventRateLimit (Scored)" - audit: "grep -A4 EventRateLimit /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "disable: false" - compare: - op: has - value: "disable: false" - set: true - remediation: | - Follow the documentation to enable the EventRateLimit plugin. - https://docs.openshift.com/container-platform/3.10/architecture/additional_concepts/admission_controllers.html#admission-controllers-general-admission-rules - scored: true - - - id: 1.1.36 - text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)" - audit: "grep AdvancedAuditing /etc/origin/master/master-config.yaml" - tests: - bin_op: or - test_items: - - flag: "AdvancedAuditing" - compare: - op: eq - value: "true" - set: true - - flag: "AdvancedAuditing" - set: false - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable AdvancedAuditing, - - kubernetesMasterConfig: -  apiServerArguments: - feature-gates: - - AdvancedAuditing=true - scored: true - - # Review 1.1.37 in Aquasec shared doc, the tests are net zero. 
- - id: 1.1.37 - text: "Ensure that the --request-timeout argument is set as appropriate (Scored)" - audit: "grep request-timeout /etc/origin/master/master-config.yaml" - type: manual - remediation: | - change the request-timeout value in the  /etc/origin/master/master-config.yaml - scored: true - - -- id: 1.2 - text: "Scheduler" - checks: - - id: 1.2.1 - text: "Ensure that the --profiling argument is set to false (Scored)" - type: "skip" - scored: true - - -- id: 1.3 - text: "Controller Manager" - checks: - - id: 1.3.1 - text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)" - audit: "grep terminated-pod-gc-threshold -A1 /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "true" - compare: - op: has - value: "true" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable terminated-pod-gc-threshold. - - kubernetesMasterConfig: -  controllerArguments: -     terminated-pod-gc-threshold: -    - true - - Enabling the "terminated-pod-gc-threshold" settings is optional. - scored: true - - - id: 1.3.2 - text: "Ensure that the --profiling argument is set to false (Scored)" - type: "skip" - scored: true - - - id: 1.3.3 - text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)" - audit: "grep -A2 use-service-account-credentials /etc/origin/master/master-config.yaml" - tests: - bin_op: or - test_items: - - flag: "use-service-account-credentials" - set: false - - flag: "true" - compare: - op: has - value: "true" - set: true - remediation: | - Edit the Openshift master config file /etc/origin/master/master-config.yaml and set use-service-account-credentials - to true under controllerArguments section. 
- - kubernetesMasterConfig: -  controllerArguments: -     use-service-account-credentials: -     - true - scored: true - - # Review 1.3.4 - - id: 1.3.4 - text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)" - audit: | - grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml | grep privateKeyFile; - grep -A2 service-account-private-key-file /etc/origin/master/master-config.yaml - tests: - bin_op: and - test_items: - - flag: "privateKeyFile: serviceaccounts.private.key" - compare: - op: has - value: "privateKeyFile" - - flag: "service-account-private-key-file" - set: false - remediation: - Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove service-account-private-key-file - scored: true - - # Review 1.3.5 - - id: 1.3.5 - text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)" - audit: "/bin/sh -c 'grep root-ca-file /etc/origin/master/master-config.yaml; grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml'" - tests: - bin_op: and - test_items: - - flag: "root-ca-file=/etc/origin/master/ca-bundle.crt" - compare: - op: has - value: "/etc/origin/master/ca-bundle.crt" - set: true - test_items: - - flag: "masterCA: ca-bundle.crt" - compare: - op: has - value: "ca-bundle.crt" - set: true - remediation: - Reset to OpenShift defaults OpenShift starts kube-controller-manager with - root-ca-file=/etc/origin/master/ca-bundle.crt by default.  OpenShift Advanced - Installation creates this certificate authority and configuration without any - configuration required. 
- - https://docs.openshift.com/container-platform/3.10/admin_guide/service_accounts.html" - scored: true - - - id: 1.3.6 - text: "Apply Security Context to Your Pods and Containers (Not Scored)" - type: "skip" - scored: false - - - id: 1.3.7 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)" - audit: "grep -B3 RotateKubeletServerCertificate=true /etc/origin/master/master-config.yaml" - tests: - test_items: - - flag: "RotateKubeletServerCertificate" - compare: - op: eq - value: "true" - set: true - remediation: - If you decide not to enable the RotateKubeletServerCertificate feature, - be sure to use the Ansible playbooks provided with the OpenShift installer to - automate re-deploying certificates. - scored: true - - -- id: 1.4 - text: "Configuration Files" - checks: - - id: 1.4.1 - text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %a /etc/origin/node/pods/apiserver.yaml" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command. - - chmod 644 /etc/origin/node/pods/apiserver.yaml - scored: true - - - id: 1.4.2 - text: "Ensure that the API server pod specification file ownership is set to root:root (Scored)" - audit: "stat -c %U:%G /etc/origin/node/pods/apiserver.yaml" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command on the master node. 
- - chown root:root /etc/origin/node/pods/apiserver.yaml - scored: true - - - id: 1.4.3 - text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %a /etc/origin/node/pods/controller.yaml" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command on the master node. - - chmod 644 /etc/origin/node/pods/controllermanager.yaml - scored: true - - - id: 1.4.4 - text: "Ensure that the controller manager pod specification file ownership is set to root:root (Scored)" - audit: "stat -c %U:%G /etc/origin/node/pods/controller.yaml" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command on the master node. - - chown root:root /etc/origin/node/pods/controllermanager.yaml - scored: true - - - id: 1.4.5 - text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %a /etc/origin/node/pods/apiserver.yaml" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command. - - chmod 644 /etc/origin/node/pods/apiserver.yaml - scored: true - - - id: 1.4.6 - text: "Ensure that the scheduler pod specification file ownership is set to root:root (Scored)" - audit: "stat -c %U:%G /etc/origin/node/pods/apiserver.yaml" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command on the master node. 
- - chown root:root /etc/origin/node/pods/apiserver.yaml - scored: true - - - id: 1.4.7 - text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %a /etc/origin/node/pods/etcd.yaml" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command. - - chmod 644 /etc/origin/node/pods/etcd.yaml - scored: true - - - id: 1.4.8 - text: "Ensure that the etcd pod specification file ownership is set to root:root (Scored)" - audit: "stat -c %U:%G /etc/origin/node/pods/etcd.yaml" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command on the master node. - - chown root:root /etc/origin/node/pods/etcd.yaml - scored: true - - - id: 1.4.9 - text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %a /etc/origin/openvswitch/" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command. - - chmod 644 /etc/origin/openvswitch/ - scored: true - - - id: 1.4.10 - text: "Ensure that the Container Network Interface file ownership is set to root:root (Scored)" - audit: "stat -c %U:%G /etc/origin/openvswitch/" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command on the master node. 
- - chown root:root /etc/origin/openvswitch/ - scored: true - - - id: 1.4.11 - text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive(Scored)" - audit: "stat -c %a /var/lib/etcd" - tests: - test_items: - - flag: "700" - compare: - op: eq - value: "700" - set: true - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir , - from the below command: - ps -ef | grep etcd - Run the below command (based on the etcd data directory found above). For example, - chmod 700 /var/lib/etcd - scored: true - - - id: 1.4.12 - text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)" - audit: "stat -c %U:%G /var/lib/etcd" - tests: - test_items: - - flag: "etcd:etcd" - compare: - op: eq - value: "etcd:etcd" - set: true - remediation: | - Run the below command on the master node. - - chown etcd:etcd /var/lib/etcd - scored: true - - - id: 1.4.13 - text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %a /etc/origin/master/admin.kubeconfig" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command. - - chmod 644 /etc/origin/master/admin.kubeconfig" - scored: true - - - id: 1.4.14 - text: "Ensure that the admin.conf file ownership is set to root:root (Scored)" - audit: "stat -c %U:%G /etc/origin/master/admin.kubeconfig" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command on the master node. 
- - chown root:root /etc/origin/master/admin.kubeconfig - scored: true - - - id: 1.4.15 - text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %a /etc/origin/master/openshift-master.kubeconfig" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command. - - chmod 644 /etc/origin/master/openshift-master.kubeconfig - scored: true - - - id: 1.4.16 - text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)" - audit: "stat -c %U:%G /etc/origin/master/openshift-master.kubeconfig" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command on the master node. - - chown root:root /etc/origin/master/openshift-master.kubeconfig - scored: true - - - id: 1.4.17 - text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %a /etc/origin/master/openshift-master.kubeconfig" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command. - - chmod 644 /etc/origin/master/openshift-master.kubeconfig - scored: true - - - id: 1.4.18 - text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)" - audit: "stat -c %U:%G /etc/origin/master/openshift-master.kubeconfig" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: "root:root" - set: true - remediation: | - Run the below command on the master node. 
- - chown root:root /etc/origin/master/openshift-master.kubeconfig - scored: true - - -- id: 1.5 - text: "Etcd" - checks: - - id: 1.5.1 - text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)" - audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_CERT_FILE=/etc/etcd/server.crt /proc/1/environ; /usr/local/bin/master-exec etcd etcd grep etcd_key_file=/etc/etcd/server.key /proc/1/environ; grep ETCD_CERT_FILE=/etc/etcd/server.crt /etc/etcd/etcd.conf; grep ETCD_KEY_FILE=/etc/etcd/server.key /etc/etcd/etcd.conf'" - tests: - bin_op: and - test_items: - - flag: "Binary file /proc/1/environ matches" - compare: - op: has - value: "Binary file /proc/1/environ matches" - set: true - - flag: "ETCD_CERT_FILE=/etc/etcd/server.crt" - compare: - op: has - value: "ETCD_CERT_FILE=/etc/etcd/server.crt" - set: true - - flag: "ETCD_KEY_FILE=/etc/etcd/server.key" - compare: - op: has - value: "ETCD_KEY_FILE=/etc/etcd/server.key" - set: true - remediation: | - Reset to the OpenShift default configuration. - scored: true - - - id: 1.5.2 - text: "Ensure that the --client-cert-auth argument is set to true (Scored)" - audit: "/bin/sh -c'/usr/local/bin/master-exec etcd etcd grep ETCD_CLIENT_CERT_AUTH=true /proc/1/environ; grep ETCD_CLIENT_CERT_AUTH /etc/etcd/etcd.conf'" - tests: - bin_op: and - test_items: - - flag: "Binary file /proc/1/environ matches" - compare: - op: has - value: "Binary file /proc/1/environ matches" - set: true - - flag: "ETCD_CLIENT_CERT_AUTH=true" - compare: - op: has - value: "ETCD_CLIENT_CERT_AUTH=true" - set: true - remediation: | - Reset to the OpenShift default configuration. 
- scored: true - - - id: 1.5.3 - text: "Ensure that the --auto-tls argument is not set to true (Scored)" - audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_AUTO_TLS /proc/1/environ; grep ETCD_AUTO_TLS /etc/etcd/etcd.conf'" - tests: - bin_op: or - test_items: - - flag: "ETCD_AUTO_TLS=false" - compare: - op: has - value: "ETCD_AUTO_TLS=false" - set: true - - flag: "#ETCD_AUTO_TLS" - compare: - op: has - value: "#ETCD_AUTO_TLS" - set: true - remediation: | - Reset to the OpenShift default configuration. - scored: true - - - id: 1.5.4 - text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Scored)" - audit: "/bin/sh -c'/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt /proc/1/environ; /usr/local/bin/master-exec etcd etcd grep ETCD_PEER_KEY_FILE=/etc/etcd/peer.key /proc/1/environ; grep ETCD_PEER_CERT_FILE /etc/etcd/etcd.conf; grep ETCD_PEER_KEY_FILE /etc/etcd/etcd.conf'" - tests: - bin_op: and - test_items: - - flag: "Binary file /proc/1/environ matches" - compare: - op: has - value: "Binary file /proc/1/environ matches" - set: true - - flag: "ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt" - compare: - op: has - value: "ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt" - set: true - - flag: "ETCD_PEER_KEY_FILE=/etc/etcd/peer.key" - compare: - op: has - value: "ETCD_PEER_KEY_FILE=/etc/etcd/peer.key" - set: true - remediation: | - Reset to the OpenShift default configuration. 
- scored: true - - - id: 1.5.5 - text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)" - audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_CLIENT_CERT_AUTH=true /proc/1/environ; grep ETCD_PEER_CLIENT_CERT_AUTH /etc/etcd/etcd.conf'" - tests: - bin_op: and - test_items: - - flag: "Binary file /proc/1/environ matches" - compare: - op: has - value: "Binary file /proc/1/environ matches" - set: true - - flag: "ETCD_PEER_CLIENT_CERT_AUTH=true" - compare: - op: has - value: "ETCD_PEER_CLIENT_CERT_AUTH=true" - set: true - remediation: | - Reset to the OpenShift default configuration. - scored: true - - - id: 1.5.6 - text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)" - audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_AUTO_TLS /proc/1/environ; grep ETCD_PEER_AUTO_TLS /etc/etcd/etcd.conf'" - tests: - bin_op: and - test_items: - - flag: "Binary file /proc/1/environ matches" - compare: - op: has - value: "Binary file /proc/1/environ matches" - set: true - - flag: "#ETCD_PEER_AUTO_TLS=false" - compare: - op: has - value: "#ETCD_PEER_AUTO_TLS=false" - set: true - remediation: | - Reset to the OpenShift default configuration. - scored: true - - - id: 1.5.7 - text: "Ensure that the --wal-dir argument is set as appropriate Scored)" - type: "skip" - scored: true - - - id: 1.5.8 - text: "Ensure that the --max-wals argument is set to 0 (Scored)" - type: "skip" - scored: true - - - id: 1.5.9 - text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)" - audit: "openssl x509 -in /etc/origin/master/master.etcd-ca.crt -subject -issuer -noout | sed 's/@/ /'" - tests: - test_items: - - flag: "issuer= /CN=etcd-signer" - compare: - op: has - value: "issuer= /CN=etcd-signer" - set: true - remediation: | - Reset to the OpenShift default configuration. 
- scored: false - - -- id: 1.6 - text: "General Security Primitives" - checks: - - id: 1.6.1 - text: "Ensure that the cluster-admin role is only used where required (Not Scored)" - type: "manual" - remediation: | - Review users, groups, serviceaccounts bound to cluster-admin: - oc get clusterrolebindings | grep cluster-admin - - Review users and groups bound to cluster-admin and decide whether they require - such access. Consider creating least-privilege roles for users and service accounts - scored: false - - - id: 1.6.2 - text: "Create Pod Security Policies for your cluster (Not Scored)" - type: "manual" - remediation: | - Review Security Context Constraints: - oc get scc - - Use OpenShift's Security Context Constraint feature, which has been contributed - to Kubernetes as Pod Security Policies. PSPs are still beta in Kubernetes 1.10. - OpenShift ships with two SCCs: restricted and privileged. - - The two default SCCs will be created when the master is started. The restricted - SCC is granted to all authenticated users by default. - - https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html" - scored: false - - - id: 1.6.3 - text: "Create administrative boundaries between resources using namespaces (Not Scored)" - type: "manual" - remediation: | - Review projects: - oc get projects - scored: false - - - id: 1.6.4 - text: "Create network segmentation using Network Policies (Not Scored)" - type: "manual" - remediation: | - Verify on masters the plugin being used: - grep networkPluginName /etc/origin/master/master-config.yaml - - OpenShift provides multi-tenant networking isolation (using Open vSwich and - vXLAN), to segregate network traffic between containers belonging to different - tenants (users or applications) while running on a shared cluster. Red Hat also - works with 3rd-party SDN vendors to provide the same level of capabilities - integrated with OpenShift. OpenShift SDN is included a part of OpenShift - subscription. 
- - OpenShift supports Kubernetes NetworkPolicy. Administrator must configure - NetworkPolicies if desired. - - https://docs.openshift.com/container-platform/3.10/architecture/networking/sdn.html#architecture-additional-concepts-sdn - - Ansible Inventory variable: os_sdn_network_plugin_name: - https://docs.openshift.com/container-platform/3.10/install/configuring_inventory_file.html - scored: false - - - id: 1.6.5 - text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Not Scored)" - type: "manual" - remediation: | - Verify SCCs that have been configured with seccomp: - oc get scc -ocustom-columns=NAME:.metadata.name,SECCOMP-PROFILES:.seccompProfiles - - OpenShift does not enable seccomp by default. To configure seccomp profiles that - are applied to pods run by the SCC, follow the instructions in the - documentation: - - https://docs.openshift.com/container-platform/3.9/admin_guide/seccomp.html#admin-guide-seccomp - scored: false - - - id: 1.6.6 - text: "Apply Security Context to Your Pods and Containers (Not Scored)" - type: "manual" - remediation: | - Review SCCs: - oc describe scc - - Use OpenShift's Security Context Constraint feature, which has been contributed - to Kubernetes as Pod Security Policies. PSPs are still beta in Kubernetes 1.10. - - OpenShift ships with two SCCs: restricted and privileged. The two default SCCs - will be created when the master is started. The restricted SCC is granted to - all authenticated users by default. - - All pods are run under the restricted SCC by default. Running a pod under any - other SCC requires an account with cluster admin capabilities to grant access - for the service account. - - SecurityContextConstraints limit what securityContext is applied to pods and - containers. 
- - https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html - scored: false - - - id: 1.6.7 - text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)" - type: "manual" - remediation: | - Review imagePolicyConfig in /etc/origin/master/master-config.yaml. - scored: false - - - id: 1.6.8 - text: "Configure Network policies as appropriate (Not Scored)" - type: "manual" - remediation: | - If ovs-networkplugin is used, review network policies: - oc get networkpolicies - - OpenShift supports Kubernetes NetworkPolicy via ovs-networkpolicy plugin. - If choosing ovs-multitenant plugin, each namespace is isolated in its own - netnamespace by default. - scored: false - - - id: 1.6.9 - text: "Place compensating controls in the form of PSP and RBAC for privileged containers usage (Not Scored)" - type: "manual" - remediation: | - 1) Determine all sccs allowing privileged containers: - oc get scc -ocustom-columns=NAME:.metadata.name,ALLOWS_PRIVILEGED:.allowPrivilegedContainer - 2) Review users and groups assigned to sccs allowing priviliged containers: - oc describe sccs - - Use OpenShift's Security Context Constraint feature, which has been contributed - to Kubernetes as Pod Security Policies. PSPs are still beta in Kubernetes 1.10. - - OpenShift ships with two SCCs: restricted and privileged. The two default SCCs - will be created when the master is started. The restricted SCC is granted to all - authenticated users by default. - - Similar scenarios are documented in the SCC - documentation, which outlines granting SCC access to specific serviceaccounts. - Administrators may create least-restrictive SCCs based on individual container - needs. - - For example, if a container only requires running as the root user, the anyuid - SCC can be used, which will not expose additional access granted by running - privileged containers. 
- - https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html - scored: false +--- +controls: +version: 3.10 +id: 1 +text: "Securing the OpenShift Master" +type: "master" +groups: + +- id: 1 + text: "Protecting the API Server" + checks: + - id: 1.1 + text: "Maintain default behavior for anonymous access" + type: "skip" + scored: true + + - id: 1.2 + text: "Verify that the basic-auth-file method is not enabled" + audit: "grep -A2 basic-auth-file /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "--basic-auth-file" + compare: + op: eq + value: "" + set: false + remediation: | + Edit the kubernetes master config file /etc/origin/master/master-config.yaml and + remove the basic-auth-file entry. + + kubernetesMasterConfig: +  apiServerArguments: +    basic-auth-file: +    - /path/to/any/file + scored: true + + - id: 1.3 + text: "Insecure Tokens" + type: "skip" + scored: true + + - id: 1.4 + text: "Secure communications between the API server and master nodes" + audit: "grep -A4 kubeletClientInfo /etc/origin/master/master-config.yaml" + tests: + bin_op: and + test_items: + - flag: "kubeletClientInfo:" + compare: + op: eq + value: "kubeletClientInfo:" + set: true + - flag: "ca: ca-bundle.crt" + compare: + op: has + value: "ca-bundle.crt" + set: true + - flag: "certFile: master.kubelet-client.crt" + compare: + op: has + value: "master.kubelet-client.crt" + set: true + - flag: "keyFile: master.kubelet-client.key" + compare: + op: has + value: "master.kubelet-client.key" + set: true + - flag: "port: 10250" + compare: + op: eq + value: "port: 10250" + set: true + remediation: | + Edit the kubernetes master config file /etc/origin/master/master-config.yaml + and change it to match the below. 
+ + kubeletClientInfo: +  ca: ca-bundle.crt +  certFile: master.kubelet-client.crt +  keyFile: master.kubelet-client.key +  port: 10250 + scored: true + + - id: 1.5 + text: "Prevent insecure bindings" + audit: "grep -A2 insecure-bind-address /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "insecure-bind-address" + set: false + remediation: | + Edit the kubernetes master config file /etc/origin/master/master-config.yaml + and remove the insecure-bind-address entry. + + kubernetesMasterConfig: +  apiServerArguments: +    insecure-bind-address: +    - 127.0.0.1 + scored: true + + - id: 1.6 + text: "Prevent insecure port access" + audit: "grep -A2 insecure-port /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "insecure-port" + set: false + remediation: | + Edit the kubernetes master config file /etc/origin/master/master-config.yaml + and remove the insecure-port entry. + + kubernetesMasterConfig: +  apiServerArguments: +   insecure-port: +  - 0 + scored: true + + - id: 1.7 + text: "Use Secure Ports for API Server Traffic" + audit: "grep -A2 secure-port /etc/origin/master/master-config.yaml" + tests: + bin_op: or + test_items: + - flag: "secure-port" + set: false + - flag: "secure-port" + compare: + op: nothave + value: "0" + set: true + remediation: | + Edit the kubernetes master config file /etc/origin/master/master-config.yaml + and either remove the secure-port parameter or set it to a different (non-zero) + desired port. 
+ + kubernetesMasterConfig: +  apiServerArguments: +   secure-port: +  - 8443 + scored: true + + - id: 1.8 + text: "Do not expose API server profiling data" + type: "skip" + scored: true + + - id: 1.9 + text: "Verify repair-malformed-updates argument for API compatibility" + audit: "grep -A2 repair-malformed-updates /etc/origin/master/master-config.yaml" + tests: + bin_op: or + test_items: + - flag: "repair-malformed-updates" + set: false + - flag: "repair-malformed-updates" + compare: + op: has + value: "true" + set: true + remediation: | + Edit the kubernetes master config file /etc/origin/master/master-config.yaml + and remove the repair-malformed-updates entry or set repair-malformed-updates=true. + scored: true + + - id: 1.10 + text: "Verify that the AlwaysAdmit admission controller is disabled" + audit: "grep -A4 AlwaysAdmit /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "AlwaysAdmit" + set: false + remediation: | + Edit the kubernetes master config file /etc/origin/master/master-config.yaml + and remove the the entry below. + + AlwaysAdmit: + configuration: + kind: DefaultAdmissionConfig + apiVersion: v1 + disable: false + scored: true + + - id: 1.11 + text: "Manage the AlwaysPullImages admission controller" + audit: "grep -A4 AlwaysPullImages /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "disable: false" + compare: + op: has + value: "false" + set: true + remediation: | + Edit the kubernetes master config file /etc/origin/master/master-config.yaml + and add the the entry below. 
+ + admissionConfig: + pluginConfig: + AlwaysPullImages: + configuration: + kind: DefaultAdmissionConfig + apiVersion: v1 + disable: false + scored: true + + - id: 1.12 + text: "Use Security Context Constraints instead of DenyEscalatingExec admission" + type: "skip" + scored: true + + - id: 1.13 + text: "Use Security Context Constraints instead of the SecurityContextDeny admission controller" + type: "skip" + scored: true + + - id: 1.14 + text: "Manage the NamespaceLifecycle admission controller" + audit: "grep -A4 NamespaceLifecycle /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "NamespaceLifecycle" + set: false + remediation: | + Edit the kubernetes master config file /etc/origin/master/master-config.yaml + and remove the following entry. + + NamespaceLifecycle: + configuration: + kind: DefaultAdmissionConfig + apiVersion: v1 + disable: true + scored: true + + - id: 1.15 + text: "Configure API server auditing - audit log file path" + audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "enabled: true" + compare: + op: has + value: "true" + set: true + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml, update the following entry and restart the API server. + + auditConfig: + auditFilePath: ""/etc/origin/master/audit-ocp.log"" + enabled: true + maximumFileRetentionDays: 30 + maximumFileSizeMegabytes: 10 + maximumRetainedFiles: 10 + + Make the same changes in the inventory/ansible variables so the changes are not + lost when an upgrade occurs. 
+ scored: true + + - id: 1.16 + text: "Configure API server auditing - audit log retention" + audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "maximumFileRetentionDays: 30" + compare: + op: has + value: "maximumFileRetentionDays" + set: true + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml, + update the maximumFileRetentionDays entry and restart the API server. + + auditConfig: + auditFilePath: ""/etc/origin/master/audit-ocp.log"" + enabled: true + maximumFileRetentionDays: 30 + maximumFileSizeMegabytes: 10 + maximumRetainedFiles: 10 + + Make the same changes in the inventory/ansible variables so the changes are not + lost when an upgrade occurs. + scored: true + + - id: 1.17 + text: "Configure API server auditing - audit log backup retention" + audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "maximumRetainedFiles: 10" + compare: + op: has + value: "maximumRetainedFiles" + set: true + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml, update the maximumRetainedFiles entry, + set enabled to true and restart the API server. + + auditConfig: + auditFilePath: ""/etc/origin/master/audit-ocp.log"" + enabled: true + maximumFileRetentionDays: 30 + maximumFileSizeMegabytes: 10 + maximumRetainedFiles: 10 + + Make the same changes in the inventory/ansible variables so the changes are not + lost when an upgrade occurs. + scored: true + + - id: 1.18 + text: "Configure audit log file size" + audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "maximumFileSizeMegabytes: 30" + compare: + op: has + value: "maximumFileSizeMegabytes" + set: true + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml, update the maximumFileSizeMegabytes entry, + set enabled to true and restart the API server. 
+ + auditConfig: + auditFilePath: ""/etc/origin/master/audit-ocp.log"" + enabled: true + maximumFileRetentionDays: 30 + maximumFileSizeMegabytes: 10 + maximumRetainedFiles: 10 + + Make the same changes in the inventory/ansible variables so the changes are not + lost when an upgrade occurs. + scored: true + + - id: 1.19 + text: "Verify that authorization-mode is not set to AlwaysAllow" + audit: "grep -A1 authorization-mode /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "authorization-mode" + set: false + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove the authorization-mode + entry. + + kubernetesMasterConfig: +  apiServerArguments: +    authorization-mode: +    - AllowAll + scored: true + + - id: 1.20 + text: "Verify that the token-auth-file flag is not set" + audit: "grep token-auth-file /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "token-auth-file" + set: false + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove the token-auth-file + entry under apiserverArguments section. + + kubernetesMasterConfig: +  apiServerArguments: +    token-auth-file: +    - /path/to/file + scored: true + + - id: 1.21 + text: "Verify the API server certificate authority" + audit: "grep -A1 kubelet-certificate-authority /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "kubelet-certificate-authority" + set: false + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove the following + configuration under apiserverArguments section. 
+ + kubernetesMasterConfig: +  apiServerArguments: +    kubelet-certificat-authority: +    - /path/to/ca + scored: true + + - id: 1.22 + text: "Verify the API server client certificate and client key" + audit: "grep -A4 kubeletClientInfo /etc/origin/master/master-config.yaml" + tests: + bin_op: and + test_items: + - flag: "keyFile: master.kubelet-client.key" + compare: + op: has + value: "keyFile: master.kubelet-client.key" + set: true + - flag: "certFile: master.kubelet-client.crt" + compare: + op: has + value: "certFile: master.kubelet-client.crt" + set: true + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and add the following + configuration under kubeletClientInfo + + kubeletClientInfo: +  ca: ca-bundle.crt +  certFile: master.kubelet-client.crt +  keyFile: master.kubelet-client.key + port: 10250 + scored: true + + - id: 1.23 + text: "Verify that the service account lookup flag is not set" + type: skip + scored: true + + - id: 1.24 + text: "Verify the PodSecurityPolicy is disabled to ensure use of SecurityContextConstraints" + type: "skip" + scored: true + + - id: 1.25 + text: "Verify that the service account key file argument is not set" + audit: "grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml" + tests: + bin_op: and + test_items: + - flag: "privateKeyFile: serviceaccounts.private.key" + compare: + op: has + value: "privateKeyFile: serviceaccounts.private.key" + set: true + - flag: "serviceaccounts.public.key" + compare: + op: has + value: "serviceaccounts.public.key" + set: true + remediation: | + OpenShift API server does not use the service-account-key-file argument. + Even if value is set in master-config.yaml, it will not be used to verify + service account tokens, as it is in upstream Kubernetes. The ServiceAccount + token authenticator is configured with serviceAccountConfig.publicKeyFiles in + the master-config.yaml. OpenShift does not reuse the apiserver TLS key. 
+ + Edit the Openshift master config file /etc/origin/master/master-config.yaml and set the privateKeyFile + and publicKeyFile configuration under serviceAccountConfig. + + serviceAccountConfig: +  limitSecretReferences: false +  managedNames: + - default +  - builder +  - deployer +  masterCA: ca-bundle.crt +   privateKeyFile: serviceaccounts.private.key +  publicKeyFiles: +  - serviceaccounts.public.key + + Verify that privateKeyFile and publicKeyFile exist and set. + scored: true + + - id: 1.26 + text: "Verify the certificate and key used for communication with etcd" + audit: "grep -A3 etcdClientInfo /etc/origin/master/master-config.yaml" + tests: + bin_op: and + test_items: + - flag: "certFile: master.etcd-client.crt" + compare: + op: has + value: "certFile: master.etcd-client.crt" + set: true + - flag: "keyFile: master.etcd-client.key" + compare: + op: has + value: "keyFile: master.etcd-client.key" + set: true + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and set keyFile and certFile + under etcdClientInfo like below. + + etcdClientInfo: +  ca: master.etcd-ca.crt + certFile: master.etcd-client.crt + keyFile: master.etcd-client.key + scored: true + + - id: 1.27 + text: "Verify that the ServiceAccount admission controller is enabled" + audit: "grep -A4 ServiceAccount /etc/origin/master/master-config.yaml" + tests: + bin_op: or + test_items: + - flag: "ServiceAccount" + set: false + - flag: "disable: false" + compare: + op: has + value: "disable: false" + set: true + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable ServiceAccount + admission control policy. 
+ + ServiceAccount: + configuration: + kind: DefaultAdmissionConfig + apiVersion: v1 + disable: false + scored: true + + - id: 1.28 + text: "Verify the certificate and key used to encrypt API server traffic" + audit: "grep -A7 servingInfo /etc/origin/master/master-config.yaml" + tests: + bin_op: and + test_items: + - flag: "certFile: master.server.crt" + compare: + op: has + value: "certFile: master.server.crt" + set: true + - flag: "keyFile: master.server.key" + compare: + op: has + value: "keyFile: master.server.key" + set: true + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and set keyFile and certFile under servingInfo. + + servingInfo: +  bindAddress: 0.0.0.0:8443 +   bindNetwork: tcp4 + certFile: master.server.crt + clientCA: ca.crt + keyFile: master.server.key + maxRequestsInFlight: 500 + requestTimeoutSeconds: 3600 + scored: true + + - id: 1.29 + text: "Verify that the --client-ca-file argument is not set" + audit: "grep client-ca-file /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "clientCA: ca.crt" + set: false + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and set clientCA under servingInfo. + + servingInfo: +  bindAddress: 0.0.0.0:8443 +   bindNetwork: tcp4 + certFile: master.server.crt + clientCA: ca.crt + keyFile: master.server.key + maxRequestsInFlight: 500 + requestTimeoutSeconds: 3600 + scored: true + + - id: 1.30 + text: "Verify the CA used for communication with etcd" + audit: "grep -A3 etcdClientInfo /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "ca: master.etcd-ca.crt" + compare: + op: has + value: "ca: master.etcd-ca.crt" + set: true + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and set ca under etcdClientInfo. 
+ + etcdClientInfo: +   ca: master.etcd-ca.crt + certFile: master.etcd-client.crt + keyFile: master.etcd-client.key + scored: true + + - id: 1.31 + text: "Verify that the authorization-mode argument is not set" + type: "skip" + scored: true + + - id: 1.32 + text: "Verify that the NodeRestriction admission controller is enabled" + audit: "grep -A4 NodeRestriction /etc/origin/master/master-config.yaml" + tests: + bin_op: or + test_items: + - flag: "NodeRestriction" + set: false + - flag: "disable: false" + compare: + op: has + value: "disable: false" + set: true + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable NodeRestriction ca under etcdClientInfo. + + NodeRestriction: + configuration: + kind: DefaultAdmissionConfig + apiVersion: v1 + disable: false + scored: true + + - id: 1.33 + text: "Configure encryption of data at rest in etcd datastore" + audit: "grep -A1 experimental-encryption-provider-config /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "experimental-encryption-provider-config:" + compare: + op: has + value: "experimental-encryption-provider-config:" + set: true + remediation: | + Follow the instructions in the documentation to configure encryption. + https://docs.openshift.com/container-platform/3.10/admin_guide/encrypting_data.html + scored: true + + - id: 1.34 + text: "Set the encryption provider to aescbc for etcd data at rest" + audit: "grep -A1 experimental-encryption-provider-config /etc/origin/master/master-config.yaml | sed -n '2p' | awk '{ print $2 }' | xargs grep -A1 providers" + tests: + test_items: + - flag: "aescbc:" + compare: + op: has + value: "aescbc:" + set: true + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and set aescbc as the first provider in encryption provider config. + See https://docs.openshift.com/container-platform/3.10/admin_guide/encrypting_data.html. 
+ scored: true + + - id: 1.35 + text: "Enable the EventRateLimit plugin" + audit: "grep -A4 EventRateLimit /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "disable: false" + compare: + op: has + value: "disable: false" + set: true + remediation: | + Follow the documentation to enable the EventRateLimit plugin. + https://docs.openshift.com/container-platform/3.10/architecture/additional_concepts/admission_controllers.html#admission-controllers-general-admission-rules + scored: true + + - id: 1.36 + text: "Configure advanced auditing" + audit: "grep AdvancedAuditing /etc/origin/master/master-config.yaml" + tests: + bin_op: or + test_items: + - flag: "AdvancedAuditing" + compare: + op: eq + value: "true" + set: true + - flag: "AdvancedAuditing" + set: false + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable AdvancedAuditing, + + kubernetesMasterConfig: +  apiServerArguments: + feature-gates: + - AdvancedAuditing=true + scored: true + + # Review 1.1.37 in Aquasec shared doc, the tests are net zero. + - id: 1.37 + text: "Adjust the request timeout argument for your cluster resources" + audit: "grep request-timeout /etc/origin/master/master-config.yaml" + type: manual + remediation: | + change the request-timeout value in the  /etc/origin/master/master-config.yaml + scored: true + + +- id: 2 + text: "Scheduler" + checks: + - id: 2.1 + text: "Verify that Scheduler profiling is not exposed to the web" + type: "skip" + scored: true + + +- id: 3 + text: "Controller Manager" + checks: + - id: 3.1 + text: "Adjust the terminated-pod-gc-threshold argument as needed" + audit: "grep terminated-pod-gc-threshold -A1 /etc/origin/master/master-config.yaml" + tests: + test_items: + - flag: "terminated-pod-gc-threshold:" + compare: + op: has + value: "12500" + set: true + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable terminated-pod-gc-threshold. 
+ + kubernetesMasterConfig: +  controllerArguments: +     terminated-pod-gc-threshold: +    - true + + Enabling the "terminated-pod-gc-threshold" settings is optional. + scored: true + + - id: 3.2 + text: "Verify that Controller profiling is not exposed to the web" + type: "skip" + scored: true + + - id: 3.3 + text: "Verify that the --use-service-account-credentials argument is set to true" + audit: "grep -A2 use-service-account-credentials /etc/origin/master/master-config.yaml" + tests: + bin_op: or + test_items: + - flag: "use-service-account-credentials" + set: false + - flag: "true" + compare: + op: has + value: "true" + set: true + remediation: | + Edit the Openshift master config file /etc/origin/master/master-config.yaml and set use-service-account-credentials + to true under controllerArguments section. + + kubernetesMasterConfig: +  controllerArguments: +     use-service-account-credentials: +     - true + scored: true + + # Review 3.4 + - id: 3.4 + text: "Verify that the --service-account-private-key-file argument is set as appropriate" + audit: | + grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml | grep privateKeyFile; + grep -A2 service-account-private-key-file /etc/origin/master/master-config.yaml + tests: + bin_op: and + test_items: + - flag: "privateKeyFile: serviceaccounts.private.key" + compare: + op: has + value: "privateKeyFile" + - flag: "service-account-private-key-file" + set: false + remediation: + Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove service-account-private-key-file + scored: true + + # Review 3.5 + - id: 3.5 + text: "Verify that the --root-ca-file argument is set as appropriate" + audit: "/bin/sh -c 'grep root-ca-file /etc/origin/master/master-config.yaml; grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml'" + tests: + bin_op: and + test_items: + - flag: "root-ca-file=/etc/origin/master/ca-bundle.crt" + compare: + op: has + value: 
"/etc/origin/master/ca-bundle.crt"
+        set: true
+      - flag: "masterCA: ca-bundle.crt"
+        compare:
+          op: has
+          value: "ca-bundle.crt"
+        set: true
+    remediation: |
+      Reset to OpenShift defaults OpenShift starts kube-controller-manager with
+      root-ca-file=/etc/origin/master/ca-bundle.crt by default.  OpenShift Advanced
+      Installation creates this certificate authority and configuration without any
+      configuration required.
+
+      https://docs.openshift.com/container-platform/3.10/admin_guide/service_accounts.html
+    scored: true
+
+  - id: 3.6
+    text: "Verify that Security Context Constraints are applied to Your Pods and Containers"
+    type: "skip"
+    scored: false
+
+  - id: 3.7
+    text: "Manage certificate rotation"
+    audit: "grep -B3 RotateKubeletServerCertificate=true /etc/origin/master/master-config.yaml"
+    tests:
+      test_items:
+      - flag: "RotateKubeletServerCertificate"
+        compare:
+          op: eq
+          value: "true"
+        set: true
+    remediation: |
+      If you decide not to enable the RotateKubeletServerCertificate feature,
+      be sure to use the Ansible playbooks provided with the OpenShift installer to
+      automate re-deploying certificates.
+    scored: true
+
+
+- id: 4
+  text: "Configuration Files"
+  checks:
+  - id: 4.1
+    text: "Verify the OpenShift default permissions for the API server pod specification file"
+    audit: "stat -c %a /etc/origin/node/pods/apiserver.yaml"
+    tests:
+      test_items:
+      - flag: "600"
+        compare:
+          op: eq
+          value: "600"
+        set: true
+    remediation: |
+      Run the below command.
+
+      chmod 600 /etc/origin/node/pods/apiserver.yaml
+    scored: true
+
+  - id: 4.2
+    text: "Verify the OpenShift default file ownership for the API server pod specification file"
+    audit: "stat -c %U:%G /etc/origin/node/pods/apiserver.yaml"
+    tests:
+      test_items:
+      - flag: "root:root"
+        compare:
+          op: eq
+          value: "root:root"
+        set: true
+    remediation: |
+      Run the below command on the master node.
+ + chown root:root /etc/origin/node/pods/apiserver.yaml + scored: true + + - id: 4.3 + text: "Verify the OpenShift default file permissions for the controller manager pod specification file" + audit: "stat -c %a /etc/origin/node/pods/controller.yaml" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command on the master node. + + chmod 600 /etc/origin/node/pods/controller.yaml + scored: true + + - id: 4.4 + text: "Verify the OpenShift default ownership for the controller manager pod specification file" + audit: "stat -c %U:%G /etc/origin/node/pods/controller.yaml" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command on the master node. + + chown root:root /etc/origin/node/pods/controller.yaml + scored: true + + - id: 4.5 + text: "Verify the OpenShift default permissions for the scheduler pod specification file" + audit: "stat -c %a /etc/origin/node/pods/controller.yaml" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command. + + chmod 600 stat -c %a /etc/origin/node/pods/controller.yaml + scored: true + + - id: 4.6 + text: "Verify the scheduler pod specification file ownership set by OpenShift" + audit: "stat -c %u:%g /etc/origin/node/pods/controller.yaml" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command on the master node. + + chown root:root /etc/origin/node/pods/controller.yaml + scored: true + + - id: 4.7 + text: "Verify the OpenShift default etcd pod specification file permissions" + audit: "stat -c %a /etc/origin/node/pods/etcd.yaml" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command. 
+ + chmod 600 /etc/origin/node/pods/etcd.yaml + scored: true + + - id: 4.8 + text: "Verify the OpenShift default etcd pod specification file ownership" + audit: "stat -c %U:%G /etc/origin/node/pods/etcd.yaml" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command on the master node. + + chown root:root /etc/origin/node/pods/etcd.yaml + scored: true + + - id: 4.9 + text: "Verify the default OpenShift Container Network Interface file permissions" + audit: "stat -c %a /etc/origin/openvswitch/ /etc/cni/net.d/" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command. + + chmod 644 -R /etc/origin/openvswitch/ /etc/cni/net.d/ + scored: true + + - id: 4.10 + text: "Verify the default OpenShift Container Network Interface file ownership" + audit: "stat -c %U:%G /etc/origin/openvswitch/ /etc/cni/net.d/" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command on the master node. + + chown root:root /etc/origin/openvswitch/ /etc/cni/net.d/ + scored: true + + - id: 4.11 + text: "Verify the default OpenShift etcd data directory permissions" + audit: "stat -c %a /var/lib/etcd" + tests: + test_items: + - flag: "700" + compare: + op: eq + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir , + from the below command: + ps -ef | grep etcd + Run the below command (based on the etcd data directory found above). 
For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 4.12 + text: "Verify the default OpenShift etcd data directory ownership" + audit: "stat -c %U:%G /var/lib/etcd" + tests: + test_items: + - flag: "etcd:etcd" + compare: + op: eq + value: "etcd:etcd" + set: true + remediation: | + Run the below command on the master node. + + chown etcd:etcd /var/lib/etcd + scored: true + + - id: 4.13 + text: "Verify the default OpenShift admin.conf file permissions" + audit: "stat -c %a /etc/origin/master/admin.kubeconfig" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command. + + chmod 644 /etc/origin/master/admin.kubeconfig" + scored: true + + - id: 4.14 + text: "Verify the default OpenShift admin.conf file ownership" + audit: "stat -c %U:%G /etc/origin/master/admin.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command on the master node. + + chown root:root /etc/origin/master/admin.kubeconfig + scored: true + + - id: 4.15 + text: "Verify the default OpenShift scheduler.conf file permissions" + audit: "stat -c %a /etc/origin/master/openshift-master.kubeconfig" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command. 
+ + chmod 644 /etc/origin/master/openshift-master.kubeconfig + scored: true + + - id: 4.16 + text: "Verify the default OpenShift scheduler.conf file ownership" + audit: "stat -c %U:%G /etc/origin/master/openshift-master.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command on the master node. + + chown root:root /etc/origin/master/openshift-master.kubeconfig + scored: true + + - id: 4.17 + text: "Verify the default Openshift controller-manager.conf file permissions" + audit: "stat -c %a /etc/origin/master/openshift-master.kubeconfig" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command. + + chmod 644 /etc/origin/master/openshift-master.kubeconfig + scored: true + + - id: 4.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)" + audit: "stat -c %U:%G /etc/origin/master/openshift-master.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command on the master node. 
+
+      chown root:root /etc/origin/master/openshift-master.kubeconfig
+    scored: true
+
+
+- id: 5
+  text: "Etcd"
+  checks:
+  - id: 5.1
+    text: "Verify the default OpenShift cert-file and key-file configuration"
+    audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_CERT_FILE=/etc/etcd/server.crt /proc/1/environ; /usr/local/bin/master-exec etcd etcd grep ETCD_KEY_FILE=/etc/etcd/server.key /proc/1/environ; grep ETCD_CERT_FILE=/etc/etcd/server.crt /etc/etcd/etcd.conf; grep ETCD_KEY_FILE=/etc/etcd/server.key /etc/etcd/etcd.conf'"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "Binary file /proc/1/environ matches"
+        compare:
+          op: has
+          value: "Binary file /proc/1/environ matches"
+        set: true
+      - flag: "ETCD_CERT_FILE=/etc/etcd/server.crt"
+        compare:
+          op: has
+          value: "ETCD_CERT_FILE=/etc/etcd/server.crt"
+        set: true
+      - flag: "ETCD_KEY_FILE=/etc/etcd/server.key"
+        compare:
+          op: has
+          value: "ETCD_KEY_FILE=/etc/etcd/server.key"
+        set: true
+    remediation: |
+      Reset to the OpenShift default configuration.
+    scored: true
+
+  - id: 5.2
+    text: "Verify the default OpenShift setting for the client-cert-auth argument"
+    audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_CLIENT_CERT_AUTH=true /proc/1/environ; grep ETCD_CLIENT_CERT_AUTH /etc/etcd/etcd.conf'"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "Binary file /proc/1/environ matches"
+        compare:
+          op: has
+          value: "Binary file /proc/1/environ matches"
+        set: true
+      - flag: "ETCD_CLIENT_CERT_AUTH=true"
+        compare:
+          op: has
+          value: "ETCD_CLIENT_CERT_AUTH=true"
+        set: true
+    remediation: |
+      Reset to the OpenShift default configuration.
+    scored: true
+
+  - id: 5.3
+    text: "Verify the OpenShift default values for etcd_auto_tls"
+    audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_AUTO_TLS /proc/1/environ; grep ETCD_AUTO_TLS /etc/etcd/etcd.conf'"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "ETCD_AUTO_TLS=false"
+        compare:
+          op: has
+          value: "ETCD_AUTO_TLS=false"
+        set: true
+      - flag: "#ETCD_AUTO_TLS"
+        compare:
+          op: has
+          value: "#ETCD_AUTO_TLS"
+        set: true
+    remediation: |
+      Reset to the OpenShift default configuration.
+    scored: true
+
+  - id: 5.4
+    text: "Verify the OpenShift default peer-cert-file and peer-key-file arguments for etcd"
+    audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt /proc/1/environ; /usr/local/bin/master-exec etcd etcd grep ETCD_PEER_KEY_FILE=/etc/etcd/peer.key /proc/1/environ; grep ETCD_PEER_CERT_FILE /etc/etcd/etcd.conf; grep ETCD_PEER_KEY_FILE /etc/etcd/etcd.conf'"
+    tests:
+      bin_op: and
+      test_items:
+      - flag: "Binary file /proc/1/environ matches"
+        compare:
+          op: has
+          value: "Binary file /proc/1/environ matches"
+        set: true
+      - flag: "ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt"
+        compare:
+          op: has
+          value: "ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt"
+        set: true
+      - flag: "ETCD_PEER_KEY_FILE=/etc/etcd/peer.key"
+        compare:
+          op: has
+          value: "ETCD_PEER_KEY_FILE=/etc/etcd/peer.key"
+        set: true
+    remediation: |
+      Reset to the OpenShift default configuration.
+ scored: true + + - id: 5.5 + text: "Verify the OpenShift default configuration for the peer-client-cert-auth" + audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_CLIENT_CERT_AUTH=true /proc/1/environ; grep ETCD_PEER_CLIENT_CERT_AUTH /etc/etcd/etcd.conf'" + tests: + bin_op: and + test_items: + - flag: "Binary file /proc/1/environ matches" + compare: + op: has + value: "Binary file /proc/1/environ matches" + set: true + - flag: "ETCD_PEER_CLIENT_CERT_AUTH=true" + compare: + op: has + value: "ETCD_PEER_CLIENT_CERT_AUTH=true" + set: true + remediation: | + Reset to the OpenShift default configuration. + scored: true + + - id: 5.6 + text: "Verify the OpenShift default configuration for the peer-auto-tls argument" + audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_AUTO_TLS /proc/1/environ; grep ETCD_PEER_AUTO_TLS /etc/etcd/etcd.conf'" + tests: + bin_op: and + test_items: + - flag: "Binary file /proc/1/environ matches" + compare: + op: has + value: "Binary file /proc/1/environ matches" + set: true + - flag: "#ETCD_PEER_AUTO_TLS=false" + compare: + op: has + value: "#ETCD_PEER_AUTO_TLS=false" + set: true + remediation: | + Reset to the OpenShift default configuration. + scored: true + + - id: 5.7 + text: "Optionally modify the wal-dir argument" + type: "skip" + scored: true + + - id: 5.8 + text: "Optionally modify the max-wals argument" + type: "skip" + scored: true + + - id: 5.9 + text: "Verify the OpenShift default configuration for the etcd Certificate Authority" + audit: "openssl x509 -in /etc/origin/master/master.etcd-ca.crt -subject -issuer -noout | sed 's/@/ /'" + tests: + test_items: + - flag: "issuer= /CN=etcd-signer" + compare: + op: has + value: "issuer= /CN=etcd-signer" + set: true + remediation: | + Reset to the OpenShift default configuration. 
+ scored: false + + +- id: 6 + text: "General Security Primitives" + checks: + - id: 6.1 + text: "Ensure that the cluster-admin role is only used where required" + type: "manual" + remediation: | + Review users, groups, serviceaccounts bound to cluster-admin: + oc get clusterrolebindings | grep cluster-admin + + Review users and groups bound to cluster-admin and decide whether they require + such access. Consider creating least-privilege roles for users and service accounts + scored: false + + - id: 6.2 + text: "Verify Security Context Constraints as in use" + type: "manual" + remediation: | + Review Security Context Constraints: + oc get scc + + Use OpenShift's Security Context Constraint feature, which has been contributed + to Kubernetes as Pod Security Policies. PSPs are still beta in Kubernetes 1.10. + OpenShift ships with two SCCs: restricted and privileged. + + The two default SCCs will be created when the master is started. The restricted + SCC is granted to all authenticated users by default. + + https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html" + scored: false + + - id: 6.3 + text: "Use OpenShift projects to maintain boundaries between resources" + type: "manual" + remediation: | + Review projects: + oc get projects + scored: false + + - id: 6.4 + text: "Create network segmentation using the Multi-tenant plugin or Network Policies" + type: "manual" + remediation: | + Verify on masters the plugin being used: + grep networkPluginName /etc/origin/master/master-config.yaml + + OpenShift provides multi-tenant networking isolation (using Open vSwich and + vXLAN), to segregate network traffic between containers belonging to different + tenants (users or applications) while running on a shared cluster. Red Hat also + works with 3rd-party SDN vendors to provide the same level of capabilities + integrated with OpenShift. OpenShift SDN is included a part of OpenShift + subscription. + + OpenShift supports Kubernetes NetworkPolicy. 
Administrator must configure + NetworkPolicies if desired. + + https://docs.openshift.com/container-platform/3.10/architecture/networking/sdn.html#architecture-additional-concepts-sdn + + Ansible Inventory variable: os_sdn_network_plugin_name: + https://docs.openshift.com/container-platform/3.10/install/configuring_inventory_file.html + scored: false + + - id: 6.5 + text: "Enable seccomp and configure custom Security Context Constraints" + type: "manual" + remediation: | + Verify SCCs that have been configured with seccomp: + oc get scc -ocustom-columns=NAME:.metadata.name,SECCOMP-PROFILES:.seccompProfiles + + OpenShift does not enable seccomp by default. To configure seccomp profiles that + are applied to pods run by the SCC, follow the instructions in the + documentation: + + https://docs.openshift.com/container-platform/3.9/admin_guide/seccomp.html#admin-guide-seccomp + scored: false + + - id: 6.6 + text: "Review Security Context Constraints" + type: "manual" + remediation: | + Review SCCs: + oc describe scc + + Use OpenShift's Security Context Constraint feature, which has been contributed + to Kubernetes as Pod Security Policies. PSPs are still beta in Kubernetes 1.10. + + OpenShift ships with two SCCs: restricted and privileged. The two default SCCs + will be created when the master is started. The restricted SCC is granted to + all authenticated users by default. + + All pods are run under the restricted SCC by default. Running a pod under any + other SCC requires an account with cluster admin capabilities to grant access + for the service account. + + SecurityContextConstraints limit what securityContext is applied to pods and + containers. + + https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html + scored: false + + - id: 6.7 + text: "Manage Image Provenance using ImagePolicyWebhook admission controller" + type: "manual" + remediation: | + Review imagePolicyConfig in /etc/origin/master/master-config.yaml. 
+ scored: false + + - id: 6.8 + text: "Configure Network policies as appropriate" + type: "manual" + remediation: | + If ovs-networkplugin is used, review network policies: + oc get networkpolicies + + OpenShift supports Kubernetes NetworkPolicy via ovs-networkpolicy plugin. + If choosing ovs-multitenant plugin, each namespace is isolated in its own + netnamespace by default. + scored: false + + - id: 6.9 + text: "Use Security Context Constraints as compensating controls for privileged containers" + type: "manual" + remediation: | + 1) Determine all sccs allowing privileged containers: + oc get scc -ocustom-columns=NAME:.metadata.name,ALLOWS_PRIVILEGED:.allowPrivilegedContainer + 2) Review users and groups assigned to sccs allowing priviliged containers: + oc describe sccs + + Use OpenShift's Security Context Constraint feature, which has been contributed + to Kubernetes as Pod Security Policies. PSPs are still beta in Kubernetes 1.10. + + OpenShift ships with two SCCs: restricted and privileged. The two default SCCs + will be created when the master is started. The restricted SCC is granted to all + authenticated users by default. + + Similar scenarios are documented in the SCC + documentation, which outlines granting SCC access to specific serviceaccounts. + Administrators may create least-restrictive SCCs based on individual container + needs. + + For example, if a container only requires running as the root user, the anyuid + SCC can be used, which will not expose additional access granted by running + privileged containers. 
+ + https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html + scored: false diff --git a/cfg/ocp-3.10/node.yaml b/cfg/ocp-3.10/node.yaml index c537cf4..1fbe549 100644 --- a/cfg/ocp-3.10/node.yaml +++ b/cfg/ocp-3.10/node.yaml @@ -1,376 +1,376 @@ ---- -controls: -id: 2 -text: "Worker Node Security Configuration" -type: "node" -groups: -- id: 2.1 - text: "Kubelet" - checks: - - id: 2.1.1 - text: "Ensure that the --allow-privileged argument is set to false (Scored)" - type: "skip" - scored: true - - - id: 2.1.2 - text: "Ensure that the --anonymous-auth argument is set to false (Scored)" - type: "skip" - scored: true - - - id: 2.1.3 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)" - audit: "grep -A1 authorization-mode /etc/origin/node/node-config.yaml" - tests: - bin_op: or - test_items: - - flag: "authorization-mode" - set: false - - flag: "authorization-mode: Webhook" - compare: - op: has - value: "Webhook" - set: true - remediation: | - Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove authorization-mode under - kubeletArguments in /etc/origin/node/node-config.yaml or set it to "Webhook". - scored: true - - - id: 2.1.4 - text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)" - audit: "grep -A1 client-ca-file /etc/origin/node/node-config.yaml" - tests: - test_items: - - flag: "client-ca-file" - set: false - remediation: | - Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove any configuration returned by the following: - grep -A1 client-ca-file /etc/origin/node/node-config.yaml - - Reset to the OpenShift default. - See https://github.com/openshift/openshift-ansible/blob/release-3.10/roles/openshift_node_group/templates/node-config.yaml.j2#L65 - The config file does not have this defined in kubeletArgument, but in PodManifestConfig. 
- scored: true - - - id: 2.1.5 - text: "Ensure that the --read-only-port argument is set to 0 (Scored)" - audit: "grep -A1 read-only-port /etc/origin/node/node-config.yaml" - tests: - bin_op: or - test_items: - - flag: "read-only-port" - set: false - - flag: "read-only-port: 0" - compare: - op: has - value: "0" - set: true - remediation: | - Edit the Openshift node config file /etc/origin/node/node-config.yaml and removed so that the OpenShift default is applied. - scored: true - - - id: 2.1.6 - text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)" - audit: "grep -A1 streaming-connection-idle-timeout /etc/origin/node/node-config.yaml" - tests: - bin_op: or - test_items: - - flag: "streaming-connection-idle-timeout" - set: false - - flag: "0" - set: false - remediation: | - Edit the Openshift node config file /etc/origin/node/node-config.yaml and set the streaming-connection-timeout - value like the following in node-config.yaml. - - kubeletArguments: -  streaming-connection-idle-timeout: -    - "5m" - scored: true - - - id: 2.1.7 - text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)" - type: "skip" - scored: true - - - id: 2.1.8 - text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored)" - audit: "grep -A1 make-iptables-util-chains /etc/origin/node/node-config.yaml" - tests: - bin_op: or - test_items: - - flag: "make-iptables-util-chains" - set: false - - flag: "make-iptables-util-chains: true" - compare: - op: has - value: "true" - set: true - remediation: | - Edit the Openshift node config file /etc/origin/node/node-config.yaml and reset make-iptables-util-chains to the OpenShift - default value of true. 
- scored: true - - id: 2.1.9 - text: "Ensure that the --keep-terminated-pod-volumeskeep-terminated-pod-volumes argument is set to false (Scored)" - audit: "grep -A1 keep-terminated-pod-volumes /etc/origin/node/node-config.yaml" - tests: - test_items: - - flag: "keep-terminated-pod-volumes: false" - compare: - op: has - value: "false" - set: true - remediation: | - Reset to the OpenShift defaults - scored: true - - - id: 2.1.10 - text: "Ensure that the --hostname-override argument is not set (Scored)" - type: "skip" - scored: true - - - id: 2.1.11 - text: "Ensure that the --event-qps argument is set to 0 (Scored)" - audit: "grep -A1 event-qps /etc/origin/node/node-config.yaml" - tests: - bin_op: or - test_items: - - flag: "event-qps" - set: false - - flag: "event-qps: 0" - compare: - op: has - value: "0" - set: true - remediation: | - Edit the Openshift node config file /etc/origin/node/node-config.yaml set the event-qps argument to 0 in - the kubeletArguments section of. - scored: true - - - id: 2.1.12 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)" - audit: "grep -A1 cert-dir /etc/origin/node/node-config.yaml" - tests: - test_items: - - flag: "/etc/origin/node/certificates" - compare: - op: has - value: "/etc/origin/node/certificates" - set: true - remediation: | - Reset to the OpenShift default values. - scored: true - - - id: 2.1.13 - text: "Ensure that the --cadvisor-port argument is set to 0 (Scored)" - audit: "grep -A1 cadvisor-port /etc/origin/node/node-config.yaml" - tests: - bin_op: or - test_items: - - flag: "cadvisor-port" - set: false - - flag: "cadvisor-port: 0" - compare: - op: has - value: "0" - set: true - remediation: | - Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove the cadvisor-port flag - if it is set in the kubeletArguments section. 
- scored: true - - - id: 2.1.14 - text: "Ensure that the RotateKubeletClientCertificate argument is not set to false (Scored)" - audit: "grep -B1 RotateKubeletClientCertificate=true /etc/origin/node/node-config.yaml" - tests: - test_items: - - flag: "RotateKubeletClientCertificate=true" - compare: - op: has - value: "true" - set: true - remediation: | - Edit the Openshift node config file /etc/origin/node/node-config.yaml and set RotateKubeletClientCertificate to true. - scored: true - - - id: 2.1.15 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)" - audit: "grep -B1 RotateKubeletServerCertificate=true /etc/origin/node/node-config.yaml" - test: - test_items: - - flag: "RotateKubeletServerCertificate=true" - compare: - op: has - value: "true" - set: true - remediation: | - Edit the Openshift node config file /etc/origin/node/node-config.yaml and set RotateKubeletServerCertificate to true. - scored: true - - -- id: 2.2 - text: "Configuration Files" - checks: - - id: 2.2.1 - text: "Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %a /etc/origin/node/node.kubeconfig" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command on each worker node. - chmod 644 /etc/origin/node/node.kubeconfig - scored: true - - - id: 2.2.2 - text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)" - audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: root:root - set: true - remediation: | - Run the below command on each worker node. 
- chown root:root /etc/origin/node/node.kubeconfig - scored: true - - - id: 2.2.3 - text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %a /etc/systemd/system/atomic-openshift-node.service" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command on each worker node. - chmod 644 /etc/systemd/system/atomic-openshift-node.service - scored: true - - - id: 2.2.4 - text: "Ensure that the kubelet service file ownership is set to root:root (Scored)" - audit: "stat -c %U:%G /etc/systemd/system/atomic-openshift-node.service" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: root:root - set: true - remediation: | - Run the below command on each worker node. - chown root:root /etc/systemd/system/atomic-openshift-node.service - scored: true - - - id: 2.2.5 - text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %a /etc/origin/node/node.kubeconfig" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command on each worker node. - chmod 644 /etc/origin/node/node.kubeconfig - scored: true - - - id: 2.2.6 - text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)" - audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: root:root - set: true - remediation: | - Run the below command on each worker node. 
- chown root:root /etc/origin/node/node.kubeconfig - scored: true - - - id: 2.2.7 - text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Scored)" - audit: "stat -c %a /etc/origin/node/client-ca.crt" - tests: - bin_op: or - test_items: - - flag: "644" - compare: - op: eq - value: "644" - set: true - - flag: "640" - compare: - op: eq - value: "640" - set: true - - flag: "600" - compare: - op: eq - value: "600" - set: true - remediation: | - Run the below command on each worker node. - chmod 644 /etc/origin/node/client-ca.crt - scored: true - - - id: 2.2.8 - text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)" - audit: "stat -c %U:%G /etc/origin/node/client-ca.crt" - tests: - test_items: - - flag: "root:root" - compare: - op: eq - value: root:root - set: true - remediation: | - Run the below command on each worker node. - chown root:root /etc/origin/node/client-ca.crt - scored: true +--- +controls: +id: 2 +text: "Worker Node Security Configuration" +type: "node" +groups: +- id: 7 + text: "Kubelet" + checks: + - id: 7.1 + text: "Use Security Context Constraints to manage privileged containers as needed" + type: "skip" + scored: true + + - id: 7.2 + text: "Ensure anonymous-auth is not disabled" + type: "skip" + scored: true + + - id: 7.3 + text: "Verify that the --authorization-mode argument is set to WebHook)" + audit: "grep -A1 authorization-mode /etc/origin/node/node-config.yaml" + tests: + bin_op: or + test_items: + - flag: "authorization-mode" + set: false + - flag: "authorization-mode: Webhook" + compare: + op: has + value: "Webhook" + set: true + remediation: | + Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove authorization-mode under + kubeletArguments in /etc/origin/node/node-config.yaml or set it to "Webhook". 
+ scored: true + + - id: 7.4 + text: "Verify the OpenShift default for the client-ca-file argument" + audit: "grep -A1 client-ca-file /etc/origin/node/node-config.yaml" + tests: + test_items: + - flag: "client-ca-file" + set: false + remediation: | + Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove any configuration returned by the following: + grep -A1 client-ca-file /etc/origin/node/node-config.yaml + + Reset to the OpenShift default. + See https://github.com/openshift/openshift-ansible/blob/release-3.10/roles/openshift_node_group/templates/node-config.yaml.j2#L65 + The config file does not have this defined in kubeletArgument, but in PodManifestConfig. + scored: true + + - id: 7.5 + text: "Verify the OpenShift default setting for the read-only-port argumen" + audit: "grep -A1 read-only-port /etc/origin/node/node-config.yaml" + tests: + bin_op: or + test_items: + - flag: "read-only-port" + set: false + - flag: "read-only-port: 0" + compare: + op: has + value: "0" + set: true + remediation: | + Edit the Openshift node config file /etc/origin/node/node-config.yaml and removed so that the OpenShift default is applied. + scored: true + + - id: 7.6 + text: "Adjust the streaming-connection-idle-timeout argument" + audit: "grep -A1 streaming-connection-idle-timeout /etc/origin/node/node-config.yaml" + tests: + bin_op: or + test_items: + - flag: "streaming-connection-idle-timeout" + set: false + - flag: "5m" + set: false + remediation: | + Edit the Openshift node config file /etc/origin/node/node-config.yaml and set the streaming-connection-timeout + value like the following in node-config.yaml. 
+
+      kubeletArguments:
+        streaming-connection-idle-timeout:
+          - "5m"
+    scored: true
+
+  - id: 7.7
+    text: "Verify the OpenShift defaults for the protect-kernel-defaults argument"
+    type: "skip"
+    scored: true
+
+  - id: 7.8
+    text: "Verify the OpenShift default value of true for the make-iptables-util-chains argument"
+    audit: "grep -A1 make-iptables-util-chains /etc/origin/node/node-config.yaml"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "make-iptables-util-chains"
+        set: false
+      - flag: "make-iptables-util-chains: true"
+        compare:
+          op: has
+          value: "true"
+        set: true
+    remediation: |
+      Edit the Openshift node config file /etc/origin/node/node-config.yaml and reset make-iptables-util-chains to the OpenShift
+      default value of true.
+    scored: true
+
+  - id: 7.9
+    text: "Verify that the --keep-terminated-pod-volumes argument is set to false"
+    audit: "grep -A1 keep-terminated-pod-volumes /etc/origin/node/node-config.yaml"
+    tests:
+      test_items:
+      - flag: "keep-terminated-pod-volumes: false"
+        compare:
+          op: has
+          value: "false"
+        set: true
+    remediation: |
+      Reset to the OpenShift defaults
+    scored: true
+
+  - id: 7.10
+    text: "Verify the OpenShift defaults for the hostname-override argument"
+    type: "skip"
+    scored: true
+
+  - id: 7.11
+    text: "Set the --event-qps argument to 0"
+    audit: "grep -A1 event-qps /etc/origin/node/node-config.yaml"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "event-qps"
+        set: false
+      - flag: "event-qps: 0"
+        compare:
+          op: has
+          value: "0"
+        set: true
+    remediation: |
+      Edit the Openshift node config file /etc/origin/node/node-config.yaml and set the event-qps argument to 0 in
+      the kubeletArguments section.
+    scored: true
+
+  - id: 7.12
+    text: "Verify the OpenShift cert-dir flag for HTTPS traffic"
+    audit: "grep -A1 cert-dir /etc/origin/node/node-config.yaml"
+    tests:
+      test_items:
+      - flag: "/etc/origin/node/certificates"
+        compare:
+          op: has
+          value: "/etc/origin/node/certificates"
+        set: true
+    remediation: |
+      Reset to the OpenShift default values.
+    scored: true
+
+  - id: 7.13
+    text: "Verify the OpenShift default of 0 for the cadvisor-port argument"
+    audit: "grep -A1 cadvisor-port /etc/origin/node/node-config.yaml"
+    tests:
+      bin_op: or
+      test_items:
+      - flag: "cadvisor-port"
+        set: false
+      - flag: "cadvisor-port: 0"
+        compare:
+          op: has
+          value: "0"
+        set: true
+    remediation: |
+      Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove the cadvisor-port flag
+      if it is set in the kubeletArguments section.
+    scored: true
+
+  - id: 7.14
+    text: "Verify that the RotateKubeletClientCertificate argument is set to true"
+    audit: "grep -B1 RotateKubeletClientCertificate=true /etc/origin/node/node-config.yaml"
+    tests:
+      test_items:
+      - flag: "RotateKubeletClientCertificate=true"
+        compare:
+          op: has
+          value: "true"
+        set: true
+    remediation: |
+      Edit the Openshift node config file /etc/origin/node/node-config.yaml and set RotateKubeletClientCertificate to true.
+    scored: true
+
+  - id: 7.15
+    text: "Verify that the RotateKubeletServerCertificate argument is set to true"
+    audit: "grep -B1 RotateKubeletServerCertificate=true /etc/origin/node/node-config.yaml"
+    tests:
+      test_items:
+      - flag: "RotateKubeletServerCertificate=true"
+        compare:
+          op: has
+          value: "true"
+        set: true
+    remediation: |
+      Edit the Openshift node config file /etc/origin/node/node-config.yaml and set RotateKubeletServerCertificate to true.
+ scored: true + + +- id: 8 + text: "Configuration Files" + checks: + - id: 8.1 + text: "Verify the OpenShift default permissions for the kubelet.conf file" + audit: "stat -c %a /etc/origin/node/node.kubeconfig" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command on each worker node. + chmod 644 /etc/origin/node/node.kubeconfig + scored: true + + - id: 8.2 + text: "Verify the kubeconfig file ownership of root:root" + audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: root:root + set: true + remediation: | + Run the below command on each worker node. + chown root:root /etc/origin/node/node.kubeconfig + scored: true + + - id: 8.3 + text: "Verify the kubelet service file permissions of 644" + audit: "stat -c %a /etc/systemd/system/atomic-openshift-node.service" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command on each worker node. + chmod 644 /etc/systemd/system/atomic-openshift-node.service + scored: true + + - id: 8.4 + text: "Verify the kubelet service file ownership of root:root" + audit: "stat -c %U:%G /etc/systemd/system/atomic-openshift-node.service" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: root:root + set: true + remediation: | + Run the below command on each worker node. 
+ chown root:root /etc/systemd/system/atomic-openshift-node.service + scored: true + + - id: 8.5 + text: "Verify the OpenShift default permissions for the proxy kubeconfig file" + audit: "stat -c %a /etc/origin/node/node.kubeconfig" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command on each worker node. + chmod 644 /etc/origin/node/node.kubeconfig + scored: true + + - id: 8.6 + text: "Verify the proxy kubeconfig file ownership of root:root" + audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: root:root + set: true + remediation: | + Run the below command on each worker node. + chown root:root /etc/origin/node/node.kubeconfig + scored: true + + - id: 8.7 + text: "Verify the OpenShift default permissions for the certificate authorities file." + audit: "stat -c %a /etc/origin/node/client-ca.crt" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command on each worker node. + chmod 644 /etc/origin/node/client-ca.crt + scored: true + + - id: 8.8 + text: "Verify the client certificate authorities file ownership of root:root" + audit: "stat -c %U:%G /etc/origin/node/client-ca.crt" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: root:root + set: true + remediation: | + Run the below command on each worker node. 
+ chown root:root /etc/origin/node/client-ca.crt + scored: true From d05d71553fc0631a1624944dc0d24005f1f26709 Mon Sep 17 00:00:00 2001 From: Liz Rice Date: Tue, 23 Apr 2019 10:57:15 +0100 Subject: [PATCH 11/15] Tiny typo --- cfg/ocp-3.10/node.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cfg/ocp-3.10/node.yaml b/cfg/ocp-3.10/node.yaml index 1fbe549..7b62b08 100644 --- a/cfg/ocp-3.10/node.yaml +++ b/cfg/ocp-3.10/node.yaml @@ -52,7 +52,7 @@ groups: scored: true - id: 7.5 - text: "Verify the OpenShift default setting for the read-only-port argumen" + text: "Verify the OpenShift default setting for the read-only-port argument" audit: "grep -A1 read-only-port /etc/origin/node/node-config.yaml" tests: bin_op: or From b4419e810f133dd168eb5d429daa2b54f867dfd9 Mon Sep 17 00:00:00 2001 From: Liz Rice Date: Tue, 23 Apr 2019 11:01:38 +0100 Subject: [PATCH 12/15] Tiny typo --- cfg/ocp-3.10/node.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cfg/ocp-3.10/node.yaml b/cfg/ocp-3.10/node.yaml index 7b62b08..fc27642 100644 --- a/cfg/ocp-3.10/node.yaml +++ b/cfg/ocp-3.10/node.yaml @@ -18,7 +18,7 @@ groups: scored: true - id: 7.3 - text: "Verify that the --authorization-mode argument is set to WebHook)" + text: "Verify that the --authorization-mode argument is set to WebHook" audit: "grep -A1 authorization-mode /etc/origin/node/node-config.yaml" tests: bin_op: or From 7e8dfbc6eaf6018f19e55a8be6d48c8038b2b00d Mon Sep 17 00:00:00 2001 From: Liz Rice Date: Tue, 23 Apr 2019 11:41:48 +0100 Subject: [PATCH 13/15] Fix invalid YAML --- cfg/ocp-3.10/master.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cfg/ocp-3.10/master.yaml b/cfg/ocp-3.10/master.yaml index 4c44044..ed35fcd 100644 --- a/cfg/ocp-3.10/master.yaml +++ b/cfg/ocp-3.10/master.yaml @@ -1157,7 +1157,7 @@ groups: tests: bin_op: and test_items: - - : "Binary file /proc/1/environ matches" + - flag: "Binary file /proc/1/environ matches" compare: op: has value: 
"Binary file /proc/1/environ matches" From f9d0f4acc1b3aa74d004e03b6585f2a6d87d72b1 Mon Sep 17 00:00:00 2001 From: Liz Rice Date: Tue, 23 Apr 2019 11:59:54 +0100 Subject: [PATCH 14/15] Add OCP info into the README --- README.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 915b8af..2c0eaa1 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,8 @@ kube-bench supports the tests for Kubernetes as defined in the CIS Benchmarks 1. By default kube-bench will determine the test set to run based on the Kubernetes version running on the machine. +There is also preliminary support for Red Hat's Openshift Hardening Guide for 3.10 and 3.11. Please note that kube-bench does not automatically detect Openshift - see below. + ## Installation You can choose to @@ -47,7 +49,8 @@ You can even use your own configs by mounting them over the default ones in `/op docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -t -v path/to/my-config.yaml:/opt/kube-bench/cfg/config.yaml aquasec/kube-bench:latest [master|node] ``` -> Note: the tests require either the kubelet or kubectl binary in the path in order to know the Kubernetes version. You can pass `-v $(which kubectl):/usr/bin/kubectl` to the above invocations to resolve this. +> Note: the tests require either the kubelet or kubectl binary in the path in order to know the Kubernetes +. You can pass `-v $(which kubectl):/usr/bin/kubectl` to the above invocations to resolve this. ### Running in a kubernetes cluster @@ -112,6 +115,9 @@ go build -o kube-bench . ./kube-bench ``` +## Running on OpenShift + +kube-bench includes a set of test files for Red Hat's OpenShift hardening guide for OCP 3.10 and 3.11. To run this you will need to specify `--version ocp-3.10` when you run the `kube-bench` command (either directly or through YAML). This config version is valid for OCP 3.10 and 3.11. 
## Configuration From ceb44583dd1f37752c1dc4ebd1d55b799e1058f2 Mon Sep 17 00:00:00 2001 From: Liz Rice Date: Tue, 23 Apr 2019 16:07:27 +0100 Subject: [PATCH 15/15] Tidy up a couple of things --- README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 2c0eaa1..56e815d 100644 --- a/README.md +++ b/README.md @@ -49,15 +49,14 @@ You can even use your own configs by mounting them over the default ones in `/op docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -t -v path/to/my-config.yaml:/opt/kube-bench/cfg/config.yaml aquasec/kube-bench:latest [master|node] ``` -> Note: the tests require either the kubelet or kubectl binary in the path in order to know the Kubernetes -. You can pass `-v $(which kubectl):/usr/bin/kubectl` to the above invocations to resolve this. +> Note: the tests require either the kubelet or kubectl binary in the path in order to auto-detect the Kubernetes version. You can pass `-v $(which kubectl):/usr/bin/kubectl` to the above invocations to resolve this. ### Running in a kubernetes cluster You can run kube-bench inside a pod, but it will need access to the host's PID namespace in order to check the running processes, as well as access to some directories on the host where config files and other files are stored. Master nodes are automatically detected by kube-bench and will run master checks when possible. -The detection is done by verifying that mandatory components for master are running. (see [config file](#configuration). +The detection is done by verifying that mandatory components for master, as defined in the config files, are running (see [Configuration](#configuration)). The supplied `job.yaml` file can be applied to run the tests as a job. For example: