From ca8743c1f7f7fbd63ed9d0826076861c8690c420 Mon Sep 17 00:00:00 2001 From: KiranBodipi <62982917+KiranBodipi@users.noreply.github.com> Date: Thu, 1 Jun 2023 19:07:50 +0530 Subject: [PATCH] add support VMware Tanzu(TKGI) Benchmarks v1.2.53 (#1452) * add Support VMware Tanzu(TKGI) Benchmarks v1.2.53 with this change, we are adding 1. latest kubernetes cis benchmarks for VMware Tanzu1.2.53 2. logic to kube-bench so that kube-bench can auto detect vmware platform, will be able to execute the respective vmware tkgi compliance checks. 3. job-tkgi.yaml file to run the benchmark as a job in tkgi cluster Reference Document for checks: https://network.pivotal.io/products/p-compliance-scanner/#/releases/1248397 * add Support VMware Tanzu(TKGI) Benchmarks v1.2.53 with this change, we are adding 1. latest kubernetes cis benchmarks for VMware Tanzu1.2.53 2. logic to kube-bench so that kube-bench can auto detect vmware platform, will be able to execute the respective vmware tkgi compliance checks. 3. job-tkgi.yaml file to run the benchmark as a job in tkgi cluster Reference Document for checks: https://network.pivotal.io/products/p-compliance-scanner/#/releases/1248397 --- cfg/config.yaml | 7 + cfg/tkgi-1.2.53/config.yaml | 2 + cfg/tkgi-1.2.53/controlplane.yaml | 67 ++ cfg/tkgi-1.2.53/etcd.yaml | 121 ++++ cfg/tkgi-1.2.53/master.yaml | 1098 +++++++++++++++++++++++++++++ cfg/tkgi-1.2.53/node.yaml | 418 +++++++++++ cfg/tkgi-1.2.53/policies.yaml | 287 ++++++++ cmd/util.go | 4 +- docs/platforms.md | 1 + docs/running.md | 15 + job-tkgi.yaml | 54 ++ 11 files changed, 2073 insertions(+), 1 deletion(-) create mode 100644 cfg/tkgi-1.2.53/config.yaml create mode 100644 cfg/tkgi-1.2.53/controlplane.yaml create mode 100644 cfg/tkgi-1.2.53/etcd.yaml create mode 100644 cfg/tkgi-1.2.53/master.yaml create mode 100644 cfg/tkgi-1.2.53/node.yaml create mode 100644 cfg/tkgi-1.2.53/policies.yaml create mode 100644 job-tkgi.yaml diff --git a/cfg/config.yaml b/cfg/config.yaml index 861fe45..8e06b88 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -270,6 +270,7 @@ version_mapping: "aks-1.0": "aks-1.0" "ack-1.0": "ack-1.0" "cis-1.6-k3s": "cis-1.6-k3s" + "tkgi-1.2.53": "tkgi-1.2.53" target_mapping: "cis-1.5": @@ -372,3 +373,9 @@ target_mapping: - "controlplane" - "policies" - "managedservices" + "tkgi-1.2.53": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" diff --git a/cfg/tkgi-1.2.53/config.yaml b/cfg/tkgi-1.2.53/config.yaml new file mode 100644 index 0000000..b783945 --- /dev/null +++ b/cfg/tkgi-1.2.53/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/tkgi-1.2.53/controlplane.yaml b/cfg/tkgi-1.2.53/controlplane.yaml new file mode 100644 index 0000000..4f3ab67 --- /dev/null +++ b/cfg/tkgi-1.2.53/controlplane.yaml @@ -0,0 +1,67 @@ +--- +controls: +version: "tkgi-1.2.53" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users" + audit: ps -ef | grep kube-apiserver | grep -- "--oidc-issuer-url=" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + Exception + This setting is site-specific. It can be set in the "Configure created clusters to use UAA as the OIDC provider." 
+ section of the "UAA" + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-policy-file=" + tests: + test_items: + - flag: "--audit-policy-file" + remediation: | + Create an audit policy file for your cluster. + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns" + audit: | + diff /var/vcap/jobs/kube-apiserver/config/audit_policy.yml \ <(echo "--- apiVersion: audit.k8s.io/v1beta1 kind: + Policy rules: - level: None resources: - group: '' resources: - endpoints - services - services/status users: - + system:kube-proxy verbs: - watch - level: None resources: - group: '' resources: - nodes - nodes/status users: - + kubelet verbs: - get - level: None resources: - group: '' resources: - nodes - nodes/status userGroups: - + system:nodes verbs: - get - level: None namespaces: - kube-system resources: - group: '' resources: - + endpoints users: - system:kube-controller-manager - system:kube-scheduler - system:serviceaccount:kube- + system:endpoint-controller verbs: - get - update - level: None resources: - group: '' resources: - namespaces - + namespaces/status - namespaces/finalize users: - system:apiserver verbs: - get - level: None resources: - + group: metrics.k8s.io users: - system:kube-controller-manager verbs: - get - list - level: None + nonResourceURLs: - \"/healthz*\" - \"/version\" - \"/swagger*\" - level: None resources: - group: '' resources: - + events - level: Request omitStages: - RequestReceived resources: - group: '' resources: - nodes/status - + pods/status userGroups: - system:nodes verbs: - update - patch - level: Request omitStages: - + RequestReceived users: - system:serviceaccount:kube-system:namespace-controller verbs: - deletecollection - + level: Metadata omitStages: - RequestReceived resources: - group: '' resources: - secrets - configmaps - group: + authentication.k8s.io resources: - tokenreviews - level: Request omitStages: - RequestReceived resources: - + group: '' - group: admissionregistration.k8s.io - group: apiextensions.k8s.io - group: apiregistration.k8s.io - + group: apps - group: authentication.k8s.io - group: authorization.k8s.io - group: autoscaling - group: batch - + group: certificates.k8s.io - group: extensions - group: metrics.k8s.io - group: networking.k8s.io - group: policy - + group: rbac.authorization.k8s.io - group: settings.k8s.io - group: storage.k8s.io verbs: - get - list - watch - level: + RequestResponse omitStages: - RequestReceived resources: - group: '' - group: admissionregistration.k8s.io - + group: apiextensions.k8s.io - group: apiregistration.k8s.io - group: apps - group: authentication.k8s.io - group: + authorization.k8s.io - group: autoscaling - group: batch - group: certificates.k8s.io - group: extensions - group: + metrics.k8s.io - group: networking.k8s.io - group: policy - group: rbac.authorization.k8s.io - group: + settings.k8s.io - group: storage.k8s.io - level: Metadata omitStages: - RequestReceived ") + type: "manual" + remediation: | + Consider modification of the audit policy in use on the cluster to include these items, at a + minimum. 
+ scored: false diff --git a/cfg/tkgi-1.2.53/etcd.yaml b/cfg/tkgi-1.2.53/etcd.yaml new file mode 100644 index 0000000..8f99b7f --- /dev/null +++ b/cfg/tkgi-1.2.53/etcd.yaml @@ -0,0 +1,121 @@ +--- +controls: +version: "tkgi-1.2.53" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration Files" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate" + audit: ps -ef | grep etcd | grep -- "--cert-file=/var/vcap/jobs/etcd/config/etcd.crt" | grep -- "--key-file=/var/vcap/jobs/etcd/config/etcd.key" + type: manual + tests: + bin_op: and + test_items: + - flag: "--cert-file" + - flag: "--key-file" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: false + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true" + audit: ps -ef | grep etcd | grep -- "--client\-cert\-auth" + tests: + test_items: + - flag: "--client-cert-auth" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file etcd config on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true" + audit: ps -ef | grep etcd | grep -v -- "--auto-tls" + tests: + test_items: + - flag: "--auto-tls" + compare: + op: eq + value: true + set: false + remediation: | + Edit the etcd pod specification file etcd config on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate" + audit: ps -ef | grep etcd | grep -- "--peer-cert-file=/var/vcap/jobs/etcd/config/peer.crt" | grep -- "--peer-key-file=/var/vcap/jobs/etcd/config/peer.key" + type: manual + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + - flag: "--peer-key-file" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file etcd config on the + master node and set the below parameters. + --peer-client-file= + --peer-key-file= + scored: false + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true" + audit: ps -ef | grep etcd | grep -- "--peer\-client\-cert\-auth" + tests: + test_items: + - flag: "--peer-client-cert-auth" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file etcd config on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true" + audit: ps -ef | grep etcd | grep -v -- "--peer-auto-tls" + tests: + test_items: + - flag: "--peer-auto-tls" + compare: + op: eq + value: true + set: false + remediation: | + Edit the etcd pod specification file etcd config on the master + node and either remove the --peer-auto-tls parameter or set it to false. 
+ --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd" + audit: diff /var/vcap/jobs/kube-apiserver/config/kubernetes-ca.pem /var/vcap/jobs/etcd/config/etcd-ca.crt | grep -c"^>" | grep -v "^0$" + type: manual + tests: + test_items: + - flag: "--trusted-ca-file" + remediation: | + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file etcd config on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/tkgi-1.2.53/master.yaml b/cfg/tkgi-1.2.53/master.yaml new file mode 100644 index 0000000..8d19457 --- /dev/null +++ b/cfg/tkgi-1.2.53/master.yaml @@ -0,0 +1,1098 @@ +--- +controls: +version: "tkgi-1.2.53" +id: 1 +text: "Master Node Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Master Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-apiserver/config/bpm.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + master node. + For example, chmod 644 /var/vcap/jobs/kube-apiserver/config/bpm.yml + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kube-apiserver/config/bpm.yml + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the + master node. + For example, chown root:root /var/vcap/jobs/kube-apiserver/config/bpm.yml + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-controller-manager/config/bpm.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + master node. + For example, chmod 644 /var/vcap/jobs/kube-apiserver/config/bpm.yml + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kube-controller-manager/config/bpm.yml + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the + master node. + For example, chown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-scheduler/config/bpm.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + master node. 
+ For example, chown 644 /var/vcap/jobs/kube-scheduler/config/bpm.yml + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kube-scheduler/config/bpm.yml + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root /var/vcap/jobs/kube-scheduler/config/bpm.yml + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/etcd/config/bpm.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 stat -c permissions=%a /var/vcap/jobs/etcd/config/bpm.yml + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/etcd/config/bpm.yml + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root /var/vcap/jobs/etcd/config/bpm.yml + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive" + audit: find ((CNI_DIR))/config/ -type f -not -perm 640 | awk 'END{print NR}' | grep "^0$" + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root" + audit: find ((CNI_DIR))/config/ -type f -not -user root -or -not -group root | awk 'END{print NR}' | grep "^0$" + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive" + audit: stat -c permissions=%a /var/vcap/store/etcd/ + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/vcap/store/etcd/ + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd" + audit: stat -c %U:%G /var/vcap/store/etcd/ + type: manual + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/vcap/store/etcd/ + Exception: All bosh processes run as vcap user + The etcd data directory ownership is vcap:vcap + scored: false + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /etc/kubernetes/admin.conf + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. 
+ For example, + chmod 644 /etc/kubernetes/admin.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate- + kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root" + audit: stat -c %U:%G /etc/kubernetes/admin.conf + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root /etc/kubernetes/admin.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate- + kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.15 + text: "Ensure that the scheduler configuration file permissions are set to 644" + audit: stat -c permissions=%a /etc/kubernetes/scheduler.conf + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 /etc/kubernetes/scheduler.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate- + kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.16 + text: "Ensure that the scheduler configuration file ownership is set to root:root" + audit: stat -c %U:%G /etc/kubernetes/scheduler.conf + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root /etc/kubernetes/scheduler.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate- + kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.17 + text: "Ensure that the controller manager configuration file permissions are set to 644" + audit: stat -c permissions=%a /etc/kubernetes/controller-manager.conf + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 /etc/kubernetes/controller-manager.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate- + kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.18 + text: "Ensure that the controller manager configuration file ownership is set to root:root" + audit: stat -c %U:%G /etc/kubernetes/controller-manager.conf + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. 
+ For example, + chown root:root /etc/kubernetes/controller-manager.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate- + kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root" + audit: | + find -L /var/vcap/jobs/kube-apiserver/config /var/vcap/jobs/kube-controller-manager/config /var/vcap/jobs/kube- + scheduler/config ((CNI_DIR))/config /var/vcap/jobs/etcd/config | sort -u | xargs ls -ld | awk '{ print $3 " " $4}' | + grep -c -v "root root" | grep "^0$" + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown -R root:root /etc/kubernetes/pki/ + Exception + Files are group owned by vcap + scored: false + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive" + audit: | + find -L /var/vcap/jobs/kube-apiserver/config \( -name '*.crt' -or -name '*.pem' \) -and -not -perm 640 | grep -v + "packages/golang" | grep -v "packages/ncp_rootfs" | awk 'END{print NR}' | grep "^0$" + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 644 /etc/kubernetes/pki/*.crt + Exception + Ignoring packages/golang as the package includes test certs used by golang. Ignoring packages/ncp_rootfs on + TKG1 with NSX-T container plugin uses the package is used as the overlay filesystem `mount | grep + "packages/ncp_rootfs"` + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600" + audit: | + find -L /var/vcap/jobs/kube-apiserver/config -name '*.key' -and -not -perm 600 | awk 'END{print NR}' | grep "^0$" + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: eq + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + Exception + Permission on etcd .key files is set to 640, to allow read access to vcap group + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false" + audit: ps -ef | grep kube-apiserver | grep -- "--anonymous-auth=false" + type: manual + tests: + test_items: + - flag: "--anonymous-auth=false" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the below parameter. + --anonymous-auth=false + Exception + The flag is set to true to enable API discoveribility. + "Starting in 1.6, the ABAC and RBAC authorizers require explicit authorization of the system:anonymous user or the + system:unauthenticated group, so legacy policy rules that grant access to the * user or * group do not include + anonymous users." 
+ -authorization-mode is set to RBAC + scored: false + + - id: 1.2.2 + text: "Ensure that the --basic-auth-file argument is not set" + audit: ps -ef | grep kube-apiserver | grep -v -- "--basic-auth-file" + tests: + test_items: + - flag: "--basic-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file kube-apiserver + on the master node and remove the --basic-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --token-auth-file parameter is not set" + audit: ps -ef | grep "/var/vcap/packages/kubernetes/bin/kube-apiserve[r]" | grep -v tini | grep -v -- "--token-auth-file=" + type: manual + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file /var/vcap/packages/kubernetes/bin/kube-apiserve[r] + on the master node and remove the --token-auth-file= parameter. + Exception + Since k8s processes' lifecyle are managed by BOSH, token based authentication is required when processes + restart. The file has 0640 permission and root:vcap ownership + scored: false + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true" + audit: ps -ef | grep kube-apiserver | grep -v -- "--kubelet-https=true" + tests: + test_items: + - flag: "--kubelet-https=true" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and remove the --kubelet-https parameter. + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -- "--kubelet-client-certificate=/var/vcap/jobs/kube-apiserver/config/kubelet- + client-cert.pem" | grep -- "--kubelet-client-key=/var/vcap/jobs/kube-apiserver/config/kubelet-client-key.pem" + type: manual + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + kube-apiserver on the master node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: false + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate" + audit: ps -ef | grep kube-apiserver | grep -- "--kubelet-certificate-authority=" + type: manual + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + kube-apiserver on the master node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + Exception + JIRA ticket #PKS-696 created to investigate a fix. 
PR opened to address the issue https://github.com/cloudfoundry- + incubator/kubo-release/pull/179 + scored: false + + - id: 1.2.7 + text: "Ensure API server authorization modes does not include AlwaysAllow" + audit: | + ps -ef | grep kube-apiserver | grep -- "--authorization-mode" && ps -ef | grep kube-apiserver | grep -v -- "-- + authorization-mode=\(\w\+\|,\)*AlwaysAllow\(\w\+\|,\)*" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--authorization-mode=\(\w\+\|,\)*Node\(\w\+\|,\)* --" + type: manual + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + Exception + This flag can be added using Kubernetes Profiles. Please follow instructions here https://docs.pivotal.io/tkgi/1- + 8/k8s-profiles.html + scored: false + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--authorization-mode=\(\w\+\|,\)*RBAC\(\w\+\|,\)* --" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --authorization-mode parameter to a value that includes RBAC, + for example: + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*EventRateLimit\ + (\w\+\|,\)*" + type: manual + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file kube-apiserver + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + Exception + "Note: This is an Alpha feature in the Kubernetes v1.13" + Control provides rate limiting and is site-specific + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set" + audit: | + ps -ef | grep kube-apiserver | grep -v -- "--enable-admission-plugins=\(\w\+\|,\)*AlwaysAdmit\(\w\+\|,\)*" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. 
+ scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*AlwaysPullImages\ + (\w\+\|,\)* --" + type: manual + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + Exception + "Credentials would be required to pull the private images every time. Also, in trusted + environments, this might increases load on network, registry, and decreases speed. + This setting could impact offline or isolated clusters, which have images pre-loaded and do + not have access to a registry to pull in-use images. This setting is not appropriate for + clusters which use this configuration." + TKGi is packages with pre-loaded images. + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*SecurityContextDeny\ + (\w\+\|,\)* --" + type: manual + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + Exception + This setting is site-specific. It can be set in the "Admission Plugins" section of the appropriate "Plan" + scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--disable-admission-plugins=\(\w\+\|,\)*ServiceAccount\ + (\w\+\|,\)* --" + tests: + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file kube-apiserver + on the master node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--disable-admission-plugins=\ + (\w\+\|,\)*NamespaceLifecycle\(\w\+\|,\)* --" + tests: + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. 
+ scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin PodSecurityPolicy is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*PodSecurityPolicy\ + (\w\+\|,\)* --" + type: manual + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Follow the documentation and create Pod Security Policy objects as per your environment. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the --enable-admission-plugins parameter to a + value that includes PodSecurityPolicy: + --enable-admission-plugins=...,PodSecurityPolicy,... + Then restart the API Server. + Exception + This setting is site-specific. It can be set in the "Admission Plugins" section of the appropriate "Plan" + scored: false + + - id: 1.2.17 + text: "Ensure that the admission control plugin NodeRestriction is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*NodeRestriction\ + (\w\+\|,\)* --" + type: manual + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + Exception + PR opened to address the issue https://github.com/cloudfoundry-incubator/kubo-release/pull/179" + scored: true + + - id: 1.2.18 + text: "Ensure that the --insecure-bind-address argument is not set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--insecure-bind-address" + tests: + test_items: + - flag: "--insecure-bind-address" + set: false + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and remove the --insecure-bind-address parameter. + scored: true + + - id: 1.2.19 + text: "Ensure that the --insecure-port argument is set to 0" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--insecure-port=0" + type: manual + tests: + test_items: + - flag: "--insecure-port=0" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the below parameter. + --insecure-port=0 + Exception + Related to 1.2.1 + The insecure port is 8080, and is binding only to localhost on the master node, in use by other components on the + master that are bypassing authn/z. + The components connecting to the APIServer are: + kube-controller-manager + kube-proxy + kube-scheduler + Pods are not scheduled on the master node. + scored: false + + - id: 1.2.20 + text: "Ensure that the --secure-port argument is not set to 0" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--secure-port=0" + tests: + test_items: + - flag: "--secure-port" + compare: + op: noteq + value: 0 + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. 
+ scored: true + + - id: 1.2.21 + text: "Ensure that the --profiling argument is set to false" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--profiling=false" + tests: + test_items: + - flag: "--profiling=false" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-path argument is set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-path=\/var\/vcap\/sys\/log\/kube-apiserver\/audit.log" + type: manual + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example: + --audit-log-path=/var/log/apiserver/audit.log + scored: false + + - id: 1.2.23 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-maxage=30" + type: manual + tests: + test_items: + - flag: "--audit-log-maxage=30" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days: + --audit-log-maxage=30 + Exception + This setting can be set to expected value using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html + scored: false + + - id: 1.2.24 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-maxbackup=10" + type: manual + tests: + test_items: + - flag: "--audit-log-maxbackup=10" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. + --audit-log-maxbackup=10 + Exception + This setting can be set to expected value using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html + scored: false + + - id: 1.2.25 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-maxsize=100" + type: manual + tests: + test_items: + - flag: "--audit-log-maxsize=100" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB: + --audit-log-maxsize=100 + Exception + This setting can be set to expected value using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html + scored: false + + - id: 1.2.26 + text: "Ensure that the --request-timeout argument is set as appropriate" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--request-timeout=" + type: manual + tests: + test_items: + - flag: "--request-timeout" + remediation: | + Edit the API server pod specification file kube-apiserver + and set the below parameter as appropriate and if needed. 
+ For example, + --request-timeout=300s + scored: false + + - id: 1.2.27 + text: "Ensure that the --service-account-lookup argument is set to true" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--service-account-lookup" + tests: + test_items: + - flag: "--service-account-lookup=true" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.28 + text: "Ensure that the --service-account-key-file argument is set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--service-account-key-file=/var/vcap/jobs/kube- + apiserver/config/service-account-public-key.pem" + type: manual + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --service-account-key-file parameter + to the public key file for service accounts: + --service-account-key-file= + scored: false + + - id: 1.2.29 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--etcd-certfile=/var/vcap/jobs/kube-apiserver/config/etcd- + client.crt" | grep -- "--etcd-keyfile=/var/vcap/jobs/kube-apiserver/config/etcd-client.key" + type: manual + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + - flag: "--etcd-keyfile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: false + + - id: 1.2.30 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--tls-cert-file=/var/vcap/jobs/kube-apiserver/config/kubernetes.pem" | grep -- "--tls-private-key-file=/var/vcap/jobs/kube- + apiserver/config/kubernetes-key.pem" + type: manual + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: false + + - id: 1.2.31 + text: "Ensure that the --client-ca-file argument is set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--client-ca-file=/var/vcap/jobs/kube-apiserver/config/kubernetes-ca.pem" + type: manual + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the client certificate authority file. 
+ --client-ca-file= + scored: false + + - id: 1.2.32 + text: "Ensure that the --etcd-cafile argument is set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--etcd-cafile=/var/vcap/jobs/kube-apiserver/config/etcd-ca.crt" + type: manual + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: false + + - id: 1.2.33 + text: "Ensure that the --encryption-provider-config argument is set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--encryption-provider-config=" + type: manual + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config= + Exception + Encrypting Secrets in an etcd database can be enabled using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles-encrypt-etcd.html + scored: false + + - id: 1.2.34 + text: "Ensure that the encryption provider is set to aescbc" + audit: | + ENC_CONF=`ps -ef | grep kube-apiserver | grep -v tini | sed $'s/ /\\\\\\n/g' | grep -- '--encryption-provider- + config=' | cut -d'=' -f2` grep -- "- \(aescbc\|kms\|secretbox\):" $ENC_CONF + type: manual + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + Exception + Encrypting Secrets in an etcd database can be enabled using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles-encrypt-etcd.html + scored: false + + - id: 1.2.35 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--tls-cipher-suites=" + type: manual + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the master node and set the below parameter. 
+ --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM + _SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM + _SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM + _SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate" + audit: ps -ef | grep kube-controller-manager | grep -- "--terminated-pod-gc-threshold=100" + type: manual + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example: + --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure controller manager profiling is disabled" + audit: ps -ef | grep kube-controller-manager | grep -- "--profiling=false" + tests: + test_items: + - flag: "--profiling=false" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true" + audit: ps -ef | grep kube-controller-manager | grep -- "--use\-service\-account\-credentials=true" + tests: + test_items: + - flag: "--use-service-account-credentials=true" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate" + audit: | + ps -ef | grep kube-controller-manager | grep -- "--service\-account\-private\-key\-file=\/var\/vcap\/jobs\/kube\- + controller\-manager\/config\/service\-account\-private\-key.pem" + type: manual + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: false + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate" + audit: | + ps -ef | grep kube-controller-manager | grep -- "--root\-ca\-file=\/var\/vcap\/jobs\/kube\-controller\-manager\/config\/ca.pem" + type: manual + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node and set the --root-ca-file parameter to the certificate bundle file`. + --root-ca-file= + scored: false + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true" + audit: | + ps -ef | grep kube-controller-manager | grep -- "--feature-gates=\ + (\w\+\|,\)*RotateKubeletServerCertificate=true\(\w\+\|,\)*" + type: manual + tests: + test_items: + - flag: "--feature-gates=RotateKubeletServerCertificate=true" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. 
+ --feature-gates=RotateKubeletServerCertificate=true + Exception + Certificate rotation is handled by Credhub + scored: false + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1" + audit: | + ps -ef | grep "/var/vcap/packages/kubernetes/bin/kube-controller-manage[r]" | grep -v tini | grep -- "--bind-address=127.0.0.1" + type: manual + tests: + test_items: + - flag: "--bind-address=127.0.0.1" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node and ensure the correct value for the --bind-address parameter + Exception + This setting can be set to expected value using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html + scored: false + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false" + audit: ps -ef | grep kube-scheduler | grep -v tini | grep -- "--profiling=false" + tests: + test_items: + - flag: "--profiling=false" + remediation: | + Edit the Scheduler pod specification file scheduler config file + on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1" + audit: ps -ef | grep "/var/vcap/packages/kubernetes/bin/kube-schedule[r]" | grep -v tini | grep -- "--bind-address=127.0.0.1" + type: manual + tests: + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + remediation: | + Edit the Scheduler pod specification file scheduler config + on the master node and ensure the correct value for the --bind-address parameter + Exception + This setting can be set to expected value using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html + scored: false diff --git a/cfg/tkgi-1.2.53/node.yaml b/cfg/tkgi-1.2.53/node.yaml new file mode 100644 index 0000000..8e0f095 --- /dev/null +++ b/cfg/tkgi-1.2.53/node.yaml @@ -0,0 +1,418 @@ +--- +controls: +version: "tkgi-1.2.53" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kubelet/monit + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 /var/vcap/jobs/kubelet/monit + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kubelet/monit + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root /var/vcap/jobs/kubelet/monit + Exception + File is group owned by vcap + scored: true + + - id: 4.1.3 + text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-proxy/config/kubeconfig + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. 
+ For example, + chmod 644 /var/vcap/jobs/kube-proxy/config/kubeconfig + scored: true + + - id: 4.1.4 + text: "Ensure that the proxy kubeconfig file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kube-proxy/config/kubeconfig + type: manual + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chown root:root /var/vcap/jobs/kube-proxy/config/kubeconfig + Exception + File is group owned by vcap + scored: false + + - id: 4.1.5 + text: "Ensure that the kubelet.conf file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-proxy/config/kubeconfig + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 /var/vcap/jobs/kube-proxy/config/kubeconfig + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on worker + scored: false + + - id: 4.1.6 + text: "Ensure that the kubelet.conf file ownership is set to root:root" + audit: stat -c %U:%G /etc/kubernetes/kubelet.conf + type: manual + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root /etc/kubernetes/kubelet.conf + Exception + file ownership is vcap:vcap + scored: false + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kubelet/config/kubelet-client-ca.pem + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 644 + scored: true + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kubelet/config/kubelet-client-ca.pem + type: manual + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. 
+ chown root:root + Exception + File is group owned by vcap + scored: false + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 /var/vcap/jobs/kubelet/config/kubeletconfig.yml + scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kubelet/config/kubeletconfig.yml + type: manual + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root /var/vcap/jobs/kubelet/config/kubeletconfig.yml + Exception + File is group owned by vcap + scored: false + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the anonymous-auth argument is set to false" + audit: grep "^authentication:\n\s{2}anonymous:\n\s{4}enabled:\sfalse$" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "enabled: false" + remediation: | + If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow" + audit: | + grep "^authorization:\n\s{2}mode: AlwaysAllow$" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "AlwaysAllow" + set: false + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If + using executable arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate" + audit: | + grep ^authentication:\n\s{2}anonymous:\n\s{4}enabled:\sfalse\n(\s{2}webhook:\n\s{4}cacheTTL:\s\d+s\n\s{4}enabled:.*\n)? + \s{2}x509:\n\s{4}clientCAFile:\s"\/var\/vcap\/jobs\/kubelet\/config\/kubelet-client-ca\.pem" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "clientCAFile" + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 4.2.4
+ text: "Ensure that the --read-only-port argument is set to 0"
+ audit: |
+ grep "readOnlyPort: 0" /var/vcap/jobs/kubelet/config/kubeletconfig.yml
+ tests:
+ test_items:
+ - flag: "readOnlyPort: 0"
+ remediation: |
+ If using a Kubelet config file, edit the file to set readOnlyPort to 0.
+ If using command line arguments, edit the kubelet service file
+ on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ --read-only-port=0
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 4.2.5
+ text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0"
+ audit: |
+ grep -- "streamingConnectionIdleTimeout: 0" /var/vcap/jobs/kubelet/config/kubeletconfig.yml
+ tests:
+ test_items:
+ - flag: "streamingConnectionIdleTimeout: 0"
+ set: false
+ remediation: |
+ If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
+ value other than 0.
+ If using command line arguments, edit the kubelet service file
+ on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ --streaming-connection-idle-timeout=5m
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 4.2.6
+ text: "Ensure that the --protect-kernel-defaults argument is set to true"
+ audit: |
+ grep -- "protectKernelDefaults: true" /var/vcap/jobs/kubelet/config/kubeletconfig.yml
+ tests:
+ test_items:
+ - flag: "protectKernelDefaults: true"
+ remediation: |
+ If using a Kubelet config file, edit the file to set protectKernelDefaults: true.
+ If using command line arguments, edit the kubelet service file
+ on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ --protect-kernel-defaults=true
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 4.2.7
+ text: "Ensure that the --make-iptables-util-chains argument is set to true"
+ audit: |
+ grep -- "makeIPTablesUtilChains: true" /var/vcap/jobs/kubelet/config/kubeletconfig.yml
+ tests:
+ test_items:
+ - flag: "makeIPTablesUtilChains: true"
+ remediation: |
+ If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true.
+ If using command line arguments, edit the kubelet service file
+ on each worker node and
+ remove the --make-iptables-util-chains argument from the
+ KUBELET_SYSTEM_PODS_ARGS variable.
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 4.2.8
+ text: "Ensure that the --hostname-override argument is not set"
+ audit: |
+ ps -ef | grep [k]ubelet | grep -- --[c]onfig=/var/vcap/jobs/kubelet/config/kubeletconfig.yml | grep -v -- --hostname-override
+ type: manual
+ remediation: |
+ Edit the kubelet service file
+ on each worker node and remove the --hostname-override argument from the
+ KUBELET_SYSTEM_PODS_ARGS variable.
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ Exception
+ On GCE, the hostname needs to be set to the instance name so the GCE cloud provider can manage the instance.
+ In other cases it's set to the IP address of the VM.
+ scored: false
+
+ - id: 4.2.9
+ text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture"
+ audit: grep -- "--event-qps" /var/vcap/jobs/kubelet/config/kubeletconfig.yml
+ type: manual
+ tests:
+ test_items:
+ - flag: "--event-qps"
+ compare:
+ op: eq
+ value: 0
+ remediation: |
+ If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
+ If using command line arguments, edit the kubelet service file
+ on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: false
+
+ - id: 4.2.10
+ text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate"
+ audit: |
+ grep ^tlsCertFile:\s\"\/var\/vcap\/jobs\/kubelet\/config\/kubelet\.pem\"\ntlsPrivateKeyFile:\s\"\/var\/vcap\/jobs\/kubelet\/config\/kubelet-key\.pem\"$
+ /var/vcap/jobs/kubelet/config/kubeletconfig.yml
+ tests:
+ bin_op: and
+ test_items:
+ - flag: "tlsCertFile"
+ - flag: "tlsPrivateKeyFile"
+ remediation: |
+ If using a Kubelet config file, edit the file to set tlsCertFile to the location
+ of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile
+ to the location of the corresponding private key file.
+ If using command line arguments, edit the kubelet service file
+ on each worker node and
+ set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+ --tls-cert-file=
+ --tls-private-key-file=
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 4.2.11
+ text: "Ensure that the --rotate-certificates argument is not set to false"
+ audit: ps -ef | grep kubele[t] | grep -- "--rotate-certificates=false"
+ type: manual
+ tests:
+ test_items:
+ - flag: "--rotate-certificates=false"
+ set: false
+ remediation: |
+ If using a Kubelet config file, edit the file to add the line rotateCertificates: true or
+ remove it altogether to use the default value.
+ If using command line arguments, edit the kubelet service file
+ on each worker node and
+ remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+ variable.
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ Exception
+ Certificate rotation is handled by Credhub
+ scored: false
+
+ - id: 4.2.12
+ text: "Verify that the RotateKubeletServerCertificate argument is set to true"
+ audit: ps -ef | grep kubele[t] | grep -- "--feature-gates=\(\w\+\|,\)*RotateKubeletServerCertificate=true\(\w\+\|,\)*"
+ type: manual
+ tests:
+ test_items:
+ - flag: "RotateKubeletServerCertificate=true"
+ remediation: |
+ Edit the kubelet service file
+ on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+ --feature-gates=RotateKubeletServerCertificate=true
+ Based on your system, restart the kubelet service.
For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ Exception
+ Certificate rotation is handled by Credhub
+ scored: false
+
+ - id: 4.2.13
+ text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers"
+ audit: ps -ef | grep kubele[t] | grep -- "--tls-cipher-
+ suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
+ type: manual
+ tests:
+ test_items:
+ - flag: --tls-cipher-suites
+ compare:
+ op: regex
+ value: (TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256|TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256|TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305|TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384|TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305|TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384|TLS_RSA_WITH_AES_256_GCM_SHA384|TLS_RSA_WITH_AES_128_GCM_SHA256)
+ remediation: |
+ If using a Kubelet config file, edit the file to set TLSCipherSuites: to
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+ or to a subset of these values.
+ If using executable arguments, edit the kubelet service file
+ on each worker node and
+ set the --tls-cipher-suites parameter as follows, or to a subset of these values.
+ --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: false
diff --git a/cfg/tkgi-1.2.53/policies.yaml b/cfg/tkgi-1.2.53/policies.yaml
new file mode 100644
index 0000000..ef5f1ad
--- /dev/null
+++ b/cfg/tkgi-1.2.53/policies.yaml
@@ -0,0 +1,287 @@
+---
+controls:
+version: "tkgi-1.2.53"
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+ - id: 5.1
+ text: "RBAC and Service Accounts"
+ checks:
+ - id: 5.1.1
+ text: "Ensure that the cluster-admin role is only used where required"
+ type: "manual"
+ remediation: |
+ Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+ if they need this role or if they could use a role with fewer privileges.
+ Where possible, first bind users to a lower privileged role and then remove the
+ clusterrolebinding to the cluster-admin role:
+ kubectl delete clusterrolebinding [name]
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.1.2
+ text: "Minimize access to secrets"
+ type: "manual"
+ remediation: |
+ Where possible, remove get, list and watch access to secret objects in the cluster.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.1.3
+ text: "Minimize wildcard use in Roles and ClusterRoles"
+ type: "manual"
+ remediation: |
+ Where possible, replace any use of wildcards in clusterroles and roles with specific
+ objects or actions.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.1.4
+ text: "Minimize access to create pods"
+ type: "manual"
+ remediation: |
+ Where possible, remove create access to pod objects in the cluster.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.1.5
+ text: "Ensure that default service accounts are not actively used."
+ type: "manual"
+ remediation: |
+ Create explicit service accounts wherever a Kubernetes workload requires specific access
+ to the Kubernetes API server.
+ Modify the configuration of each default service account to include this value
+ automountServiceAccountToken: false
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.1.6
+ text: "Ensure that Service Account Tokens are only mounted where necessary"
+ type: "manual"
+ remediation: |
+ Modify the definition of pods and service accounts which do not need to mount service
+ account tokens to disable it.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.2
+ text: "Pod Security Policies"
+ checks:
+ - id: 5.2.1
+ text: "Minimize the admission of privileged containers"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that
+ the .spec.privileged field is omitted or set to false.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.2.2
+ text: "Minimize the admission of containers wishing to share the host process ID namespace"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the
+ .spec.hostPID field is omitted or set to false.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.2.3
+ text: "Minimize the admission of containers wishing to share the host IPC namespace"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the
+ .spec.hostIPC field is omitted or set to false.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.2.4
+ text: "Minimize the admission of containers wishing to share the host network namespace"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the
+ .spec.hostNetwork field is omitted or set to false.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.2.5
+ text: "Minimize the admission of containers with allowPrivilegeEscalation"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the
+ .spec.allowPrivilegeEscalation field is omitted or set to false.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.2.6
+ text: "Minimize the admission of root containers"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the
+ .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of
+ UIDs not including 0.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.2.7
+ text: "Minimize the admission of containers with the NET_RAW capability"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the
+ .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.2.8
+ text: "Minimize the admission of containers with added capabilities"
+ type: "manual"
+ remediation: |
+ Ensure that allowedCapabilities is not present in PSPs for the cluster unless
+ it is set to an empty array.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.2.9
+ text: "Minimize the admission of containers with capabilities assigned"
+ type: "manual"
+ remediation: |
+ Review the use of capabilities in applications running on your cluster.
Where a namespace
+ contains applications which do not require any Linux capabilities to operate, consider adding
+ a PSP which forbids the admission of containers which do not drop all capabilities.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.3
+ text: "Network Policies and CNI"
+ checks:
+ - id: 5.3.1
+ text: "Ensure that the CNI in use supports Network Policies"
+ type: "manual"
+ remediation: |
+ If the CNI plugin in use does not support network policies, consideration should be given to
+ making use of a different plugin, or finding an alternate mechanism for restricting traffic
+ in the Kubernetes cluster.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.3.2
+ text: "Ensure that all Namespaces have Network Policies defined"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create NetworkPolicy objects as you need them.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.4
+ text: "Secrets Management"
+ checks:
+ - id: 5.4.1
+ text: "Prefer using secrets as files over secrets as environment variables"
+ type: "manual"
+ remediation: |
+ If possible, rewrite application code to read secrets from mounted secret files, rather than
+ from environment variables.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.4.2
+ text: "Consider external secret storage"
+ type: "manual"
+ remediation: |
+ Refer to the secrets management options offered by your cloud provider or a third-party
+ secrets management solution.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.5
+ text: "Extensible Admission Control"
+ checks:
+ - id: 5.5.1
+ text: "Configure Image Provenance using ImagePolicyWebhook admission controller"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and set up image provenance.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.7
+ text: "General Policies"
+ checks:
+ - id: 5.7.1
+ text: "Create administrative boundaries between resources using namespaces"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create namespaces for objects in your deployment as you need
+ them.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.7.2
+ text: "Ensure that the seccomp profile is set to docker/default in your pod definitions"
+ type: "manual"
+ remediation: |
+ Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
+ would need to enable alpha features in the apiserver by passing "--feature-
+ gates=AllAlpha=true" argument.
+ Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS
+ parameter to "--feature-gates=AllAlpha=true"
+ KUBE_API_ARGS="--feature-gates=AllAlpha=true"
+ Based on your system, restart the kube-apiserver service. For example:
+ systemctl restart kube-apiserver.service
+ Use annotations to enable the docker/default seccomp profile in your pod definitions. An
+ example is as below:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: trustworthy-pod
+ annotations:
+ seccomp.security.alpha.kubernetes.io/pod: docker/default
+ spec:
+ containers:
+ - name: trustworthy-container
+ image: sotrustworthy:latest
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.7.3
+ text: "Apply Security Context to Your Pods and Containers"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and apply security contexts to your pods.
For a
+ suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+ Containers.
+ Exception
+ This is a site-specific setting.
+ scored: false
+
+ - id: 5.7.4
+ text: "The default namespace should not be used"
+ type: "manual"
+ remediation: |
+ Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+ resources and that all new resources are created in a specific namespace.
+ Exception
+ This is a site-specific setting.
+ scored: false
diff --git a/cmd/util.go b/cmd/util.go
index a5765bc..5902159 100644
--- a/cmd/util.go
+++ b/cmd/util.go
@@ -447,7 +447,7 @@ func getPlatformInfo() Platform {
 }

 func getPlatformInfoFromVersion(s string) Platform {
- versionRe := regexp.MustCompile(`v(\d+\.\d+)\.\d+-(\w+)(?:[.\-])\w+`)
+ versionRe := regexp.MustCompile(`v(\d+\.\d+)\.\d+[-+](\w+)(?:[.\-])\w+`)
 subs := versionRe.FindStringSubmatch(s)
 if len(subs) < 3 {
 return Platform{}
@@ -479,6 +479,8 @@ func getPlatformBenchmarkVersion(platform Platform) string {
 case "4.1":
 return "rh-1.0"
 }
+ case "vmware":
+ return "tkgi-1.2.53"
 }
 return ""
 }
diff --git a/docs/platforms.md b/docs/platforms.md
index 904d787..b02223a 100644
--- a/docs/platforms.md
+++ b/docs/platforms.md
@@ -27,3 +27,4 @@ Some defined by other hardenening guides.
 | CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- |
 | CIS | [1.6.0-k3s](https://docs.rancher.cn/docs/k3s/security/self-assessment/_index) | cis-1.6-k3s | k3s v1.16-v1.24 |
 | DISA | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS |
+| CIS | [TKGI 1.2.53](https://network.pivotal.io/products/p-compliance-scanner#/releases/1248397) | tkgi-1.2.53 | vmware |
diff --git a/docs/running.md b/docs/running.md
index 67c5e5b..fb7fc68 100644
--- a/docs/running.md
+++ b/docs/running.md
@@ -177,3 +177,18 @@ To run the benchmark as a job in your ACK cluster apply the included `job-ack.ya
 ```
 kubectl apply -f job-ack.yaml
 ```
+
+### Running in a VMware TKGI cluster
+
+| CIS Benchmark | Targets |
+|---------------|--------------------------------------------|
+| tkgi-1.2.53 | master, etcd, controlplane, node, policies |
+
+kube-bench includes benchmarks for the VMware TKGI platform.
+To run this you will need to specify `--benchmark tkgi-1.2.53` when you run the `kube-bench` command.
+
+To run the benchmark as a job in your VMware TKGI cluster, apply the included `job-tkgi.yaml`.
+ +``` +kubectl apply -f job-tkgi.yaml +``` \ No newline at end of file diff --git a/job-tkgi.yaml b/job-tkgi.yaml new file mode 100644 index 0000000..3ac760c --- /dev/null +++ b/job-tkgi.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + spec: + hostPID: true + containers: + - name: kube-bench + image: docker.io/aquasec/kube-bench:latest + command: + [ + "kube-bench", + "run", + "--targets", + "node,policies", + "--benchmark", + "tkgi-1.2.53", + ] + volumeMounts: + - name: var-vcap-jobs + mountPath: /var/vcap/jobs + readOnly: true + - name: var-vcap-packages + mountPath: /var/vcap/packages + readOnly: true + - name: var-vcap-store-etcd + mountPath: /var/vcap/store/etcd + readOnly: true + - name: var-vcap-sys + mountPath: /var/vcap/sys + readOnly: true + - name: etc-kubernetes + mountPath: /etc/kubernetes + readOnly: true + restartPolicy: Never + volumes: + - name: var-vcap-jobs + hostPath: + path: "/var/vcap/jobs" + - name: var-vcap-packages + hostPath: + path: "/var/vcap/packages" + - name: var-vcap-store-etcd + hostPath: + path: "/var/vcap/store/etcd" + - name: var-vcap-sys + hostPath: + path: "/var/vcap/sys" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes"
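For reference, the new benchmark can also be invoked directly from a shell rather than as a Job. The snippet below is an illustrative sketch only (it assumes a kube-bench binary is already present on the VM and that the TKGI host paths mounted by `job-tkgi.yaml` are accessible); it reuses the same `run --targets ... --benchmark tkgi-1.2.53` form that `job-tkgi.yaml` uses.

```
# Illustrative only: run the TKGI checks directly, choosing any subset of the
# targets mapped to tkgi-1.2.53 (master, etcd, controlplane, node, policies).
./kube-bench run --targets node,policies --benchmark tkgi-1.2.53
```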