From 07e01cf38c54583cb21f281b97a67b0b05c8dc76 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Thu, 15 Sep 2022 14:04:54 +0800 Subject: [PATCH] Support CIS Amazon Elastic Kubernetes Service (EKS) Benchmark v1.1.0 (#1222) * Support CIS Amazon Elastic Kubernetes Service (EKS) Benchmark v1.1.0 * fix yaml lint error --- cfg/config.yaml | 7 + cfg/eks-1.1.0/config.yaml | 9 + cfg/eks-1.1.0/controlplane.yaml | 14 ++ cfg/eks-1.1.0/managedservices.yaml | 154 ++++++++++++++ cfg/eks-1.1.0/master.yaml | 6 + cfg/eks-1.1.0/node.yaml | 330 +++++++++++++++++++++++++++++ cfg/eks-1.1.0/policies.yaml | 205 ++++++++++++++++++ cmd/common_test.go | 6 + cmd/util.go | 2 +- cmd/util_test.go | 2 +- docs/architecture.md | 1 + docs/platforms.md | 1 + job-eks-asff.yaml | 4 +- job-eks.yaml | 2 +- 14 files changed, 738 insertions(+), 5 deletions(-) create mode 100644 cfg/eks-1.1.0/config.yaml create mode 100644 cfg/eks-1.1.0/controlplane.yaml create mode 100644 cfg/eks-1.1.0/managedservices.yaml create mode 100644 cfg/eks-1.1.0/master.yaml create mode 100644 cfg/eks-1.1.0/node.yaml create mode 100644 cfg/eks-1.1.0/policies.yaml diff --git a/cfg/config.yaml b/cfg/config.yaml index bb0e5f3..6d880ef 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -250,6 +250,7 @@ version_mapping: "1.22": "cis-1.23" "1.23": "cis-1.23" "eks-1.0.1": "eks-1.0.1" + "eks-1.1.0": "eks-1.1.0" "gke-1.0": "gke-1.0" "gke-1.2.0": "gke-1.2.0" "ocp-3.10": "rh-0.7" @@ -302,6 +303,12 @@ target_mapping: - "controlplane" - "policies" - "managedservices" + "eks-1.1.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" "rh-0.7": - "master" - "node" diff --git a/cfg/eks-1.1.0/config.yaml b/cfg/eks-1.1.0/config.yaml new file mode 100644 index 0000000..17301a7 --- /dev/null +++ b/cfg/eks-1.1.0/config.yaml @@ -0,0 +1,9 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml +## These settings are required if you are using the --asff option to report findings to AWS Security Hub +## AWS account number is required. +AWS_ACCOUNT: "" +## AWS region is required. +AWS_REGION: "" +## EKS Cluster ARN is required. +CLUSTER_ARN: "" diff --git a/cfg/eks-1.1.0/controlplane.yaml b/cfg/eks-1.1.0/controlplane.yaml new file mode 100644 index 0000000..b432a66 --- /dev/null +++ b/cfg/eks-1.1.0/controlplane.yaml @@ -0,0 +1,14 @@ +--- +controls: +version: "eks-1.1.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Logging" + checks: + - id: 2.1.1 + text: "Enable audit logs (Manual)" + remediation: "Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler." 
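+        # This check is not auditable from inside a managed cluster, so it stays manual. One
+        # possible remediation sketch (an assumption, not benchmark text: requires the AWS CLI;
+        # $CLUSTER_NAME and $AWS_REGION are placeholders, not values kube-bench substitutes):
+        #   aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME \
+        #     --logging '{"clusterLogging":[{"types":["api","audit","authenticator","controllerManager","scheduler"],"enabled":true}]}'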
+        scored: false
diff --git a/cfg/eks-1.1.0/managedservices.yaml b/cfg/eks-1.1.0/managedservices.yaml
new file mode 100644
index 0000000..50575d8
--- /dev/null
+++ b/cfg/eks-1.1.0/managedservices.yaml
@@ -0,0 +1,154 @@
+---
+controls:
+version: "eks-1.1.0"
+id: 5
+text: "Managed Services"
+type: "managedservices"
+groups:
+  - id: 5.1
+    text: "Image Registry and Image Scanning"
+    checks:
+      - id: 5.1.1
+        text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third-party provider (Manual)"
+        type: "manual"
+        remediation: |
+          To utilize AWS ECR for image scanning, please follow the steps below:
+
+          To create a repository configured for scan on push (AWS CLI):
+          aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE
+
+          To edit the settings of an existing repository (AWS CLI):
+          aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE
+
+          Use the following steps to start a manual image scan using the AWS Management Console.
+          Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories.
+          From the navigation bar, choose the Region to create your repository in.
+          In the navigation pane, choose Repositories.
+          On the Repositories page, choose the repository that contains the image to scan.
+          On the Images page, select the image to scan and then choose Scan.
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize user access to Amazon ECR (Manual)"
+        type: "manual"
+        remediation: |
+          Before you use IAM to manage access to Amazon ECR, you should understand what IAM features
+          are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other
+          AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize cluster access to read-only for Amazon ECR (Manual)"
+        type: "manual"
+        remediation: |
+          You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.
+
+          The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess
+          the following IAM policy permissions for Amazon ECR.
+
+          {
+            "Version": "2012-10-17",
+            "Statement": [
+              {
+                "Effect": "Allow",
+                "Action": [
+                  "ecr:BatchCheckLayerAvailability",
+                  "ecr:BatchGetImage",
+                  "ecr:GetDownloadUrlForLayer",
+                  "ecr:GetAuthorizationToken"
+                ],
+                "Resource": "*"
+              }
+            ]
+          }
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize Container Registries to only those approved (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+  - id: 5.2
+    text: "Identity and Access Management (IAM)"
+    checks:
+      - id: 5.2.1
+        text: "Prefer using dedicated Amazon EKS Service Accounts (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+  - id: 5.3
+    text: "AWS Key Management Service (KMS)"
+    checks:
+      - id: 5.3.1
+        text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)"
+        type: "manual"
+        remediation: |
+          This process can only be performed during Cluster Creation.
+
+          Enable 'Secrets Encryption' during Amazon EKS cluster creation as described
+          in the links within the 'References' section.
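+        # A creation-time sketch (an assumption, not benchmark text; the KMS key ARN and the
+        # other required create-cluster arguments are elided placeholders):
+        #   aws eks create-cluster --name $CLUSTER_NAME ... \
+        #     --encryption-config '[{"resources":["secrets"],"provider":{"keyArn":"<kms-key-arn>"}}]'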
+        scored: false
+
+  - id: 5.4
+    text: "Cluster Networking"
+    checks:
+      - id: 5.4.1
+        text: "Restrict Access to the Control Plane Endpoint (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.2
+        text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.3
+        text: "Ensure clusters are created with Private Nodes (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.4
+        text: "Ensure Network Policy is Enabled and set as appropriate (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.5
+        text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+
+  - id: 5.5
+    text: "Authentication and Authorization"
+    checks:
+      - id: 5.5.1
+        text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to 'Managing users or IAM roles for your cluster' in the Amazon EKS documentation.
+        scored: false
+
+
+  - id: 5.6
+    text: "Other Cluster Configurations"
+    checks:
+      - id: 5.6.1
+        text: "Consider Fargate for running untrusted workloads (Manual)"
+        type: "manual"
+        remediation: |
+          Create a Fargate profile for your cluster. Before you can schedule pods running on Fargate
+          in your cluster, you must define a Fargate profile that specifies which pods should use
+          Fargate when they are launched. For more information, see AWS Fargate profile.
+
+          Note: If you created your cluster with eksctl using the --fargate option, then a Fargate profile has
+          already been created for your cluster with selectors for all pods in the kube-system
+          and default namespaces. Use the following procedure to create Fargate profiles for
+          any other namespaces you would like to use with Fargate.
+        scored: false
diff --git a/cfg/eks-1.1.0/master.yaml b/cfg/eks-1.1.0/master.yaml
new file mode 100644
index 0000000..51de360
--- /dev/null
+++ b/cfg/eks-1.1.0/master.yaml
@@ -0,0 +1,6 @@
+---
+controls:
+version: "eks-1.1.0"
+id: 1
+text: "Control Plane Components"
+type: "master"
diff --git a/cfg/eks-1.1.0/node.yaml b/cfg/eks-1.1.0/node.yaml
new file mode 100644
index 0000000..003be58
--- /dev/null
+++ b/cfg/eks-1.1.0/node.yaml
@@ -0,0 +1,330 @@
+---
+controls:
+version: "eks-1.1.0"
+id: 3
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 3.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 3.1.1
+        text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $kubeletkubeconfig
+        scored: false
+
+      - id: 3.1.2
+        text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: false
+
+      - id: 3.1.3
+        text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 644 $kubeletconf
+        scored: false
+
+      - id: 3.1.4
+        text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root $kubeletconf
+        scored: false
+
+  - id: 3.2
+    text: "Kubelet"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              set: true
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+          false.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --anonymous-auth=false
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              set: true
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If
+          using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --authorization-mode=Webhook
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+              set: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
+          the location of the client CA file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --client-ca-file=<path/to/client-ca-file>
+          Based on your system, restart the kubelet service.
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: true + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + set: true + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set protectKernelDefaults: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated) " + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: true + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. 
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin "
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.9
+        text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: true
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set eventRecordQPS to an appropriate level.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.10
+        text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: true
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to add the line rotateCertificates: true or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.11
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service.
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + - id: 3.3 + text: "Container Optimized OS" + checks: + - id: 3.3.1 + text: "Prefer using Container-Optimized OS when possible (Manual)" + remediation: "No remediation" + scored: false diff --git a/cfg/eks-1.1.0/policies.yaml b/cfg/eks-1.1.0/policies.yaml new file mode 100644 index 0000000..e4d13a1 --- /dev/null +++ b/cfg/eks-1.1.0/policies.yaml @@ -0,0 +1,205 @@ +--- +controls: +version: "eks-1.1.0" +id: 4 +text: "Policies" +type: "policies" +groups: + - id: 4.1 + text: "RBAC and Service Accounts" + checks: + - id: 4.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 4.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 4.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.2 + text: "Pod Security Policies" + checks: + - id: 4.2.1 + text: "Minimize the admission of privileged containers (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that + the .spec.privileged field is omitted or set to false. + scored: false + + - id: 4.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostPID field is omitted or set to false. + scored: false + + - id: 4.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostIPC field is omitted or set to false. 
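+        # The 4.2.x checks are assessed manually on EKS. A minimal PodSecurityPolicy sketch that
+        # satisfies 4.2.1-4.2.4 on the policy/v1beta1 API these benchmark versions target
+        # (illustrative only, not shipped with kube-bench; the name is a placeholder):
+        #   apiVersion: policy/v1beta1
+        #   kind: PodSecurityPolicy
+        #   metadata:
+        #     name: restricted
+        #   spec:
+        #     privileged: false
+        #     hostPID: false
+        #     hostIPC: false
+        #     hostNetwork: false
+        #     runAsUser:
+        #       rule: MustRunAsNonRoot
+        #     seLinux:
+        #       rule: RunAsAny
+        #     supplementalGroups:
+        #       rule: RunAsAny
+        #     fsGroup:
+        #       rule: RunAsAny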
+        scored: false
+
+      - id: 4.2.4
+        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostNetwork field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.5
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.allowPrivilegeEscalation field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.6
+        text: "Minimize the admission of root containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of
+          UIDs not including 0.
+        scored: false
+
+      - id: 4.2.7
+        text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
+        scored: false
+
+      - id: 4.2.8
+        text: "Minimize the admission of containers with added capabilities (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that allowedCapabilities is not present in PSPs for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 4.2.9
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+  - id: 4.3
+    text: "CNI Plugin"
+    checks:
+      - id: 4.3.1
+        text: "Ensure that the latest CNI version is used (Manual)"
+        type: "manual"
+        remediation: |
+          Review the documentation of the AWS CNI plugin, and ensure the latest CNI version is used.
+        scored: false
+
+      - id: 4.3.2
+        text: "Ensure that all Namespaces have Network Policies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 4.4
+    text: "Secrets Management"
+    checks:
+      - id: 4.4.1
+        text: "Prefer using secrets as files over secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 4.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 4.5
+    text: "Extensible Admission Control"
+    checks: []
+
+  - id: 4.6
+    text: "General Policies"
+    checks:
+      - id: 4.6.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 4.6.2
+        text: "Apply Security Context to Your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply security contexts to your pods. For a
+          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 4.6.3
+        text: "The default namespace should not be used (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+        scored: false
diff --git a/cmd/common_test.go b/cmd/common_test.go
index 5f22897..f24b80d 100644
--- a/cmd/common_test.go
+++ b/cmd/common_test.go
@@ -442,6 +442,12 @@ func TestValidTargets(t *testing.T) {
 			targets: []string{"node", "policies", "controlplane", "managedservices"},
 			expected: true,
 		},
+		{
+			name: "eks-1.1.0 valid",
+			benchmark: "eks-1.1.0",
+			targets: []string{"node", "policies", "controlplane", "managedservices"},
+			expected: true,
+		},
 	}
 
 	for _, c := range cases {
diff --git a/cmd/util.go b/cmd/util.go
index 12546f5..2ab2cca 100644
--- a/cmd/util.go
+++ b/cmd/util.go
@@ -461,7 +461,7 @@ func getPlatformBenchmarkVersion(platform Platform) string {
 	glog.V(3).Infof("getPlatformBenchmarkVersion platform: %s", platform)
 	switch platform.Name {
 	case "eks":
-		return "eks-1.0.1"
+		return "eks-1.1.0"
 	case "gke":
 		switch platform.Version {
 		case "1.15", "1.16", "1.17", "1.18", "1.19":
diff --git a/cmd/util_test.go b/cmd/util_test.go
index 79a505a..7ced42a 100644
--- a/cmd/util_test.go
+++ b/cmd/util_test.go
@@ -582,7 +582,7 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
 			args: args{
 				platform: Platform{Name: "eks"},
 			},
-			want: "eks-1.0.1",
+			want: "eks-1.1.0",
 		},
 		{
 			name: "gke 1.19",
diff --git a/docs/architecture.md b/docs/architecture.md
index b423a71..705ee2f 100644
--- a/docs/architecture.md
+++ b/docs/architecture.md
@@ -22,6 +22,7 @@ The following table shows the valid targets based on the CIS Benchmark version.
 | gke-1.0 | master, controlplane, node, etcd, policies, managedservices |
 | gke-1.2.0 | controlplane, node, policies, managedservices |
 | eks-1.0.1 | controlplane, node, policies, managedservices |
+| eks-1.1.0 | controlplane, node, policies, managedservices |
 | ack-1.0 | master, controlplane, node, etcd, policies, managedservices |
 | aks-1.0 | controlplane, node, policies, managedservices |
 | rh-0.7 | master,node|
diff --git a/docs/platforms.md b/docs/platforms.md
index 8d4e3d7..a6527d9 100644
--- a/docs/platforms.md
+++ b/docs/platforms.md
@@ -17,6 +17,7 @@ Some defined by other hardening guides.
| CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | | CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | | CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | +| CIS | [EKS 1.1.0](https://workbench.cisecurity.org/benchmarks/6248) | eks-1.1.0 | EKS | | CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK | | CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS | | RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | diff --git a/job-eks-asff.yaml b/job-eks-asff.yaml index 5e079cc..64d91c5 100644 --- a/job-eks-asff.yaml +++ b/job-eks-asff.yaml @@ -40,7 +40,7 @@ spec: "--targets", "node", "--benchmark", - "eks-1.0.1", + "eks-1.1.0", "--asff", ] env: @@ -59,7 +59,7 @@ spec: mountPath: /etc/kubernetes readOnly: true - name: kube-bench-eks-config - mountPath: "/opt/kube-bench/cfg/eks-1.0.1/config.yaml" + mountPath: "/opt/kube-bench/cfg/eks-1.1.0/config.yaml" subPath: config.yaml readOnly: true restartPolicy: Never diff --git a/job-eks.yaml b/job-eks.yaml index 5a946fa..d960183 100644 --- a/job-eks.yaml +++ b/job-eks.yaml @@ -20,7 +20,7 @@ spec: "--targets", "node", "--benchmark", - "eks-1.0.1", + "eks-1.1.0", ] volumeMounts: - name: var-lib-kubelet
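
A quick way to exercise the new profile once the patch is applied (a usage sketch; these are the
same flags the job manifests above pass, and EKS auto-detection now also resolves to eks-1.1.0
via getPlatformBenchmarkVersion):

  kube-bench run --targets node --benchmark eks-1.1.0

or, in-cluster:

  kubectl apply -f job-eks.yaml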