mirror of https://github.com/aquasecurity/kube-bench.git
synced 2024-11-22 08:08:07 +00:00
support CIS Amazon Elastic Kubernetes Service (EKS) Benchmark v1.2.0 (#1449)
closes #1448
This commit is contained in:
parent
124c57c6f4
commit
60dde65d72
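
With this profile in place, the new benchmark can be requested explicitly at run time. A usage sketch (the flags are the same ones the job manifests further down pass to the binary):

    kube-bench run --targets node,policies,controlplane,managedservices --benchmark eks-1.2.0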
@@ -261,6 +261,7 @@ version_mapping:
   "1.25": "cis-1.7"
   "eks-1.0.1": "eks-1.0.1"
   "eks-1.1.0": "eks-1.1.0"
+  "eks-1.2.0": "eks-1.2.0"
   "gke-1.0": "gke-1.0"
   "gke-1.2.0": "gke-1.2.0"
   "ocp-3.10": "rh-0.7"
@@ -338,6 +339,12 @@ target_mapping:
     - "controlplane"
     - "policies"
     - "managedservices"
+  "eks-1.2.0":
+    - "master"
+    - "node"
+    - "controlplane"
+    - "policies"
+    - "managedservices"
   "rh-0.7":
     - "master"
     - "node"

cfg/eks-1.2.0/config.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
+## These settings are required if you are using the --asff option to report findings to AWS Security Hub
+## AWS account number is required.
+AWS_ACCOUNT: "<AWS_ACCT_NUMBER>"
+## AWS region is required.
+AWS_REGION: "<AWS_REGION>"
+## EKS Cluster ARN is required.
+CLUSTER_ARN: "<AWS_CLUSTER_ARN>"
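
The three placeholders above are deliberately left unfilled and must be replaced with real values before ASFF reporting will work. With them set, a node scan that ships findings to AWS Security Hub would look like this (a sketch; --asff is the option referenced in the comments above):

    kube-bench run --targets node --benchmark eks-1.2.0 --asff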
cfg/eks-1.2.0/controlplane.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
+---
+controls:
+version: "eks-1.2.0"
+id: 2
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+  - id: 2.1
+    text: "Logging"
+    checks:
+      - id: 2.1.1
+        text: "Enable audit logs (Manual)"
+        remediation: "Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler."
+        scored: false
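
The 2.1.1 remediation can also be applied from the AWS CLI. A sketch, where the region and cluster name are placeholders rather than anything defined by this commit:

    aws eks update-cluster-config --region <region> --name <cluster-name> \
      --logging '{"clusterLogging":[{"types":["api","audit","authenticator","controllerManager","scheduler"],"enabled":true}]}'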
cfg/eks-1.2.0/managedservices.yaml (new file, 154 lines)
@@ -0,0 +1,154 @@
+---
+controls:
+version: "eks-1.2.0"
+id: 5
+text: "Managed Services"
+type: "managedservices"
+groups:
+  - id: 5.1
+    text: "Image Registry and Image Scanning"
+    checks:
+      - id: 5.1.1
+        text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third-party provider (Manual)"
+        type: "manual"
+        remediation: |
+          To utilize AWS ECR for Image scanning, please follow the steps below:
+
+          To create a repository configured for scan on push (AWS CLI):
+          aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE
+
+          To edit the settings of an existing repository (AWS CLI):
+          aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE
+
+          Use the following steps to start a manual image scan using the AWS Management Console.
+          Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories.
+          From the navigation bar, choose the Region to create your repository in.
+          In the navigation pane, choose Repositories.
+          On the Repositories page, choose the repository that contains the image to scan.
+          On the Images page, select the image to scan and then choose Scan.
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize user access to Amazon ECR (Manual)"
+        type: "manual"
+        remediation: |
+          Before you use IAM to manage access to Amazon ECR, you should understand what IAM features
+          are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other
+          AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize cluster access to read-only for Amazon ECR (Manual)"
+        type: "manual"
+        remediation: |
+          You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.
+
+          The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess
+          the following IAM policy permissions for Amazon ECR.
+
+          {
+            "Version": "2012-10-17",
+            "Statement": [
+              {
+                "Effect": "Allow",
+                "Action": [
+                  "ecr:BatchCheckLayerAvailability",
+                  "ecr:BatchGetImage",
+                  "ecr:GetDownloadUrlForLayer",
+                  "ecr:GetAuthorizationToken"
+                ],
+                "Resource": "*"
+              }
+            ]
+          }
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize Container Registries to only those approved (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+  - id: 5.2
+    text: "Identity and Access Management (IAM)"
+    checks:
+      - id: 5.2.1
+        text: "Prefer using dedicated Amazon EKS Service Accounts (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+  - id: 5.3
+    text: "AWS Key Management Service (KMS)"
+    checks:
+      - id: 5.3.1
+        text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)"
+        type: "manual"
+        remediation: |
+          This process can only be performed during Cluster Creation.
+
+          Enable 'Secrets Encryption' during Amazon EKS cluster creation as described
+          in the links within the 'References' section.
+        scored: false
+
+  - id: 5.4
+    text: "Cluster Networking"
+    checks:
+      - id: 5.4.1
+        text: "Restrict Access to the Control Plane Endpoint (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.2
+        text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.3
+        text: "Ensure clusters are created with Private Nodes (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.4
+        text: "Ensure Network Policy is Enabled and set as appropriate (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.5
+        text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+  - id: 5.5
+    text: "Authentication and Authorization"
+    checks:
+      - id: 5.5.1
+        text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the 'Managing users or IAM roles for your cluster' in Amazon EKS documentation.
+        scored: false
+
+  - id: 5.6
+    text: "Other Cluster Configurations"
+    checks:
+      - id: 5.6.1
+        text: "Consider Fargate for running untrusted workloads (Manual)"
+        type: "manual"
+        remediation: |
+          Create a Fargate profile for your cluster. Before you can schedule pods running on Fargate
+          in your cluster, you must define a Fargate profile that specifies which pods should use
+          Fargate when they are launched. For more information, see AWS Fargate profile.
+
+          Note: If you created your cluster with eksctl using the --fargate option, then a Fargate profile has
+          already been created for your cluster with selectors for all pods in the kube-system
+          and default namespaces. Use the following procedure to create Fargate profiles for
+          any other namespaces you would like to use with Fargate.
+        scored: false
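
Check 5.3.1 can only be satisfied at cluster creation time, so the fix belongs in the create call itself. A sketch of the corresponding AWS CLI invocation (every value in angle brackets is a placeholder, not part of this commit):

    aws eks create-cluster --name <cluster-name> \
      --role-arn <cluster-role-arn> \
      --resources-vpc-config subnetIds=<subnet-1>,<subnet-2> \
      --encryption-config '[{"resources":["secrets"],"provider":{"keyArn":"<kms-key-arn>"}}]'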
cfg/eks-1.2.0/master.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
+---
+controls:
+version: "eks-1.2.0"
+id: 1
+text: "Control Plane Components"
+type: "master"
cfg/eks-1.2.0/node.yaml (new file, 330 lines)
@@ -0,0 +1,330 @@
+---
+controls:
+version: "eks-1.2.0"
+id: 3
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 3.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 3.1.1
+        text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $kubeletkubeconfig
+        scored: false
+
+      - id: 3.1.2
+        text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: false
+
+      - id: 3.1.3
+        text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 644 $kubeletconf
+        scored: false
+
+      - id: 3.1.4
+        text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root $kubeletconf
+        scored: false
+
+  - id: 3.2
+    text: "Kubelet"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that the Anonymous Auth is Not Enabled (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              set: true
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+          false.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --anonymous-auth=false
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              set: true
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If
+          using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --authorization-mode=Webhook
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.3
+        text: "Ensure that a Client CA File is Configured (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+              set: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
+          the location of the client CA file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --client-ca-file=<path/to/client-ca-file>
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.4
+        text: "Ensure that the --read-only-port is disabled (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              set: true
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set readOnlyPort to 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --read-only-port=0
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: true
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
+          value other than 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --streaming-connection-idle-timeout=5m
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.6
+        text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --protect-kernel-defaults
+              path: '{.protectKernelDefaults}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set protectKernelDefaults: true.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --protect-kernel-defaults=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.7
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: true
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --make-iptables-util-chains argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.8
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin"
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.9
+        text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: true
+              compare:
+                op: gte
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.10
+        text: "Ensure that the --rotate-certificates argument is not present or is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: true
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to add the line rotateCertificates: true or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.11
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+  - id: 3.3
+    text: "Container Optimized OS"
+    checks:
+      - id: 3.3.1
+        text: "Prefer using a container-optimized OS when possible (Manual)"
+        remediation: "No remediation"
+        scored: false
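
Most of the 3.2.x checks fall back to the kubelet config file referenced by $kubeletconf when a flag is absent from the process line. A sketch of a KubeletConfiguration that would satisfy them (field names follow the upstream kubelet API; the CA path and the QPS level are placeholders/examples, not values this commit prescribes):

    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    authentication:
      anonymous:
        enabled: false                            # 3.2.1
      x509:
        clientCAFile: "<path/to/client-ca-file>"  # 3.2.3
    authorization:
      mode: Webhook                               # 3.2.2
    readOnlyPort: 0                               # 3.2.4
    streamingConnectionIdleTimeout: 5m            # 3.2.5 (any value other than 0)
    protectKernelDefaults: true                   # 3.2.6
    makeIPTablesUtilChains: true                  # 3.2.7
    eventRecordQPS: 5                             # 3.2.9 (an example level)
    rotateCertificates: true                      # 3.2.10
    featureGates:
      RotateKubeletServerCertificate: true        # 3.2.11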
cfg/eks-1.2.0/policies.yaml (new file, 213 lines)
@@ -0,0 +1,213 @@
+---
+controls:
+version: "eks-1.2.0"
+id: 4
+text: "Policies"
+type: "policies"
+groups:
+  - id: 4.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 4.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to secret objects in the cluster.
+        scored: false
+
+      - id: 4.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+        scored: false
+
+      - id: 4.1.4
+        text: "Minimize access to create pods (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that default service accounts are not actively used. (Manual)"
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+          automountServiceAccountToken: false
+        scored: false
+
+      - id: 4.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
+        type: "manual"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service
+          account tokens to disable it.
+        scored: false
+
+      - id: 4.1.7
+        text: "Avoid use of system:masters group (Manual)"
+        type: "manual"
+        remediation: |
+          Remove the system:masters group from all users in the cluster.
+        scored: false
+
+      - id: 4.1.8
+        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove the impersonate, bind and escalate rights from subjects.
+        scored: false
+
+  - id: 4.2
+    text: "Pod Security Policies"
+    checks:
+      - id: 4.2.1
+        text: "Minimize the admission of privileged containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that
+          the .spec.privileged field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.2
+        text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostPID field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.3
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostIPC field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.4
+        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostNetwork field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.5
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.allowPrivilegeEscalation field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.6
+        text: "Minimize the admission of root containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of
+          UIDs not including 0.
+        scored: false
+
+      - id: 4.2.7
+        text: "Minimize the admission of containers with added capabilities (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that allowedCapabilities is not present in PSPs for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 4.2.8
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+  - id: 4.3
+    text: "CNI Plugin"
+    checks:
+      - id: 4.3.1
+        text: "Ensure CNI plugin supports network policies (Manual)"
+        type: "manual"
+        remediation: |
+          As with RBAC policies, network policies should adhere to the policy of least privileged
+          access. Start by creating a deny all policy that restricts all inbound and outbound traffic
+          from a namespace or create a global policy using Calico.
+        scored: false
+
+      - id: 4.3.2
+        text: "Ensure that all Namespaces have Network Policies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 4.4
+    text: "Secrets Management"
+    checks:
+      - id: 4.4.1
+        text: "Prefer using secrets as files over secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 4.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 4.5
+    text: "Extensible Admission Control"
+    checks: []
+
+  - id: 4.6
+    text: "General Policies"
+    checks:
+      - id: 4.6.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 4.6.2
+        text: "Apply Security Context to Your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply security contexts to your pods. For a
+          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 4.6.3
+        text: "The default namespace should not be used (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+        scored: false
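
Several of the 4.1.x audits reduce to one-liners. For example, listing cluster-admin bindings for 4.1.1 and disabling token automount on a default service account for 4.1.5 (sketches; the namespace is a placeholder):

    kubectl get clusterrolebindings -o wide | grep cluster-admin
    kubectl patch serviceaccount default -n <namespace> -p '{"automountServiceAccountToken": false}'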
@@ -450,6 +450,12 @@ func TestValidTargets(t *testing.T) {
             targets:   []string{"node", "policies", "controlplane", "managedservices"},
             expected:  true,
         },
+        {
+            name:      "eks-1.2.0 valid",
+            benchmark: "eks-1.2.0",
+            targets:   []string{"node", "policies", "controlplane", "managedservices"},
+            expected:  true,
+        },
     }

     for _, c := range cases {
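The new case can be exercised with the standard Go tooling, e.g. (a sketch, run from the repository root):

    go test -run TestValidTargets ./...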
@@ -462,7 +462,7 @@ func getPlatformBenchmarkVersion(platform Platform) string {
     glog.V(3).Infof("getPlatformBenchmarkVersion platform: %s", platform)
     switch platform.Name {
     case "eks":
-        return "eks-1.1.0"
+        return "eks-1.2.0"
     case "gke":
         switch platform.Version {
         case "1.15", "1.16", "1.17", "1.18", "1.19":
@@ -636,7 +636,7 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
             args: args{
                 platform: Platform{Name: "eks"},
             },
-            want: "eks-1.1.0",
+            want: "eks-1.2.0",
         },
         {
             name: "gke 1.19",
@@ -25,6 +25,7 @@ The following table shows the valid targets based on the CIS Benchmark version.
 | gke-1.2.0 | controlplane, node, policies, managedservices |
 | eks-1.0.1 | controlplane, node, policies, managedservices |
 | eks-1.1.0 | controlplane, node, policies, managedservices |
+| eks-1.2.0 | controlplane, node, policies, managedservices |
 | ack-1.0 | master, controlplane, node, etcd, policies, managedservices |
 | aks-1.0 | controlplane, node, policies, managedservices |
 | rh-0.7 | master,node|
@@ -20,6 +20,7 @@ Some defined by other hardenening guides.
 | CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE |
 | CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS |
 | CIS | [EKS 1.1.0](https://workbench.cisecurity.org/benchmarks/6248) | eks-1.1.0 | EKS |
+| CIS | [EKS 1.2.0](https://workbench.cisecurity.org/benchmarks/9681) | eks-1.2.0 | EKS |
 | CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK |
 | CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS |
 | RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 |
@@ -40,7 +40,7 @@ spec:
             "--targets",
             "node",
             "--benchmark",
-            "eks-1.1.0",
+            "eks-1.2.0",
             "--asff",
           ]
           env:
@@ -59,7 +59,7 @@ spec:
               mountPath: /etc/kubernetes
               readOnly: true
             - name: kube-bench-eks-config
-              mountPath: "/opt/kube-bench/cfg/eks-1.1.0/config.yaml"
+              mountPath: "/opt/kube-bench/cfg/eks-1.2.0/config.yaml"
               subPath: config.yaml
               readOnly: true
       restartPolicy: Never
@@ -20,7 +20,7 @@ spec:
             "--targets",
             "node",
             "--benchmark",
-            "eks-1.1.0",
+            "eks-1.2.0",
           ]
           volumeMounts:
             - name: var-lib-kubelet
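
After these manifest updates the usual workflow still applies. A sketch, assuming the hunk above is the repository's job-eks.yaml and the job keeps its default name:

    kubectl apply -f job-eks.yaml
    kubectl logs job/kube-bench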