Mirror of https://github.com/aquasecurity/kube-bench.git (synced 2024-11-26 01:49:28 +00:00)
feat(cfg): add EKS 1.5.0

parent 5a3fd1d896
commit 7981c07e0f

cfg/eks-1.5.0/config.yaml (new file)
@@ -0,0 +1,9 @@
---
## Version-specific settings that override the values in cfg/config.yaml
## These settings are required if you are using the --asff option to report findings to AWS Security Hub
## AWS account number is required.
AWS_ACCOUNT: "<AWS_ACCT_NUMBER>"
## AWS region is required.
AWS_REGION: "<AWS_REGION>"
## EKS Cluster ARN is required.
CLUSTER_ARN: "<AWS_CLUSTER_ARN>"
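
These values are consumed when kube-bench reports findings to AWS Security Hub. As a minimal usage sketch (assuming the standard kube-bench CLI flags; the benchmark name mirrors this directory), once the placeholders are filled in you might run:

    kube-bench run --targets node --benchmark eks-1.5.0 --asff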

cfg/eks-1.5.0/controlplane.yaml (new file)
@@ -0,0 +1,32 @@
---
controls:
version: "eks-1.5.0"
id: 2
text: "Control Plane Configuration"
type: "controlplane"
groups:
  - id: 2.1
    text: "Logging"
    checks:
      - id: 2.1.1
        text: "Enable audit Logs (Automated)"
        remediation: |
          From Console:
          1. For each EKS Cluster in each region;
          2. Go to 'Amazon EKS' > 'Clusters' > '<cluster-name>' > 'Configuration' > 'Logging'.
          3. Click 'Manage logging'.
          4. Ensure that all options are toggled to 'Enabled'.
             API server: Enabled
             Audit: Enabled
             Authenticator: Enabled
             Controller manager: Enabled
             Scheduler: Enabled
          5. Click 'Save Changes'.

          From CLI:
          # For each EKS Cluster in each region;
          aws eks update-cluster-config \
            --region '${REGION_CODE}' \
            --name '${CLUSTER_NAME}' \
            --logging '{"clusterLogging":[{"types":["api","audit","authenticator","controllerManager","scheduler"],"enabled":true}]}'
        scored: false
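
To verify the change took effect, a read-back sketch using the same placeholders as the remediation above:

    aws eks describe-cluster --region '${REGION_CODE}' --name '${CLUSTER_NAME}' \
      --query 'cluster.logging.clusterLogging'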

cfg/eks-1.5.0/managedservices.yaml (new file)
@@ -0,0 +1,227 @@
---
controls:
version: "eks-1.5.0"
id: 5
text: "Managed Services"
type: "managedservices"
groups:
  - id: 5.1
    text: "Image Registry and Image Scanning"
    checks:
      - id: 5.1.1
        text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider (Automated)"
        type: "manual"
        remediation: |
          To utilize AWS ECR for image scanning, please follow the steps below:

          To create a repository configured for scan on push (AWS CLI):
          aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE

          To edit the settings of an existing repository (AWS CLI):
          aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE

          Use the following steps to start a manual image scan using the AWS Management Console.

          1. Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories.
          2. From the navigation bar, choose the Region to create your repository in.
          3. In the navigation pane, choose Repositories.
          4. On the Repositories page, choose the repository that contains the image to scan.
          5. On the Images page, select the image to scan and then choose Scan.
        scored: false
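
      # Once scan on push is active, findings can be pulled back for review. A sketch
      # (repository name, image tag, and region are placeholders):
      #   aws ecr describe-image-scan-findings --repository-name $REPO_NAME \
      #     --image-id imageTag=latest --region $REGION_CODE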

      - id: 5.1.2
        text: "Minimize user access to Amazon ECR (Manual)"
        type: "manual"
        remediation: |
          Before you use IAM to manage access to Amazon ECR, you should understand what IAM features
          are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other
          AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.
        scored: false

      - id: 5.1.3
        text: "Minimize cluster access to read-only for Amazon ECR (Manual)"
        type: "manual"
        remediation: |
          You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.

          The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess
          the following IAM policy permissions for Amazon ECR.

          {
            "Version": "2012-10-17",
            "Statement": [
              {
                "Effect": "Allow",
                "Action": [
                  "ecr:BatchCheckLayerAvailability",
                  "ecr:BatchGetImage",
                  "ecr:GetDownloadUrlForLayer",
                  "ecr:GetAuthorizationToken"
                ],
                "Resource": "*"
              }
            ]
          }
        scored: false
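
      # The permissions above are covered by AWS's managed read-only registry policy,
      # so one way to satisfy this prerequisite is to attach that policy to the node
      # role. A sketch (the role name is a placeholder):
      #   aws iam attach-role-policy --role-name $NODE_INSTANCE_ROLE \
      #     --policy-arn arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly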

      - id: 5.1.4
        text: "Minimize Container Registries to only those approved (Manual)"
        type: "manual"
        remediation: |
          To minimize AWS ECR container registries to only those approved, you can follow these steps:

          1. Define your approval criteria: Determine the criteria that containers must meet to
             be considered approved. This can include factors such as security, compliance,
             compatibility, and other requirements.
          2. Identify all existing ECR registries: Identify all ECR registries that are currently
             being used in your organization.
          3. Evaluate ECR registries against approval criteria: Evaluate each ECR registry
             against your approval criteria to determine whether it should be approved or not.
             This can be done by reviewing the registry settings and configuration, as well as
             conducting security assessments and vulnerability scans.
          4. Establish policies and procedures: Establish policies and procedures that outline
             how ECR registries will be approved, maintained, and monitored. This should
             include guidelines for developers to follow when selecting a registry for their
             container images.
          5. Implement access controls: Implement access controls to ensure that only
             approved ECR registries are used to store and distribute container images. This
             can be done by setting up IAM policies and roles that restrict access to
             unapproved registries or create a whitelist of approved registries.
          6. Monitor and review: Continuously monitor and review the use of ECR registries
             to ensure that they continue to meet your approval criteria. This can include
             periodic security assessments and vulnerability scans of the approved registries.
        scored: false

  - id: 5.2
    text: "Identity and Access Management (IAM)"
    checks:
      - id: 5.2.1
        text: "Prefer using dedicated Amazon EKS Service Accounts (Automated)"
        type: "manual"
        remediation: |
          With IAM roles for service accounts on Amazon EKS clusters, you can associate an
          IAM role with a Kubernetes service account. This service account can then provide
          AWS permissions to the containers in any pod that uses that service account. With this
          feature, you no longer need to provide extended permissions to the worker node IAM
          role so that pods on that node can call AWS APIs.
          Applications must sign their AWS API requests with AWS credentials. This feature
          provides a strategy for managing credentials for your applications, similar to the way
          that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances.
          Instead of creating and distributing your AWS credentials to the containers or using the
          Amazon EC2 instance's role, you can associate an IAM role with a Kubernetes service
          account. The applications in the pod's containers can then use an AWS SDK or the
          AWS CLI to make API requests to authorized AWS services.

          The IAM roles for service accounts feature provides the following benefits:

          - Least privilege - By using the IAM roles for service accounts feature, you no
            longer need to provide extended permissions to the worker node IAM role so that
            pods on that node can call AWS APIs. You can scope IAM permissions to a
            service account, and only pods that use that service account have access to
            those permissions. This feature also eliminates the need for third-party solutions
            such as kiam or kube2iam.
          - Credential isolation - A container can only retrieve credentials for the IAM role
            that is associated with the service account to which it belongs. A container never
            has access to credentials that are intended for another container that belongs to
            another pod.
          - Auditability - Access and event logging is available through CloudTrail to help
            ensure retrospective auditing.
        scored: false
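
      # A common way to wire this up is eksctl, which creates the IAM role and the
      # annotated service account together. A sketch under assumed names (cluster,
      # namespace, service account, and policy ARN are placeholders):
      #   eksctl create iamserviceaccount --cluster my-cluster --namespace default \
      #     --name my-app-sa \
      #     --attach-policy-arn arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess --approve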

  - id: 5.3
    text: "AWS EKS Key Management Service"
    checks:
      - id: 5.3.1
        text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)"
        type: "manual"
        remediation: |
          This process can only be performed during Cluster Creation.

          Enable 'Secrets Encryption' during Amazon EKS cluster creation as described
          in the links within the 'References' section.
        scored: false
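
      # For reference, envelope encryption of Secrets is requested at creation time with
      # an encryption-config block. A sketch (cluster name, role ARN, subnet IDs, and
      # KMS key ARN are placeholders):
      #   aws eks create-cluster --name my-cluster \
      #     --role-arn arn:aws:iam::111122223333:role/eks-cluster-role \
      #     --resources-vpc-config subnetIds=subnet-aaaa,subnet-bbbb \
      #     --encryption-config '[{"resources":["secrets"],"provider":{"keyArn":"arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"}}]'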

  - id: 5.4
    text: "Cluster Networking"
    checks:
      - id: 5.4.1
        text: "Restrict Access to the Control Plane Endpoint (Automated)"
        type: "manual"
        remediation: |
          By enabling private endpoint access to the Kubernetes API server, all communication
          between your nodes and the API server stays within your VPC. You can also limit the IP
          addresses that can access your API server from the internet, or completely disable
          internet access to the API server.
          With this in mind, you can update your cluster accordingly using the AWS CLI to ensure
          that Private Endpoint Access is enabled.
          If you choose to also enable Public Endpoint Access then you should also configure a
          list of allowable CIDR blocks, resulting in restricted access from the internet. If you
          specify no CIDR blocks, then the public API server endpoint is able to receive and
          process requests from all IP addresses by defaulting to ['0.0.0.0/0'].
          For example, the following command would enable private access to the Kubernetes
          API as well as limited public access over the internet from a single IP address (noting
          the /32 CIDR suffix):
          aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs="203.0.113.5/32"

          Note: The CIDR blocks specified cannot include reserved addresses.
          There is a maximum number of CIDR blocks that you can specify. For more information,
          see the EKS Service Quotas link in the references section.
          For more detailed information, see the EKS Cluster Endpoint documentation link in the
          references section.
        scored: false
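
      # To audit the current endpoint configuration, a read-back sketch (same placeholders):
      #   aws eks describe-cluster --region $AWS_REGION --name $CLUSTER_NAME \
      #     --query 'cluster.resourcesVpcConfig.{private:endpointPrivateAccess,public:endpointPublicAccess,cidrs:publicAccessCidrs}'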

      - id: 5.4.2
        text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Automated)"
        type: "manual"
        remediation: |
          By enabling private endpoint access to the Kubernetes API server, all communication
          between your nodes and the API server stays within your VPC.
          With this in mind, you can update your cluster accordingly using the AWS CLI to ensure
          that Private Endpoint Access is enabled.
          For example, the following command would enable private access to the Kubernetes
          API and ensure that no public access is permitted:
          aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false

          Note: For more detailed information, see the EKS Cluster Endpoint documentation link
          in the references section.
        scored: false

      - id: 5.4.3
        text: "Ensure clusters are created with Private Nodes (Automated)"
        type: "manual"
        remediation: |
          aws eks update-cluster-config \
            --region region-code \
            --name my-cluster \
            --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs="203.0.113.5/32",endpointPrivateAccess=true
        scored: false

      - id: 5.4.4
        text: "Ensure Network Policy is Enabled and set as appropriate (Automated)"
        type: "manual"
        remediation: |
          Utilize Calico or another network policy engine to segment and isolate your traffic.
        scored: false

      - id: 5.4.5
        text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)"
        type: "manual"
        remediation: |
          Your load balancer vendor can provide details on configuring HTTPS with TLS.
        scored: false

  - id: 5.5
    text: "Authentication and Authorization"
    checks:
      - id: 5.5.1
        text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156 or greater (Manual)"
        type: "manual"
        remediation: |
          Refer to 'Managing users or IAM roles for your cluster' in the Amazon EKS documentation.

          Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS
          IAM Authenticator anymore.
          The relevant AWS CLI commands, depending on the use case, are:
          aws eks update-kubeconfig
          aws eks get-token
        scored: false
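
Filled out with typical arguments, those two commands look like this sketch (cluster name and region are placeholders):

    aws eks update-kubeconfig --region $AWS_REGION --name $CLUSTER_NAME
    aws eks get-token --cluster-name $CLUSTER_NAME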

cfg/eks-1.5.0/master.yaml (new file)
@@ -0,0 +1,6 @@
---
controls:
version: "eks-1.5.0"
id: 1
text: "Control Plane Components"
type: "master"

cfg/eks-1.5.0/node.yaml (new file)
@@ -0,0 +1,453 @@
---
controls:
version: "eks-1.5.0"
id: 3
text: "Worker Node Security Configuration"
type: "node"
groups:
  - id: 3.1
    text: "Worker Node Configuration Files"
    checks:
      - id: 3.1.1
        text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chmod 644 $kubeletkubeconfig
        scored: false

      - id: 3.1.2
        text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chown root:root $kubeletkubeconfig
        scored: false

      - id: 3.1.3
        text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chmod 644 $kubeletconf
        scored: false

      - id: 3.1.4
        text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chown root:root $kubeletconf
        scored: false
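
      # The four checks above can be audited in one pass on a node. A sketch, assuming
      # the usual EKS AMI values for $kubeletkubeconfig and $kubeletconf:
      #   for f in /var/lib/kubelet/kubeconfig /etc/kubernetes/kubelet/kubelet-config.json; do
      #     stat -c '%n %a %U:%G' "$f"
      #   done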

  - id: 3.2
    text: "Kubelet"
    checks:
      - id: 3.2.1
        text: "Ensure that the Anonymous Auth is Not Enabled (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: "--anonymous-auth"
              path: '{.authentication.anonymous.enabled}'
              set: true
              compare:
                op: eq
                value: false
        remediation: |
          Remediation Method 1:
          If configuring via the Kubelet config file, you first need to locate the file.
          To do this, SSH to each node and execute the following command to find the kubelet
          process:
          ps -ef | grep kubelet
          The output of the above command provides details of the active kubelet process, from
          which we can see the location of the configuration file provided to the kubelet service
          with the --config argument. The file can be viewed with a command such as more or
          less, like so:
          sudo less /path/to/kubelet-config.json
          Disable Anonymous Authentication by setting the following parameter:
          "authentication": { "anonymous": { "enabled": false } }

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file on each worker node and
          ensure the below parameters are part of the KUBELET_ARGS variable string.
          For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
          Bottlerocket AMIs, this file can be found at
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
          you may need to look up documentation for your chosen operating system to determine
          which service manager is configured:
          --anonymous-auth=false

          For Both Remediation Steps:
          Based on your system, restart the kubelet service and check the service status.
          The following example is for operating systems using systemd, such as the Amazon
          EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
          command. If systemctl is not available then you will need to look up documentation for
          your chosen operating system to determine which service manager is configured:
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true
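
      # A quick config-file check for this setting, assuming jq is available and the EKS
      # default config path:
      #   sudo jq '.authentication.anonymous.enabled' /etc/kubernetes/kubelet/kubelet-config.json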

      - id: 3.2.2
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --authorization-mode
              path: '{.authorization.mode}'
              set: true
              compare:
                op: nothave
                value: AlwaysAllow
        remediation: |
          Remediation Method 1:
          If configuring via the Kubelet config file, you first need to locate the file.
          To do this, SSH to each node and execute the following command to find the kubelet
          process:
          ps -ef | grep kubelet
          The output of the above command provides details of the active kubelet process, from
          which we can see the location of the configuration file provided to the kubelet service
          with the --config argument. The file can be viewed with a command such as more or
          less, like so:
          sudo less /path/to/kubelet-config.json
          Enable Webhook Authentication by setting the following parameter:
          "authentication": { "webhook": { "enabled": true } }
          Next, set the Authorization Mode to Webhook by setting the following parameter:
          "authorization": { "mode": "Webhook" }
          Finer detail of the authentication and authorization fields can be found in the
          Kubelet Configuration documentation.

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file on each worker node and
          ensure the below parameters are part of the KUBELET_ARGS variable string.
          For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
          Bottlerocket AMIs, this file can be found at
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
          you may need to look up documentation for your chosen operating system to determine
          which service manager is configured:
          --authentication-token-webhook
          --authorization-mode=Webhook

          For Both Remediation Steps:
          Based on your system, restart the kubelet service and check the service status.
          The following example is for operating systems using systemd, such as the Amazon
          EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
          command. If systemctl is not available then you will need to look up documentation for
          your chosen operating system to determine which service manager is configured:
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true

      - id: 3.2.3
        text: "Ensure that a Client CA File is Configured (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --client-ca-file
              path: '{.authentication.x509.clientCAFile}'
              set: true
        remediation: |
          Remediation Method 1:
          If configuring via the Kubelet config file, you first need to locate the file.
          To do this, SSH to each node and execute the following command to find the kubelet
          process:
          ps -ef | grep kubelet
          The output of the above command provides details of the active kubelet process, from
          which we can see the location of the configuration file provided to the kubelet service
          with the --config argument. The file can be viewed with a command such as more or
          less, like so:
          sudo less /path/to/kubelet-config.json
          Configure the client certificate authority file by setting the following parameter
          appropriately:
          "authentication": { "x509": { "clientCAFile": "<path/to/client-ca-file>" } }

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file on each worker node and
          ensure the below parameters are part of the KUBELET_ARGS variable string.
          For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
          Bottlerocket AMIs, this file can be found at
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
          you may need to look up documentation for your chosen operating system to determine
          which service manager is configured:
          --client-ca-file=<path/to/client-ca-file>

          For Both Remediation Steps:
          Based on your system, restart the kubelet service and check the service status.
          The following example is for operating systems using systemd, such as the Amazon
          EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
          command. If systemctl is not available then you will need to look up documentation for
          your chosen operating system to determine which service manager is configured:
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: false

      - id: 3.2.4
        text: "Ensure that the --read-only-port is disabled (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: "--read-only-port"
              path: '{.readOnlyPort}'
              set: true
              compare:
                op: eq
                value: 0
        remediation: |
          If modifying the Kubelet config file, edit the kubelet-config.json file
          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to 0
          "readOnlyPort": 0
          If using executable arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
          worker node and add the below parameter at the end of the KUBELET_ARGS variable
          string.
          --read-only-port=0

          Based on your system, restart the kubelet service and check status
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: false

      - id: 3.2.5
        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --streaming-connection-idle-timeout
              path: '{.streamingConnectionIdleTimeout}'
              set: true
              compare:
                op: noteq
                value: 0
            - flag: --streaming-connection-idle-timeout
              path: '{.streamingConnectionIdleTimeout}'
              set: false
          bin_op: or
        remediation: |
          Remediation Method 1:
          If modifying the Kubelet config file, edit the kubelet-config.json file
          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to a
          non-zero value in the format of #h#m#s
          "streamingConnectionIdleTimeout": "4h0m0s"
          You should ensure that the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not
          specify a --streaming-connection-idle-timeout argument because it would
          override the Kubelet config file.

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
          worker node and add the below parameter at the end of the KUBELET_ARGS variable
          string.
          --streaming-connection-idle-timeout=4h0m0s

          Remediation Method 3:
          If using the api configz endpoint, consider searching for the status of
          "streamingConnectionIdleTimeout" by extracting the live configuration from the
          nodes running kubelet.
          See detailed step-by-step ConfigMap procedures in 'Reconfigure a Node's Kubelet in a
          Live Cluster', then rerun the curl statement from the audit process to check for kubelet
          configuration changes:
          kubectl proxy --port=8001 &
          export HOSTNAME_PORT=localhost:8001 (example host and port number)
          export NODE_NAME=ip-192-168-31-226.ec2.internal (example node name from "kubectl get nodes")
          curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"

          For all three remediations:
          Based on your system, restart the kubelet service and check status
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true

      - id: 3.2.6
        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --make-iptables-util-chains
              path: '{.makeIPTablesUtilChains}'
              set: true
              compare:
                op: eq
                value: true
            - flag: --make-iptables-util-chains
              path: '{.makeIPTablesUtilChains}'
              set: false
          bin_op: or
        remediation: |
          Remediation Method 1:
          If modifying the Kubelet config file, edit the kubelet-config.json file
          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
          true
          "makeIPTablesUtilChains": true
          Ensure that /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf
          does not set the --make-iptables-util-chains argument because that would
          override your Kubelet config file.

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
          worker node and add the below parameter at the end of the KUBELET_ARGS variable
          string.
          --make-iptables-util-chains=true

          Remediation Method 3:
          If using the api configz endpoint, consider searching for the status of
          "makeIPTablesUtilChains": true by extracting the live configuration from the nodes
          running kubelet.
          See detailed step-by-step ConfigMap procedures in 'Reconfigure a Node's Kubelet in a
          Live Cluster', then rerun the curl statement from the audit process to check for kubelet
          configuration changes:
          kubectl proxy --port=8001 &
          export HOSTNAME_PORT=localhost:8001 (example host and port number)
          export NODE_NAME=ip-192-168-31-226.ec2.internal (example node name from "kubectl get nodes")
          curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"

          For all three remediations:
          Based on your system, restart the kubelet service and check status
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true

      - id: 3.2.7
        text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              set: true
              compare:
                op: gte
                value: 0
        remediation: |
          If using a Kubelet config file, edit the file to set "eventRecordQPS" to an appropriate
          level.
          If using command line arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node
          and set the below parameter in the KUBELET_SYSTEM_PODS_ARGS variable:
          --event-qps=<appropriate level>
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 3.2.8
        text: "Ensure that the --rotate-certificates argument is not present or is set to true (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              set: true
              compare:
                op: eq
                value: true
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              set: false
          bin_op: or
        remediation: |
          Remediation Method 1:
          If modifying the Kubelet config file, edit the kubelet-config.json file
          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
          true
          "rotateCertificates": true
          Additionally, ensure that the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the
          --rotate-certificates executable argument to false because this would override
          the Kubelet config file.

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
          worker node and add the below parameter at the end of the KUBELET_ARGS variable
          string.
          --rotate-certificates=true
        scored: false

      - id: 3.2.9
        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: RotateKubeletServerCertificate
              path: '{.featureGates.RotateKubeletServerCertificate}'
              set: true
              compare:
                op: eq
                value: true
        remediation: |
          Remediation Method 1:
          If modifying the Kubelet config file, edit the kubelet-config.json file
          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
          true

          "featureGates": {
            "RotateKubeletServerCertificate": true
          },

          Additionally, ensure that the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set
          the --rotate-kubelet-server-certificate executable argument to false because
          this would override the Kubelet config file.

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
          worker node and add the below parameter at the end of the KUBELET_ARGS variable
          string.
          --rotate-kubelet-server-certificate=true

          Remediation Method 3:
          If using the api configz endpoint, consider searching for the status of
          "RotateKubeletServerCertificate" by extracting the live configuration from the
          nodes running kubelet.
          See detailed step-by-step ConfigMap procedures in 'Reconfigure a Node's Kubelet in a
          Live Cluster', then rerun the curl statement from the audit process to check for kubelet
          configuration changes:
          kubectl proxy --port=8001 &
          export HOSTNAME_PORT=localhost:8001 (example host and port number)
          export NODE_NAME=ip-192-168-31-226.ec2.internal (example node name from "kubectl get nodes")
          curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"

          For all three remediation methods:
          Restart the kubelet service and check status. The example below is for when using
          systemctl to manage services:
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: false
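
The proxy-plus-curl steps used throughout the 3.2.x remediations can also be collapsed into a single call. A sketch (the node name is a placeholder; jq is assumed for readability):

    kubectl get --raw "/api/v1/nodes/${NODE_NAME}/proxy/configz" | jq '.kubeletconfig'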

cfg/eks-1.5.0/policies.yaml (new file)
@@ -0,0 +1,250 @@
---
controls:
version: "eks-1.5.0"
id: 4
text: "Policies"
type: "policies"
groups:
  - id: 4.1
    text: "RBAC and Service Accounts"
    checks:
      - id: 4.1.1
        text: "Ensure that the cluster-admin role is only used where required (Automated)"
        type: "manual"
        remediation: |
          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if
          they need this role or if they could use a role with fewer privileges.
          Where possible, first bind users to a lower privileged role and then remove the
          clusterrolebinding to the cluster-admin role:
          kubectl delete clusterrolebinding [name]
        scored: false

      - id: 4.1.2
        text: "Minimize access to secrets (Automated)"
        type: "manual"
        remediation: |
          Where possible, remove get, list and watch access to secret objects in the cluster.
        scored: false

      - id: 4.1.3
        text: "Minimize wildcard use in Roles and ClusterRoles (Automated)"
        type: "manual"
        remediation: |
          Where possible, replace any use of wildcards in clusterroles and roles with specific
          objects or actions.
        scored: false

      - id: 4.1.4
        text: "Minimize access to create pods (Automated)"
        type: "manual"
        remediation: |
          Where possible, remove create access to pod objects in the cluster.
        scored: false

      - id: 4.1.5
        text: "Ensure that default service accounts are not actively used (Automated)"
        type: "manual"
        remediation: |
          Create explicit service accounts wherever a Kubernetes workload requires specific
          access to the Kubernetes API server.
          Modify the configuration of each default service account to include this value:
          automountServiceAccountToken: false

          Automatic remediation for the default account:
          kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'
        scored: false
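
      # To apply that patch across every namespace, a loop sketch (assumes the kubectl
      # context points at the target cluster):
      #   for ns in $(kubectl get namespaces -o jsonpath='{.items[*].metadata.name}'); do
      #     kubectl -n "$ns" patch serviceaccount default -p '{"automountServiceAccountToken": false}'
      #   done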

      - id: 4.1.6
        text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
        type: "manual"
        remediation: |
          Modify the definition of pods and service accounts which do not need to mount service
          account tokens to disable it.
        scored: false

      - id: 4.1.7
        text: "Avoid use of system:masters group (Automated)"
        type: "manual"
        remediation: |
          Remove the system:masters group from all users in the cluster.
        scored: false

      - id: 4.1.8
        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove the impersonate, bind and escalate rights from subjects.
        scored: false

  - id: 4.2
    text: "Pod Security Standards"
    checks:
      - id: 4.2.1
        text: "Minimize the admission of privileged containers (Automated)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of privileged containers.
          To enable PSA for a namespace in your cluster, set the pod-security.kubernetes.io/enforce
          label with the policy value you want to enforce.
          kubectl label --overwrite ns NAMESPACE pod-security.kubernetes.io/enforce=restricted
          The above command enforces the restricted policy for the NAMESPACE namespace.
          You can also enable Pod Security Admission for all your namespaces. For example:
          kubectl label --overwrite ns --all pod-security.kubernetes.io/warn=baseline
        scored: false

      - id: 4.2.2
        text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of hostPID containers.
        scored: false

      - id: 4.2.3
        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of hostIPC containers.
        scored: false

      - id: 4.2.4
        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
        type: "manual"
        remediation: |
          Create a PSP as described in the Kubernetes documentation, ensuring that the
          .spec.hostNetwork field is omitted or set to false.
        scored: false

      - id: 4.2.5
        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with .spec.allowPrivilegeEscalation set to true.
        scored: false

  - id: 4.3
    text: "CNI Plugin"
    checks:
      - id: 4.3.1
        text: "Ensure CNI plugin supports network policies (Manual)"
        type: "manual"
        remediation: |
          As with RBAC policies, network policies should adhere to the policy of least privileged
          access. Start by creating a deny-all policy that restricts all inbound and outbound traffic
          from a namespace, or create a global policy using Calico.
        scored: false
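
      # A deny-all starting point can be applied per namespace. A sketch (the namespace
      # is a placeholder):
      #   kubectl -n "$NAMESPACE" apply -f - <<'EOF'
      #   apiVersion: networking.k8s.io/v1
      #   kind: NetworkPolicy
      #   metadata:
      #     name: default-deny-all
      #   spec:
      #     podSelector: {}
      #     policyTypes:
      #       - Ingress
      #       - Egress
      #   EOF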

      - id: 4.3.2
        text: "Ensure that all Namespaces have Network Policies defined (Automated)"
        type: "manual"
        remediation: |
          Follow the documentation and create NetworkPolicy objects as you need them.
        scored: false

  - id: 4.4
    text: "Secrets Management"
    checks:
      - id: 4.4.1
        text: "Prefer using secrets as files over secrets as environment variables (Automated)"
        type: "manual"
        remediation: |
          If possible, rewrite application code to read secrets from mounted secret files, rather than
          from environment variables.
        scored: false
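
      # For illustration, a pod spec that mounts a secret as files instead of exposing it
      # through environment variables (all names are placeholders):
      #   kubectl apply -f - <<'EOF'
      #   apiVersion: v1
      #   kind: Pod
      #   metadata:
      #     name: secret-as-file-demo
      #   spec:
      #     containers:
      #       - name: app
      #         image: busybox:1.36
      #         command: ["sh", "-c", "cat /etc/app-secrets/* && sleep 3600"]
      #         volumeMounts:
      #           - name: app-secrets
      #             mountPath: /etc/app-secrets
      #             readOnly: true
      #     volumes:
      #       - name: app-secrets
      #         secret:
      #           secretName: my-app-secret
      #   EOF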

      - id: 4.4.2
        text: "Consider external secret storage (Manual)"
        type: "manual"
        remediation: |
          Refer to the secrets management options offered by your cloud provider or a third-party
          secrets management solution.
        scored: false

  - id: 4.5
    text: "General Policies"
    checks:
      - id: 4.5.1
        text: "Create administrative boundaries between resources using namespaces (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation and create namespaces for objects in your deployment as you need
          them.
        scored: false

      - id: 4.5.2
        text: "Apply Security Context to Your Pods and Containers (Manual)"
        type: "manual"
        remediation: |
          As a best practice, we recommend that you scope the binding for privileged pods to
          service accounts within a particular namespace, e.g. kube-system, and limit access
          to that namespace. For all other serviceaccounts/namespaces, we recommend
          implementing a more restrictive policy such as this:

          apiVersion: policy/v1beta1
          kind: PodSecurityPolicy
          metadata:
            name: restricted
            annotations:
              seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
              apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
              seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
              apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
          spec:
            privileged: false
            # Required to prevent escalations to root.
            allowPrivilegeEscalation: false
            # This is redundant with non-root + disallow privilege escalation,
            # but we can provide it for defense in depth.
            requiredDropCapabilities:
              - ALL
            # Allow core volume types.
            volumes:
              - 'configMap'
              - 'emptyDir'
              - 'projected'
              - 'secret'
              - 'downwardAPI'
              # Assume that persistentVolumes set up by the cluster admin are safe to use.
              - 'persistentVolumeClaim'
            hostNetwork: false
            hostIPC: false
            hostPID: false
            runAsUser:
              # Require the container to run without root privileges.
              rule: 'MustRunAsNonRoot'
            seLinux:
              # This policy assumes the nodes are using AppArmor rather than SELinux.
              rule: 'RunAsAny'
            supplementalGroups:
              rule: 'MustRunAs'
              ranges:
                # Forbid adding the root group.
                - min: 1
                  max: 65535
            fsGroup:
              rule: 'MustRunAs'
              ranges:
                # Forbid adding the root group.
                - min: 1
                  max: 65535
            readOnlyRootFilesystem: false

          This policy prevents pods from running as privileged or escalating privileges. It also
          restricts the types of volumes that can be mounted and the root supplemental groups
          that can be added.
          Another, albeit similar, approach is to start with a policy that locks everything down and
          incrementally add exceptions for applications that need looser restrictions, such as
          logging agents which need the ability to mount a host path.
        scored: false

      - id: 4.5.3
        text: "The default namespace should not be used (Automated)"
        type: "manual"
        remediation: |
          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
          resources and that all new resources are created in a specific namespace.
        scored: false