mirror of https://github.com/aquasecurity/kube-bench.git synced 2025-08-04 12:56:13 +00:00

Merge branch 'upstream-main' into cis-openshift-1-6

Deepanshu Bhatia 2025-01-29 23:21:20 +05:30
commit 308839d7bd
43 changed files with 3451 additions and 226 deletions


@@ -14,7 +14,7 @@ on:
- "LICENSE"
- "NOTICE"
env:
GO_VERSION: "1.22.7"
GO_VERSION: "1.23.4"
KIND_VERSION: "v0.11.1"
KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6"
@@ -34,8 +34,8 @@ jobs:
- name: Setup golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: v1.57.2
args: --verbose
version: v1.61
args: --verbose --timeout 2m
unit:
name: Unit tests
runs-on: ubuntu-latest
@@ -49,7 +49,7 @@ jobs:
- name: Run unit tests
run: make tests
- name: Upload code coverage
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v5
with:
file: ./coverage.txt
e2e:
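The lint job now pins golangci-lint v1.61 and adds a 2-minute timeout; the same invocation can be reproduced locally. A minimal sketch, assuming golangci-lint v1.61 is on PATH and the repository root is the working directory:

# Mirrors the CI lint step, including the new timeout.
golangci-lint run --verbose --timeout 2m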


@@ -2,11 +2,9 @@
linters:
disable-all: true
enable:
- deadcode
- gocyclo
- gofmt
- goimports
- govet
- misspell
- typecheck
- varcheck


@@ -5,7 +5,7 @@ env:
- CGO_ENABLED=0
- KUBEBENCH_CFG=/etc/kube-bench/cfg
builds:
- main: main.go
- main: .
binary: kube-bench
tags:
- osusergo


@@ -1,4 +1,4 @@
FROM golang:1.23.2 AS build
FROM golang:1.23.4 AS build
WORKDIR /go/src/github.com/aquasecurity/kube-bench/
COPY makefile makefile
COPY go.mod go.sum ./
@@ -13,11 +13,13 @@ RUN make build && cp kube-bench /go/bin/kube-bench
ARG KUBECTL_VERSION TARGETARCH
RUN wget -O /usr/local/bin/kubectl "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl"
RUN wget -O kubectl.sha256 "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl.sha256"
# Verify kubectl sha256sum
RUN /bin/bash -c 'echo "$(<kubectl.sha256) /usr/local/bin/kubectl" | sha256sum -c -'
RUN chmod +x /usr/local/bin/kubectl
FROM alpine:3.20.3 AS run
FROM alpine:3.21.2 AS run
WORKDIR /opt/kube-bench/
# add GNU ps for -C, -o cmd, --no-headers support and add findutils to get GNU xargs
# https://github.com/aquasecurity/kube-bench/issues/109
@@ -38,22 +40,34 @@ RUN wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/s
RUN apk add gcompat
RUN apk add jq
ENV PATH=$PATH:/usr/local/mount-from-host/bin
# Add bash for running helper scripts
RUN apk add bash
ENV PATH=$PATH:/usr/local/mount-from-host/bin:/go/bin
COPY --from=build /go/bin/kube-bench /usr/local/bin/kube-bench
COPY --from=build /usr/local/bin/kubectl /usr/local/bin/kubectl
COPY entrypoint.sh .
COPY cfg/ cfg/
COPY helper_scripts/check_files_owner_in_dir.sh /go/bin/
RUN chmod a+x /go/bin/check_files_owner_in_dir.sh
ENTRYPOINT ["./entrypoint.sh"]
CMD ["install"]
# Build-time metadata as defined at http://label-schema.org
ARG BUILD_DATE
ARG VCS_REF
ARG KUBEBENCH_VERSION
LABEL org.label-schema.build-date=$BUILD_DATE \
org.label-schema.name="kube-bench" \
org.label-schema.description="Run the CIS Kubernetes Benchmark tests" \
org.label-schema.url="https://github.com/aquasecurity/kube-bench" \
org.label-schema.vcs-ref=$VCS_REF \
org.label-schema.vcs-url="https://github.com/aquasecurity/kube-bench" \
org.label-schema.schema-version="1.0"
org.label-schema.name="kube-bench" \
org.label-schema.vendor="Aqua Security Software Ltd." \
org.label-schema.version=$KUBEBENCH_VERSION \
org.label-schema.release=$KUBEBENCH_VERSION \
org.label-schema.summary="Aqua security server" \
org.label-schema.maintainer="admin@aquasec.com" \
org.label-schema.description="Run the CIS Kubernetes Benchmark tests" \
org.label-schema.url="https://github.com/aquasecurity/kube-bench" \
org.label-schema.vcs-ref=$VCS_REF \
org.label-schema.vcs-url="https://github.com/aquasecurity/kube-bench" \
org.label-schema.schema-version="1.0"
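The new KUBEBENCH_VERSION build argument, like BUILD_DATE and VCS_REF, is only populated when passed at build time. A hedged sketch of such an invocation; the tag and version value are illustrative and not the project's release process:

# Illustrative local build; the release pipeline may pass these differently.
docker build \
  --build-arg BUILD_DATE="$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
  --build-arg VCS_REF="$(git rev-parse --short HEAD)" \
  --build-arg KUBEBENCH_VERSION="0.0.0-dev" \
  -t kube-bench:local .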


@@ -1,4 +1,4 @@
FROM golang:1.23.2 AS build
FROM golang:1.23.4 AS build
WORKDIR /go/src/github.com/aquasecurity/kube-bench/
COPY makefile makefile
COPY go.mod go.sum ./
@@ -42,6 +42,7 @@ COPY --from=build /go/bin/kube-bench /usr/local/bin/kube-bench
COPY --from=build /usr/local/bin/kubectl /usr/local/bin/kubectl
COPY entrypoint.sh .
COPY cfg/ cfg/
COPY helper_scripts/check_files_owner_in_dir.sh /go/bin
ENTRYPOINT ["./entrypoint.sh"]
CMD ["install"]
@@ -49,10 +50,17 @@ CMD ["install"]
# Build-time metadata as defined at http://label-schema.org
ARG BUILD_DATE
ARG VCS_REF
ARG KUBEBENCH_VERSION
LABEL org.label-schema.build-date=$BUILD_DATE \
org.label-schema.name="kube-bench" \
org.label-schema.description="Run the CIS Kubernetes Benchmark tests" \
org.label-schema.url="https://github.com/aquasecurity/kube-bench" \
org.label-schema.vcs-ref=$VCS_REF \
org.label-schema.vcs-url="https://github.com/aquasecurity/kube-bench" \
org.label-schema.schema-version="1.0"
org.label-schema.name="kube-bench" \
org.label-schema.vendor="Aqua Security Software Ltd." \
org.label-schema.version=$KUBEBENCH_VERSION \
org.label-schema.release=$KUBEBENCH_VERSION \
org.label-schema.summary="Aqua security server" \
org.label-schema.maintainer="admin@aquasec.com" \
org.label-schema.description="Run the CIS Kubernetes Benchmark tests" \
org.label-schema.url="https://github.com/aquasecurity/kube-bench" \
org.label-schema.vcs-ref=$VCS_REF \
org.label-schema.vcs-url="https://github.com/aquasecurity/kube-bench" \
org.label-schema.schema-version="1.0"


@@ -1,4 +1,4 @@
FROM golang:1.23.2 AS build
FROM golang:1.23.4 AS build
WORKDIR /go/src/github.com/aquasecurity/kube-bench/
COPY makefile makefile
COPY go.mod go.sum ./
@@ -42,6 +42,7 @@ COPY --from=build /go/bin/kube-bench /usr/local/bin/kube-bench
COPY --from=build /usr/local/bin/kubectl /usr/local/bin/kubectl
COPY entrypoint.sh .
COPY cfg/ cfg/
COPY helper_scripts/check_files_owner_in_dir.sh /go/bin
ENTRYPOINT ["./entrypoint.sh"]
CMD ["install"]
@@ -49,10 +50,17 @@ CMD ["install"]
# Build-time metadata as defined at http://label-schema.org
ARG BUILD_DATE
ARG VCS_REF
ARG KUBEBENCH_VERSION
LABEL org.label-schema.build-date=$BUILD_DATE \
org.label-schema.name="kube-bench" \
org.label-schema.description="Run the CIS Kubernetes Benchmark tests" \
org.label-schema.url="https://github.com/aquasecurity/kube-bench" \
org.label-schema.vcs-ref=$VCS_REF \
org.label-schema.vcs-url="https://github.com/aquasecurity/kube-bench" \
org.label-schema.schema-version="1.0"
org.label-schema.name="kube-bench" \
org.label-schema.vendor="Aqua Security Software Ltd." \
org.label-schema.version=$KUBEBENCH_VERSION \
org.label-schema.release=$KUBEBENCH_VERSION \
org.label-schema.summary="Aqua security server" \
org.label-schema.maintainer="admin@aquasec.com" \
org.label-schema.description="Run the CIS Kubernetes Benchmark tests" \
org.label-schema.url="https://github.com/aquasecurity/kube-bench" \
org.label-schema.vcs-ref=$VCS_REF \
org.label-schema.vcs-url="https://github.com/aquasecurity/kube-bench" \
org.label-schema.schema-version="1.0"

cfg/cis-1.10/config.yaml (new file, 2 lines)

@@ -0,0 +1,2 @@
---
## Version-specific settings that override the values in cfg/config.yaml


@@ -0,0 +1,62 @@
---
controls:
version: "cis-1.10"
id: 3
text: "Control Plane Configuration"
type: "controlplane"
groups:
- id: 3.1
text: "Authentication and Authorization"
checks:
- id: 3.1.1
text: "Client certificate authentication should not be used for users (Manual)"
type: "manual"
remediation: |
Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
implemented in place of client certificates.
scored: false
- id: 3.1.2
text: "Service account token authentication should not be used for users (Manual)"
type: "manual"
remediation: |
Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
in place of service account tokens.
scored: false
- id: 3.1.3
text: "Bootstrap token authentication should not be used for users (Manual)"
type: "manual"
remediation: |
Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
in place of bootstrap tokens.
scored: false
- id: 3.2
text: "Logging"
checks:
- id: 3.2.1
text: "Ensure that a minimal audit policy is created (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-policy-file"
set: true
remediation: |
Create an audit policy file for your cluster.
scored: false
- id: 3.2.2
text: "Ensure that the audit policy covers key security concerns (Manual)"
type: "manual"
remediation: |
Review the audit policy provided for the cluster and ensure that it covers
at least the following areas,
- Access to Secrets managed by the cluster. Care should be taken to only
log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
order to avoid risk of logging sensitive data.
- Modification of Pod and Deployment objects.
- Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
For most requests, minimally logging at the Metadata level is recommended
(the most basic level of logging).
scored: false
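Once the cfg/cis-1.10 definitions are installed, this group can be exercised on its own. A hedged example invocation using kube-bench's existing --benchmark and --targets flags (output omitted):

# Run only the Control Plane Configuration checks from the new benchmark version.
kube-bench run --benchmark cis-1.10 --targets controlplane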

cfg/cis-1.10/etcd.yaml (new file, 135 lines)

@@ -0,0 +1,135 @@
---
controls:
version: "cis-1.10"
id: 2
text: "Etcd Node Configuration"
type: "etcd"
groups:
- id: 2
text: "Etcd Node Configuration"
checks:
- id: 2.1
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
bin_op: and
test_items:
- flag: "--cert-file"
env: "ETCD_CERT_FILE"
- flag: "--key-file"
env: "ETCD_KEY_FILE"
remediation: |
Follow the etcd service documentation and configure TLS encryption.
Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
on the master node and set the below parameters.
--cert-file=</path/to/ca-file>
--key-file=</path/to/key-file>
scored: true
- id: 2.2
text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
test_items:
- flag: "--client-cert-auth"
env: "ETCD_CLIENT_CERT_AUTH"
compare:
op: eq
value: true
remediation: |
Edit the etcd pod specification file $etcdconf on the master
node and set the below parameter.
--client-cert-auth="true"
scored: true
- id: 2.3
text: "Ensure that the --auto-tls argument is not set to true (Automated)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--auto-tls"
env: "ETCD_AUTO_TLS"
set: false
- flag: "--auto-tls"
env: "ETCD_AUTO_TLS"
compare:
op: eq
value: false
remediation: |
Edit the etcd pod specification file $etcdconf on the master
node and either remove the --auto-tls parameter or set it to false.
--auto-tls=false
scored: true
- id: 2.4
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
set as appropriate (Automated)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
bin_op: and
test_items:
- flag: "--peer-cert-file"
env: "ETCD_PEER_CERT_FILE"
- flag: "--peer-key-file"
env: "ETCD_PEER_KEY_FILE"
remediation: |
Follow the etcd service documentation and configure peer TLS encryption as appropriate
for your etcd cluster.
Then, edit the etcd pod specification file $etcdconf on the
master node and set the below parameters.
--peer-client-file=</path/to/peer-cert-file>
--peer-key-file=</path/to/peer-key-file>
scored: true
- id: 2.5
text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
test_items:
- flag: "--peer-client-cert-auth"
env: "ETCD_PEER_CLIENT_CERT_AUTH"
compare:
op: eq
value: true
remediation: |
Edit the etcd pod specification file $etcdconf on the master
node and set the below parameter.
--peer-client-cert-auth=true
scored: true
- id: 2.6
text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--peer-auto-tls"
env: "ETCD_PEER_AUTO_TLS"
set: false
- flag: "--peer-auto-tls"
env: "ETCD_PEER_AUTO_TLS"
compare:
op: eq
value: false
remediation: |
Edit the etcd pod specification file $etcdconf on the master
node and either remove the --peer-auto-tls parameter or set it to false.
--peer-auto-tls=false
scored: true
- id: 2.7
text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
test_items:
- flag: "--trusted-ca-file"
env: "ETCD_TRUSTED_CA_FILE"
remediation: |
[Manual test]
Follow the etcd documentation and create a dedicated certificate authority setup for the
etcd service.
Then, edit the etcd pod specification file $etcdconf on the
master node and set the below parameter.
--trusted-ca-file=</path/to/ca-file>
scored: false
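Each etcd check pairs a command-line flag with the matching ETCD_* environment variable, so it passes whichever way etcd was configured. The audit line can also be reproduced by hand on an etcd node; a sketch, assuming $etcdbin resolves to the usual etcd process name:

# What the audit command in these checks effectively runs: list the etcd
# process and inspect its flags (or the ETCD_* environment it was started with).
/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep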

cfg/cis-1.10/master.yaml (new file, 917 lines)

@@ -0,0 +1,917 @@
---
controls:
version: "cis-1.10"
id: 1
text: "Control Plane Security Configuration"
type: "master"
groups:
- id: 1.1
text: "Control Plane Node Configuration Files"
checks:
- id: 1.1.1
text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the
control plane node.
For example, chmod 600 $apiserverconf
scored: true
- id: 1.1.2
text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chown root:root $apiserverconf
scored: true
- id: 1.1.3
text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chmod 600 $controllermanagerconf
scored: true
- id: 1.1.4
text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chown root:root $controllermanagerconf
scored: true
- id: 1.1.5
text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chmod 600 $schedulerconf
scored: true
- id: 1.1.6
text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chown root:root $schedulerconf
scored: true
- id: 1.1.7
text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'"
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chmod 600 $etcdconf
scored: true
- id: 1.1.8
text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'"
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chown root:root $etcdconf
scored: true
- id: 1.1.9
text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)"
audit: |
ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a
find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chmod 600 <path/to/cni/files>
scored: false
- id: 1.1.10
text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
audit: |
ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chown root:root <path/to/cni/files>
scored: false
- id: 1.1.11
text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
audit: |
DATA_DIR=''
for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do
if test -d "$d"; then DATA_DIR="$d"; fi
done
if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
stat -c permissions=%a "$DATA_DIR"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "700"
remediation: |
On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
from the command 'ps -ef | grep etcd'.
Run the below command (based on the etcd data directory found above). For example,
chmod 700 /var/lib/etcd
scored: true
- id: 1.1.12
text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)"
audit: |
DATA_DIR=''
for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do
if test -d "$d"; then DATA_DIR="$d"; fi
done
if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
stat -c %U:%G "$DATA_DIR"
tests:
test_items:
- flag: "etcd:etcd"
remediation: |
On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
from the command 'ps -ef | grep etcd'.
Run the below command (based on the etcd data directory found above).
For example, chown etcd:etcd /var/lib/etcd
scored: true
- id: 1.1.13
text: "Ensure that the default administrative credential file permissions are set to 600 (Automated)"
audit: |
for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "permissions=%a %n" $adminconf; fi; done
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chmod 600 /etc/kubernetes/admin.conf
On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present.
For example, chmod 600 /etc/kubernetes/super-admin.conf
scored: true
- id: 1.1.14
text: "Ensure that the default administrative credential file ownership is set to root:root (Automated)"
audit: |
for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "ownership=%U:%G %n" $adminconf; fi; done
use_multiple_values: true
tests:
test_items:
- flag: "ownership"
compare:
op: eq
value: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chown root:root /etc/kubernetes/admin.conf
On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present.
For example, chown root:root /etc/kubernetes/super-admin.conf
scored: true
- id: 1.1.15
text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chmod 600 $schedulerkubeconfig
scored: true
- id: 1.1.16
text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chown root:root $schedulerkubeconfig
scored: true
- id: 1.1.17
text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chmod 600 $controllermanagerkubeconfig
scored: true
- id: 1.1.18
text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chown root:root $controllermanagerkubeconfig
scored: true
- id: 1.1.19
text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G"
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chown -R root:root /etc/kubernetes/pki/
scored: true
- id: 1.1.20
text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)"
audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a"
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chmod -R 600 /etc/kubernetes/pki/*.crt
scored: false
- id: 1.1.21
text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)"
audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a"
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chmod -R 600 /etc/kubernetes/pki/*.key
scored: false
- id: 1.2
text: "API Server"
checks:
- id: 1.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--anonymous-auth"
compare:
op: eq
value: false
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the below parameter.
--anonymous-auth=false
scored: false
- id: 1.2.2
text: "Ensure that the --token-auth-file parameter is not set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--token-auth-file"
set: false
remediation: |
Follow the documentation and configure alternate mechanisms for authentication. Then,
edit the API server pod specification file $apiserverconf
on the control plane node and remove the --token-auth-file=<filename> parameter.
scored: true
- id: 1.2.3
text: "Ensure that the --DenyServiceExternalIPs is set (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--enable-admission-plugins"
compare:
op: has
value: "DenyServiceExternalIPs"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and add the `DenyServiceExternalIPs` plugin
to the enabled admission plugins, as such --enable-admission-plugin=DenyServiceExternalIPs.
scored: false
- id: 1.2.4
text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: and
test_items:
- flag: "--kubelet-client-certificate"
- flag: "--kubelet-client-key"
remediation: |
Follow the Kubernetes documentation and set up the TLS connection between the
apiserver and kubelets. Then, edit API server pod specification file
$apiserverconf on the control plane node and set the
kubelet client certificate and key parameters as below.
--kubelet-client-certificate=<path/to/client-certificate-file>
--kubelet-client-key=<path/to/client-key-file>
scored: true
- id: 1.2.5
text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--kubelet-certificate-authority"
remediation: |
Follow the Kubernetes documentation and setup the TLS connection between
the apiserver and kubelets. Then, edit the API server pod specification file
$apiserverconf on the control plane node and set the
--kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
--kubelet-certificate-authority=<ca-string>
scored: true
- id: 1.2.6
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--authorization-mode"
compare:
op: nothave
value: "AlwaysAllow"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
One such example could be as below.
--authorization-mode=RBAC
scored: true
- id: 1.2.7
text: "Ensure that the --authorization-mode argument includes Node (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--authorization-mode"
compare:
op: has
value: "Node"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --authorization-mode parameter to a value that includes Node.
--authorization-mode=Node,RBAC
scored: true
- id: 1.2.8
text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--authorization-mode"
compare:
op: has
value: "RBAC"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --authorization-mode parameter to a value that includes RBAC,
for example `--authorization-mode=Node,RBAC`.
scored: true
- id: 1.2.9
text: "Ensure that the admission control plugin EventRateLimit is set (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--enable-admission-plugins"
compare:
op: has
value: "EventRateLimit"
remediation: |
Follow the Kubernetes documentation and set the desired limits in a configuration file.
Then, edit the API server pod specification file $apiserverconf
and set the below parameters.
--enable-admission-plugins=...,EventRateLimit,...
--admission-control-config-file=<path/to/configuration/file>
scored: false
- id: 1.2.10
text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--enable-admission-plugins"
compare:
op: nothave
value: AlwaysAdmit
- flag: "--enable-admission-plugins"
set: false
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a
value that does not include AlwaysAdmit.
scored: true
- id: 1.2.11
text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--enable-admission-plugins"
compare:
op: has
value: "AlwaysPullImages"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --enable-admission-plugins parameter to include
AlwaysPullImages.
--enable-admission-plugins=...,AlwaysPullImages,...
scored: false
- id: 1.2.12
text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--disable-admission-plugins"
compare:
op: nothave
value: "ServiceAccount"
- flag: "--disable-admission-plugins"
set: false
remediation: |
Follow the documentation and create ServiceAccount objects as per your environment.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and ensure that the --disable-admission-plugins parameter is set to a
value that does not include ServiceAccount.
scored: true
- id: 1.2.13
text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--disable-admission-plugins"
compare:
op: nothave
value: "NamespaceLifecycle"
- flag: "--disable-admission-plugins"
set: false
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --disable-admission-plugins parameter to
ensure it does not include NamespaceLifecycle.
scored: true
- id: 1.2.14
text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--enable-admission-plugins"
compare:
op: has
value: "NodeRestriction"
remediation: |
Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and set the --enable-admission-plugins parameter to a
value that includes NodeRestriction.
--enable-admission-plugins=...,NodeRestriction,...
scored: true
- id: 1.2.15
text: "Ensure that the --profiling argument is set to false (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--profiling"
compare:
op: eq
value: false
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the below parameter.
--profiling=false
scored: true
- id: 1.2.16
text: "Ensure that the --audit-log-path argument is set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-log-path"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --audit-log-path parameter to a suitable path and
file where you would like audit logs to be written, for example,
--audit-log-path=/var/log/apiserver/audit.log
scored: true
- id: 1.2.17
text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-log-maxage"
compare:
op: gte
value: 30
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --audit-log-maxage parameter to 30
or as an appropriate number of days, for example,
--audit-log-maxage=30
scored: true
- id: 1.2.18
text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-log-maxbackup"
compare:
op: gte
value: 10
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate
value. For example,
--audit-log-maxbackup=10
scored: true
- id: 1.2.19
text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-log-maxsize"
compare:
op: gte
value: 100
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB.
For example, to set it as 100 MB, --audit-log-maxsize=100
scored: true
- id: 1.2.20
text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
type: manual
remediation: |
Edit the API server pod specification file $apiserverconf
and set the below parameter as appropriate and if needed.
For example, --request-timeout=300s
scored: false
- id: 1.2.21
text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--service-account-lookup"
set: false
- flag: "--service-account-lookup"
compare:
op: eq
value: true
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the below parameter.
--service-account-lookup=true
Alternatively, you can delete the --service-account-lookup parameter from this file so
that the default takes effect.
scored: true
- id: 1.2.22
text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--service-account-key-file"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --service-account-key-file parameter
to the public key file for service accounts. For example,
--service-account-key-file=<filename>
scored: true
- id: 1.2.23
text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: and
test_items:
- flag: "--etcd-certfile"
- flag: "--etcd-keyfile"
remediation: |
Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and set the etcd certificate and key file parameters.
--etcd-certfile=<path/to/client-certificate-file>
--etcd-keyfile=<path/to/client-key-file>
scored: true
- id: 1.2.24
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: and
test_items:
- flag: "--tls-cert-file"
- flag: "--tls-private-key-file"
remediation: |
Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and set the TLS certificate and private key file parameters.
--tls-cert-file=<path/to/tls-certificate-file>
--tls-private-key-file=<path/to/tls-key-file>
scored: true
- id: 1.2.25
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--client-ca-file"
remediation: |
Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and set the client certificate authority file.
--client-ca-file=<path/to/client-ca-file>
scored: true
- id: 1.2.26
text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--etcd-cafile"
remediation: |
Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and set the etcd certificate authority file parameter.
--etcd-cafile=<path/to/ca-file>
scored: true
- id: 1.2.27
text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--encryption-provider-config"
remediation: |
Follow the Kubernetes documentation and configure a EncryptionConfig file.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and set the --encryption-provider-config parameter to the path of that file.
For example, --encryption-provider-config=</path/to/EncryptionConfig/File>
scored: false
- id: 1.2.28
text: "Ensure that encryption providers are appropriately configured (Manual)"
audit: |
ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
tests:
test_items:
- flag: "provider"
compare:
op: valid_elements
value: "aescbc,kms,secretbox"
remediation: |
Follow the Kubernetes documentation and configure a EncryptionConfig file.
In this file, choose aescbc, kms or secretbox as the encryption provider.
scored: false
- id: 1.2.29
text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--tls-cipher-suites"
compare:
op: valid_elements
value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"
remediation: |
Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
on the control plane node and set the below parameter.
--tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
scored: false
- id: 1.3
text: "Controller Manager"
checks:
- id: 1.3.1
text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
test_items:
- flag: "--terminated-pod-gc-threshold"
remediation: |
Edit the Controller Manager pod specification file $controllermanagerconf
on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
for example, --terminated-pod-gc-threshold=10
scored: false
- id: 1.3.2
text: "Ensure that the --profiling argument is set to false (Automated)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
test_items:
- flag: "--profiling"
compare:
op: eq
value: false
remediation: |
Edit the Controller Manager pod specification file $controllermanagerconf
on the control plane node and set the below parameter.
--profiling=false
scored: true
- id: 1.3.3
text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
test_items:
- flag: "--use-service-account-credentials"
compare:
op: noteq
value: false
remediation: |
Edit the Controller Manager pod specification file $controllermanagerconf
on the control plane node to set the below parameter.
--use-service-account-credentials=true
scored: true
- id: 1.3.4
text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
test_items:
- flag: "--service-account-private-key-file"
remediation: |
Edit the Controller Manager pod specification file $controllermanagerconf
on the control plane node and set the --service-account-private-key-file parameter
to the private key file for service accounts.
--service-account-private-key-file=<filename>
scored: true
- id: 1.3.5
text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
test_items:
- flag: "--root-ca-file"
remediation: |
Edit the Controller Manager pod specification file $controllermanagerconf
on the control plane node and set the --root-ca-file parameter to the certificate bundle file`.
--root-ca-file=<path/to/file>
scored: true
- id: 1.3.6
text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--feature-gates"
compare:
op: nothave
value: "RotateKubeletServerCertificate=false"
set: true
- flag: "--feature-gates"
set: false
remediation: |
Edit the Controller Manager pod specification file $controllermanagerconf
on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
--feature-gates=RotateKubeletServerCertificate=true
scored: true
- id: 1.3.7
text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--bind-address"
compare:
op: eq
value: "127.0.0.1"
- flag: "--bind-address"
set: false
remediation: |
Edit the Controller Manager pod specification file $controllermanagerconf
on the control plane node and ensure the correct value for the --bind-address parameter
scored: true
- id: 1.4
text: "Scheduler"
checks:
- id: 1.4.1
text: "Ensure that the --profiling argument is set to false (Automated)"
audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
tests:
test_items:
- flag: "--profiling"
compare:
op: eq
value: false
remediation: |
Edit the Scheduler pod specification file $schedulerconf file
on the control plane node and set the below parameter.
--profiling=false
scored: true
- id: 1.4.2
text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--bind-address"
compare:
op: eq
value: "127.0.0.1"
- flag: "--bind-address"
set: false
remediation: |
Edit the Scheduler pod specification file $schedulerconf
on the control plane node and ensure the correct value for the --bind-address parameter
scored: true
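The 1.1.x permission checks feed the output of stat -c permissions=%a into an op: bitmask comparison, which passes only when the file mode sets no bits beyond the stated value, i.e. 600 or more restrictive. A manual spot-check for 1.1.1, assuming kubeadm's default manifest path for $apiserverconf:

# Inspect, and if necessary tighten, the API server manifest permissions.
stat -c permissions=%a /etc/kubernetes/manifests/kube-apiserver.yaml
chmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml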

cfg/cis-1.10/node.yaml (new file, 478 lines)

@@ -0,0 +1,478 @@
---
controls:
version: "cis-1.10"
id: 4
text: "Worker Node Security Configuration"
type: "node"
groups:
- id: 4.1
text: "Worker Node Configuration Files"
checks:
- id: 4.1.1
text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the each worker node.
For example, chmod 600 $kubeletsvc
scored: true
- id: 4.1.2
text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"'
tests:
bin_op: or
test_items:
- flag: root:root
- flag: "File not found"
remediation: |
Run the below command (based on the file location on your system) on the each worker node.
For example,
chown root:root $kubeletsvc
scored: true
- id: 4.1.3
text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
tests:
bin_op: or
test_items:
- flag: "permissions"
set: true
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the each worker node.
For example,
chmod 600 $proxykubeconfig
scored: false
- id: 4.1.4
text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
tests:
bin_op: or
test_items:
- flag: root:root
remediation: |
Run the below command (based on the file location on your system) on the each worker node.
For example, chown root:root $proxykubeconfig
scored: false
- id: 4.1.5
text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the each worker node.
For example,
chmod 600 $kubeletkubeconfig
scored: true
- id: 4.1.6
text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
tests:
test_items:
- flag: root:root
remediation: |
Run the below command (based on the file location on your system) on the each worker node.
For example,
chown root:root $kubeletkubeconfig
scored: true
- id: 4.1.7
text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)"
audit: |
CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command to modify the file permissions of the
--client-ca-file chmod 600 <filename>
scored: false
- id: 4.1.8
text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
audit: |
CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
tests:
test_items:
- flag: root:root
compare:
op: eq
value: root:root
remediation: |
Run the following command to modify the ownership of the --client-ca-file.
chown root:root <filename>
scored: false
- id: 4.1.9
text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the following command (using the config file location identified in the Audit step)
chmod 600 $kubeletconf
scored: true
- id: 4.1.10
text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
tests:
test_items:
- flag: root:root
remediation: |
Run the following command (using the config file location identified in the Audit step)
chown root:root $kubeletconf
scored: true
- id: 4.2
text: "Kubelet"
checks:
- id: 4.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: "--anonymous-auth"
path: '{.authentication.anonymous.enabled}'
compare:
op: eq
value: false
remediation: |
If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
`false`.
If using executable arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
`--anonymous-auth=false`
Based on your system, restart the kubelet service. For example,
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.2
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --authorization-mode
path: '{.authorization.mode}'
compare:
op: nothave
value: AlwaysAllow
remediation: |
If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If
using executable arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_AUTHZ_ARGS variable.
--authorization-mode=Webhook
Based on your system, restart the kubelet service. For example,
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.3
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --client-ca-file
path: '{.authentication.x509.clientCAFile}'
remediation: |
If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to
the location of the client CA file.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_AUTHZ_ARGS variable.
--client-ca-file=<path/to/client-ca-file>
Based on your system, restart the kubelet service. For example,
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.4
text: "Verify that the --read-only-port argument is set to 0 (Manual)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
bin_op: or
test_items:
- flag: "--read-only-port"
path: '{.readOnlyPort}'
compare:
op: eq
value: 0
- flag: "--read-only-port"
path: '{.readOnlyPort}'
set: false
remediation: |
If using a Kubelet config file, edit the file to set `readOnlyPort` to 0.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--read-only-port=0
Based on your system, restart the kubelet service. For example,
systemctl daemon-reload
systemctl restart kubelet.service
scored: false
- id: 4.2.5
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --streaming-connection-idle-timeout
path: '{.streamingConnectionIdleTimeout}'
compare:
op: noteq
value: 0
- flag: --streaming-connection-idle-timeout
path: '{.streamingConnectionIdleTimeout}'
set: false
bin_op: or
remediation: |
If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a
value other than 0.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--streaming-connection-idle-timeout=5m
Based on your system, restart the kubelet service. For example,
systemctl daemon-reload
systemctl restart kubelet.service
scored: false
- id: 4.2.6
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --make-iptables-util-chains
path: '{.makeIPTablesUtilChains}'
compare:
op: eq
value: true
- flag: --make-iptables-util-chains
path: '{.makeIPTablesUtilChains}'
set: false
bin_op: or
remediation: |
If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
remove the --make-iptables-util-chains argument from the
KUBELET_SYSTEM_PODS_ARGS variable.
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.7
text: "Ensure that the --hostname-override argument is not set (Manual)"
# This is one of those properties that can only be set as a command line argument.
# To check if the property is set as expected, we need to parse the kubelet command
# instead reading the Kubelet Configuration file.
audit: "/bin/ps -fC $kubeletbin"
tests:
test_items:
- flag: --hostname-override
set: false
remediation: |
Edit the kubelet service file $kubeletsvc
on each worker node and remove the --hostname-override argument from the
KUBELET_SYSTEM_PODS_ARGS variable.
Based on your system, restart the kubelet service. For example,
systemctl daemon-reload
systemctl restart kubelet.service
scored: false
- id: 4.2.8
text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --event-qps
path: '{.eventRecordQPS}'
compare:
op: gte
value: 0
- flag: --event-qps
path: '{.eventRecordQPS}'
set: false
bin_op: or
remediation: |
If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
Based on your system, restart the kubelet service. For example,
systemctl daemon-reload
systemctl restart kubelet.service
scored: false
- id: 4.2.9
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --tls-cert-file
path: '{.tlsCertFile}'
- flag: --tls-private-key-file
path: '{.tlsPrivateKeyFile}'
remediation: |
If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
to the location of the corresponding private key file.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
--tls-cert-file=<path/to/tls-certificate-file>
--tls-private-key-file=<path/to/tls-key-file>
Based on your system, restart the kubelet service. For example,
systemctl daemon-reload
systemctl restart kubelet.service
scored: false
- id: 4.2.10
text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --rotate-certificates
path: '{.rotateCertificates}'
compare:
op: eq
value: true
- flag: --rotate-certificates
path: '{.rotateCertificates}'
set: false
bin_op: or
remediation: |
If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or
remove it altogether to use the default value.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
variable.
Based on your system, restart the kubelet service. For example,
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 4.2.11
text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
bin_op: or
test_items:
- flag: RotateKubeletServerCertificate
path: '{.featureGates.RotateKubeletServerCertificate}'
compare:
op: nothave
value: false
- flag: RotateKubeletServerCertificate
path: '{.featureGates.RotateKubeletServerCertificate}'
set: false
remediation: |
Edit the kubelet service file $kubeletsvc
on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
--feature-gates=RotateKubeletServerCertificate=true
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: false
- id: 4.2.12
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --tls-cipher-suites
path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
compare:
op: valid_elements
value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
remediation: |
If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
or to a subset of these values.
If using executable arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the --tls-cipher-suites parameter as follows, or to a subset of these values.
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: false
- id: 4.2.13
text: "Ensure that a limit is set on pod PIDs (Manual)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --pod-max-pids
path: '{.podPidsLimit}'
remediation: |
Decide on an appropriate level for this parameter and set it,
either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting.
scored: false
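# Illustrative sketch (not part of the benchmark file): a kubelet config fragment
# with a pod PID limit; 4096 is an assumed example value, not a CIS-mandated one.
#   apiVersion: kubelet.config.k8s.io/v1beta1
#   kind: KubeletConfiguration
#   podPidsLimit: 4096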
- id: 4.3
text: "kube-proxy"
checks:
- id: 4.3.1
text: "Ensure that the kube-proxy metrics service is bound to localhost (Automated)"
audit: "/bin/ps -fC $proxybin"
audit_config: "/bin/sh -c 'if test -e $proxykubeconfig; then cat $proxykubeconfig; fi'"
tests:
bin_op: or
test_items:
- flag: "--metrics-bind-address"
path: '{.metricsBindAddress}'
compare:
op: has
value: "127.0.0.1"
- flag: "--metrics-bind-address"
path: '{.metricsBindAddress}'
set: false
remediation: |
Modify or remove any values which bind the metrics service to a non-localhost address.
The default value is 127.0.0.1:10249.
scored: true
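# Illustrative sketch (not part of the benchmark file): a kube-proxy configuration
# fragment that keeps the metrics endpoint bound to localhost, matching the check above.
#   apiVersion: kubeproxy.config.k8s.io/v1alpha1
#   kind: KubeProxyConfiguration
#   metricsBindAddress: 127.0.0.1:10249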

559
cfg/cis-1.10/policies.yaml Normal file
View File

@ -0,0 +1,559 @@
---
controls:
version: "cis-1.10"
id: 5
text: "Kubernetes Policies"
type: "policies"
groups:
- id: 5.1
text: "RBAC and Service Accounts"
checks:
- id: 5.1.1
text: "Ensure that the cluster-admin role is only used where required (Automated)"
audit: |
kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name --no-headers | while read -r role_name role_binding subject
do
if [[ "${role_name}" != "cluster-admin" && "${role_binding}" == "cluster-admin" ]]; then
is_compliant="false"
else
is_compliant="true"
fi;
echo "**role_name: ${role_name} role_binding: ${role_binding} subject: ${subject} is_compliant: ${is_compliant}"
done
use_multiple_values: true
tests:
test_items:
- flag: "is_compliant"
compare:
op: eq
value: true
remediation: |
Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
if they need this role or if they could use a role with fewer privileges.
Where possible, first bind users to a lower privileged role and then remove the
clusterrolebinding to the cluster-admin role : kubectl delete clusterrolebinding [name]
Condition: is_compliant is false if role_name is not cluster-admin and role_binding is cluster-admin.
scored: true
- id: 5.1.2
text: "Minimize access to secrets (Automated)"
audit: "echo \"canGetListWatchSecretsAsSystemAuthenticated: $(kubectl auth can-i get,list,watch secrets --all-namespaces --as=system:authenticated)\""
tests:
test_items:
- flag: "canGetListWatchSecretsAsSystemAuthenticated"
compare:
op: eq
value: no
remediation: |
Where possible, remove get, list and watch access to Secret objects in the cluster.
scored: true
- id: 5.1.3
text: "Minimize wildcard use in Roles and ClusterRoles (Automated)"
audit: |
# Check Roles
kubectl get roles --all-namespaces -o custom-columns=ROLE_NAMESPACE:.metadata.namespace,ROLE_NAME:.metadata.name --no-headers | while read -r role_namespace role_name
do
role_rules=$(kubectl get role -n "${role_namespace}" "${role_name}" -o=json | jq -c '.rules')
if echo "${role_rules}" | grep -q "\[\"\*\"\]"; then
role_is_compliant="false"
else
role_is_compliant="true"
fi;
echo "**role_name: ${role_name} role_namespace: ${role_namespace} role_rules: ${role_rules} role_is_compliant: ${role_is_compliant}"
done
# Check ClusterRoles
kubectl get clusterroles -o custom-columns=CLUSTERROLE_NAME:.metadata.name --no-headers | while read -r clusterrole_name
do
clusterrole_rules=$(kubectl get clusterrole "${clusterrole_name}" -o=json | jq -c '.rules')
if echo "${clusterrole_rules}" | grep -q "\[\"\*\"\]"; then
clusterrole_is_compliant="false"
else
clusterrole_is_compliant="true"
fi;
echo "**clusterrole_name: ${clusterrole_name} clusterrole_rules: ${clusterrole_rules} clusterrole_is_compliant: ${clusterrole_is_compliant}"
done
use_multiple_values: true
tests:
bin_op: or
test_items:
- flag: "role_is_compliant"
compare:
op: eq
value: true
set: true
- flag: "clusterrole_is_compliant"
compare:
op: eq
value: true
set: true
remediation: |
Where possible replace any use of wildcards ["*"] in roles and clusterroles with specific
objects or actions.
Condition: role_is_compliant is false if ["*"] is found in rules.
Condition: clusterrole_is_compliant is false if ["*"] is found in rules.
scored: true
- id: 5.1.4
text: "Minimize access to create pods (Automated)"
audit: |
echo "canCreatePodsAsSystemAuthenticated: $(kubectl auth can-i create pods --all-namespaces --as=system:authenticated)"
tests:
test_items:
- flag: "canCreatePodsAsSystemAuthenticated"
compare:
op: eq
value: no
remediation: |
Where possible, remove create access to pod objects in the cluster.
scored: true
- id: 5.1.5
text: "Ensure that default service accounts are not actively used (Automated)"
audit: |
kubectl get serviceaccount --all-namespaces --field-selector metadata.name=default -o=json | jq -r '.items[] | " namespace: \(.metadata.namespace), kind: \(.kind), name: \(.metadata.name), automountServiceAccountToken: \(.automountServiceAccountToken | if . == null then "notset" else . end )"' | xargs -L 1
use_multiple_values: true
tests:
test_items:
- flag: "automountServiceAccountToken"
compare:
op: eq
value: false
set: true
remediation: |
Create explicit service accounts wherever a Kubernetes workload requires specific access
to the Kubernetes API server.
Modify the configuration of each default service account to include this value
`automountServiceAccountToken: false`.
scored: true
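# Illustrative sketch (not part of the benchmark file): a default ServiceAccount manifest
# with token automounting disabled, as described in the remediation above; the namespace is assumed.
#   apiVersion: v1
#   kind: ServiceAccount
#   metadata:
#     name: default
#     namespace: example-namespace
#   automountServiceAccountToken: false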
- id: 5.1.6
text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
audit: |
kubectl get pods --all-namespaces -o custom-columns=POD_NAMESPACE:.metadata.namespace,POD_NAME:.metadata.name,POD_SERVICE_ACCOUNT:.spec.serviceAccount,POD_IS_AUTOMOUNTSERVICEACCOUNTTOKEN:.spec.automountServiceAccountToken --no-headers | while read -r pod_namespace pod_name pod_service_account pod_is_automountserviceaccounttoken
do
# Retrieve automountServiceAccountToken's value for ServiceAccount and Pod, set to notset if null or <none>.
svacc_is_automountserviceaccounttoken=$(kubectl get serviceaccount -n "${pod_namespace}" "${pod_service_account}" -o json | jq -r '.automountServiceAccountToken' | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
pod_is_automountserviceaccounttoken=$(echo "${pod_is_automountserviceaccounttoken}" | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
if [ "${svacc_is_automountserviceaccounttoken}" = "false" ] && ( [ "${pod_is_automountserviceaccounttoken}" = "false" ] || [ "${pod_is_automountserviceaccounttoken}" = "notset" ] ); then
is_compliant="true"
elif [ "${svacc_is_automountserviceaccounttoken}" = "true" ] && [ "${pod_is_automountserviceaccounttoken}" = "false" ]; then
is_compliant="true"
else
is_compliant="false"
fi
echo "**namespace: ${pod_namespace} pod_name: ${pod_name} service_account: ${pod_service_account} pod_is_automountserviceaccounttoken: ${pod_is_automountserviceaccounttoken} svacc_is_automountServiceAccountToken: ${svacc_is_automountserviceaccounttoken} is_compliant: ${is_compliant}"
done
use_multiple_values: true
tests:
test_items:
- flag: "is_compliant"
compare:
op: eq
value: true
remediation: |
Modify the definition of ServiceAccounts and Pods which do not need to mount service
account tokens to disable it, with `automountServiceAccountToken: false`.
If both the ServiceAccount and the Pod's .spec specify a value for automountServiceAccountToken, the Pod spec takes precedence.
Condition: Pod is_compliant to true when
- ServiceAccount is automountServiceAccountToken: false and Pod is automountServiceAccountToken: false or notset
- ServiceAccount is automountServiceAccountToken: true or notset and Pod is automountServiceAccountToken: false
scored: true
- id: 5.1.7
text: "Avoid use of system:masters group (Manual)"
type: "manual"
remediation: |
Remove the system:masters group from all users in the cluster.
scored: false
- id: 5.1.8
text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
type: "manual"
remediation: |
Where possible, remove the impersonate, bind and escalate rights from subjects.
scored: false
- id: 5.1.9
text: "Minimize access to create persistent volumes (Manual)"
type: "manual"
remediation: |
Where possible, remove create access to PersistentVolume objects in the cluster.
scored: false
- id: 5.1.10
text: "Minimize access to the proxy sub-resource of nodes (Manual)"
type: "manual"
remediation: |
Where possible, remove access to the proxy sub-resource of node objects.
scored: false
- id: 5.1.11
text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)"
type: "manual"
remediation: |
Where possible, remove access to the approval sub-resource of certificatesigningrequests objects.
scored: false
- id: 5.1.12
text: "Minimize access to webhook configuration objects (Manual)"
type: "manual"
remediation: |
Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects
scored: false
- id: 5.1.13
text: "Minimize access to the service account token creation (Manual)"
type: "manual"
remediation: |
Where possible, remove access to the token sub-resource of serviceaccount objects.
scored: false
- id: 5.2
text: "Pod Security Standards"
checks:
- id: 5.2.1
text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
type: "manual"
remediation: |
Ensure that either Pod Security Admission or an external policy control system is in place
for every namespace which contains user workloads.
scored: false
- id: 5.2.2
text: "Minimize the admission of privileged containers (Manual)"
audit: |
kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
do
# Retrieve container(s) for each Pod.
kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o json | jq -c '.spec.containers[]' | while read -r container
do
# Retrieve container's name.
container_name=$(echo ${container} | jq -r '.name')
# Retrieve container's .securityContext.privileged value.
container_privileged=$(echo ${container} | jq -r '.securityContext.privileged' | sed -e 's/null/notset/g')
if [ "${container_privileged}" = "false" ] || [ "${container_privileged}" = "notset" ] ; then
echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_privileged: ${container_privileged} is_compliant: true"
else
echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_privileged: ${container_privileged} is_compliant: false"
fi
done
done
use_multiple_values: true
tests:
test_items:
- flag: "is_compliant"
compare:
op: eq
value: true
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of privileged containers.
Audit: the audit lists all pods' containers to retrieve their .securityContext.privileged value.
Condition: is_compliant is false if container's `.securityContext.privileged` is set to `true`.
Default: by default, there are no restrictions on the creation of privileged containers.
scored: false
- id: 5.2.3
text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
audit: |
kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
do
# Retrieve spec.hostPID for each pod.
pod_hostpid=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostPID}' 2>/dev/null)
if [ -z "${pod_hostpid}" ]; then
pod_hostpid="false"
echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostpid: ${pod_hostpid} is_compliant: true"
else
echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostpid: ${pod_hostpid} is_compliant: false"
fi
done
use_multiple_values: true
tests:
test_items:
- flag: "is_compliant"
compare:
op: eq
value: true
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of `hostPID` containers.
Audit: the audit retrieves each Pod's spec.hostPID.
Condition: is_compliant is false if Pod's spec.hostPID is set to `true`.
Default: by default, there are no restrictions on the creation of hostPID containers.
scored: false
- id: 5.2.4
text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
audit: |
kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
do
# Retrieve spec.hostIPC for each pod.
pod_hostipc=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostIPC}' 2>/dev/null)
if [ -z "${pod_hostipc}" ]; then
pod_hostipc="false"
echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostipc: ${pod_hostipc} is_compliant: true"
else
echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostipc: ${pod_hostipc} is_compliant: false"
fi
done
use_multiple_values: true
tests:
test_items:
- flag: "is_compliant"
compare:
op: eq
value: true
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of `hostIPC` containers.
Audit: the audit retrieves each Pod's spec.hostIPC.
Condition: is_compliant is false if Pod's spec.hostIPC is set to `true`.
Default: by default, there are no restrictions on the creation of hostIPC containers.
scored: false
- id: 5.2.5
text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
audit: |
kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
do
# Retrieve spec.hostNetwork for each pod.
pod_hostnetwork=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostNetwork}' 2>/dev/null)
if [ -z "${pod_hostnetwork}" ]; then
pod_hostnetwork="false"
echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostnetwork: ${pod_hostnetwork} is_compliant: true"
else
echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostnetwork: ${pod_hostnetwork} is_compliant: false"
fi
done
use_multiple_values: true
tests:
test_items:
- flag: "is_compliant"
compare:
op: eq
value: true
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of `hostNetwork` containers.
Audit: the audit retrieves each Pod's spec.hostNetwork.
Condition: is_compliant is false if Pod's spec.hostNetwork is set to `true`.
Default: by default, there are no restrictions on the creation of hostNetwork containers.
scored: false
- id: 5.2.6
text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
audit: |
kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
do
# Retrieve container(s) for each Pod.
kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o json | jq -c '.spec.containers[]' | while read -r container
do
# Retrieve container's name
container_name=$(echo ${container} | jq -r '.name')
# Retrieve container's .securityContext.allowPrivilegeEscalation
container_allowprivesc=$(echo ${container} | jq -r '.securityContext.allowPrivilegeEscalation' | sed -e 's/null/notset/g')
if [ "${container_allowprivesc}" = "false" ] || [ "${container_allowprivesc}" = "notset" ]; then
echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_allowprivesc: ${container_allowprivesc} is_compliant: true"
else
echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_allowprivesc: ${container_allowprivesc} is_compliant: false"
fi
done
done
use_multiple_values: true
tests:
test_items:
- flag: "is_compliant"
compare:
op: eq
value: true
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers with `.securityContext.allowPrivilegeEscalation` set to `true`.
Audit: the audit retrieves each Pod's container(s) `.securityContext.allowPrivilegeEscalation`.
Condition: is_compliant is false if container's `.securityContext.allowPrivilegeEscalation` is set to `true`.
Default: if notset, privilege escalation is allowed (defaults to true). However, if PSP/PSA is used with a `restricted` profile,
privilege escalation is explicitly disallowed unless configured otherwise.
scored: false
- id: 5.2.7
text: "Minimize the admission of root containers (Manual)"
type: "manual"
remediation: |
Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
or `MustRunAs` with the range of UIDs not including 0, is set.
scored: false
- id: 5.2.8
text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers with the `NET_RAW` capability.
scored: false
- id: 5.2.9
text: "Minimize the admission of containers with added capabilities (Manual)"
audit: |
kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
do
# Retrieve container(s) for each Pod.
kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o json | jq -c '.spec.containers[]' | while read -r container
do
# Retrieve container's name
container_name=$(echo ${container} | jq -r '.name')
# Retrieve container's added capabilities
container_caps_add=$(echo ${container} | jq -r '.securityContext.capabilities.add' | sed -e 's/null/notset/g')
# Set is_compliant to true by default.
is_compliant=true
caps_list=""
if [ "${container_caps_add}" != "notset" ]; then
# Loop through all caps and append caps_list, then set is_compliant to false.
for cap in $(echo "${container_caps_add}" | jq -r '.[]'); do
caps_list+="${cap},"
is_compliant=false
done
# Remove trailing comma for the last list member.
caps_list=${caps_list%,}
fi
if [ "${is_compliant}" = true ]; then
echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} container_caps_add: ${container_caps_add} is_compliant: true"
else
echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} container_caps_add: ${caps_list} is_compliant: false"
fi
done
done
use_multiple_values: true
tests:
test_items:
- flag: "is_compliant"
compare:
op: eq
value: true
remediation: |
Ensure that `allowedCapabilities` is not present in policies for the cluster unless
it is set to an empty array.
Audit: the audit retrieves each Pod's container(s) added capabilities.
Condition: is_compliant is false if any capabilities are added for a given container.
Default: Containers run with a default set of capabilities as assigned by the Container Runtime.
scored: false
- id: 5.2.10
text: "Minimize the admission of containers with capabilities assigned (Manual)"
type: "manual"
remediation: |
Review the use of capabilities in applications running on your cluster. Where a namespace
contains applications which do not require any Linux capabilities to operate, consider adding
a PSP which forbids the admission of containers which do not drop all capabilities.
scored: false
- id: 5.2.11
text: "Minimize the admission of Windows HostProcess containers (Manual)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
scored: false
- id: 5.2.12
text: "Minimize the admission of HostPath volumes (Manual)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers with `hostPath` volumes.
scored: false
- id: 5.2.13
text: "Minimize the admission of containers which use HostPorts (Manual)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers which use `hostPort` sections.
scored: false
- id: 5.3
text: "Network Policies and CNI"
checks:
- id: 5.3.1
text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
type: "manual"
remediation: |
If the CNI plugin in use does not support network policies, consideration should be given to
making use of a different plugin, or finding an alternate mechanism for restricting traffic
in the Kubernetes cluster.
scored: false
- id: 5.3.2
text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
type: "manual"
remediation: |
Follow the documentation and create NetworkPolicy objects as you need them.
scored: false
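# Illustrative sketch (not part of the benchmark file): a default-deny NetworkPolicy
# that could be created per namespace; the namespace name is an assumed example.
#   apiVersion: networking.k8s.io/v1
#   kind: NetworkPolicy
#   metadata:
#     name: default-deny-all
#     namespace: example-namespace
#   spec:
#     podSelector: {}
#     policyTypes: ["Ingress", "Egress"]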
- id: 5.4
text: "Secrets Management"
checks:
- id: 5.4.1
text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
type: "manual"
remediation: |
If possible, rewrite application code to read Secrets from mounted secret files, rather than
from environment variables.
scored: false
- id: 5.4.2
text: "Consider external secret storage (Manual)"
type: "manual"
remediation: |
Refer to the Secrets management options offered by your cloud provider or a third-party
secrets management solution.
scored: false
- id: 5.5
text: "Extensible Admission Control"
checks:
- id: 5.5.1
text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
type: "manual"
remediation: |
Follow the Kubernetes documentation and setup image provenance.
scored: false
- id: 5.7
text: "General Policies"
checks:
- id: 5.7.1
text: "Create administrative boundaries between resources using namespaces (Manual)"
type: "manual"
remediation: |
Follow the documentation and create namespaces for objects in your deployment as you need
them.
scored: false
- id: 5.7.2
text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
type: "manual"
remediation: |
Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
An example is as below:
securityContext:
seccompProfile:
type: RuntimeDefault
scored: false
- id: 5.7.3
text: "Apply SecurityContext to your Pods and Containers (Manual)"
type: "manual"
remediation: |
Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
Containers.
scored: false
- id: 5.7.4
text: "The default namespace should not be used (Manual)"
type: "manual"
remediation: |
Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
resources and that all new resources are created in a specific namespace.
scored: false

View File

@ -900,14 +900,11 @@ groups:
text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--bind-address"
compare:
op: eq
value: "127.0.0.1"
- flag: "--bind-address"
set: false
remediation: |
Edit the Controller Manager pod specification file $controllermanagerconf
on the control plane node and ensure the correct value for the --bind-address parameter
@ -935,14 +932,11 @@ groups:
text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--bind-address"
compare:
op: eq
value: "127.0.0.1"
- flag: "--bind-address"
set: false
remediation: |
Edit the Scheduler pod specification file $schedulerconf
on the control plane node and ensure the correct value for the --bind-address parameter

View File

@ -283,9 +283,12 @@ version_mapping:
"1.27": "cis-1.9"
"1.28": "cis-1.9"
"1.29": "cis-1.9"
"1.30": "cis-1.10"
"1.31": "cis-1.10"
"eks-1.0.1": "eks-1.0.1"
"eks-1.1.0": "eks-1.1.0"
"eks-1.2.0": "eks-1.2.0"
"eks-1.5.0": "eks-1.5.0"
"gke-1.0": "gke-1.0"
"gke-1.2.0": "gke-1.2.0"
"gke-1.6.0": "gke-1.6.0"
@ -369,6 +372,12 @@ target_mapping:
- "controlplane"
- "etcd"
- "policies"
"cis-1.10":
- "master"
- "node"
- "controlplane"
- "etcd"
- "policies"
"gke-1.0":
- "master"
- "node"
@ -406,6 +415,12 @@ target_mapping:
- "controlplane"
- "policies"
- "managedservices"
"eks-1.5.0":
- "master"
- "node"
- "controlplane"
- "policies"
- "managedservices"
"rh-0.7":
- "master"
- "node"

View File

@ -0,0 +1,9 @@
---
## Version-specific settings that override the values in cfg/config.yaml
## These settings are required if you are using the --asff option to report findings to AWS Security Hub
## AWS account number is required.
AWS_ACCOUNT: "<AWS_ACCT_NUMBER>"
## AWS region is required.
AWS_REGION: "<AWS_REGION>"
## EKS Cluster ARN is required.
CLUSTER_ARN: "<AWS_CLUSTER_ARN>"

View File

@ -0,0 +1,32 @@
---
controls:
version: "eks-1.5.0"
id: 2
text: "Control Plane Configuration"
type: "controlplane"
groups:
- id: 2.1
text: "Logging"
checks:
- id: 2.1.1
text: "Enable audit Logs (Automated)"
remediation: |
From Console:
1. For each EKS Cluster in each region;
2. Go to 'Amazon EKS' > 'Clusters' > '<Cluster Name>' > 'Configuration' > 'Logging'.
3. Click 'Manage logging'.
4. Ensure that all options are toggled to 'Enabled'.
API server: Enabled
Audit: Enabled
Authenticator: Enabled
Controller manager: Enabled
Scheduler: Enabled
5. Click 'Save Changes'.
From CLI:
# For each EKS Cluster in each region;
aws eks update-cluster-config \
--region '${REGION_CODE}' \
--name '${CLUSTER_NAME}' \
--logging '{"clusterLogging":[{"types":["api","audit","authenticator","controllerManager","scheduler"],"enabled":true}]}'
scored: false

View File

@ -0,0 +1,227 @@
---
controls:
version: "eks-1.5.0"
id: 5
text: "Managed Services"
type: "managedservices"
groups:
- id: 5.1
text: "Image Registry and Image Scanning"
checks:
- id: 5.1.1
text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider (Automated)"
type: "manual"
remediation: |
To utilize AWS ECR for Image scanning please follow the steps below:
To create a repository configured for scan on push (AWS CLI):
aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE
To edit the settings of an existing repository (AWS CLI):
aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE
Use the following steps to start a manual image scan using the AWS Management Console.
1. Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories.
2. From the navigation bar, choose the Region to create your repository in.
3. In the navigation pane, choose Repositories.
4. On the Repositories page, choose the repository that contains the image to scan.
5. On the Images page, select the image to scan and then choose Scan.
scored: false
- id: 5.1.2
text: "Minimize user access to Amazon ECR (Manual)"
type: "manual"
remediation: |
Before you use IAM to manage access to Amazon ECR, you should understand what IAM features
are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other
AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.
scored: false
- id: 5.1.3
text: "Minimize cluster access to read-only for Amazon ECR (Manual)"
type: "manual"
remediation: |
You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.
The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess
the following IAM policy permissions for Amazon ECR.
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:GetDownloadUrlForLayer",
"ecr:GetAuthorizationToken"
],
"Resource": "*"
}
]
}
scored: false
- id: 5.1.4
text: "Minimize Container Registries to only those approved (Manual)"
type: "manual"
remediation: |
To minimize AWS ECR container registries to only those approved, you can follow these steps:
1. Define your approval criteria: Determine the criteria that containers must meet to
be considered approved. This can include factors such as security, compliance,
compatibility, and other requirements.
2. Identify all existing ECR registries: Identify all ECR registries that are currently
being used in your organization.
3. Evaluate ECR registries against approval criteria: Evaluate each ECR registry
against your approval criteria to determine whether it should be approved or not.
This can be done by reviewing the registry settings and configuration, as well as
conducting security assessments and vulnerability scans.
4. Establish policies and procedures: Establish policies and procedures that outline
how ECR registries will be approved, maintained, and monitored. This should
include guidelines for developers to follow when selecting a registry for their
container images.
5. Implement access controls: Implement access controls to ensure that only
approved ECR registries are used to store and distribute container images. This
can be done by setting up IAM policies and roles that restrict access to
unapproved registries or create a whitelist of approved registries.
6. Monitor and review: Continuously monitor and review the use of ECR registries
to ensure that they continue to meet your approval criteria. This can include
periodically re-evaluating registries against the criteria defined in step 1.
scored: false
- id: 5.2
text: "Identity and Access Management (IAM)"
checks:
- id: 5.2.1
text: "Prefer using dedicated Amazon EKS Service Accounts (Automated)"
type: "manual"
remediation: |
With IAM roles for service accounts on Amazon EKS clusters, you can associate an
IAM role with a Kubernetes service account. This service account can then provide
AWS permissions to the containers in any pod that uses that service account. With this
feature, you no longer need to provide extended permissions to the worker node IAM
role so that pods on that node can call AWS APIs.
Applications must sign their AWS API requests with AWS credentials. This feature
provides a strategy for managing credentials for your applications, similar to the way
that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances.
Instead of creating and distributing your AWS credentials to the containers or using the
Amazon EC2 instances role, you can associate an IAM role with a Kubernetes service
account. The applications in the pod's containers can then use an AWS SDK or the
AWS CLI to make API requests to authorized AWS services.
The IAM roles for service accounts feature provides the following benefits:
- Least privilege - By using the IAM roles for service accounts feature, you no
longer need to provide extended permissions to the worker node IAM role so that
pods on that node can call AWS APIs. You can scope IAM permissions to a
service account, and only pods that use that service account have access to
those permissions. This feature also eliminates the need for third-party solutions
such as kiam or kube2iam.
- Credential isolation - A container can only retrieve credentials for the IAM role
that is associated with the service account to which it belongs. A container never
has access to credentials that are intended for another container that belongs to
another pod.
- Auditability - Access and event logging is available through CloudTrail to help
ensure retrospective auditing.
scored: false
- id: 5.3
text: "AWS EKS Key Management Service"
checks:
- id: 5.3.1
text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)"
type: "manual"
remediation: |
This process can only be performed during Cluster Creation.
Enable 'Secrets Encryption' during Amazon EKS cluster creation as described
in the links within the 'References' section.
scored: false
- id: 5.4
text: "Cluster Networking"
checks:
- id: 5.4.1
text: "Restrict Access to the Control Plane Endpoint (Automated)"
type: "manual"
remediation: |
By enabling private endpoint access to the Kubernetes API server, all communication
between your nodes and the API server stays within your VPC. You can also limit the IP
addresses that can access your API server from the internet, or completely disable
internet access to the API server.
With this in mind, you can update your cluster accordingly using the AWS CLI to ensure
that Private Endpoint Access is enabled.
If you choose to also enable Public Endpoint Access then you should also configure a
list of allowable CIDR blocks, resulting in restricted access from the internet. If you
specify no CIDR blocks, then the public API server endpoint is able to receive and
process requests from all IP addresses by defaulting to ['0.0.0.0/0'].
For example, the following command would enable private access to the Kubernetes
API as well as limited public access over the internet from a single IP address (noting
the /32 CIDR suffix):
aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs="203.0.113.5/32"
Note: The CIDR blocks specified cannot include reserved addresses.
There is a maximum number of CIDR blocks that you can specify. For more information,
see the EKS Service Quotas link in the references section.
For more detailed information, see the EKS Cluster Endpoint documentation link in the
references section.
scored: false
- id: 5.4.2
text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Automated)"
type: "manual"
remediation: |
By enabling private endpoint access to the Kubernetes API server, all communication
between your nodes and the API server stays within your VPC.
With this in mind, you can update your cluster accordingly using the AWS CLI to ensure
that Private Endpoint Access is enabled.
For example, the following command would enable private access to the Kubernetes
API and ensure that no public access is permitted:
aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false
Note: For more detailed information, see the EKS Cluster Endpoint documentation link
in the references section.
scored: false
- id: 5.4.3
text: "Ensure clusters are created with Private Nodes (Automated)"
type: "manual"
remediation: |
aws eks update-cluster-config \
--region region-code \
--name my-cluster \
--resources-vpc-config endpointPublicAccess=true,publicAccessCidrs="203.0.113.5/32",endpointPrivateAccess=true
scored: false
- id: 5.4.4
text: "Ensure Network Policy is Enabled and set as appropriate (Automated)"
type: "manual"
remediation: |
Utilize Calico or other network policy engine to segment and isolate your traffic.
scored: false
- id: 5.4.5
text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)"
type: "manual"
remediation: |
Your load balancer vendor can provide details on configuring HTTPS with TLS.
scored: false
- id: 5.5
text: "Authentication and Authorization"
checks:
- id: 5.5.1
text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156 or greater (Manual)"
type: "manual"
remediation: |
Refer to the 'Managing users or IAM roles for your cluster' in Amazon EKS documentation.
Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS
IAM Authenticator anymore.
The relevant AWS CLI commands, depending on the use case, are:
aws eks update-kubeconfig
aws eks get-token
scored: false

View File

@ -0,0 +1,6 @@
---
controls:
version: "eks-1.5.0"
id: 1
text: "Control Plane Components"
type: "master"

453
cfg/eks-1.5.0/node.yaml Normal file
View File

@ -0,0 +1,453 @@
---
controls:
version: "eks-1.5.0"
id: 3
text: "Worker Node Security Configuration"
type: "node"
groups:
- id: 3.1
text: "Worker Node Configuration Files"
checks:
- id: 3.1.1
text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
Run the below command (based on the file location on your system) on the each worker node.
For example,
chmod 644 $kubeletkubeconfig
scored: true
- id: 3.1.2
text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
tests:
test_items:
- flag: root:root
remediation: |
Run the below command (based on the file location on your system) on the each worker node.
For example,
chown root:root $kubeletkubeconfig
scored: true
- id: 3.1.3
text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
Run the following command (using the config file location identified in the Audit step)
chmod 644 $kubeletconf
scored: true
- id: 3.1.4
text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
tests:
test_items:
- flag: root:root
remediation: |
Run the following command (using the config file location identified in the Audit step)
chown root:root $kubeletconf
scored: true
- id: 3.2
text: "Kubelet"
checks:
- id: 3.2.1
text: "Ensure that the Anonymous Auth is Not Enabled (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: "--anonymous-auth"
path: '{.authentication.anonymous.enabled}'
set: true
compare:
op: eq
value: false
remediation: |
Remediation Method 1:
If configuring via the Kubelet config file, you first need to locate the file.
To do this, SSH to each node and execute the following command to find the kubelet
process:
ps -ef | grep kubelet
The output of the above command provides details of the active kubelet process, from
which we can see the location of the configuration file provided to the kubelet service
with the --config argument. The file can be viewed with a command such as more or
less, like so:
sudo less /path/to/kubelet-config.json
Disable Anonymous Authentication by setting the following parameter:
"authentication": { "anonymous": { "enabled": false } }
Remediation Method 2.
If using executable arguments, edit the kubelet service file on each worker node and
ensure the below parameters are part of the KUBELET_ARGS variable string.
For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
Bottlerocket AMIs, then this file can be found at
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
you may need to look up documentation for your chosen operating system to determine
which service manager is configured:
--anonymous-auth=false
For Both Remediation Steps:
Based on your system, restart the kubelet service and check the service status.
The following example is for operating systems using systemd, such as the Amazon
EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
command. If systemctl is not available then you will need to look up documentation for
your chosen operating system to determine which service manager is configured:
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: 3.2.2
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --authorization-mode
path: '{.authorization.mode}'
set: true
compare:
op: nothave
value: AlwaysAllow
remediation: |
Remediation Method 1:
If configuring via the Kubelet config file, you first need to locate the file.
To do this, SSH to each node and execute the following command to find the kubelet
process:
ps -ef | grep kubelet
The output of the above command provides details of the active kubelet process, from
which we can see the location of the configuration file provided to the kubelet service
with the --config argument. The file can be viewed with a command such as more or
less, like so:
sudo less /path/to/kubelet-config.json
Enable Webhook Authentication by setting the following parameter:
"authentication": { "webhook": { "enabled": true } }
Next, set the Authorization Mode to Webhook by setting the following parameter:
"authorization": { "mode": "Webhook }
Finer detail of the authentication and authorization fields can be found in the
Kubelet Configuration documentation.
Remediation Method 2:
If using executable arguments, edit the kubelet service file on each worker node and
ensure the below parameters are part of the KUBELET_ARGS variable string.
For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
Bottlerocket AMIs, then this file can be found at
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
you may need to look up documentation for your chosen operating system to determine
which service manager is configured:
--authentication-token-webhook
--authorization-mode=Webhook
For Both Remediation Steps:
Based on your system, restart the kubelet service and check the service status.
The following example is for operating systems using systemd, such as the Amazon
EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
command. If systemctl is not available then you will need to look up documentation for
your chosen operating system to determine which service manager is configured:
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: 3.2.3
text: "Ensure that a Client CA File is Configured (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --client-ca-file
path: '{.authentication.x509.clientCAFile}'
set: true
remediation: |
Remediation Method 1:
If configuring via the Kubelet config file, you first need to locate the file.
To do this, SSH to each node and execute the following command to find the kubelet
process:
ps -ef | grep kubelet
The output of the above command provides details of the active kubelet process, from
which we can see the location of the configuration file provided to the kubelet service
with the --config argument. The file can be viewed with a command such as more or
less, like so:
sudo less /path/to/kubelet-config.json
Configure the client certificate authority file by setting the following parameter
appropriately:
"authentication": { "x509": {"clientCAFile": <path/to/client-ca-file> } }"
Remediation Method 2:
If using executable arguments, edit the kubelet service file on each worker node and
ensure the below parameters are part of the KUBELET_ARGS variable string.
For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
Bottlerocket AMIs, then this file can be found at
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
you may need to look up documentation for your chosen operating system to determine
which service manager is configured:
--client-ca-file=<path/to/client-ca-file>
For Both Remediation Steps:
Based on your system, restart the kubelet service and check the service status.
The following example is for operating systems using systemd, such as the Amazon
EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
command. If systemctl is not available then you will need to look up documentation for
your chosen operating system to determine which service manager is configured:
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: 3.2.4
text: "Ensure that the --read-only-port is disabled (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: "--read-only-port"
path: '{.readOnlyPort}'
set: true
compare:
op: eq
value: 0
remediation: |
If modifying the Kubelet config file, edit the kubelet-config.json file
/etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to 0
"readOnlyPort": 0
If using executable arguments, edit the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
worker node and add the below parameter at the end of the KUBELET_ARGS variable
string.
--read-only-port=0
Based on your system, restart the kubelet service and check status
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: 3.2.5
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --streaming-connection-idle-timeout
path: '{.streamingConnectionIdleTimeout}'
set: true
compare:
op: noteq
value: 0
- flag: --streaming-connection-idle-timeout
path: '{.streamingConnectionIdleTimeout}'
set: false
bin_op: or
remediation: |
Remediation Method 1:
If modifying the Kubelet config file, edit the kubelet-config.json file
/etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to a
non-zero value in the format of #h#m#s
"streamingConnectionIdleTimeout": "4h0m0s"
You should ensure that the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not
specify a --streaming-connection-idle-timeout argument because it would
override the Kubelet config file.
Remediation Method 2:
If using executable arguments, edit the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
worker node and add the below parameter at the end of the KUBELET_ARGS variable
string.
--streaming-connection-idle-timeout=4h0m0s
Remediation Method 3:
If using the api configz endpoint consider searching for the status of
"streamingConnectionIdleTimeout": by extracting the live configuration from the
nodes running kubelet.
**See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
Live Cluster, and then rerun the curl statement from audit process to check for kubelet
configuration changes
kubectl proxy --port=8001 &
export HOSTNAME_PORT=localhost:8001 (example host and port number)
export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes")
curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"
For all three remediations:
Based on your system, restart the kubelet service and check status
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: 3.2.6
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --make-iptables-util-chains
path: '{.makeIPTablesUtilChains}'
set: true
compare:
op: eq
value: true
- flag: --make-iptables-util-chains
path: '{.makeIPTablesUtilChains}'
set: false
bin_op: or
remediation: |
Remediation Method 1:
If modifying the Kubelet config file, edit the kubelet-config.json file
/etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
true
"makeIPTablesUtilChains": true
Ensure that /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf
does not set the --make-iptables-util-chains argument because that would
override your Kubelet config file.
Remediation Method 2:
If using executable arguments, edit the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
worker node and add the below parameter at the end of the KUBELET_ARGS variable
string.
--make-iptables-util-chains=true
Remediation Method 3:
If using the api configz endpoint consider searching for the status of
"makeIPTablesUtilChains.: true by extracting the live configuration from the nodes
running kubelet.
**See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
Live Cluster, and then rerun the curl statement from audit process to check for kubelet
configuration changes
kubectl proxy --port=8001 &
export HOSTNAME_PORT=localhost:8001 (example host and port number)
export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes")
curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"
For all three remediations:
Based on your system, restart the kubelet service and check status
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: 3.2.7
text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --event-qps
path: '{.eventRecordQPS}'
set: true
compare:
op: gte
value: 0
remediation: |
If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate
level.
If using command line arguments, edit the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node
and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 3.2.8
text: "Ensure that the --rotate-certificates argument is not present or is set to true (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --rotate-certificates
path: '{.rotateCertificates}'
set: true
compare:
op: eq
value: true
- flag: --rotate-certificates
path: '{.rotateCertificates}'
set: false
bin_op: or
remediation: |
Remediation Method 1:
If modifying the Kubelet config file, edit the kubelet-config.json file
/etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
true
"RotateCertificate":true
Additionally, ensure that the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --rotate-certificates
executable argument to false because this would override the Kubelet
config file.
Remediation Method 2:
If using executable arguments, edit the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
worker node and add the below parameter at the end of the KUBELET_ARGS variable
string.
--rotate-certificates=true
scored: true
- id: 3.2.9
text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: RotateKubeletServerCertificate
path: '{.featureGates.RotateKubeletServerCertificate}'
set: true
compare:
op: eq
value: true
remediation: |
Remediation Method 1:
If modifying the Kubelet config file, edit the kubelet-config.json file
/etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
true
"featureGates": {
"RotateKubeletServerCertificate":true
},
Additionally, ensure that the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set
the --rotate-kubelet-server-certificate executable argument to false because
this would override the Kubelet config file.
Remediation Method 2:
If using executable arguments, edit the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
worker node and add the below parameter at the end of the KUBELET_ARGS variable
string.
--rotate-kubelet-server-certificate=true
Remediation Method 3:
If using the api configz endpoint consider searching for the status of
"RotateKubeletServerCertificate": by extracting the live configuration from the
nodes running kubelet.
**See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
Live Cluster, and then rerun the curl statement from audit process to check for kubelet
configuration changes
kubectl proxy --port=8001 &
export HOSTNAME_PORT=localhost:8001 (example host and port number)
export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes")
curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"
For all three remediation methods:
Restart the kubelet service and check status. The example below is for when using
systemctl to manage services:
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true

250
cfg/eks-1.5.0/policies.yaml Normal file
View File

@ -0,0 +1,250 @@
---
controls:
version: "eks-1.5.0"
id: 4
text: "Policies"
type: "policies"
groups:
- id: 4.1
text: "RBAC and Service Accounts"
checks:
- id: 4.1.1
text: "Ensure that the cluster-admin role is only used where required (Automated)"
type: "manual"
remediation: |
Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if
they need this role or if they could use a role with fewer privileges.
Where possible, first bind users to a lower privileged role and then remove the
clusterrolebinding to the cluster-admin role :
kubectl delete clusterrolebinding [name]
scored: false
- id: 4.1.2
text: "Minimize access to secrets (Automated)"
type: "manual"
remediation: |
Where possible, remove get, list and watch access to secret objects in the cluster.
scored: false
- id: 4.1.3
text: "Minimize wildcard use in Roles and ClusterRoles (Automated)"
type: "manual"
remediation: |
Where possible replace any use of wildcards in clusterroles and roles with specific
objects or actions.
scored: false
- id: 4.1.4
text: "Minimize access to create pods (Automated)"
type: "manual"
remediation: |
Where possible, remove create access to pod objects in the cluster.
scored: false
- id: 4.1.5
text: "Ensure that default service accounts are not actively used. ((Automated)"
type: "manual"
remediation: |
Create explicit service accounts wherever a Kubernetes workload requires specific
access to the Kubernetes API server.
Modify the configuration of each default service account to include this value
automountServiceAccountToken: false
Automatic remediation for the default account:
kubectl patch serviceaccount default -p
$'automountServiceAccountToken: false'
scored: false
- id: 4.1.6
text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
type: "manual"
remediation: |
Modify the definition of pods and service accounts which do not need to mount service
account tokens to disable it.
scored: false
- id: 4.1.7
text: "Avoid use of system:masters group (Automated)"
type: "manual"
remediation: |
Remove the system:masters group from all users in the cluster.
scored: false
- id: 4.1.8
text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
type: "manual"
remediation: |
Where possible, remove the impersonate, bind and escalate rights from subjects.
scored: false
- id: 4.2
text: "Pod Security Standards"
checks:
- id: 4.2.1
text: "Minimize the admission of privileged containers (Automated)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of privileged containers.
To enable PSA for a namespace in your cluster, set the pod-security.kubernetes.io/enforce
label with the policy value you want to enforce.
kubectl label --overwrite ns NAMESPACE pod-security.kubernetes.io/enforce=restricted
The above command enforces the restricted policy for the NAMESPACE namespace.
You can also enable Pod Security Admission for all your namespaces. For example:
kubectl label --overwrite ns --all pod-security.kubernetes.io/warn=baseline
scored: false
- id: 4.2.2
text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of hostPID containers.
scored: false
- id: 4.2.3
text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of hostIPC containers.
scored: false
- id: 4.2.4
text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of hostNetwork containers.
scored: false
- id: 4.2.5
text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
type: "manual"
remediation: |
Add policies to each namespace in the cluster which has user workloads to restrict the
admission of containers with .spec.allowPrivilegeEscalation set to true.
scored: false
- id: 4.3
text: "CNI Plugin"
checks:
- id: 4.3.1
text: "Ensure CNI plugin supports network policies (Manual)"
type: "manual"
remediation: |
As with RBAC policies, network policies should adhere to the policy of least privileged
access. Start by creating a deny all policy that restricts all inbound and outbound traffic
from a namespace or create a global policy using Calico.
scored: false
- id: 4.3.2
text: "Ensure that all Namespaces have Network Policies defined (Automated)"
type: "manual"
remediation: |
Follow the documentation and create NetworkPolicy objects as you need them.
scored: false
- id: 4.4
text: "Secrets Management"
checks:
- id: 4.4.1
text: "Prefer using secrets as files over secrets as environment variables (Automated)"
type: "manual"
remediation: |
If possible, rewrite application code to read secrets from mounted secret files, rather than
from environment variables.
scored: false
- id: 4.4.2
text: "Consider external secret storage (Manual)"
type: "manual"
remediation: |
Refer to the secrets management options offered by your cloud provider or a third-party
secrets management solution.
scored: false
- id: 4.5
text: "General Policies"
checks:
- id: 4.5.1
text: "Create administrative boundaries between resources using namespaces (Manual)"
type: "manual"
remediation: |
Follow the documentation and create namespaces for objects in your deployment as you need
them.
scored: false
- id: 4.5.2
text: "Apply Security Context to Your Pods and Containers (Manual)"
type: "manual"
remediation: |
As a best practice we recommend that you scope the binding for privileged pods to
service accounts within a particular namespace, e.g. kube-system, and limiting access
to that namespace. For all other serviceaccounts/namespaces, we recommend
implementing a more restrictive policy such as this:
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: restricted
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
spec:
privileged: false
# Required to prevent escalations to root.
allowPrivilegeEscalation: false
# This is redundant with non-root + disallow privilege escalation,
# but we can provide it for defense in depth.
requiredDropCapabilities:
- ALL
# Allow core volume types.
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
# Assume that persistentVolumes set up by the cluster admin are safe to use.
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
# Require the container to run without root privileges.
rule: 'MustRunAsNonRoot'
seLinux:
# This policy assumes the nodes are using AppArmor rather than SELinux.
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
readOnlyRootFilesystem: false
This policy prevents pods from running as privileged or escalating privileges. It also
restricts the types of volumes that can be mounted and the root supplemental groups
that can be added.
Another, albeit similar, approach is to start with policy that locks everything down and
incrementally add exceptions for applications that need looser restrictions such as
logging agents which need the ability to mount a host path.
scored: false
- id: 4.5.3
text: "The default namespace should not be used (Automated)"
type: "manual"
remediation: |
Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
resources and that all new resources are created in a specific namespace.
scored: false
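A hedged illustration for the Pod Security Admission remediation in check 4.2.1 above: the same label can be applied through client-go (already a dependency of this project) rather than kubectl. The namespace name and kubeconfig location below are placeholders, and this is a sketch rather than anything shipped with kube-bench.

```go
// Sketch: set the Pod Security Admission enforce label on one namespace, mirroring
// `kubectl label --overwrite ns NAMESPACE pod-security.kubernetes.io/enforce=restricted`.
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a kubeconfig at the default location (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	patch := []byte(`{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"restricted"}}}`)
	if _, err := client.CoreV1().Namespaces().Patch(
		context.TODO(), "my-namespace", types.StrategicMergePatchType, patch, metav1.PatchOptions{},
	); err != nil {
		panic(err)
	}
}
```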
View File
@ -186,7 +186,7 @@ groups:
checks:
- id: 4.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | wc -l) -gt 0; then journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
tests:
test_items:
- flag: "--anonymous-auth"
@ -209,7 +209,7 @@ groups:
- id: 4.2.2
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' '
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | wc -l) -gt 0; then journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' '
tests:
test_items:
- flag: --authorization-mode
@ -231,7 +231,7 @@ groups:
- id: 4.2.3
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | wc -l) -gt 0; then journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
tests:
test_items:
- flag: --client-ca-file
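The audit strings changed above are shell pipelines that kube-bench executes, after which the captured output is matched against the expected flag value. As a loose illustration of that pattern (simplified, not kube-bench's actual evaluation code), using the k3s kubelet audit from check 4.2.1:

```go
// Sketch: run a journalctl-based audit pipeline and look for the expected flag.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Audit command taken from check 4.2.1 above; it falls back to a default
	// value when no "Running kubelet" log line is present.
	audit := `if test $(journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | wc -l) -gt 0; ` +
		`then journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | tail -n1 | grep "anonymous-auth"; ` +
		`else echo "--anonymous-auth=false"; fi`

	out, err := exec.Command("/bin/sh", "-c", audit).CombinedOutput()
	if err != nil {
		fmt.Println("audit command failed:", err)
		return
	}
	// Simplified check: the real tests parse the flag's value rather than just
	// looking for a substring.
	if strings.Contains(string(out), "--anonymous-auth=false") {
		fmt.Println("PASS: anonymous authentication is disabled")
	} else {
		fmt.Println("WARN: --anonymous-auth=false not found in:", strings.TrimSpace(string(out)))
	}
}
```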
View File
@ -156,7 +156,7 @@ groups:
checks:
- id: 4.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | wc -l) -gt 0; then journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
tests:
test_items:
- flag: "--anonymous-auth"
@ -179,7 +179,7 @@ groups:
- id: 4.2.2
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' '
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | wc -l) -gt 0; then journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' '
tests:
test_items:
- flag: --authorization-mode
@ -202,7 +202,7 @@ groups:
- id: 4.2.3
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | wc -l) -gt 0; then journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
tests:
test_items:
- flag: --client-ca-file
View File
@ -152,7 +152,7 @@ groups:
checks:
- id: 4.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kubelet" | wc -l) -gt 0; then journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
tests:
test_items:
- flag: "--anonymous-auth"
@ -174,7 +174,7 @@ groups:
- id: 4.2.2
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' '
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | wc -l) -gt 0; then journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' '
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
tests:
test_items:
@ -197,7 +197,7 @@ groups:
- id: 4.2.3
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | wc -l) -gt 0; then journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
tests:
test_items:
- flag: --client-ca-file
View File
@ -152,7 +152,7 @@ groups:
checks:
- id: 4.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | wc -l) -gt 0; then journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
tests:
test_items:
- flag: "--anonymous-auth"
@ -174,7 +174,7 @@ groups:
- id: 4.2.2
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' '
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | wc -l) -gt 0; then journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' '
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
tests:
test_items:
@ -197,7 +197,7 @@ groups:
- id: 4.2.3
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | wc -l) -gt 0; then journalctl -m -u k3s -u k3s-agent | grep "Running kubelet" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
tests:
test_items:
- flag: --client-ca-file
View File
@ -263,7 +263,7 @@ groups:
- id: 1.1.19
text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
audit: "check_files_owner_in_dir.sh /node/etc/kubernetes/ssl"
audit: "check_files_owner_in_dir.sh /etc/kubernetes/ssl"
tests:
test_items:
- flag: "true"
View File
@ -93,7 +93,7 @@ groups:
- id: 4.1.7
text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)"
audit: "stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem"
audit: "stat -c permissions=%a /etc/kubernetes/ssl/kube-ca.pem"
tests:
test_items:
- flag: "permissions"
@ -107,7 +107,7 @@ groups:
- id: 4.1.8
text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
audit: "stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem"
audit: "stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem"
tests:
test_items:
- flag: root:root
View File
@ -272,7 +272,7 @@ groups:
- id: 1.1.19
text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
audit: "check_files_owner_in_dir.sh /node/etc/kubernetes/ssl"
audit: "check_files_owner_in_dir.sh /etc/kubernetes/ssl"
tests:
test_items:
- flag: "true"
@ -289,7 +289,7 @@ groups:
- id: 1.1.20
text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Automated)"
audit: |
if test -n "$(find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem')"; then find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' | xargs stat -c permissions=%a;else echo "File not found"; fi
if test -n "$(find /etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem')"; then find /etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' | xargs stat -c permissions=%a;else echo "File not found"; fi
tests:
bin_op: or
test_items:
@ -301,13 +301,13 @@ groups:
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' -exec chmod -R 600 {} +
find /etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' -exec chmod -R 600 {} +
scored: true
- id: 1.1.21
text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)"
audit: |
if test -n "$(find /node/etc/kubernetes/ssl/ -name '*.pem')"; then find /node/etc/kubernetes/ssl/ -name '*.pem' | xargs stat -c permissions=%a;else echo \"File not found\"; fi
if test -n "$(find /etc/kubernetes/ssl/ -name '*.pem')"; then find /etc/kubernetes/ssl/ -name '*.pem' | xargs stat -c permissions=%a;else echo \"File not found\"; fi
tests:
bin_op: or
test_items:
@ -319,7 +319,7 @@ groups:
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
find /node/etc/kubernetes/ssl/ -name '*key.pem' -exec chmod -R 600 {} +
find /etc/kubernetes/ssl/ -name '*key.pem' -exec chmod -R 600 {} +
scored: true
- id: 1.2
View File
@ -92,7 +92,7 @@ groups:
- id: 4.1.7
text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
audit: '/bin/sh -c "if test -e /node/etc/kubernetes/ssl/kube-ca.pem; then stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem; else echo \"File not found\"; fi"'
audit: '/bin/sh -c "if test -e /etc/kubernetes/ssl/kube-ca.pem; then stat -c permissions=%a /etc/kubernetes/ssl/kube-ca.pem; else echo \"File not found\"; fi"'
tests:
bin_op: or
test_items:
@ -107,7 +107,7 @@ groups:
scored: true
- id: 4.1.8
text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
audit: '/bin/sh -c "if test -e /node/etc/kubernetes/ssl/kube-ca.pem; then stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem; else echo \"File not found\"; fi"'
audit: '/bin/sh -c "if test -e /etc/kubernetes/ssl/kube-ca.pem; then stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem; else echo \"File not found\"; fi"'
tests:
bin_op: or
test_items:
View File
@ -300,7 +300,7 @@ groups:
- id: 1.1.19
text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
audit: "check_files_owner_in_dir.sh /node/etc/kubernetes/ssl"
audit: "check_files_owner_in_dir.sh /etc/kubernetes/ssl"
tests:
test_items:
- flag: "true"
@ -311,12 +311,12 @@ groups:
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chown -R root:root /etc/kubernetes/pki/
chown -R root:root /etc/kubernetes/ssl/
scored: true
- id: 1.1.20
text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)"
audit: "find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' | xargs stat -c permissions=%a"
audit: "find /etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' | xargs stat -c permissions=%a"
use_multiple_values: true
tests:
test_items:
@ -327,12 +327,12 @@ groups:
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' -exec chmod -R 600 {} +
find /etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' -exec chmod -R 600 {} +
scored: false
- id: 1.1.21
text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)"
audit: "find /node/etc/kubernetes/ssl/ -name '*key.pem' | xargs stat -c permissions=%a"
audit: "find /etc/kubernetes/ssl/ -name '*key.pem' | xargs stat -c permissions=%a"
use_multiple_values: true
tests:
test_items:
@ -343,7 +343,7 @@ groups:
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
find /node/etc/kubernetes/ssl/ -name '*key.pem' -exec chmod -R 600 {} +
find /etc/kubernetes/ssl/ -name '*key.pem' -exec chmod -R 600 {} +
scored: false
- id: 1.2
View File
@ -98,7 +98,7 @@ groups:
- id: 4.1.7
text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
audit: "stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem"
audit: "stat -c permissions=%a /etc/kubernetes/ssl/kube-ca.pem"
tests:
test_items:
- flag: "permissions"
@ -112,7 +112,7 @@ groups:
- id: 4.1.8
text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
audit: "stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem"
audit: "stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem"
tests:
test_items:
- flag: root:root
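Several of these audits compare `stat -c permissions=%a` output against "600 or more restrictive", meaning no permission bits outside owner read/write may be set. A small sketch of that comparison; the path is the kube-ca.pem file referenced above, and the fallback message mirrors the audit command's "File not found" output:

```go
// Sketch: check that a file's mode is 600 or more restrictive.
package main

import (
	"fmt"
	"os"
)

func main() {
	const path = "/etc/kubernetes/ssl/kube-ca.pem"

	info, err := os.Stat(path)
	if err != nil {
		fmt.Println("File not found")
		return
	}
	perm := info.Mode().Perm()

	// "600 or more restrictive": only bits within rw------- (0600) may be set.
	if perm&^0o600 == 0 {
		fmt.Printf("permissions=%o: PASS\n", perm)
	} else {
		fmt.Printf("permissions=%o: FAIL (looser than 600)\n", perm)
	}
}
```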
View File
@ -221,7 +221,7 @@ func (t testItem) evaluate(s string) *testOutput {
match, value, err := t.findValue(s)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
fmt.Fprint(os.Stderr, err.Error())
return failTestItem(err.Error())
}
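These small changes (along with the matching ones in check_test.go just below) resolve the go vet printf warning about passing a non-constant string as a format string, which the newer golangci-lint configured in this change reports. A hedged sketch of why the original calls are risky; the error message here is invented:

```go
// Sketch: why fmt.Fprintf(w, err.Error()) is unsafe when the message may contain '%'.
package main

import (
	"fmt"
	"os"
)

func main() {
	err := fmt.Errorf("value was 90%% of the limit") // message contains a literal '%'

	// Risky: the message is used as a format string, so the '%' is reinterpreted
	// and vet reports a non-constant format string.
	fmt.Fprintf(os.Stderr, err.Error())
	fmt.Fprintln(os.Stderr)

	// Safe alternatives: print the value as data, or use an explicit verb.
	fmt.Fprintln(os.Stderr, err.Error())
	fmt.Fprintf(os.Stderr, "%v\n", err)
}
```

The same reasoning applies to replacing t.Errorf(err.Error()) with t.Error(err.Error()) in the test files.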
View File
@ -290,7 +290,7 @@ func TestTestExecute(t *testing.T) {
c.check.AuditEnvOutput = c.strEnv
res, err := c.check.execute()
if err != nil {
t.Errorf(err.Error())
t.Error(err.Error())
}
if !res.testResult {
t.Errorf("Test ID %v - expected:%v, got:%v", c.check.ID, true, res)
@ -334,7 +334,7 @@ func TestTestExecuteExceptions(t *testing.T) {
c.Check.AuditConfigOutput = c.str
res, err := c.Check.execute()
if err != nil {
t.Errorf(err.Error())
t.Error(err.Error())
}
if res.testResult {
t.Errorf("expected:%v, got:%v", false, res)
View File
@ -245,6 +245,8 @@ func TestMapToCISVersion(t *testing.T) {
{kubeVersion: "1.27", succeed: true, exp: "cis-1.9"},
{kubeVersion: "1.28", succeed: true, exp: "cis-1.9"},
{kubeVersion: "1.29", succeed: true, exp: "cis-1.9"},
{kubeVersion: "1.30", succeed: true, exp: "cis-1.10"},
{kubeVersion: "1.31", succeed: true, exp: "cis-1.10"},
{kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"},
{kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"},
{kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"},
@ -461,6 +463,12 @@ func TestValidTargets(t *testing.T) {
targets: []string{"node", "policies", "controlplane", "managedservices"},
expected: true,
},
{
name: "eks-1.5.0 valid",
benchmark: "eks-1.5.0",
targets: []string{"node", "policies", "controlplane", "managedservices"},
expected: true,
},
}
for _, c := range cases {
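The added rows assert that Kubernetes 1.30 and 1.31 now resolve to the cis-1.10 benchmark and that eks-1.5.0 is accepted with the managed-service target set. A condensed illustration of the lookup these tests exercise; the map below is abbreviated, not the full table from the source:

```go
// Sketch: abbreviated Kubernetes-version-to-CIS-benchmark lookup.
package main

import "fmt"

func main() {
	cisVersionMap := map[string]string{
		"1.27": "cis-1.9",
		"1.28": "cis-1.9",
		"1.29": "cis-1.9",
		"1.30": "cis-1.10",
		"1.31": "cis-1.10",
	}
	for _, kubeVersion := range []string{"1.29", "1.30", "1.31"} {
		fmt.Printf("kube %s -> %s\n", kubeVersion, cisVersionMap[kubeVersion])
	}
}
```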
View File
@ -47,7 +47,7 @@ var runCmd = &cobra.Command{
exitWithError(fmt.Errorf("error validating targets: %v", err))
}
if len(targets) > 0 && !valid {
exitWithError(fmt.Errorf(fmt.Sprintf(`The specified --targets "%s" are not configured for the CIS Benchmark %s\n Valid targets %v`, strings.Join(targets, ","), bv, benchmarkVersionToTargetsMap[bv])))
exitWithError(fmt.Errorf(`The specified --targets "%s" are not configured for the CIS Benchmark %s\n Valid targets %v`, strings.Join(targets, ","), bv, benchmarkVersionToTargetsMap[bv]))
}
// Merge version-specific config if any.
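Dropping the nested fmt.Sprintf also means the joined target list is passed as an argument rather than becoming the format string of fmt.Errorf, so a '%' character in the data can no longer be misinterpreted. A small sketch of the difference, with made-up target names:

```go
// Sketch: single formatting pass versus Errorf(Sprintf(...)).
package main

import (
	"fmt"
	"strings"
)

func main() {
	targets := []string{"node", "50%-of-nodes"} // hypothetical values containing '%'

	// Double formatting: the joined string becomes Errorf's format string,
	// so the '%' inside the data is reinterpreted.
	bad := fmt.Errorf(fmt.Sprintf("invalid targets %s", strings.Join(targets, ",")))

	// Single formatting pass: the data stays data.
	good := fmt.Errorf("invalid targets %s", strings.Join(targets, ","))

	fmt.Println(bad)
	fmt.Println(good)
}
```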
View File
@ -489,7 +489,7 @@ func getPlatformBenchmarkVersion(platform Platform) string {
glog.V(3).Infof("getPlatformBenchmarkVersion platform: %s", platform)
switch platform.Name {
case "eks":
return "eks-1.2.0"
return "eks-1.5.0"
case "gke":
switch platform.Version {
case "1.15", "1.16", "1.17", "1.18", "1.19":
View File
@ -650,7 +650,7 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
args: args{
platform: Platform{Name: "eks"},
},
want: "eks-1.2.0",
want: "eks-1.5.0",
},
{
name: "gke 1.19",
View File
@ -13,29 +13,31 @@ Check the contents of the benchmark directory under `cfg` to see which targets a
The following table shows the valid targets based on the CIS Benchmark version.
| CIS Benchmark | Targets |
|-------------------|---------|
| cis-1.5 | master, controlplane, node, etcd, policies |
| cis-1.6 | master, controlplane, node, etcd, policies |
| cis-1.20 | master, controlplane, node, etcd, policies |
| cis-1.23 | master, controlplane, node, etcd, policies |
| cis-1.24 | master, controlplane, node, etcd, policies |
| cis-1.7 | master, controlplane, node, etcd, policies |
| cis-1.8 | master, controlplane, node, etcd, policies |
| cis-1.9 | master, controlplane, node, etcd, policies |
| gke-1.0 | master, controlplane, node, etcd, policies, managedservices |
| gke-1.2.0 | controlplane, node, policies, managedservices |
| gke-1.6.0 | controlplane, node, policies, managedservices |
| eks-1.0.1 | controlplane, node, policies, managedservices |
| eks-1.1.0 | controlplane, node, policies, managedservices |
| eks-1.2.0 | controlplane, node, policies, managedservices |
| ack-1.0 | master, controlplane, node, etcd, policies, managedservices |
| aks-1.0 | controlplane, node, policies, managedservices |
| rh-0.7 | master,node|
| rh-1.0 | master, controlplane, node, etcd, policies |
| rh-1.6 | master, controlplane, node, etcd, policies |
| cis-1.6-k3s | master, controlplane, node, etcd, policies |
| cis-1.24-microk8s | master, controlplane, node, etcd, policies |
| CIS Benchmark | Targets |
|----------------------|---------|
| cis-1.5 | master, controlplane, node, etcd, policies |
| cis-1.6 | master, controlplane, node, etcd, policies |
| cis-1.20 | master, controlplane, node, etcd, policies |
| cis-1.23 | master, controlplane, node, etcd, policies |
| cis-1.24 | master, controlplane, node, etcd, policies |
| cis-1.7 | master, controlplane, node, etcd, policies |
| cis-1.8 | master, controlplane, node, etcd, policies |
| cis-1.9 | master, controlplane, node, etcd, policies |
| cis-1.10 | master, controlplane, node, etcd, policies |
| gke-1.0 | master, controlplane, node, etcd, policies, managedservices |
| gke-1.2.0 | controlplane, node, policies, managedservices |
| gke-1.6.0 | controlplane, node, policies, managedservices |
| eks-1.0.1 | controlplane, node, policies, managedservices |
| eks-1.1.0 | controlplane, node, policies, managedservices |
| eks-1.2.0 | controlplane, node, policies, managedservices |
| eks-1.5.0 | controlplane, node, policies, managedservices |
| ack-1.0 | master, controlplane, node, etcd, policies, managedservices |
| aks-1.0 | controlplane, node, policies, managedservices |
| rh-0.7 | master,node|
| rh-1.0 | master, controlplane, node, etcd, policies |
| rh-1.6 | master, controlplane, node, etcd, policies |
| cis-1.6-k3s | master, controlplane, node, etcd, policies |
| cis-1.24-microk8s | master, controlplane, node, etcd, policies |
The following table shows the valid DISA STIG versions
View File
@ -18,12 +18,14 @@ Some defined by other hardening guides.
| CIS | [1.7](https://workbench.cisecurity.org/benchmarks/11107) | cis-1.7 | 1.25 |
| CIS | [1.8](https://workbench.cisecurity.org/benchmarks/12958) | cis-1.8 | 1.26 |
| CIS | [1.9](https://workbench.cisecurity.org/benchmarks/16828) | cis-1.9 | 1.27-1.29 |
| CIS | [1.10](https://workbench.cisecurity.org/benchmarks/17568) | cis-1.10 | 1.28-1.31 |
| CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE |
| CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE |
| CIS | [GKE 1.6.0](https://workbench.cisecurity.org/benchmarks/16093) | gke-1.6.0 | GKE |
| CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS |
| CIS | [EKS 1.1.0](https://workbench.cisecurity.org/benchmarks/6248) | eks-1.1.0 | EKS |
| CIS | [EKS 1.2.0](https://workbench.cisecurity.org/benchmarks/9681) | eks-1.2.0 | EKS |
| CIS | [EKS 1.5.0](https://workbench.cisecurity.org/benchmarks/17733) | eks-1.5.0 | EKS |
| CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK |
| CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS |
| RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 |
81
go.mod
View File
@ -1,48 +1,46 @@
module github.com/aquasecurity/kube-bench
go 1.22.0
toolchain go1.22.7
go 1.23.4
require (
github.com/aws/aws-sdk-go-v2 v1.32.3
github.com/aws/aws-sdk-go-v2/config v1.27.37
github.com/aws/aws-sdk-go-v2/service/securityhub v1.54.4
github.com/fatih/color v1.16.0
github.com/golang/glog v1.2.2
github.com/magiconair/properties v1.8.7
github.com/aws/aws-sdk-go-v2 v1.32.8
github.com/aws/aws-sdk-go-v2/config v1.28.10
github.com/aws/aws-sdk-go-v2/service/securityhub v1.55.3
github.com/fatih/color v1.18.0
github.com/golang/glog v1.2.4
github.com/magiconair/properties v1.8.9
github.com/onsi/ginkgo v1.16.5
github.com/pkg/errors v0.9.1
github.com/spf13/cobra v1.8.1
github.com/spf13/viper v1.18.2
github.com/stretchr/testify v1.9.0
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.10.0
gopkg.in/yaml.v2 v2.4.0
gorm.io/driver/postgres v1.5.9
gorm.io/driver/postgres v1.5.11
gorm.io/gorm v1.25.12
k8s.io/apimachinery v0.31.2
k8s.io/client-go v0.31.2
k8s.io/apimachinery v0.32.0
k8s.io/client-go v0.32.0
)
require (
github.com/aws/aws-sdk-go-v2/credentials v1.17.35 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.51 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.23.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.31.1 // indirect
github.com/aws/smithy-go v1.22.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.6 // indirect
github.com/aws/smithy-go v1.22.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
@ -66,7 +64,7 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
@ -79,24 +77,25 @@ require (
github.com/x448/float16 v0.8.4 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.21.0 // indirect
golang.org/x/term v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.31.2 // indirect
k8s.io/api v0.32.0 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
169
go.sum
View File
@ -1,31 +1,31 @@
github.com/aws/aws-sdk-go-v2 v1.32.3 h1:T0dRlFBKcdaUPGNtkBSwHZxrtis8CQU17UpNBZYd0wk=
github.com/aws/aws-sdk-go-v2 v1.32.3/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
github.com/aws/aws-sdk-go-v2/config v1.27.37 h1:xaoIwzHVuRWRHFI0jhgEdEGc8xE1l91KaeRDsWEIncU=
github.com/aws/aws-sdk-go-v2/config v1.27.37/go.mod h1:S2e3ax9/8KnMSyRVNd3sWTKs+1clJ2f1U6nE0lpvQRg=
github.com/aws/aws-sdk-go-v2/credentials v1.17.35 h1:7QknrZhYySEB1lEXJxGAmuD5sWwys5ZXNr4m5oEz0IE=
github.com/aws/aws-sdk-go-v2/credentials v1.17.35/go.mod h1:8Vy4kk7at4aPSmibr7K+nLTzG6qUQAUO4tW49fzUV4E=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 h1:C/d03NAmh8C4BZXhuRNboF/DqhBkBCeDiJDcaqIT5pA=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14/go.mod h1:7I0Ju7p9mCIdlrfS+JCgqcYD0VXz/N4yozsox+0o078=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 h1:Jw50LwEkVjuVzE1NzkhNKkBf9cRN7MtE1F/b2cOKTUM=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22/go.mod h1:Y/SmAyPcOTmpeVaWSzSKiILfXTVJwrGmYZhcRbhWuEY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 h1:981MHwBaRZM7+9QSR6XamDzF/o7ouUGxFzr+nVSIhrs=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22/go.mod h1:1RA1+aBEfn+CAB/Mh0MB6LsdCYCnjZm7tKXtnk499ZQ=
github.com/aws/aws-sdk-go-v2 v1.32.8 h1:cZV+NUS/eGxKXMtmyhtYPJ7Z4YLoI/V8bkTdRZfYhGo=
github.com/aws/aws-sdk-go-v2 v1.32.8/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U=
github.com/aws/aws-sdk-go-v2/config v1.28.10 h1:fKODZHfqQu06pCzR69KJ3GuttraRJkhlC8g80RZ0Dfg=
github.com/aws/aws-sdk-go-v2/config v1.28.10/go.mod h1:PvdxRYZ5Um9QMq9PQ0zHHNdtKK+he2NHtFCUFMXWXeg=
github.com/aws/aws-sdk-go-v2/credentials v1.17.51 h1:F/9Sm6Y6k4LqDesZDPJCLxQGXNNHd/ZtJiWd0lCZKRk=
github.com/aws/aws-sdk-go-v2/credentials v1.17.51/go.mod h1:TKbzCHm43AoPyA+iLGGcruXd4AFhF8tOmLex2R9jWNQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 h1:IBAoD/1d8A8/1aA8g4MBVtTRHhXRiNAgwdbo/xRM2DI=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23/go.mod h1:vfENuCM7dofkgKpYzuzf1VT1UKkA/YL3qanfBn7HCaA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 h1:jSJjSBzw8VDIbWv+mmvBSP8ezsztMYJGH+eKqi9AmNs=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27/go.mod h1:/DAhLbFRgwhmvJdOfSm+WwikZrCuUJiA4WgJG0fTNSw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 h1:l+X4K77Dui85pIj5foXDhPlnqcNRG2QUyvca300lXh8=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27/go.mod h1:KvZXSFEXm6x84yE8qffKvT3x8J5clWnVFXphpohhzJ8=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 h1:QFASJGfT8wMXtuP3D5CRmMjARHv9ZmzFUMJznHDOY3w=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5/go.mod h1:QdZ3OmoIjSX+8D1OPAzPxDfjXASbBMDsz9qvtyIhtik=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 h1:Xbwbmk44URTiHNx6PNo0ujDE6ERlsCKJD3u1zfnzAPg=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20/go.mod h1:oAfOFzUB14ltPZj1rWwRc3d/6OgD76R8KlvU3EqM9Fg=
github.com/aws/aws-sdk-go-v2/service/securityhub v1.54.4 h1:/dZV1aa+UyaP17M/gHQ6qHDEnvfHAF98CIXzerGQv9M=
github.com/aws/aws-sdk-go-v2/service/securityhub v1.54.4/go.mod h1:3Aq0KVVKwxbRdEywQbgQLnVrimltVKejsW1fVMnK2Uc=
github.com/aws/aws-sdk-go-v2/service/sso v1.23.1 h1:2jrVsMHqdLD1+PA4BA6Nh1eZp0Gsy3mFSB5MxDvcJtU=
github.com/aws/aws-sdk-go-v2/service/sso v1.23.1/go.mod h1:XRlMvmad0ZNL+75C5FYdMvbbLkd6qiqz6foR1nA1PXY=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1 h1:0L7yGCg3Hb3YQqnSgBTZM5wepougtL1aEccdcdYhHME=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1/go.mod h1:FnvDM4sfa+isJ3kDXIzAB9GAwVSzFzSy97uZ3IsHo4E=
github.com/aws/aws-sdk-go-v2/service/sts v1.31.1 h1:8K0UNOkZiK9Uh3HIF6Bx0rcNCftqGCeKmOaR7Gp5BSo=
github.com/aws/aws-sdk-go-v2/service/sts v1.31.1/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI=
github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 h1:cWno7lefSH6Pp+mSznagKCgfDGeZRin66UvYUqAkyeA=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8/go.mod h1:tPD+VjU3ABTBoEJ3nctu5Nyg4P4yjqSH5bJGGkY4+XE=
github.com/aws/aws-sdk-go-v2/service/securityhub v1.55.3 h1:TQ0sua3BwzGqHgEao1IwvJ8PAJ+OZPgJ5ByVU7vm314=
github.com/aws/aws-sdk-go-v2/service/securityhub v1.55.3/go.mod h1:6qzlBXc2heuoYIo9eU7/6klKvZKqhADl7Ceh0gp5jCg=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 h1:YqtxripbjWb2QLyzRK9pByfEDvgg95gpC2AyDq4hFE8=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.9/go.mod h1:lV8iQpg6OLOfBnqbGMBKYjilBlf633qwHnBEiMSPoHY=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 h1:6dBT1Lz8fK11m22R+AqfRsFn8320K0T5DTGxxOQBSMw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8/go.mod h1:/kiBvRQXBc6xeJTYzhSdGvJ5vm1tjaDEjH+MSeRJnlY=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.6 h1:VwhTrsTuVn52an4mXx29PqRzs2Dvu921NpGk7y43tAM=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.6/go.mod h1:+8h7PZb3yY5ftmVLD7ocEoE98hdc8PoKS0H3wfx1dlc=
github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro=
github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -34,8 +34,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@ -46,21 +46,22 @@ github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY=
github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@ -81,8 +82,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
@ -115,8 +116,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM=
github.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
@ -140,14 +141,14 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@ -170,8 +171,8 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@ -184,8 +185,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
@ -199,8 +201,8 @@ go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTV
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@ -211,16 +213,16 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -232,23 +234,23 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -259,11 +261,13 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
@ -273,32 +277,31 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8=
gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
gorm.io/driver/postgres v1.5.11 h1:ubBVAfbKEUld/twyKZ0IYn9rSQh448EdelLYk9Mv314=
gorm.io/driver/postgres v1.5.11/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0=
k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk=
k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=
k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
# This script is used to ensure the owner is set to root:root for
# the given directory and all the files in it
#
# inputs:
#   $1 = /full/path/to/directory
#
# outputs:
#   true/false
INPUT_DIR=$1

if [[ -z "${INPUT_DIR}" ]]; then
  echo "false"
  exit
fi

# The directory itself must be owned by root:root
if [[ $(stat -c %U:%G "${INPUT_DIR}") != "root:root" ]]; then
  echo "false"
  exit
fi

# Check the ownership of every file in the directory
statInfoLines=$(stat -c "%n %U:%G" "${INPUT_DIR}"/*)
while read -r statInfoLine; do
  f=$(echo "${statInfoLine}" | cut -d' ' -f1)
  p=$(echo "${statInfoLine}" | cut -d' ' -f2)

  # etcd serving certificates (kube-etcd-*.pem) may be owned by etcd:etcd instead
  if [[ $(basename "$f" .pem) == "kube-etcd-"* ]]; then
    if [[ "$p" != "root:root" && "$p" != "etcd:etcd" ]]; then
      echo "false"
      exit
    fi
  else
    if [[ "$p" != "root:root" ]]; then
      echo "false"
      exit
    fi
  fi
done <<< "${statInfoLines}"

echo "true"
exit
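
Once this helper is on the PATH inside the kube-bench image, a benchmark check can invoke it from its audit command and compare the output against the string "true". A minimal usage sketch; the target directory below is illustrative, not taken from this commit:

# Hypothetical invocation; /etc/kubernetes/pki is only an example path
check_files_owner_in_dir.sh /etc/kubernetes/pki
# prints "true" when the directory and every file in it are owned by root:root
# (kube-etcd-*.pem certificates may alternatively be owned by etcd:etcd); otherwise prints "false"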

View File

@ -18,9 +18,9 @@ spec:
"kube-bench",
"run",
"--targets",
"node",
"node,policies,managedservices,controlplane",
"--benchmark",
"eks-1.2.0",
"eks-1.5.0",
]
volumeMounts:
- name: var-lib-kubelet

View File

@ -11,7 +11,7 @@ spec:
spec:
containers:
- command: ["kube-bench"]
image: docker.io/aquasec/kube-bench:v0.9.1
image: docker.io/aquasec/kube-bench:v0.10.0
name: kube-bench
volumeMounts:
- name: var-lib-cni