Mirror of https://github.com/aquasecurity/kube-bench.git (synced 2024-11-26 01:49:28 +00:00)

Merge branch 'main' into eks-1-5
Commit 3d61d46c8f

.github/workflows/build.yml
@@ -14,7 +14,7 @@ on:
 - "LICENSE"
 - "NOTICE"
 env:
-GO_VERSION: "1.21"
+GO_VERSION: "1.22.7"
 KIND_VERSION: "v0.11.1"
 KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6"

.github/workflows/release.yml
@@ -5,7 +5,7 @@ on:
 tags:
 - "v*"
 env:
-GO_VERSION: "1.21"
+GO_VERSION: "1.22.7"
 KIND_VERSION: "v0.11.1"
 KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6"

@@ -1,4 +1,4 @@
-FROM golang:1.22.4 AS build
+FROM golang:1.22.7 AS build
 WORKDIR /go/src/github.com/aquasecurity/kube-bench/
 COPY makefile makefile
 COPY go.mod go.sum ./
@@ -17,11 +17,12 @@ RUN wget -O kubectl.sha256 "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/li
 RUN /bin/bash -c 'echo "$(<kubectl.sha256) /usr/local/bin/kubectl" | sha256sum -c -'
 RUN chmod +x /usr/local/bin/kubectl

-FROM alpine:3.20.0 AS run
+FROM alpine:3.20.3 AS run
 WORKDIR /opt/kube-bench/
-# add GNU ps for -C, -o cmd, and --no-headers support
+# add GNU ps for -C, -o cmd, --no-headers support and add findutils to get GNU xargs
 # https://github.com/aquasecurity/kube-bench/issues/109
-RUN apk --no-cache add procps
+# https://github.com/aquasecurity/kube-bench/issues/1656
+RUN apk --no-cache add procps findutils

 # Upgrading apk-tools to remediate CVE-2021-36159 - https://snyk.io/vuln/SNYK-ALPINE314-APKTOOLS-1533752
 # https://github.com/aquasecurity/kube-bench/issues/943
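The runtime stage needs GNU ps and GNU xargs rather than the BusyBox applets of a bare Alpine image: the benchmark's audits select processes by name, print only the command column, and feed JSON output through xargs one line at a time (issues #109 and #1656 referenced above). A minimal sketch of the kinds of invocations involved, with an illustrative process name, run inside the kube-bench container:

    # GNU ps from procps: select by command name, print only the command column, no header
    ps -C kube-apiserver -o cmd --no-headers

    # GNU xargs from findutils: run the downstream command once per input line (-L 1)
    printf 'ns=kube-system\nns=default\n' | xargs -L 1 echo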

@@ -1,4 +1,4 @@
-FROM golang:1.22.4 AS build
+FROM golang:1.22.7 AS build
 WORKDIR /go/src/github.com/aquasecurity/kube-bench/
 COPY makefile makefile
 COPY go.mod go.sum ./

@@ -1,4 +1,4 @@
-FROM golang:1.22.4 AS build
+FROM golang:1.22.7 AS build
 WORKDIR /go/src/github.com/aquasecurity/kube-bench/
 COPY makefile makefile
 COPY go.mod go.sum ./

@@ -345,16 +345,15 @@ groups:
 text: "Ensure that the --DenyServiceExternalIPs is set (Manual)"
 audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
 tests:
-bin_op: or
 test_items:
 - flag: "--enable-admission-plugins"
 compare:
-op: have
+op: has
 value: "DenyServiceExternalIPs"
 remediation: |
 Edit the API server pod specification file $apiserverconf
-on the control plane node and remove the `DenyServiceExternalIPs`
-from enabled admission plugins.
+on the control plane node and add the `DenyServiceExternalIPs` plugin
+to the enabled admission plugins, as such --enable-admission-plugin=DenyServiceExternalIPs.
 scored: false

 - id: 1.2.4

@@ -345,16 +345,15 @@ groups:
 text: "Ensure that the --DenyServiceExternalIPs is set (Manual)"
 audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
 tests:
-bin_op: or
 test_items:
 - flag: "--enable-admission-plugins"
 compare:
-op: have
+op: has
 value: "DenyServiceExternalIPs"
 remediation: |
 Edit the API server pod specification file $apiserverconf
-on the control plane node and remove the `DenyServiceExternalIPs`
-from enabled admission plugins.
+on the control plane node and add the `DenyServiceExternalIPs` plugin
+to the enabled admission plugins, as such --enable-admission-plugin=DenyServiceExternalIPs.
 scored: false

 - id: 1.2.4
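Both copies of this hunk flip the comparison operator from have to has and invert the remediation: the check looks for DenyServiceExternalIPs among the enabled admission plugins, so the fix is to add the plugin, not remove it. A hedged sketch of what the audit and remediation amount to (the manifest path is the kubeadm default and only an assumption; the benchmark substitutes $apiserverconf at run time):

    # audit side: confirm the running API server enables the plugin
    /bin/ps -ef | grep kube-apiserver | grep -v grep | grep -o 'enable-admission-plugins=[^ ]*'
    # expected to contain: DenyServiceExternalIPs

    # remediation side: append the plugin to the existing list in the pod spec, e.g. in
    # /etc/kubernetes/manifests/kube-apiserver.yaml (kubeadm default; adjust to your layout):
    #   --enable-admission-plugins=NodeRestriction,DenyServiceExternalIPs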

@@ -15,6 +15,7 @@ groups:
 Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
 implemented in place of client certificates.
 scored: false

 - id: 3.1.2
 text: "Service account token authentication should not be used for users (Manual)"
 type: "manual"
@@ -22,6 +23,7 @@ groups:
 Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
 in place of service account tokens.
 scored: false

 - id: 3.1.3
 text: "Bootstrap token authentication should not be used for users (Manual)"
 type: "manual"

@@ -189,7 +189,7 @@ groups:
 - id: 1.1.13
 text: "Ensure that the default administrative credential file permissions are set to 600 (Automated)"
 audit: |
-for adminconf in /etc/kubernetes/{admin.conf,super-admin.conf}; do if test -e $adminconf; then stat -c \"permissions=%a %n\" $adminconf; fi; done
+for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "permissions=%a %n" $adminconf; fi; done
 use_multiple_values: true
 tests:
 test_items:
@@ -207,7 +207,7 @@ groups:
 - id: 1.1.14
 text: "Ensure that the default administrative credential file ownership is set to root:root (Automated)"
 audit: |
-for adminconf in /tmp/{admin.conf,super-admin.conf}; do if test -e $adminconf; then stat -c "ownership=%U:%G %n" $adminconf; fi; done
+for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "ownership=%U:%G %n" $adminconf; fi; done
 use_multiple_values: true
 tests:
 test_items:
@@ -219,7 +219,7 @@ groups:
 Run the below command (based on the file location on your system) on the control plane node.
 For example, chown root:root /etc/kubernetes/admin.conf
 On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present.
-For example, chmod 600 /etc/kubernetes/super-admin.conf
+For example, chown root:root /etc/kubernetes/super-admin.conf
 scored: true

 - id: 1.1.15
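The corrected audits enumerate the two kubeconfig paths explicitly instead of relying on brace expansion (a shell feature plain sh does not guarantee), and 1.1.14 now looks in /etc/kubernetes rather than /tmp, with its remediation example switched from chmod to chown. A minimal sketch of the audit loop and the matching remediation, using the paths shown above:

    # audit: report mode and ownership only for the files that exist
    for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do
      if test -e "$adminconf"; then
        stat -c "permissions=%a ownership=%U:%G %n" "$adminconf"
      fi
    done

    # remediation on the control plane node; repeat for super-admin.conf if it exists (Kubernetes 1.29+)
    chmod 600 /etc/kubernetes/admin.conf
    chown root:root /etc/kubernetes/admin.conf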

@@ -360,12 +360,12 @@ groups:
 test_items:
 - flag: "--enable-admission-plugins"
 compare:
-op: have
+op: has
 value: "DenyServiceExternalIPs"
 remediation: |
 Edit the API server pod specification file $apiserverconf
-on the control plane node and remove the `DenyServiceExternalIPs`
-from enabled admission plugins.
+on the control plane node and add the `DenyServiceExternalIPs` plugin
+to the enabled admission plugins, as such --enable-admission-plugin=DenyServiceExternalIPs.
 scored: false

 - id: 1.2.4

@@ -18,7 +18,7 @@ groups:
 else
 is_compliant="true"
 fi;
-echo "**role_name: ${role_name} role_binding: ${rolebinding} subject: ${subject} is_compliant: ${is_compliant}"
+echo "**role_name: ${role_name} role_binding: ${role_binding} subject: ${subject} is_compliant: ${is_compliant}"
 done
 use_multiple_values: true
 tests:
@@ -34,6 +34,7 @@ groups:
 clusterrolebinding to the cluster-admin role : kubectl delete clusterrolebinding [name]
 Condition: is_compliant is false if rolename is not cluster-admin and rolebinding is cluster-admin.
 scored: true

 - id: 5.1.2
 text: "Minimize access to secrets (Automated)"
 audit: "echo \"canGetListWatchSecretsAsSystemAuthenticated: $(kubectl auth can-i get,list,watch secrets --all-namespaces --as=system:authenticated)\""
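The first hunk fixes a variable-name mismatch in the 5.1.1 audit: the loop populates role_binding, but the report line referenced ${rolebinding}, which is never set and therefore expands to an empty field. A reduced sketch of the behaviour, with placeholder values:

    role_name="cluster-admin"; role_binding="my-binding"; subject="admins"; is_compliant="true"
    # typo: ${rolebinding} is unset, so the field prints empty
    echo "**role_name: ${role_name} role_binding: ${rolebinding} subject: ${subject} is_compliant: ${is_compliant}"
    # corrected variable name
    echo "**role_name: ${role_name} role_binding: ${role_binding} subject: ${subject} is_compliant: ${is_compliant}"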

@@ -46,6 +47,7 @@ groups:
 remediation: |
 Where possible, remove get, list and watch access to Secret objects in the cluster.
 scored: true

 - id: 5.1.3
 text: "Minimize wildcard use in Roles and ClusterRoles (Automated)"
 audit: |
@@ -92,6 +94,7 @@ groups:
 Condition: role_is_compliant is false if ["*"] is found in rules.
 Condition: clusterrole_is_compliant is false if ["*"] is found in rules.
 scored: true

 - id: 5.1.4
 text: "Minimize access to create pods (Automated)"
 audit: |
@@ -106,7 +109,7 @@ groups:
 Where possible, remove create access to pod objects in the cluster.
 scored: true
 - id: 5.1.5
-text: "Ensure that default service accounts are not actively used. (Automated)"
+text: "Ensure that default service accounts are not actively used (Automated)"
 audit: |
 kubectl get serviceaccount --all-namespaces --field-selector metadata.name=default -o=json | jq -r '.items[] | " namespace: \(.metadata.namespace), kind: \(.kind), name: \(.metadata.name), automountServiceAccountToken: \(.automountServiceAccountToken | if . == null then "notset" else . end )"' | xargs -L 1
 use_multiple_values: true
@@ -123,17 +126,18 @@ groups:
 Modify the configuration of each default service account to include this value
 `automountServiceAccountToken: false`.
 scored: true

 - id: 5.1.6
 text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
 audit: |
 kubectl get pods --all-namespaces -o custom-columns=POD_NAMESPACE:.metadata.namespace,POD_NAME:.metadata.name,POD_SERVICE_ACCOUNT:.spec.serviceAccount,POD_IS_AUTOMOUNTSERVICEACCOUNTTOKEN:.spec.automountServiceAccountToken --no-headers | while read -r pod_namespace pod_name pod_service_account pod_is_automountserviceaccounttoken
 do
 # Retrieve automountServiceAccountToken's value for ServiceAccount and Pod, set to notset if null or <none>.
-svacc_is_automountserviceaccounttoken=$(kubectl get serviceaccount -n ${pod_namespace} ${pod_service_account} -o json | jq -r '.automountServiceAccountToken' | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
-pod_is_automountserviceaccounttoken=$(echo ${pod_is_automountserviceaccounttoken} | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
-if [[ "${svacc_is_automountserviceaccounttoken}" == "false" && ( "${pod_is_automountserviceaccounttoken}" == "false" || "${pod_is_automountserviceaccounttoken}" == "notset" ) ]]; then
+svacc_is_automountserviceaccounttoken=$(kubectl get serviceaccount -n "${pod_namespace}" "${pod_service_account}" -o json | jq -r '.automountServiceAccountToken' | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
+pod_is_automountserviceaccounttoken=$(echo "${pod_is_automountserviceaccounttoken}" | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
+if [ "${svacc_is_automountserviceaccounttoken}" = "false" ] && ( [ "${pod_is_automountserviceaccounttoken}" = "false" ] || [ "${pod_is_automountserviceaccounttoken}" = "notset" ] ); then
 is_compliant="true"
-elif [[ "${svacc_is_automountserviceaccounttoken}" == "true" && "${pod_is_automountserviceaccounttoken}" == "false" ]]; then
+elif [ "${svacc_is_automountserviceaccounttoken}" = "true" ] && [ "${pod_is_automountserviceaccounttoken}" = "false" ]; then
 is_compliant="true"
 else
 is_compliant="false"
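The 5.1.6 audit is also rewritten from bash-only [[ ... ]] tests with unquoted expansions to quoted POSIX [ ... ] tests, so it behaves the same when the audit string is run by a plain /bin/sh (for example in the Alpine-based image). A reduced sketch of the same compliance condition, with placeholder values standing in for the kubectl lookups:

    svacc_is_automountserviceaccounttoken="false"   # placeholder: would come from kubectl get serviceaccount
    pod_is_automountserviceaccounttoken="notset"    # placeholder: would come from kubectl get pods

    if [ "${svacc_is_automountserviceaccounttoken}" = "false" ] && \
       ( [ "${pod_is_automountserviceaccounttoken}" = "false" ] || [ "${pod_is_automountserviceaccounttoken}" = "notset" ] ); then
      is_compliant="true"
    elif [ "${svacc_is_automountserviceaccounttoken}" = "true" ] && [ "${pod_is_automountserviceaccounttoken}" = "false" ]; then
      is_compliant="true"
    else
      is_compliant="false"
    fi
    echo "is_compliant: ${is_compliant}"   # prints: is_compliant: true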

@@ -155,48 +159,56 @@ groups:
 - ServiceAccount is automountServiceAccountToken: false and Pod is automountServiceAccountToken: false or notset
 - ServiceAccount is automountServiceAccountToken: true notset and Pod is automountServiceAccountToken: false
 scored: true

 - id: 5.1.7
 text: "Avoid use of system:masters group (Manual)"
 type: "manual"
 remediation: |
 Remove the system:masters group from all users in the cluster.
 scored: false

 - id: 5.1.8
 text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
 type: "manual"
 remediation: |
 Where possible, remove the impersonate, bind and escalate rights from subjects.
 scored: false

 - id: 5.1.9
 text: "Minimize access to create persistent volumes (Manual)"
 type: "manual"
 remediation: |
 Where possible, remove create access to PersistentVolume objects in the cluster.
 scored: false

 - id: 5.1.10
 text: "Minimize access to the proxy sub-resource of nodes (Manual)"
 type: "manual"
 remediation: |
 Where possible, remove access to the proxy sub-resource of node objects.
 scored: false

 - id: 5.1.11
 text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)"
 type: "manual"
 remediation: |
 Where possible, remove access to the approval sub-resource of certificatesigningrequest objects.
 scored: false

 - id: 5.1.12
 text: "Minimize access to webhook configuration objects (Manual)"
 type: "manual"
 remediation: |
 Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects
 scored: false

 - id: 5.1.13
 text: "Minimize access to the service account token creation (Manual)"
 type: "manual"
 remediation: |
 Where possible, remove access to the token sub-resource of serviceaccount objects.
 scored: false

 - id: 5.2
 text: "Pod Security Standards"
 checks:
@@ -207,6 +219,7 @@ groups:
 Ensure that either Pod Security Admission or an external policy control system is in place
 for every namespace which contains user workloads.
 scored: false

 - id: 5.2.2
 text: "Minimize the admission of privileged containers (Manual)"
 type: "manual"
@@ -214,6 +227,7 @@ groups:
 Add policies to each namespace in the cluster which has user workloads to restrict the
 admission of privileged containers.
 scored: false

 - id: 5.2.3
 text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
 type: "manual"
@@ -221,6 +235,7 @@ groups:
 Add policies to each namespace in the cluster which has user workloads to restrict the
 admission of `hostPID` containers.
 scored: false

 - id: 5.2.4
 text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
 type: "manual"
@@ -228,6 +243,7 @@ groups:
 Add policies to each namespace in the cluster which has user workloads to restrict the
 admission of `hostIPC` containers.
 scored: false

 - id: 5.2.5
 text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
 type: "manual"
@@ -235,6 +251,7 @@ groups:
 Add policies to each namespace in the cluster which has user workloads to restrict the
 admission of `hostNetwork` containers.
 scored: false

 - id: 5.2.6
 text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
 type: "manual"
@@ -242,6 +259,7 @@ groups:
 Add policies to each namespace in the cluster which has user workloads to restrict the
 admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
 scored: false

 - id: 5.2.7
 text: "Minimize the admission of root containers (Manual)"
 type: "manual"
@@ -249,6 +267,7 @@ groups:
 Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
 or `MustRunAs` with the range of UIDs not including 0, is set.
 scored: false

 - id: 5.2.8
 text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
 type: "manual"
@@ -256,6 +275,7 @@ groups:
 Add policies to each namespace in the cluster which has user workloads to restrict the
 admission of containers with the `NET_RAW` capability.
 scored: false

 - id: 5.2.9
 text: "Minimize the admission of containers with added capabilities (Manual)"
 type: "manual"
@@ -263,6 +283,7 @@ groups:
 Ensure that `allowedCapabilities` is not present in policies for the cluster unless
 it is set to an empty array.
 scored: false

 - id: 5.2.10
 text: "Minimize the admission of containers with capabilities assigned (Manual)"
 type: "manual"
@@ -271,6 +292,7 @@ groups:
 contains applicaions which do not require any Linux capabities to operate consider adding
 a PSP which forbids the admission of containers which do not drop all capabilities.
 scored: false

 - id: 5.2.11
 text: "Minimize the admission of Windows HostProcess containers (Manual)"
 type: "manual"
@@ -278,6 +300,7 @@ groups:
 Add policies to each namespace in the cluster which has user workloads to restrict the
 admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
 scored: false

 - id: 5.2.12
 text: "Minimize the admission of HostPath volumes (Manual)"
 type: "manual"
@@ -285,6 +308,7 @@ groups:
 Add policies to each namespace in the cluster which has user workloads to restrict the
 admission of containers with `hostPath` volumes.
 scored: false

 - id: 5.2.13
 text: "Minimize the admission of containers which use HostPorts (Manual)"
 type: "manual"
@@ -292,6 +316,7 @@ groups:
 Add policies to each namespace in the cluster which has user workloads to restrict the
 admission of containers which use `hostPort` sections.
 scored: false

 - id: 5.3
 text: "Network Policies and CNI"
 checks:
@@ -303,12 +328,14 @@ groups:
 making use of a different plugin, or finding an alternate mechanism for restricting traffic
 in the Kubernetes cluster.
 scored: false

 - id: 5.3.2
 text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
 type: "manual"
 remediation: |
 Follow the documentation and create NetworkPolicy objects as you need them.
 scored: false

 - id: 5.4
 text: "Secrets Management"
 checks:
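Checks 5.3.1 and 5.3.2 remain manual; a quick, hedged convenience for reviewing them is to compare the namespaces that have NetworkPolicies against the full namespace list with plain kubectl:

    # namespaces with at least one NetworkPolicy defined
    kubectl get networkpolicy --all-namespaces
    # compare against the full namespace list
    kubectl get namespaces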
@@ -319,6 +346,7 @@ groups:
 If possible, rewrite application code to read Secrets from mounted secret files, rather than
 from environment variables.
 scored: false

 - id: 5.4.2
 text: "Consider external secret storage (Manual)"
 type: "manual"
@@ -326,6 +354,7 @@ groups:
 Refer to the Secrets management options offered by your cloud provider or a third-party
 secrets management solution.
 scored: false

 - id: 5.5
 text: "Extensible Admission Control"
 checks:
@@ -335,6 +364,7 @@ groups:
 remediation: |
 Follow the Kubernetes documentation and setup image provenance.
 scored: false

 - id: 5.7
 text: "General Policies"
 checks:
@@ -345,6 +375,7 @@ groups:
 Follow the documentation and create namespaces for objects in your deployment as you need
 them.
 scored: false

 - id: 5.7.2
 text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
 type: "manual"
@@ -355,6 +386,7 @@ groups:
 seccompProfile:
 type: RuntimeDefault
 scored: false

 - id: 5.7.3
 text: "Apply SecurityContext to your Pods and Containers (Manual)"
 type: "manual"
@@ -363,6 +395,7 @@ groups:
 suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
 Containers.
 scored: false

 - id: 5.7.4
 text: "The default namespace should not be used (Manual)"
 type: "manual"

@@ -164,13 +164,13 @@ node:
 - "/var/snap/microk8s/current/credentials/kubelet.config"
 - "/etc/kubernetes/kubeconfig-kubelet"
 - "/var/lib/rancher/rke2/agent/kubelet.kubeconfig"
-- "/var/lib/rancher/k3s/server/cred/admin.kubeconfig"
 - "/var/lib/rancher/k3s/agent/kubelet.kubeconfig"
 confs:
 - "/etc/kubernetes/kubelet-config.yaml"
 - "/var/lib/kubelet/config.yaml"
 - "/var/lib/kubelet/config.yml"
 - "/etc/kubernetes/kubelet/kubelet-config.json"
+- "/etc/kubernetes/kubelet/config.json"
 - "/etc/kubernetes/kubelet/config"
 - "/home/kubernetes/kubelet-config.yaml"
 - "/home/kubernetes/kubelet-config.yml"
@@ -190,7 +190,6 @@ node:
 - "/etc/systemd/system/snap.kubelet.daemon.service"
 - "/etc/systemd/system/snap.microk8s.daemon-kubelet.service"
 - "/etc/kubernetes/kubelet.yaml"
-- "/var/lib/rancher/rke2/agent/kubelet.kubeconfig"

 defaultconf: "/var/lib/kubelet/config.yaml"
 defaultsvc: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
@@ -445,6 +444,12 @@ target_mapping:
 - "controlplane"
 - "node"
 - "policies"
+"k3s-cis-1.8":
+- "master"
+- "etcd"
+- "controlplane"
+- "node"
+- "policies"
 "k3s-cis-1.23":
 - "master"
 - "etcd"
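The new target_mapping block registers the k3s-cis-1.8 benchmark with the usual five targets, so it can be requested explicitly at run time. A hedged usage sketch, assuming the current kube-bench CLI flags:

    # run the newly mapped benchmark, optionally restricted to a subset of its targets
    kube-bench run --benchmark k3s-cis-1.8 --targets master,etcd,policies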

@@ -21,7 +21,7 @@ groups:
 checks:
 - id: 3.2.1
 text: "Ensure that a minimal audit policy is created (Manual)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'"
 type: "manual"
 tests:
 test_items:
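From here on, every k3s audit swaps journalctl -D /var/log/journal (read one specific journal directory) for journalctl -m (merge all available journals), which also works when the journal is volatile or stored somewhere other than /var/log/journal. Each audit now follows the same pattern, sketched here against the k3s unit named in the diff:

    # most recent kube-apiserver startup line, taken from the merged journal
    journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'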

@@ -323,7 +323,7 @@ groups:
 checks:
 - id: 1.2.1
 text: "Ensure that the --anonymous-auth argument is set to false (Manual)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'"
 type: manual
 tests:
 test_items:
@@ -371,7 +371,7 @@ groups:

 - id: 1.2.4
 text: "Ensure that the --kubelet-https argument is set to true (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-https'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-https'"
 type: "skip"
 tests:
 bin_op: or
@@ -389,7 +389,7 @@ groups:

 - id: 1.2.5
 text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'"
 tests:
 bin_op: and
 test_items:
@@ -406,7 +406,7 @@ groups:

 - id: 1.2.6
 text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'"
 tests:
 test_items:
 - flag: "--kubelet-certificate-authority"
@@ -420,7 +420,7 @@ groups:

 - id: 1.2.7
 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
 tests:
 test_items:
 - flag: "--authorization-mode"
@@ -436,7 +436,7 @@ groups:

 - id: 1.2.8
 text: "Ensure that the --authorization-mode argument includes Node (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
 tests:
 test_items:
 - flag: "--authorization-mode"
@@ -451,7 +451,7 @@ groups:

 - id: 1.2.9
 text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
 tests:
 test_items:
 - flag: "--authorization-mode"
@@ -466,7 +466,7 @@ groups:

 - id: 1.2.10
 text: "Ensure that the admission control plugin EventRateLimit is set (Manual)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
 tests:
 test_items:
 - flag: "--enable-admission-plugins"
@@ -483,7 +483,7 @@ groups:

 - id: 1.2.11
 text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
 tests:
 bin_op: or
 test_items:
@@ -517,7 +517,7 @@ groups:

 - id: 1.2.13
 text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
 tests:
 bin_op: or
 test_items:
@@ -538,7 +538,7 @@ groups:

 - id: 1.2.14
 text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
 tests:
 bin_op: or
 test_items:
@@ -557,7 +557,7 @@ groups:

 - id: 1.2.15
 text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
 tests:
 bin_op: or
 test_items:
@@ -575,7 +575,7 @@ groups:

 - id: 1.2.16
 text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
 tests:
 test_items:
 - flag: "--enable-admission-plugins"
@@ -592,7 +592,7 @@ groups:

 - id: 1.2.17
 text: "Ensure that the --secure-port argument is not set to 0 (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'"
 tests:
 bin_op: or
 test_items:
@@ -610,7 +610,7 @@ groups:

 - id: 1.2.18
 text: "Ensure that the --profiling argument is set to false (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'"
 tests:
 test_items:
 - flag: "--profiling"
@@ -625,7 +625,7 @@ groups:

 - id: 1.2.19
 text: "Ensure that the --audit-log-path argument is set (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
 type: "skip"
 tests:
 test_items:
@@ -639,7 +639,7 @@ groups:

 - id: 1.2.20
 text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
 type: "skip"
 tests:
 test_items:
@@ -656,7 +656,7 @@ groups:

 - id: 1.2.21
 text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
 type: "skip"
 tests:
 test_items:
@@ -673,7 +673,7 @@ groups:

 - id: 1.2.22
 text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
 type: "skip"
 tests:
 test_items:
@@ -689,7 +689,7 @@ groups:

 - id: 1.2.23
 text: "Ensure that the --request-timeout argument is set as appropriate (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
 type: "skip"
 tests:
 test_items:
@@ -702,7 +702,7 @@ groups:

 - id: 1.2.24
 text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
 tests:
 bin_op: or
 test_items:
@@ -722,7 +722,7 @@ groups:

 - id: 1.2.25
 text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
 type: "skip"
 tests:
 test_items:
@@ -736,7 +736,7 @@ groups:

 - id: 1.2.26
 text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep -m1 'Running kube-apiserver'"
+audit: "journalctl -m -u k3s | grep -m1 'Running kube-apiserver'"
 tests:
 bin_op: and
 test_items:
@@ -754,7 +754,7 @@ groups:

 - id: 1.2.27
 text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2"
+audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2"
 tests:
 bin_op: and
 test_items:
@@ -772,7 +772,7 @@ groups:

 - id: 1.2.28
 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'"
 tests:
 test_items:
 - flag: "--client-ca-file"
@@ -785,7 +785,7 @@ groups:

 - id: 1.2.29
 text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'"
 tests:
 test_items:
 - flag: "--etcd-cafile"
@@ -798,7 +798,7 @@ groups:

 - id: 1.2.30
 text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'"
 tests:
 test_items:
 - flag: "--encryption-provider-config"
@@ -820,7 +820,7 @@ groups:

 - id: 1.2.32
 text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'"
+audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'"
 tests:
 test_items:
 - flag: "--tls-cipher-suites"
@@ -845,7 +845,7 @@ groups:
 checks:
 - id: 1.3.1
 text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'"
+audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'"
 tests:
 test_items:
 - flag: "--terminated-pod-gc-threshold"
@@ -857,7 +857,7 @@ groups:

 - id: 1.3.2
 text: "Ensure that the --profiling argument is set to false (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'"
+audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'"
 tests:
 test_items:
 - flag: "--profiling"
@@ -872,7 +872,7 @@ groups:

 - id: 1.3.3
 text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'"
+audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'"
 tests:
 test_items:
 - flag: "--use-service-account-credentials"
@@ -887,7 +887,7 @@ groups:

 - id: 1.3.4
 text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'"
+audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'"
 tests:
 test_items:
 - flag: "--service-account-private-key-file"
@@ -900,7 +900,7 @@ groups:

 - id: 1.3.5
 text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'"
+audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'"
 tests:
 test_items:
 - flag: "--root-ca-file"
@@ -912,7 +912,7 @@ groups:

 - id: 1.3.6
 text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'"
+audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'"
 type: "skip"
 tests:
 bin_op: or
@@ -953,7 +953,7 @@ groups:
 checks:
 - id: 1.4.1
 text: "Ensure that the --profiling argument is set to false (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1"
+audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1"
 tests:
 test_items:
 - flag: "--profiling"
@@ -969,7 +969,7 @@ groups:

 - id: 1.4.2
 text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
-audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'"
+audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'"
 tests:
 bin_op: or
 test_items:
@ -186,7 +186,7 @@ groups:
|
|||||||
checks:
|
checks:
|
||||||
- id: 4.2.1
|
- id: 4.2.1
|
||||||
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
|
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
|
||||||
audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
|
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "--anonymous-auth"
|
- flag: "--anonymous-auth"
|
||||||
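The apiserver-flag checks in this worker file wrap the same journalctl pattern in a shell conditional so one benchmark works on both node roles. Nothing new is introduced below; it is the audit string from check 4.2.1 above, re-wrapped across lines with comments for readability (the actual check keeps it on a single line):

        audit: |
          /bin/sh -c '
            if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then
              # server node: the unit logs a kube-apiserver startup line, read the flag from it
              journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep
            else
              # agent node: no kube-apiserver runs in this unit, so emit the K3s default
              # (--anonymous-auth=false) and let the test evaluate that value
              echo "--anonymous-auth=false"
            fi'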
@ -209,7 +209,7 @@ groups:
|
|||||||
|
|
||||||
- id: 4.2.2
|
- id: 4.2.2
|
||||||
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
|
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
|
||||||
audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' '
|
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' '
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --authorization-mode
|
- flag: --authorization-mode
|
||||||
@ -231,7 +231,7 @@ groups:
|
|||||||
|
|
||||||
- id: 4.2.3
|
- id: 4.2.3
|
||||||
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
|
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
|
||||||
audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
|
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --client-ca-file
|
- flag: --client-ca-file
|
||||||
@ -251,7 +251,7 @@ groups:
|
|||||||
|
|
||||||
- id: 4.2.4
|
- id: 4.2.4
|
||||||
text: "Ensure that the --read-only-port argument is set to 0 (Manual)"
|
text: "Ensure that the --read-only-port argument is set to 0 (Manual)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' "
|
audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' "
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
bin_op: or
|
||||||
test_items:
|
test_items:
|
||||||
@ -276,7 +276,7 @@ groups:
|
|||||||
|
|
||||||
- id: 4.2.5
|
- id: 4.2.5
|
||||||
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
|
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'"
|
audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --streaming-connection-idle-timeout
|
- flag: --streaming-connection-idle-timeout
|
||||||
@ -302,7 +302,7 @@ groups:
|
|||||||
|
|
||||||
- id: 4.2.6
|
- id: 4.2.6
|
||||||
text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)"
|
text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults'"
|
audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults'"
|
||||||
type: "skip"
|
type: "skip"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
@ -325,7 +325,7 @@ groups:
|
|||||||
|
|
||||||
- id: 4.2.7
|
- id: 4.2.7
|
||||||
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
|
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'"
|
audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'"
|
||||||
type: "skip"
|
type: "skip"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
@ -393,7 +393,7 @@ groups:
|
|||||||
|
|
||||||
- id: 4.2.10
|
- id: 4.2.10
|
||||||
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
|
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1"
|
audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --tls-cert-file
|
- flag: --tls-cert-file
|
||||||
|
@ -16,18 +16,28 @@ master:
|
|||||||
scheduler:
|
scheduler:
|
||||||
bins:
|
bins:
|
||||||
- containerd
|
- containerd
|
||||||
|
kubeconfig:
|
||||||
|
- /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig
|
||||||
|
|
||||||
controllermanager:
|
controllermanager:
|
||||||
bins:
|
bins:
|
||||||
- containerd
|
- containerd
|
||||||
|
kubeconfig:
|
||||||
|
- /var/lib/rancher/k3s/server/cred/controller.kubeconfig
|
||||||
|
|
||||||
|
|
||||||
etcd:
|
etcd:
|
||||||
bins:
|
bins:
|
||||||
- containerd
|
- containerd
|
||||||
datadirs:
|
|
||||||
- /var/lib/rancher/k3s/server/db/etcd
|
|
||||||
|
|
||||||
node:
|
etcd:
|
||||||
|
components:
|
||||||
|
- etcd
|
||||||
|
|
||||||
|
etcd:
|
||||||
|
confs: /var/lib/rancher/k3s/server/db/etcd/config
|
||||||
|
|
||||||
|
node:
|
||||||
components:
|
components:
|
||||||
- kubelet
|
- kubelet
|
||||||
- proxy
|
- proxy
|
||||||
@ -43,6 +53,6 @@ master:
|
|||||||
- containerd
|
- containerd
|
||||||
defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig
|
defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig
|
||||||
|
|
||||||
policies:
|
policies:
|
||||||
components:
|
components:
|
||||||
- policies
|
- policies
|
||||||
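Indentation is lost in this flattened view, so the nesting shown below is an assumption based on the usual kube-bench config.yaml layout; the paths themselves are verbatim from the hunk. A sketch of what the updated component map adds: kubeconfig locations for the scheduler and controller-manager, plus an etcd section that points the etcd checks at the embedded etcd configuration file:

    master:
      # ...existing entries unchanged...
      scheduler:
        bins:
          - containerd
        kubeconfig:
          - /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig
      controllermanager:
        bins:
          - containerd
        kubeconfig:
          - /var/lib/rancher/k3s/server/cred/controller.kubeconfig

    etcd:
      components:
        - etcd
      etcd:
        # embedded etcd configuration written by K3s; the etcd checks read it as $etcdconf
        confs: /var/lib/rancher/k3s/server/db/etcd/config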
|
@ -21,7 +21,7 @@ groups:
|
|||||||
checks:
|
checks:
|
||||||
- id: 3.2.1
|
- id: 3.2.1
|
||||||
text: "Ensure that a minimal audit policy is created (Automated)"
|
text: "Ensure that a minimal audit policy is created (Automated)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'"
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "--audit-policy-file"
|
- flag: "--audit-policy-file"
|
||||||
|
@ -10,128 +10,135 @@ groups:
|
|||||||
checks:
|
checks:
|
||||||
- id: 2.1
|
- id: 2.1
|
||||||
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
|
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
|
||||||
audit: "grep -A 4 'client-transport-security' $etcdconf | grep -E 'cert-file|key-file'"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
bin_op: and
|
bin_op: and
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "cert-file"
|
- path: "{.client-transport-security.cert-file}"
|
||||||
set: true
|
compare:
|
||||||
- flag: "key-file"
|
op: eq
|
||||||
set: true
|
value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt"
|
||||||
|
- path: "{.client-transport-security.key-file}"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key"
|
||||||
remediation: |
|
remediation: |
|
||||||
Follow the etcd service documentation and configure TLS encryption.
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
|
When running with embedded-etcd, K3s generates cert and key files for etcd.
|
||||||
on the master node and set the below parameters.
|
These are located in /var/lib/rancher/k3s/server/tls/etcd/.
|
||||||
--cert-file=</path/to/ca-file>
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
--key-file=</path/to/key-file>
|
has not been modified to use custom cert and key files.
|
||||||
scored: true
|
scored: false
|
||||||
|
|
||||||
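Assembled from the right-hand side of this hunk, the reworked check 2.1 stops grepping for flags and instead reads the whole embedded etcd configuration, querying it with path expressions (remediation text omitted for brevity):

      - id: 2.1
        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
        audit_config: "cat $etcdconf"
        tests:
          bin_op: and
          test_items:
            - path: "{.client-transport-security.cert-file}"
              compare:
                op: eq
                value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt"
            - path: "{.client-transport-security.key-file}"
              compare:
                op: eq
                value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key"
        scored: false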
- id: 2.2
|
- id: 2.2
|
||||||
text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
|
text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
|
||||||
audit: "grep -A 4 'client-transport-security' $etcdconf | grep 'client-cert-auth'"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "--client-cert-auth"
|
- path: "{.client-transport-security.client-cert-auth}"
|
||||||
set: true
|
|
||||||
- flag: "client-cert-auth"
|
|
||||||
compare:
|
compare:
|
||||||
op: eq
|
op: eq
|
||||||
value: true
|
value: true
|
||||||
set: true
|
|
||||||
remediation: |
|
remediation: |
|
||||||
Edit the etcd pod specification file $etcdconf on the master
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
node and set the below parameter.
|
When running with embedded-etcd, K3s sets the --client-cert-auth parameter to true.
|
||||||
--client-cert-auth="true"
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
scored: true
|
has not been modified to disable client certificate authentication.
|
||||||
|
scored: false
|
||||||
|
|
||||||
- id: 2.3
|
- id: 2.3
|
||||||
text: "Ensure that the --auto-tls argument is not set to true (Automated)"
|
text: "Ensure that the --auto-tls argument is not set to true (Automated)"
|
||||||
audit: "if grep -q '^auto-tls' $etcdconf;then grep '^auto-tls' $etcdconf;else echo 'notset';fi"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
bin_op: or
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "--auto-tls"
|
- path: "{.client-transport-security.auto-tls}"
|
||||||
set: false
|
|
||||||
- flag: "--auto-tls"
|
|
||||||
compare:
|
compare:
|
||||||
op: eq
|
op: eq
|
||||||
value: false
|
value: false
|
||||||
|
- path: "{.client-transport-security.auto-tls}"
|
||||||
|
set: false
|
||||||
remediation: |
|
remediation: |
|
||||||
Edit the etcd pod specification file $etcdconf on the master
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
|
When running with embedded-etcd, K3s does not set the --auto-tls parameter.
|
||||||
|
If this check fails, edit the etcd pod specification file $etcdconf on the master
|
||||||
node and either remove the --auto-tls parameter or set it to false.
|
node and either remove the --auto-tls parameter or set it to false.
|
||||||
--auto-tls=false
|
client-transport-security:
|
||||||
scored: true
|
auto-tls: false
|
||||||
|
scored: false
|
||||||
|
|
||||||
- id: 2.4
|
- id: 2.4
|
||||||
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
|
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
|
||||||
audit: "grep -A 4 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file'"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
bin_op: and
|
bin_op: and
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "cert-file"
|
- path: "{.peer-transport-security.cert-file}"
|
||||||
set: true
|
compare:
|
||||||
- flag: "key-file"
|
op: eq
|
||||||
set: true
|
value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt"
|
||||||
|
- path: "{.peer-transport-security.key-file}"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key"
|
||||||
remediation: |
|
remediation: |
|
||||||
Follow the etcd service documentation and configure peer TLS encryption as appropriate
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
for your etcd cluster.
|
When running with embedded-etcd, K3s generates peer cert and key files for etcd.
|
||||||
Then, edit the etcd pod specification file $etcdconf on the
|
These are located in /var/lib/rancher/k3s/server/tls/etcd/.
|
||||||
master node and set the below parameters.
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
--peer-client-file=</path/to/peer-cert-file>
|
has not been modified to use custom peer cert and key files.
|
||||||
--peer-key-file=</path/to/peer-key-file>
|
scored: false
|
||||||
scored: true
|
|
||||||
|
|
||||||
- id: 2.5
|
- id: 2.5
|
||||||
text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
|
text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
|
||||||
audit: "grep -A 4 'peer-transport-security' $etcdconf | grep 'client-cert-auth'"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "--client-cert-auth"
|
- path: "{.peer-transport-security.client-cert-auth}"
|
||||||
set: true
|
|
||||||
- flag: "client-cert-auth"
|
|
||||||
compare:
|
compare:
|
||||||
op: eq
|
op: eq
|
||||||
value: true
|
value: true
|
||||||
set: true
|
|
||||||
remediation: |
|
remediation: |
|
||||||
Edit the etcd pod specification file $etcdconf on the master
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
node and set the below parameter.
|
When running with embedded-etcd, K3s sets the --peer-cert-auth parameter to true.
|
||||||
--peer-client-cert-auth=true
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
scored: true
|
has not been modified to disable peer client certificate authentication.
|
||||||
|
scored: false
|
||||||
|
|
||||||
- id: 2.6
|
- id: 2.6
|
||||||
text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
|
text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
|
||||||
audit: "if grep -q '^peer-auto-tls' $etcdconf;then grep '^peer-auto-tls' $etcdconf;else echo 'notset';fi"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
bin_op: or
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "--peer-auto-tls"
|
- path: "{.peer-transport-security.auto-tls}"
|
||||||
set: false
|
|
||||||
- flag: "--peer-auto-tls"
|
|
||||||
compare:
|
compare:
|
||||||
op: eq
|
op: eq
|
||||||
value: false
|
value: false
|
||||||
set: true
|
- path: "{.peer-transport-security.auto-tls}"
|
||||||
|
set: false
|
||||||
remediation: |
|
remediation: |
|
||||||
Edit the etcd pod specification file $etcdconf on the master
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
|
When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter.
|
||||||
|
If this check fails, edit the etcd pod specification file $etcdconf on the master
|
||||||
node and either remove the --peer-auto-tls parameter or set it to false.
|
node and either remove the --peer-auto-tls parameter or set it to false.
|
||||||
--peer-auto-tls=false
|
peer-transport-security:
|
||||||
scored: true
|
auto-tls: false
|
||||||
|
scored: false
|
||||||
|
|
||||||
- id: 2.7
|
- id: 2.7
|
||||||
text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
|
text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
|
||||||
audit: "if grep -q 'trusted-ca-file' $etcdconf;then grep 'trusted-ca-file' $etcdconf;else echo 'notset';fi"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "trusted-ca-file"
|
- path: "{.peer-transport-security.trusted-ca-file}"
|
||||||
set: true
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt"
|
||||||
remediation: |
|
remediation: |
|
||||||
[Manual test]
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
Follow the etcd documentation and create a dedicated certificate authority setup for the
|
When running with embedded-etcd, K3s generates a unique certificate authority for etcd.
|
||||||
etcd service.
|
This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt.
|
||||||
Then, edit the etcd pod specification file $etcdconf on the
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
master node and set the below parameter.
|
has not been modified to use a shared certificate authority.
|
||||||
--trusted-ca-file=</path/to/ca-file>
|
|
||||||
scored: false
|
scored: false
|
||||||
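The path expressions used by checks 2.1 through 2.7 imply that $etcdconf is a YAML document with client-transport-security and peer-transport-security maps. The fragment below is illustrative only, built from the expected values quoted in those checks; the real file generated by K3s contains additional keys not shown here:

    # illustrative fragment of $etcdconf (/var/lib/rancher/k3s/server/db/etcd/config)
    client-transport-security:
      cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt
      key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key
      client-cert-auth: true
      # auto-tls is expected to be false or absent (check 2.3)
    peer-transport-security:
      cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt
      key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key
      client-cert-auth: true
      trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt
      # peer auto-tls is likewise expected to be false or absent (check 2.6)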
|
File diff suppressed because it is too large
@ -19,8 +19,8 @@ groups:
|
|||||||
op: bitmask
|
op: bitmask
|
||||||
value: "600"
|
value: "600"
|
||||||
remediation: |
|
remediation: |
|
||||||
Run the below command (based on the file location on your system) on the each worker node.
|
Not Applicable.
|
||||||
For example, chmod 600 $kubeletsvc
|
The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime.
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.2
|
- id: 4.1.2
|
||||||
@ -31,14 +31,13 @@ groups:
|
|||||||
test_items:
|
test_items:
|
||||||
- flag: root:root
|
- flag: root:root
|
||||||
remediation: |
|
remediation: |
|
||||||
Run the below command (based on the file location on your system) on the each worker node.
|
Not Applicable.
|
||||||
For example,
|
The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime.
|
||||||
chown root:root $kubeletsvc
|
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.3
|
- id: 4.1.3
|
||||||
text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
|
text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
|
||||||
audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig'
|
audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
bin_op: or
|
||||||
test_items:
|
test_items:
|
||||||
@ -51,11 +50,11 @@ groups:
|
|||||||
Run the below command (based on the file location on your system) on each worker node.
|
Run the below command (based on the file location on your system) on each worker node.
|
||||||
For example,
|
For example,
|
||||||
chmod 600 $proxykubeconfig
|
chmod 600 $proxykubeconfig
|
||||||
scored: false
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.4
|
- id: 4.1.4
|
||||||
text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
|
text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
|
||||||
audit: '/bin/sh -c ''if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'' '
|
audit: 'stat -c %U:%G $proxykubeconfig'
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
bin_op: or
|
||||||
test_items:
|
test_items:
|
||||||
@ -63,18 +62,17 @@ groups:
|
|||||||
remediation: |
|
remediation: |
|
||||||
Run the below command (based on the file location on your system) on each worker node.
|
Run the below command (based on the file location on your system) on each worker node.
|
||||||
For example, chown root:root $proxykubeconfig
|
For example, chown root:root $proxykubeconfig
|
||||||
scored: false
|
scored: true
|
||||||
|
|
||||||
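The new audits for 4.1.3 and 4.1.4 guard the stat call so the check degrades cleanly when kube-proxy runs without a kubeconfig. The hunk does not show the test_items for 4.1.3, so the sketch below reuses the permissions/bitmask pattern visible in 4.1.1 and 4.1.5; with op: bitmask, a value of 600 is meant to accept 600 or anything more restrictive (such as 400), while looser modes fail:

      - id: 4.1.3
        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
        # only stat the file if it exists; otherwise the audit produces no output
        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
        tests:
          bin_op: or
          test_items:
            # any further test items sit outside this hunk
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        scored: true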
- id: 4.1.5
|
- id: 4.1.5
|
||||||
text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
|
text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
|
||||||
audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubelet.kubeconfig '
|
audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "600"
|
- flag: "permissions"
|
||||||
compare:
|
compare:
|
||||||
op: eq
|
op: bitmask
|
||||||
value: "600"
|
value: "600"
|
||||||
set: true
|
|
||||||
remediation: |
|
remediation: |
|
||||||
Run the below command (based on the file location on your system) on each worker node.
|
Run the below command (based on the file location on your system) on each worker node.
|
||||||
For example,
|
For example,
|
||||||
@ -83,7 +81,7 @@ groups:
|
|||||||
|
|
||||||
- id: 4.1.6
|
- id: 4.1.6
|
||||||
text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
|
text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
|
||||||
audit: 'stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig'
|
audit: 'stat -c %U:%G $kubeletkubeconfig'
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "root:root"
|
- flag: "root:root"
|
||||||
@ -98,8 +96,8 @@ groups:
|
|||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.7
|
- id: 4.1.7
|
||||||
text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)"
|
text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
|
||||||
audit: "stat -c %a /var/lib/rancher/k3s/server/tls/server-ca.crt"
|
audit: "stat -c permissions=%a $kubeletcafile"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "permissions"
|
- flag: "permissions"
|
||||||
@ -109,22 +107,25 @@ groups:
|
|||||||
set: true
|
set: true
|
||||||
remediation: |
|
remediation: |
|
||||||
Run the following command to modify the file permissions of the
|
Run the following command to modify the file permissions of the
|
||||||
--client-ca-file chmod 600 <filename>
|
--client-ca-file chmod 600 $kubeletcafile
|
||||||
scored: false
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.8
|
- id: 4.1.8
|
||||||
text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
|
text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
|
||||||
audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls/client-ca.crt"
|
audit: "stat -c %U:%G $kubeletcafile"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: root:root
|
- flag: root:root
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: root:root
|
||||||
remediation: |
|
remediation: |
|
||||||
Run the following command to modify the ownership of the --client-ca-file.
|
Run the following command to modify the ownership of the --client-ca-file.
|
||||||
chown root:root <filename>
|
chown root:root $kubeletcafile
|
||||||
scored: false
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.9
|
- id: 4.1.9
|
||||||
text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)"
|
text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)"
|
||||||
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
|
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
|
||||||
type: "skip"
|
type: "skip"
|
||||||
tests:
|
tests:
|
||||||
@ -134,20 +135,20 @@ groups:
|
|||||||
op: bitmask
|
op: bitmask
|
||||||
value: "600"
|
value: "600"
|
||||||
remediation: |
|
remediation: |
|
||||||
Run the following command (using the config file location identified in the Audit step)
|
Not Applicable.
|
||||||
chmod 600 $kubeletconf
|
The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime.
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.10
|
- id: 4.1.10
|
||||||
text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)"
|
text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)"
|
||||||
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
|
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
|
||||||
type: "skip"
|
type: "skip"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: root:root
|
- flag: root:root
|
||||||
remediation: |
|
remediation: |
|
||||||
Run the following command (using the config file location identified in the Audit step)
|
Not Applicable.
|
||||||
chown root:root $kubeletconf
|
The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime.
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.2
|
- id: 4.2
|
||||||
@ -155,7 +156,7 @@ groups:
|
|||||||
checks:
|
checks:
|
||||||
- id: 4.2.1
|
- id: 4.2.1
|
||||||
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
|
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
|
||||||
audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
|
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "--anonymous-auth"
|
- flag: "--anonymous-auth"
|
||||||
@ -165,20 +166,20 @@ groups:
|
|||||||
value: false
|
value: false
|
||||||
set: true
|
set: true
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
|
By default, K3s sets the --anonymous-auth to false. If you have set this to a different value, you
|
||||||
`false`.
|
should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
|
||||||
If using executable arguments, edit the kubelet service file
|
kubelet-arg:
|
||||||
$kubeletsvc on each worker node and
|
- "anonymous-auth=true"
|
||||||
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
|
If using the command line, edit the K3s service file and remove the below argument.
|
||||||
`--anonymous-auth=false`
|
--kubelet-arg="anonymous-auth=true"
|
||||||
Based on your system, restart the kubelet service. For example,
|
Based on your system, restart the k3s service. For example,
|
||||||
systemctl daemon-reload
|
systemctl daemon-reload
|
||||||
systemctl restart kubelet.service
|
systemctl restart k3s.service
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
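As the remediation for 4.2.1 describes, the failing case is an explicit override in the K3s configuration; removing it restores the default of --anonymous-auth=false. A minimal sketch of the kind of entry to delete, with the file path and values taken exactly from the remediation text above:

    # /etc/rancher/k3s/config.yaml
    kubelet-arg:
      - "anonymous-auth=true"   # remove this entry, then: systemctl restart k3s.service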
- id: 4.2.2
|
- id: 4.2.2
|
||||||
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
|
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
|
||||||
audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' '
|
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' '
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --authorization-mode
|
- flag: --authorization-mode
|
||||||
@ -188,39 +189,33 @@ groups:
|
|||||||
value: AlwaysAllow
|
value: AlwaysAllow
|
||||||
set: true
|
set: true
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If
|
By default, K3s does not set the --authorization-mode to AlwaysAllow.
|
||||||
using executable arguments, edit the kubelet service file
|
If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
|
||||||
$kubeletsvc on each worker node and
|
kubelet-arg:
|
||||||
set the below parameter in KUBELET_AUTHZ_ARGS variable.
|
- "authorization-mode=AlwaysAllow"
|
||||||
--authorization-mode=Webhook
|
If using the command line, edit the K3s service file and remove the below argument.
|
||||||
Based on your system, restart the kubelet service. For example,
|
--kubelet-arg="authorization-mode=AlwaysAllow"
|
||||||
|
Based on your system, restart the k3s service. For example,
|
||||||
systemctl daemon-reload
|
systemctl daemon-reload
|
||||||
systemctl restart kubelet.service
|
systemctl restart k3s.service
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.2.3
|
- id: 4.2.3
|
||||||
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
|
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
|
||||||
audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
|
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --client-ca-file
|
- flag: --client-ca-file
|
||||||
path: '{.authentication.x509.clientCAFile}'
|
path: '{.authentication.x509.clientCAFile}'
|
||||||
set: true
|
set: true
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to
|
By default, K3s automatically provides the client ca certificate for the Kubelet.
|
||||||
the location of the client CA file.
|
It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt
|
||||||
If using command line arguments, edit the kubelet service file
|
|
||||||
$kubeletsvc on each worker node and
|
|
||||||
set the below parameter in KUBELET_AUTHZ_ARGS variable.
|
|
||||||
--client-ca-file=<path/to/client-ca-file>
|
|
||||||
Based on your system, restart the kubelet service. For example,
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.2.4
|
- id: 4.2.4
|
||||||
text: "Verify that the --read-only-port argument is set to 0 (Manual)"
|
text: "Verify that the --read-only-port argument is set to 0 (Automated)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' "
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
bin_op: or
|
||||||
test_items:
|
test_items:
|
||||||
@ -233,19 +228,20 @@ groups:
|
|||||||
path: '{.readOnlyPort}'
|
path: '{.readOnlyPort}'
|
||||||
set: false
|
set: false
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `readOnlyPort` to 0.
|
By default, K3s sets the --read-only-port to 0. If you have set this to a different value, you
|
||||||
If using command line arguments, edit the kubelet service file
|
should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
|
||||||
$kubeletsvc on each worker node and
|
kubelet-arg:
|
||||||
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
|
- "read-only-port=XXXX"
|
||||||
--read-only-port=0
|
If using the command line, edit the K3s service file and remove the below argument.
|
||||||
Based on your system, restart the kubelet service. For example,
|
--kubelet-arg="read-only-port=XXXX"
|
||||||
|
Based on your system, restart the k3s service. For example,
|
||||||
systemctl daemon-reload
|
systemctl daemon-reload
|
||||||
systemctl restart kubelet.service
|
systemctl restart k3s.service
|
||||||
scored: false
|
scored: true
|
||||||
|
|
||||||
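For 4.2.4 the combination of bin_op: or and a test item carrying set: false is what lets the check pass on a default node: in kube-bench, set: false succeeds when the flag or path is absent from the audited output, and K3s normally does not pass --read-only-port at all because its default is already 0. Only the second test item is visible in this hunk; the first, which compares the flag against 0, sits outside it:

        tests:
          bin_op: or                  # either item passing is enough
          test_items:
            # first item (flag value equal to 0) lies outside this hunk
            - path: '{.readOnlyPort}'
              set: false              # passes when the setting is not present at all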
- id: 4.2.5
|
- id: 4.2.5
|
||||||
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
|
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --streaming-connection-idle-timeout
|
- flag: --streaming-connection-idle-timeout
|
||||||
@ -258,21 +254,17 @@ groups:
|
|||||||
set: false
|
set: false
|
||||||
bin_op: or
|
bin_op: or
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a
|
If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
|
||||||
value other than 0.
|
kubelet-arg:
|
||||||
If using command line arguments, edit the kubelet service file
|
- "streaming-connection-idle-timeout=5m"
|
||||||
$kubeletsvc on each worker node and
|
If using the command line, run K3s with --kubelet-arg="streaming-connection-idle-timeout=5m".
|
||||||
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
|
Based on your system, restart the k3s service. For example,
|
||||||
--streaming-connection-idle-timeout=5m
|
systemctl restart k3s.service
|
||||||
Based on your system, restart the kubelet service. For example,
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
scored: false
|
scored: false
|
||||||
|
|
||||||
- id: 4.2.6
|
- id: 4.2.6
|
||||||
text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)"
|
text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults'"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
type: "skip"
|
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --protect-kernel-defaults
|
- flag: --protect-kernel-defaults
|
||||||
@ -282,20 +274,16 @@ groups:
|
|||||||
value: true
|
value: true
|
||||||
set: true
|
set: true
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`.
|
If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter.
|
||||||
If using command line arguments, edit the kubelet service file
|
protect-kernel-defaults: true
|
||||||
$kubeletsvc on each worker node and
|
If using the command line, run K3s with --protect-kernel-defaults=true.
|
||||||
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
|
Based on your system, restart the k3s service. For example,
|
||||||
--protect-kernel-defaults=true
|
systemctl restart k3s.service
|
||||||
Based on your system, restart the kubelet service. For example:
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.2.7
|
- id: 4.2.7
|
||||||
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
|
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
type: "skip"
|
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --make-iptables-util-chains
|
- flag: --make-iptables-util-chains
|
||||||
@ -309,39 +297,31 @@ groups:
|
|||||||
set: false
|
set: false
|
||||||
bin_op: or
|
bin_op: or
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
|
If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter.
|
||||||
If using command line arguments, edit the kubelet service file
|
kubelet-arg:
|
||||||
$kubeletsvc on each worker node and
|
- "make-iptables-util-chains=true"
|
||||||
remove the --make-iptables-util-chains argument from the
|
If using the command line, run K3s with --kubelet-arg="make-iptables-util-chains=true".
|
||||||
KUBELET_SYSTEM_PODS_ARGS variable.
|
Based on your system, restart the k3s service. For example,
|
||||||
Based on your system, restart the kubelet service. For example:
|
systemctl restart k3s.service
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
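The remediations for 4.2.5 through 4.2.7 all land in the same place, so they can be combined in one K3s configuration file. The keys and values below are taken verbatim from those remediation texts; grouping them into a single file is the only thing added here:

    # /etc/rancher/k3s/config.yaml
    protect-kernel-defaults: true                      # 4.2.6
    kubelet-arg:
      - "streaming-connection-idle-timeout=5m"         # 4.2.5
      - "make-iptables-util-chains=true"               # 4.2.7
    # apply with: systemctl restart k3s.service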
- id: 4.2.8
|
- id: 4.2.8
|
||||||
text: "Ensure that the --hostname-override argument is not set (Manual)"
|
text: "Ensure that the --hostname-override argument is not set (Automated)"
|
||||||
# This is one of those properties that can only be set as a command line argument.
|
|
||||||
# To check if the property is set as expected, we need to parse the kubelet command
|
|
||||||
# instead reading the Kubelet Configuration file.
|
|
||||||
audit: "/bin/ps -fC $kubeletbin "
|
|
||||||
type: "skip"
|
type: "skip"
|
||||||
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --hostname-override
|
- flag: --hostname-override
|
||||||
set: false
|
set: false
|
||||||
remediation: |
|
remediation: |
|
||||||
Edit the kubelet service file $kubeletsvc
|
Not Applicable.
|
||||||
on each worker node and remove the --hostname-override argument from the
|
By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is to comply
|
||||||
KUBELET_SYSTEM_PODS_ARGS variable.
|
with cloud providers that require this flag to ensure that hostname matches node names.
|
||||||
Based on your system, restart the kubelet service. For example,
|
scored: true
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
scored: false
|
|
||||||
|
|
||||||
- id: 4.2.9
|
- id: 4.2.9
|
||||||
text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
|
text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
|
||||||
audit: "/bin/ps -fC $kubeletbin"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
audit_config: "/bin/cat $kubeletconf"
|
audit_config: "/bin/cat $kubeletconf"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
@ -351,18 +331,18 @@ groups:
|
|||||||
op: eq
|
op: eq
|
||||||
value: 0
|
value: 0
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
|
By default, K3s sets the event-qps to 0. Should you wish to change this,
|
||||||
If using command line arguments, edit the kubelet service file
|
If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
|
||||||
$kubeletsvc on each worker node and
|
kubelet-arg:
|
||||||
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
|
- "event-qps=<value>"
|
||||||
Based on your system, restart the kubelet service. For example,
|
If using the command line, run K3s with --kubelet-arg="event-qps=<value>".
|
||||||
systemctl daemon-reload
|
Based on your system, restart the k3s service. For example,
|
||||||
systemctl restart kubelet.service
|
systemctl restart k3s.service
|
||||||
scored: false
|
scored: false
|
||||||
|
|
||||||
- id: 4.2.10
|
- id: 4.2.10
|
||||||
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
|
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --tls-cert-file
|
- flag: --tls-cert-file
|
||||||
@ -370,23 +350,19 @@ groups:
|
|||||||
- flag: --tls-private-key-file
|
- flag: --tls-private-key-file
|
||||||
path: '/var/lib/rancher/k3s/agent/serving-kubelet.key'
|
path: '/var/lib/rancher/k3s/agent/serving-kubelet.key'
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
|
By default, K3s automatically provides the TLS certificate and private key for the Kubelet.
|
||||||
of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
|
They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key
|
||||||
to the location of the corresponding private key file.
|
If for some reason you need to provide your own certificate and key, you can set the
|
||||||
If using command line arguments, edit the kubelet service file
|
below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
|
||||||
$kubeletsvc on each worker node and
|
kubelet-arg:
|
||||||
set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
|
- "tls-cert-file=<path/to/tls-cert-file>"
|
||||||
--tls-cert-file=<path/to/tls-certificate-file>
|
- "tls-private-key-file=<path/to/tls-private-key-file>"
|
||||||
--tls-private-key-file=<path/to/tls-key-file>
|
scored: true
|
||||||
Based on your system, restart the kubelet service. For example,
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
scored: false
|
|
||||||
|
|
||||||
- id: 4.2.11
|
- id: 4.2.11
|
||||||
text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
|
text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
|
||||||
audit: "/bin/ps -fC $kubeletbin"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
audit_config: "/bin/cat $kubeletconf"
|
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --rotate-certificates
|
- flag: --rotate-certificates
|
||||||
@ -399,21 +375,16 @@ groups:
|
|||||||
set: false
|
set: false
|
||||||
bin_op: or
|
bin_op: or
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or
|
By default, K3s does not set the --rotate-certificates argument. If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag.
|
||||||
remove it altogether to use the default value.
|
If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter.
|
||||||
If using command line arguments, edit the kubelet service file
|
If using the command line, remove the K3s flag --kubelet-arg="rotate-certificates".
|
||||||
$kubeletsvc on each worker node and
|
Based on your system, restart the k3s service. For example,
|
||||||
remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
|
systemctl restart k3s.service
|
||||||
variable.
|
|
||||||
Based on your system, restart the kubelet service. For example,
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
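Check 4.2.11 now pairs two sources: the audit reads the last kubelet startup line from whichever unit is present (k3s on servers, k3s-agent on agents), and the audit_config only reads $kubeletconf if such a file exists, which on a stock K3s node it normally does not. Assembled from the right-hand lines above, with the unchanged test_items elided:

      - id: 4.2.11
        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
        # guarded read: only cat the kubelet config file if it actually exists
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        # test_items unchanged and elided, as in the hunk above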
- id: 4.2.12
|
- id: 4.2.12
|
||||||
text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
|
text: "Verify that the RotateKubeletServerCertificate argument is set to true (Automated)"
|
||||||
audit: "/bin/ps -fC $kubeletbin"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
audit_config: "/bin/cat $kubeletconf"
|
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
bin_op: or
|
||||||
test_items:
|
test_items:
|
||||||
@ -426,17 +397,17 @@ groups:
|
|||||||
path: '{.featureGates.RotateKubeletServerCertificate}'
|
path: '{.featureGates.RotateKubeletServerCertificate}'
|
||||||
set: false
|
set: false
|
||||||
remediation: |
|
remediation: |
|
||||||
Edit the kubelet service file $kubeletsvc
|
By default, K3s does not set the RotateKubeletServerCertificate feature gate.
|
||||||
on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
|
If you have enabled this feature gate, you should remove it.
|
||||||
--feature-gates=RotateKubeletServerCertificate=true
|
If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gate=RotateKubeletServerCertificate parameter.
|
||||||
Based on your system, restart the kubelet service. For example:
|
If using the command line, remove the K3s flag --kubelet-arg="feature-gate=RotateKubeletServerCertificate".
|
||||||
systemctl daemon-reload
|
Based on your system, restart the k3s service. For example,
|
||||||
systemctl restart kubelet.service
|
systemctl restart k3s.service
|
||||||
scored: false
|
scored: true
|
||||||
|
|
||||||
- id: 4.2.13
|
- id: 4.2.13
|
||||||
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
|
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
|
||||||
audit: "/bin/ps -fC $kubeletbin"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
audit_config: "/bin/cat $kubeletconf"
|
audit_config: "/bin/cat $kubeletconf"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
@ -446,14 +417,11 @@ groups:
|
|||||||
op: valid_elements
|
op: valid_elements
|
||||||
value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
|
If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `TLSCipherSuites` to
|
||||||
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
kubelet-arg:
|
||||||
|
- "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
|
||||||
or to a subset of these values.
|
or to a subset of these values.
|
||||||
If using executable arguments, edit the kubelet service file
|
If using the command line, add the K3s flag --kubelet-arg="tls-cipher-suites=<same values as above>"
|
||||||
$kubeletsvc on each worker node and
|
Based on your system, restart the k3s service. For example,
|
||||||
set the --tls-cipher-suites parameter as follows, or to a subset of these values.
|
systemctl restart k3s.service
|
||||||
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
|
||||||
Based on your system, restart the kubelet service. For example:
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
scored: false
|
scored: false
|
||||||
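For 4.2.13 the remediation boils down to one kubelet-arg entry; the cipher list below is copied verbatim from the remediation above and, as it notes, may be trimmed to a subset:

    # /etc/rancher/k3s/config.yaml
    kubelet-arg:
      - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
    # apply with: systemctl restart k3s.service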
|
@ -152,8 +152,8 @@ groups:
|
|||||||
text: "Minimize the admission of containers with capabilities assigned (Manual)"
|
text: "Minimize the admission of containers with capabilities assigned (Manual)"
|
||||||
type: "manual"
|
type: "manual"
|
||||||
remediation: |
|
remediation: |
|
||||||
Review the use of capabilites in applications running on your cluster. Where a namespace
|
Review the use of capabilities in applications running on your cluster. Where a namespace
|
||||||
contains applicaions which do not require any Linux capabities to operate consider adding
|
contains applications which do not require any Linux capabilities to operate, consider adding
|
||||||
a PSP which forbids the admission of containers which do not drop all capabilities.
|
a PSP which forbids the admission of containers which do not drop all capabilities.
|
||||||
scored: false
|
scored: false
|
||||||
|
|
||||||
|
@ -23,17 +23,27 @@ master:
|
|||||||
scheduler:
|
scheduler:
|
||||||
bins:
|
bins:
|
||||||
- containerd
|
- containerd
|
||||||
|
kubeconfig:
|
||||||
|
- /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig
|
||||||
|
|
||||||
controllermanager:
|
controllermanager:
|
||||||
bins:
|
bins:
|
||||||
- containerd
|
- containerd
|
||||||
|
kubeconfig:
|
||||||
|
- /var/lib/rancher/k3s/server/cred/controller.kubeconfig
|
||||||
|
|
||||||
etcd:
|
etcd:
|
||||||
bins:
|
bins:
|
||||||
- containerd
|
- containerd
|
||||||
datadirs:
|
|
||||||
- /var/lib/rancher/k3s/server/db/etcd
|
etcd:
|
||||||
node:
|
components:
|
||||||
|
- etcd
|
||||||
|
|
||||||
|
etcd:
|
||||||
|
confs: /var/lib/rancher/k3s/server/db/etcd/config
|
||||||
|
|
||||||
|
node:
|
||||||
components:
|
components:
|
||||||
- kubelet
|
- kubelet
|
||||||
- proxy
|
- proxy
|
||||||
@ -49,6 +59,6 @@ master:
|
|||||||
- containerd
|
- containerd
|
||||||
defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig
|
defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig
|
||||||
|
|
||||||
policies:
|
policies:
|
||||||
components:
|
components:
|
||||||
- policies
|
- policies
|
||||||
|
@ -35,7 +35,7 @@ groups:
|
|||||||
checks:
|
checks:
|
||||||
- id: 3.2.1
|
- id: 3.2.1
|
||||||
text: "Ensure that a minimal audit policy is created (Manual)"
|
text: "Ensure that a minimal audit policy is created (Manual)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'"
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "--audit-policy-file"
|
- flag: "--audit-policy-file"
|
||||||
|
@ -10,128 +10,135 @@ groups:
|
|||||||
checks:
|
checks:
|
||||||
- id: 2.1
|
- id: 2.1
|
||||||
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
|
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
|
||||||
audit: "grep -A 4 'client-transport-security' $etcdconf | grep -E 'cert-file|key-file'"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
bin_op: and
|
bin_op: and
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "cert-file"
|
- path: "{.client-transport-security.cert-file}"
|
||||||
set: true
|
compare:
|
||||||
- flag: "key-file"
|
op: eq
|
||||||
set: true
|
value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt"
|
||||||
|
- path: "{.client-transport-security.key-file}"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key"
|
||||||
remediation: |
|
remediation: |
|
||||||
Follow the etcd service documentation and configure TLS encryption.
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
|
When running with embedded-etcd, K3s generates cert and key files for etcd.
|
||||||
on the master node and set the below parameters.
|
These are located in /var/lib/rancher/k3s/server/tls/etcd/.
|
||||||
--cert-file=</path/to/ca-file>
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
--key-file=</path/to/key-file>
|
has not been modified to use custom cert and key files.
|
||||||
scored: true
|
scored: false
|
||||||
|
|
||||||
- id: 2.2
|
- id: 2.2
|
||||||
text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
|
text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
|
||||||
audit: "grep -A 4 'client-transport-security' $etcdconf | grep 'client-cert-auth'"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "--client-cert-auth"
|
- path: "{.client-transport-security.client-cert-auth}"
|
||||||
set: true
|
|
||||||
- flag: "client-cert-auth"
|
|
||||||
compare:
|
compare:
|
||||||
op: eq
|
op: eq
|
||||||
value: true
|
value: true
|
||||||
set: true
|
|
||||||
remediation: |
|
remediation: |
|
||||||
Edit the etcd pod specification file $etcdconf on the master
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
node and set the below parameter.
|
When running with embedded-etcd, K3s sets the --client-cert-auth parameter to true.
|
||||||
--client-cert-auth="true"
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
scored: true
|
has not been modified to disable client certificate authentication.
|
||||||
|
scored: false
|
||||||
|
|
||||||
- id: 2.3
|
- id: 2.3
|
||||||
text: "Ensure that the --auto-tls argument is not set to true (Automated)"
|
text: "Ensure that the --auto-tls argument is not set to true (Automated)"
|
||||||
audit: "if grep -q '^auto-tls' $etcdconf;then grep '^auto-tls' $etcdconf;else echo 'notset';fi"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
bin_op: or
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "--auto-tls"
|
- path: "{.client-transport-security.auto-tls}"
|
||||||
set: false
|
|
||||||
- flag: "--auto-tls"
|
|
||||||
compare:
|
compare:
|
||||||
op: eq
|
op: eq
|
||||||
value: false
|
value: false
|
||||||
|
- path: "{.client-transport-security.auto-tls}"
|
||||||
|
set: false
|
||||||
remediation: |
|
remediation: |
|
||||||
Edit the etcd pod specification file $etcdconf on the master
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
|
When running with embedded-etcd, K3s does not set the --auto-tls parameter.
|
||||||
|
If this check fails, edit the etcd pod specification file $etcdconf on the master
|
||||||
node and either remove the --auto-tls parameter or set it to false.
|
node and either remove the --auto-tls parameter or set it to false.
|
||||||
--auto-tls=false
|
client-transport-security:
|
||||||
scored: true
|
auto-tls: false
|
||||||
|
scored: false
|
||||||
|
|
||||||
- id: 2.4
|
- id: 2.4
|
||||||
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
|
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
|
||||||
audit: "grep -A 4 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file'"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
bin_op: and
|
bin_op: and
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "cert-file"
|
- path: "{.peer-transport-security.cert-file}"
|
||||||
set: true
|
compare:
|
||||||
- flag: "key-file"
|
op: eq
|
||||||
set: true
|
value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt"
|
||||||
|
- path: "{.peer-transport-security.key-file}"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key"
|
||||||
remediation: |
|
remediation: |
|
||||||
Follow the etcd service documentation and configure peer TLS encryption as appropriate
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
for your etcd cluster.
|
When running with embedded-etcd, K3s generates peer cert and key files for etcd.
|
||||||
Then, edit the etcd pod specification file $etcdconf on the
|
These are located in /var/lib/rancher/k3s/server/tls/etcd/.
|
||||||
master node and set the below parameters.
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
--peer-client-file=</path/to/peer-cert-file>
|
has not been modified to use custom peer cert and key files.
|
||||||
--peer-key-file=</path/to/peer-key-file>
|
scored: false
|
||||||
scored: true
|
|
||||||
|
|
||||||
- id: 2.5
|
- id: 2.5
|
||||||
text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
|
text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
|
||||||
audit: "grep -A 4 'peer-transport-security' $etcdconf | grep 'client-cert-auth'"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "--client-cert-auth"
|
- path: "{.peer-transport-security.client-cert-auth}"
|
||||||
set: true
|
|
||||||
- flag: "client-cert-auth"
|
|
||||||
compare:
|
compare:
|
||||||
op: eq
|
op: eq
|
||||||
value: true
|
value: true
|
||||||
set: true
|
|
||||||
remediation: |
|
remediation: |
|
||||||
Edit the etcd pod specification file $etcdconf on the master
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
node and set the below parameter.
|
When running with embedded-etcd, K3s sets the --peer-cert-auth parameter to true.
|
||||||
--peer-client-cert-auth=true
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
scored: true
|
has not been modified to disable peer client certificate authentication.
|
||||||
|
scored: false
|
||||||
|
|
||||||
- id: 2.6
|
- id: 2.6
|
||||||
text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
|
text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
|
||||||
audit: "if grep -q '^peer-auto-tls' $etcdconf;then grep '^peer-auto-tls' $etcdconf;else echo 'notset';fi"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
bin_op: or
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "--peer-auto-tls"
|
- path: "{.peer-transport-security.auto-tls}"
|
||||||
set: false
|
|
||||||
- flag: "--peer-auto-tls"
|
|
||||||
compare:
|
compare:
|
||||||
op: eq
|
op: eq
|
||||||
value: false
|
value: false
|
||||||
set: true
|
- path: "{.peer-transport-security.auto-tls}"
|
||||||
|
set: false
|
||||||
remediation: |
|
remediation: |
|
||||||
Edit the etcd pod specification file $etcdconf on the master
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
|
When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter.
|
||||||
|
If this check fails, edit the etcd pod specification file $etcdconf on the master
|
||||||
node and either remove the --peer-auto-tls parameter or set it to false.
|
node and either remove the --peer-auto-tls parameter or set it to false.
|
||||||
--peer-auto-tls=false
|
peer-transport-security:
|
||||||
scored: true
|
auto-tls: false
|
||||||
|
scored: false
|
||||||
|
|
||||||
- id: 2.7
|
- id: 2.7
|
||||||
text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
|
text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
|
||||||
audit: "if grep -q 'trusted-ca-file' $etcdconf;then grep 'trusted-ca-file' $etcdconf;else echo 'notset';fi"
|
audit_config: "cat $etcdconf"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "trusted-ca-file"
|
- path: "{.peer-transport-security.trusted-ca-file}"
|
||||||
set: true
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt"
|
||||||
remediation: |
|
remediation: |
|
||||||
[Manual test]
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
Follow the etcd documentation and create a dedicated certificate authority setup for the
|
When running with embedded-etcd, K3s generates a unique certificate authority for etcd.
|
||||||
etcd service.
|
This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt.
|
||||||
Then, edit the etcd pod specification file $etcdconf on the
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
master node and set the below parameter.
|
has not been modified to use a shared certificate authority.
|
||||||
--trusted-ca-file=</path/to/ca-file>
|
|
||||||
scored: false
|
scored: false
|
||||||
|
File diff suppressed because it is too large
@ -19,9 +19,8 @@ groups:
|
|||||||
op: bitmask
|
op: bitmask
|
||||||
value: "600"
|
value: "600"
|
||||||
remediation: |
|
remediation: |
|
||||||
Run the below command (based on the file location on your system) on the each worker node.
|
Not Applicable.
|
||||||
For example, chmod 600 $kubeletsvc
|
The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.
|
||||||
Not Applicable - All configuration is passed in as arguments at container run time.
|
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.2
|
- id: 4.1.2
|
||||||
@ -32,16 +31,15 @@ groups:
|
|||||||
test_items:
|
test_items:
|
||||||
- flag: root:root
|
- flag: root:root
|
||||||
remediation: |
|
remediation: |
|
||||||
Run the below command (based on the file location on your system) on the each worker node.
|
Not Applicable.
|
||||||
For example,
|
The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.
|
||||||
chown root:root $kubeletsvc
|
|
||||||
Not Applicable.
|
Not Applicable.
|
||||||
All configuration is passed in as arguments at container run time.
|
All configuration is passed in as arguments at container run time.
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.3
|
- id: 4.1.3
|
||||||
text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
|
text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
|
||||||
audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig'
|
audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
bin_op: or
|
||||||
test_items:
|
test_items:
|
||||||
@ -53,11 +51,11 @@ groups:
|
|||||||
Run the below command (based on the file location on your system) on each worker node.
|
Run the below command (based on the file location on your system) on each worker node.
|
||||||
For example,
|
For example,
|
||||||
chmod 600 $proxykubeconfig
|
chmod 600 $proxykubeconfig
|
||||||
scored: false
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.4
|
- id: 4.1.4
|
||||||
text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
|
text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
|
||||||
audit: '/bin/sh -c ''if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'' '
|
audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
bin_op: or
|
||||||
test_items:
|
test_items:
|
||||||
@ -65,7 +63,7 @@ groups:
|
|||||||
remediation: |
|
remediation: |
|
||||||
Run the below command (based on the file location on your system) on each worker node.
|
Run the below command (based on the file location on your system) on each worker node.
|
||||||
For example, chown root:root $proxykubeconfig
|
For example, chown root:root $proxykubeconfig
|
||||||
scored: false
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.5
|
- id: 4.1.5
|
||||||
text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
|
text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
|
||||||
@ -84,7 +82,7 @@ groups:
|
|||||||
|
|
||||||
- id: 4.1.6
|
- id: 4.1.6
|
||||||
text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
|
text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
|
||||||
audit: 'stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig'
|
audit: 'stat -c %U:%G $kubeletkubeconfig'
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: root:root
|
- flag: root:root
|
||||||
@ -95,8 +93,8 @@ groups:
|
|||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.7
|
- id: 4.1.7
|
||||||
text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)"
|
text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
|
||||||
audit: "stat -c %a /var/lib/rancher/k3s/server/tls/server-ca.crt"
|
audit: "stat -c permissions=%a $kubeletcafile"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "permissions"
|
- flag: "permissions"
|
||||||
@ -105,19 +103,22 @@ groups:
|
|||||||
value: "600"
|
value: "600"
|
||||||
remediation: |
|
remediation: |
|
||||||
Run the following command to modify the file permissions of the
|
Run the following command to modify the file permissions of the
|
||||||
--client-ca-file chmod 600 <filename>
|
--client-ca-file chmod 600 $kubeletcafile
|
||||||
scored: false
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.8
|
- id: 4.1.8
|
||||||
text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
|
text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
|
||||||
audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls/client-ca.crt"
|
audit: "stat -c %U:%G $kubeletcafile"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: root:root
|
- flag: root:root
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: root:root
|
||||||
remediation: |
|
remediation: |
|
||||||
Run the following command to modify the ownership of the --client-ca-file.
|
Run the following command to modify the ownership of the --client-ca-file.
|
||||||
chown root:root <filename>
|
chown root:root $kubeletcafile
|
||||||
scored: false
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.9
|
- id: 4.1.9
|
||||||
text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)"
|
text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)"
|
||||||
@ -130,8 +131,8 @@ groups:
|
|||||||
op: bitmask
|
op: bitmask
|
||||||
value: "600"
|
value: "600"
|
||||||
remediation: |
|
remediation: |
|
||||||
Run the following command (using the config file location identified in the Audit step)
|
Not Applicable.
|
||||||
chmod 600 $kubeletconf
|
The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.1.10
|
- id: 4.1.10
|
||||||
@ -142,10 +143,8 @@ groups:
|
|||||||
test_items:
|
test_items:
|
||||||
- flag: root:root
|
- flag: root:root
|
||||||
remediation: |
|
remediation: |
|
||||||
Run the following command (using the config file location identified in the Audit step)
|
|
||||||
chown root:root $kubeletconf
|
|
||||||
Not Applicable.
|
Not Applicable.
|
||||||
All configuration is passed in as arguments at container run time.
|
The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.2
|
- id: 4.2
|
||||||
@ -153,7 +152,7 @@ groups:
|
|||||||
checks:
|
checks:
|
||||||
- id: 4.2.1
|
- id: 4.2.1
|
||||||
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
|
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
|
||||||
audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
|
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: "--anonymous-auth"
|
- flag: "--anonymous-auth"
|
||||||
@ -162,20 +161,20 @@ groups:
|
|||||||
op: eq
|
op: eq
|
||||||
value: false
|
value: false
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
|
By default, K3s sets the --anonymous-auth to false. If you have set this to a different value, you
|
||||||
`false`.
|
should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
|
||||||
If using executable arguments, edit the kubelet service file
|
kubelet-arg:
|
||||||
$kubeletsvc on each worker node and
|
- "anonymous-auth=true"
|
||||||
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
|
If using the command line, edit the K3s service file and remove the below argument.
|
||||||
`--anonymous-auth=false`
|
--kubelet-arg="anonymous-auth=true"
|
||||||
Based on your system, restart the kubelet service. For example,
|
Based on your system, restart the k3s service. For example,
|
||||||
systemctl daemon-reload
|
systemctl daemon-reload
|
||||||
systemctl restart kubelet.service
|
systemctl restart k3s.service
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
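The remediations in this group repeatedly refer to kubelet-arg entries in the K3s config file /etc/rancher/k3s/config.yaml. As a minimal sketch of what that looks like in practice (the two values are taken from the remediations for 4.2.5 and 4.2.6 below and are examples, not mandated settings):

  # /etc/rancher/k3s/config.yaml -- illustrative kubelet flag overrides
  kubelet-arg:
    - "streaming-connection-idle-timeout=5m"
    - "make-iptables-util-chains=true"

After changing the file, restart the k3s service as the individual remediations describe.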
- id: 4.2.2
|
- id: 4.2.2
|
||||||
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
|
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
|
||||||
audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' '
|
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' '
|
||||||
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
@ -185,38 +184,32 @@ groups:
|
|||||||
op: nothave
|
op: nothave
|
||||||
value: AlwaysAllow
|
value: AlwaysAllow
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If
|
By default, K3s does not set the --authorization-mode to AlwaysAllow.
|
||||||
using executable arguments, edit the kubelet service file
|
If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
|
||||||
$kubeletsvc on each worker node and
|
kubelet-arg:
|
||||||
set the below parameter in KUBELET_AUTHZ_ARGS variable.
|
- "authorization-mode=AlwaysAllow"
|
||||||
--authorization-mode=Webhook
|
If using the command line, edit the K3s service file and remove the below argument.
|
||||||
Based on your system, restart the kubelet service. For example,
|
--kubelet-arg="authorization-mode=AlwaysAllow"
|
||||||
|
Based on your system, restart the k3s service. For example,
|
||||||
systemctl daemon-reload
|
systemctl daemon-reload
|
||||||
systemctl restart kubelet.service
|
systemctl restart k3s.service
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.2.3
|
- id: 4.2.3
|
||||||
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
|
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
|
||||||
audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
|
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --client-ca-file
|
- flag: --client-ca-file
|
||||||
path: '{.authentication.x509.clientCAFile}'
|
path: '{.authentication.x509.clientCAFile}'
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to
|
By default, K3s automatically provides the client ca certificate for the Kubelet.
|
||||||
the location of the client CA file.
|
It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt
|
||||||
If using command line arguments, edit the kubelet service file
|
|
||||||
$kubeletsvc on each worker node and
|
|
||||||
set the below parameter in KUBELET_AUTHZ_ARGS variable.
|
|
||||||
--client-ca-file=<path/to/client-ca-file>
|
|
||||||
Based on your system, restart the kubelet service. For example,
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.2.4
|
- id: 4.2.4
|
||||||
text: "Verify that the --read-only-port argument is set to 0 (Manual)"
|
text: "Verify that the --read-only-port argument is set to 0 (Automated)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' "
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
bin_op: or
|
||||||
@ -230,19 +223,20 @@ groups:
|
|||||||
path: '{.readOnlyPort}'
|
path: '{.readOnlyPort}'
|
||||||
set: false
|
set: false
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `readOnlyPort` to 0.
|
By default, K3s sets the --read-only-port to 0. If you have set this to a different value, you
|
||||||
If using command line arguments, edit the kubelet service file
|
should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
|
||||||
$kubeletsvc on each worker node and
|
kubelet-arg:
|
||||||
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
|
- "read-only-port=XXXX"
|
||||||
--read-only-port=0
|
If using the command line, edit the K3s service file and remove the below argument.
|
||||||
Based on your system, restart the kubelet service. For example,
|
--kubelet-arg="read-only-port=XXXX"
|
||||||
|
Based on your system, restart the k3s service. For example,
|
||||||
systemctl daemon-reload
|
systemctl daemon-reload
|
||||||
systemctl restart kubelet.service
|
systemctl restart k3s.service
|
||||||
scored: false
|
scored: true
|
||||||
|
|
||||||
- id: 4.2.5
|
- id: 4.2.5
|
||||||
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
|
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --streaming-connection-idle-timeout
|
- flag: --streaming-connection-idle-timeout
|
||||||
@ -255,21 +249,17 @@ groups:
|
|||||||
set: false
|
set: false
|
||||||
bin_op: or
|
bin_op: or
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a
|
If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
|
||||||
value other than 0.
|
kubelet-arg:
|
||||||
If using command line arguments, edit the kubelet service file
|
- "streaming-connection-idle-timeout=5m"
|
||||||
$kubeletsvc on each worker node and
|
If using the command line, run K3s with --kubelet-arg="streaming-connection-idle-timeout=5m".
|
||||||
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
|
Based on your system, restart the k3s service. For example,
|
||||||
--streaming-connection-idle-timeout=5m
|
systemctl restart k3s.service
|
||||||
Based on your system, restart the kubelet service. For example,
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
scored: false
|
scored: false
|
||||||
|
|
||||||
- id: 4.2.6
|
- id: 4.2.6
|
||||||
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
|
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
|
||||||
type: "skip"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'"
|
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --make-iptables-util-chains
|
- flag: --make-iptables-util-chains
|
||||||
@ -282,41 +272,31 @@ groups:
|
|||||||
set: false
|
set: false
|
||||||
bin_op: or
|
bin_op: or
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
|
If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter.
|
||||||
If using command line arguments, edit the kubelet service file
|
kubelet-arg:
|
||||||
$kubeletsvc on each worker node and
|
- "make-iptables-util-chains=true"
|
||||||
remove the --make-iptables-util-chains argument from the
|
If using the command line, run K3s with --kubelet-arg="make-iptables-util-chains=true".
|
||||||
KUBELET_SYSTEM_PODS_ARGS variable.
|
Based on your system, restart the k3s service. For example,
|
||||||
Based on your system, restart the kubelet service. For example:
|
systemctl restart k3s.service
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
Permissive.
|
|
||||||
scored: true
|
scored: true
|
||||||
|
|
||||||
- id: 4.2.7
|
- id: 4.2.7
|
||||||
text: "Ensure that the --hostname-override argument is not set (Manual)"
|
text: "Ensure that the --hostname-override argument is not set (Automated)"
|
||||||
# This is one of those properties that can only be set as a command line argument.
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
# To check if the property is set as expected, we need to parse the kubelet command
|
|
||||||
# instead reading the Kubelet Configuration file.
|
|
||||||
type: "skip"
|
type: "skip"
|
||||||
audit: "/bin/ps -fC $kubeletbin "
|
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --hostname-override
|
- flag: --hostname-override
|
||||||
set: false
|
set: false
|
||||||
remediation: |
|
remediation: |
|
||||||
Edit the kubelet service file $kubeletsvc
|
|
||||||
on each worker node and remove the --hostname-override argument from the
|
|
||||||
KUBELET_SYSTEM_PODS_ARGS variable.
|
|
||||||
Based on your system, restart the kubelet service. For example,
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
Not Applicable.
|
Not Applicable.
|
||||||
scored: false
|
By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is to comply
|
||||||
|
with cloud providers that require this flag to ensure that the hostname matches node names.
|
||||||
|
scored: true
|
||||||
|
|
||||||
- id: 4.2.8
|
- id: 4.2.8
|
||||||
text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
|
text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
|
||||||
audit: "/bin/ps -fC $kubeletbin"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
@ -330,19 +310,18 @@ groups:
|
|||||||
set: false
|
set: false
|
||||||
bin_op: or
|
bin_op: or
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
|
By default, K3s sets the event-qps to 0. Should you wish to change this,
|
||||||
If using command line arguments, edit the kubelet service file
|
if using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
|
||||||
$kubeletsvc on each worker node and
|
kubelet-arg:
|
||||||
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
|
- "event-qps=<value>"
|
||||||
Based on your system, restart the kubelet service. For example,
|
If using the command line, run K3s with --kubelet-arg="event-qps=<value>".
|
||||||
systemctl daemon-reload
|
Based on your system, restart the k3s service. For example,
|
||||||
systemctl restart kubelet.service
|
systemctl restart k3s.service
|
||||||
scored: false
|
scored: false
|
||||||
|
|
||||||
- id: 4.2.9
|
- id: 4.2.9
|
||||||
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
|
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
|
||||||
type: "skip"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1"
|
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
- flag: --tls-cert-file
|
- flag: --tls-cert-file
|
||||||
@ -350,23 +329,18 @@ groups:
|
|||||||
- flag: --tls-private-key-file
|
- flag: --tls-private-key-file
|
||||||
path: '/var/lib/rancher/k3s/agent/serving-kubelet.key'
|
path: '/var/lib/rancher/k3s/agent/serving-kubelet.key'
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
|
By default, K3s automatically provides the TLS certificate and private key for the Kubelet.
|
||||||
of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
|
They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key
|
||||||
to the location of the corresponding private key file.
|
If for some reason you need to provide your own certificate and key, you can set the
|
||||||
If using command line arguments, edit the kubelet service file
|
below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
|
||||||
$kubeletsvc on each worker node and
|
kubelet-arg:
|
||||||
set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
|
- "tls-cert-file=<path/to/tls-cert-file>"
|
||||||
--tls-cert-file=<path/to/tls-certificate-file>
|
- "tls-private-key-file=<path/to/tls-private-key-file>"
|
||||||
--tls-private-key-file=<path/to/tls-key-file>
|
scored: true
|
||||||
Based on your system, restart the kubelet service. For example,
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers.
|
|
||||||
scored: false
|
|
||||||
|
|
||||||
- id: 4.2.10
|
- id: 4.2.10
|
||||||
text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
|
text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
|
||||||
audit: "/bin/ps -fC $kubeletbin"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
@ -380,20 +354,16 @@ groups:
|
|||||||
set: false
|
set: false
|
||||||
bin_op: or
|
bin_op: or
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or
|
By default, K3s does not set the --rotate-certificates argument. If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag.
|
||||||
remove it altogether to use the default value.
|
If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter.
|
||||||
If using command line arguments, edit the kubelet service file
|
If using the command line, remove the K3s flag --kubelet-arg="rotate-certificates".
|
||||||
$kubeletsvc on each worker node and
|
Based on your system, restart the k3s service. For example,
|
||||||
remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
|
systemctl restart k3s.service
|
||||||
variable.
|
scored: true
|
||||||
Based on your system, restart the kubelet service. For example,
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
scored: false
|
|
||||||
|
|
||||||
- id: 4.2.11
|
- id: 4.2.11
|
||||||
text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
|
text: "Verify that the RotateKubeletServerCertificate argument is set to true (Automated)"
|
||||||
audit: "/bin/ps -fC $kubeletbin"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
audit_config: "/bin/cat $kubeletconf"
|
audit_config: "/bin/cat $kubeletconf"
|
||||||
tests:
|
tests:
|
||||||
bin_op: or
|
bin_op: or
|
||||||
@ -407,18 +377,17 @@ groups:
|
|||||||
path: '{.featureGates.RotateKubeletServerCertificate}'
|
path: '{.featureGates.RotateKubeletServerCertificate}'
|
||||||
set: false
|
set: false
|
||||||
remediation: |
|
remediation: |
|
||||||
Edit the kubelet service file $kubeletsvc
|
By default, K3s does not set the RotateKubeletServerCertificate feature gate.
|
||||||
on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
|
If you have enabled this feature gate, you should remove it.
|
||||||
--feature-gates=RotateKubeletServerCertificate=true
|
If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gate=RotateKubeletServerCertificate parameter.
|
||||||
Based on your system, restart the kubelet service. For example:
|
If using the command line, remove the K3s flag --kubelet-arg="feature-gate=RotateKubeletServerCertificate".
|
||||||
systemctl daemon-reload
|
Based on your system, restart the k3s service. For example,
|
||||||
systemctl restart kubelet.service
|
systemctl restart k3s.service
|
||||||
Not Applicable.
|
scored: true
|
||||||
scored: false
|
|
||||||
|
|
||||||
- id: 4.2.12
|
- id: 4.2.12
|
||||||
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
|
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
|
||||||
audit: "/bin/ps -fC $kubeletbin"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
@ -428,21 +397,18 @@ groups:
|
|||||||
op: valid_elements
|
op: valid_elements
|
||||||
value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
||||||
remediation: |
|
remediation: |
|
||||||
If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
|
If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set the following parameter
|
||||||
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
kubelet-arg:
|
||||||
|
- "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
|
||||||
or to a subset of these values.
|
or to a subset of these values.
|
||||||
If using executable arguments, edit the kubelet service file
|
If using the command line, add the K3s flag --kubelet-arg="tls-cipher-suites=<same values as above>"
|
||||||
$kubeletsvc on each worker node and
|
Based on your system, restart the k3s service. For example,
|
||||||
set the --tls-cipher-suites parameter as follows, or to a subset of these values.
|
systemctl restart k3s.service
|
||||||
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
|
||||||
Based on your system, restart the kubelet service. For example:
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart kubelet.service
|
|
||||||
scored: false
|
scored: false
|
||||||
|
|
||||||
- id: 4.2.13
|
- id: 4.2.13
|
||||||
text: "Ensure that a limit is set on pod PIDs (Manual)"
|
text: "Ensure that a limit is set on pod PIDs (Manual)"
|
||||||
audit: "/bin/ps -fC $kubeletbin"
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
||||||
tests:
|
tests:
|
||||||
test_items:
|
test_items:
|
||||||
@ -450,5 +416,7 @@ groups:
|
|||||||
path: '{.podPidsLimit}'
|
path: '{.podPidsLimit}'
|
||||||
remediation: |
|
remediation: |
|
||||||
Decide on an appropriate level for this parameter and set it,
|
Decide on an appropriate level for this parameter and set it,
|
||||||
either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting.
|
If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set the following parameter to an appropriate value.
|
||||||
|
kubelet-arg:
|
||||||
|
- "pod-max-pids=<value>"
|
||||||
scored: false
|
scored: false
|
||||||
|
@ -43,23 +43,15 @@ groups:
|
|||||||
|
|
||||||
- id: 5.1.5
|
- id: 5.1.5
|
||||||
text: "Ensure that default service accounts are not actively used. (Manual)"
|
text: "Ensure that default service accounts are not actively used. (Manual)"
|
||||||
type: "skip"
|
type: "manual"
|
||||||
audit: check_for_default_sa.sh
|
|
||||||
tests:
|
|
||||||
test_items:
|
|
||||||
- flag: "true"
|
|
||||||
compare:
|
|
||||||
op: eq
|
|
||||||
value: "true"
|
|
||||||
set: true
|
|
||||||
remediation: |
|
remediation: |
|
||||||
Create explicit service accounts wherever a Kubernetes workload requires specific access
|
Create explicit service accounts wherever a Kubernetes workload requires specific access
|
||||||
to the Kubernetes API server.
|
to the Kubernetes API server.
|
||||||
Modify the configuration of each default service account to include this value
|
Modify the configuration of each default service account to include this value
|
||||||
automountServiceAccountToken: false
|
automountServiceAccountToken: false
|
||||||
Permissive - Kubernetes provides default service accounts to be used.
|
|
||||||
scored: false
|
scored: false
|
||||||
|
|
||||||
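To illustrate the remediation above, setting automountServiceAccountToken: false on a default service account is a small change to the ServiceAccount object (the namespace below is a placeholder):

  apiVersion: v1
  kind: ServiceAccount
  metadata:
    name: default
    namespace: my-namespace   # placeholder
  automountServiceAccountToken: false

The same field can also be set on individual Pod specs where a particular workload genuinely needs a token.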
|
|
||||||
- id: 5.1.6
|
- id: 5.1.6
|
||||||
text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
|
text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
|
||||||
type: "manual"
|
type: "manual"
|
||||||
@ -138,29 +130,23 @@ groups:
|
|||||||
|
|
||||||
- id: 5.2.3
|
- id: 5.2.3
|
||||||
text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
|
text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
|
||||||
type: "skip"
|
|
||||||
remediation: |
|
remediation: |
|
||||||
Add policies to each namespace in the cluster which has user workloads to restrict the
|
Add policies to each namespace in the cluster which has user workloads to restrict the
|
||||||
admission of `hostPID` containers.
|
admission of `hostPID` containers.
|
||||||
Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail.
|
|
||||||
scored: false
|
scored: false
|
||||||
|
|
||||||
- id: 5.2.4
|
- id: 5.2.4
|
||||||
text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
|
text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
|
||||||
type: "skip"
|
|
||||||
remediation: |
|
remediation: |
|
||||||
Add policies to each namespace in the cluster which has user workloads to restrict the
|
Add policies to each namespace in the cluster which has user workloads to restrict the
|
||||||
admission of `hostIPC` containers.
|
admission of `hostIPC` containers.
|
||||||
Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail.
|
|
||||||
scored: false
|
scored: false
|
||||||
|
|
||||||
- id: 5.2.5
|
- id: 5.2.5
|
||||||
text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
|
text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
|
||||||
type: "skip"
|
|
||||||
remediation: |
|
remediation: |
|
||||||
Add policies to each namespace in the cluster which has user workloads to restrict the
|
Add policies to each namespace in the cluster which has user workloads to restrict the
|
||||||
admission of `hostNetwork` containers.
|
admission of `hostNetwork` containers.
|
||||||
Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail.
|
|
||||||
scored: false
|
scored: false
|
||||||
|
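One way to add such policies for checks 5.2.3 through 5.2.5 (a sketch that assumes Pod Security Admission is available in the cluster) is to label each namespace that runs user workloads so that at least the baseline profile, which rejects hostPID, hostIPC and hostNetwork Pods, is enforced:

  apiVersion: v1
  kind: Namespace
  metadata:
    name: my-namespace        # placeholder
    labels:
      pod-security.kubernetes.io/enforce: baseline

Other admission mechanisms can achieve the same effect; the checks only require that some policy restricts these host namespaces.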
|
||||||
- id: 5.2.6
|
- id: 5.2.6
|
||||||
@ -199,8 +185,8 @@ groups:
|
|||||||
text: "Minimize the admission of containers with capabilities assigned (Manual)"
|
text: "Minimize the admission of containers with capabilities assigned (Manual)"
|
||||||
type: "manual"
|
type: "manual"
|
||||||
remediation: |
|
remediation: |
|
||||||
Review the use of capabilites in applications running on your cluster. Where a namespace
|
Review the use of capabilities in applications running on your cluster. Where a namespace
|
||||||
contains applicaions which do not require any Linux capabities to operate consider adding
|
contains applications which do not require any Linux capabilities to operate, consider adding
|
||||||
a PSP which forbids the admission of containers which do not drop all capabilities.
|
a PSP which forbids the admission of containers which do not drop all capabilities.
|
||||||
scored: false
|
scored: false
|
||||||
|
|
||||||
@ -242,10 +228,8 @@ groups:
|
|||||||
|
|
||||||
- id: 5.3.2
|
- id: 5.3.2
|
||||||
text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
|
text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
|
||||||
type: "skip"
|
|
||||||
remediation: |
|
remediation: |
|
||||||
Follow the documentation and create NetworkPolicy objects as you need them.
|
Follow the documentation and create NetworkPolicy objects as you need them.
|
||||||
Permissive - Enabling Network Policies can prevent certain applications from communicating with each other.
|
|
||||||
scored: false
|
scored: false
|
||||||
|
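As a starting point for the remediation above, a default-deny NetworkPolicy applied per namespace might look like the following sketch (names are placeholders; allow rules for legitimate traffic still need to be added):

  apiVersion: networking.k8s.io/v1
  kind: NetworkPolicy
  metadata:
    name: default-deny-all    # placeholder
    namespace: my-namespace   # placeholder
  spec:
    podSelector: {}           # applies to every pod in the namespace
    policyTypes:
      - Ingress
      - Egress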
|
||||||
- id: 5.4
|
- id: 5.4
|
||||||
@ -310,9 +294,7 @@ groups:
|
|||||||
|
|
||||||
- id: 5.7.4
|
- id: 5.7.4
|
||||||
text: "The default namespace should not be used (Manual)"
|
text: "The default namespace should not be used (Manual)"
|
||||||
type: "skip"
|
|
||||||
remediation: |
|
remediation: |
|
||||||
Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
|
Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
|
||||||
resources and that all new resources are created in a specific namespace.
|
resources and that all new resources are created in a specific namespace.
|
||||||
Permissive - Kubernetes provides a default namespace.
|
|
||||||
scored: false
|
scored: false
|
||||||
|
54
cfg/k3s-cis-1.8/config.yaml
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
---
|
||||||
|
## Version-specific settings that override the values in cfg/config.yaml
|
||||||
|
|
||||||
|
master:
|
||||||
|
components:
|
||||||
|
- apiserver
|
||||||
|
- kubelet
|
||||||
|
- scheduler
|
||||||
|
- controllermanager
|
||||||
|
- etcd
|
||||||
|
- policies
|
||||||
|
apiserver:
|
||||||
|
bins:
|
||||||
|
- containerd
|
||||||
|
kubelet:
|
||||||
|
bins:
|
||||||
|
- containerd
|
||||||
|
defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig
|
||||||
|
defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt
|
||||||
|
scheduler:
|
||||||
|
bins:
|
||||||
|
- containerd
|
||||||
|
kubeconfig:
|
||||||
|
- /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig
|
||||||
|
controllermanager:
|
||||||
|
bins:
|
||||||
|
- containerd
|
||||||
|
kubeconfig:
|
||||||
|
- /var/lib/rancher/k3s/server/cred/controller.kubeconfig
|
||||||
|
etcd:
|
||||||
|
bins:
|
||||||
|
- containerd
|
||||||
|
|
||||||
|
etcd:
|
||||||
|
confs: /var/lib/rancher/k3s/server/db/etcd/config
|
||||||
|
|
||||||
|
node:
|
||||||
|
components:
|
||||||
|
- kubelet
|
||||||
|
- proxy
|
||||||
|
kubelet:
|
||||||
|
bins:
|
||||||
|
- containerd
|
||||||
|
confs:
|
||||||
|
- /var/lib/rancher/k3s/agent/kubelet.kubeconfig
|
||||||
|
defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig
|
||||||
|
defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt
|
||||||
|
proxy:
|
||||||
|
bins:
|
||||||
|
- containerd
|
||||||
|
defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig
|
||||||
|
policies:
|
||||||
|
components:
|
||||||
|
- policies
|
62
cfg/k3s-cis-1.8/controlplane.yaml
Normal file
@ -0,0 +1,62 @@
|
|||||||
|
---
|
||||||
|
controls:
|
||||||
|
version: "k3s-cis-1.8"
|
||||||
|
id: 3
|
||||||
|
text: "Control Plane Configuration"
|
||||||
|
type: "controlplane"
|
||||||
|
groups:
|
||||||
|
- id: 3.1
|
||||||
|
text: "Authentication and Authorization"
|
||||||
|
checks:
|
||||||
|
- id: 3.1.1
|
||||||
|
text: "Client certificate authentication should not be used for users (Manual)"
|
||||||
|
type: "manual"
|
||||||
|
remediation: |
|
||||||
|
Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
|
||||||
|
implemented in place of client certificates.
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 3.1.2
|
||||||
|
text: "Service account token authentication should not be used for users (Manual)"
|
||||||
|
type: "manual"
|
||||||
|
remediation: |
|
||||||
|
Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
|
||||||
|
in place of service account tokens.
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 3.1.3
|
||||||
|
text: "Bootstrap token authentication should not be used for users (Manual)"
|
||||||
|
type: "manual"
|
||||||
|
remediation: |
|
||||||
|
Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
|
||||||
|
in place of bootstrap tokens.
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 3.2
|
||||||
|
text: "Logging"
|
||||||
|
checks:
|
||||||
|
- id: 3.2.1
|
||||||
|
text: "Ensure that a minimal audit policy is created (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--audit-policy-file"
|
||||||
|
set: true
|
||||||
|
remediation: |
|
||||||
|
Create an audit policy file for your cluster.
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 3.2.2
|
||||||
|
text: "Ensure that the audit policy covers key security concerns (Manual)"
|
||||||
|
type: "manual"
|
||||||
|
remediation: |
|
||||||
|
Review the audit policy provided for the cluster and ensure that it covers
|
||||||
|
at least the following areas,
|
||||||
|
- Access to Secrets managed by the cluster. Care should be taken to only
|
||||||
|
log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
|
||||||
|
order to avoid risk of logging sensitive data.
|
||||||
|
- Modification of Pod and Deployment objects.
|
||||||
|
- Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
|
||||||
|
For most requests, minimally logging at the Metadata level is recommended
|
||||||
|
(the most basic level of logging).
|
||||||
|
scored: false
|
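To illustrate checks 3.2.1 and 3.2.2 above, a minimal audit policy along the lines they describe might look like the sketch below (rules and file location are illustrative; how the file is passed to the API server, for example through a kube-apiserver-arg audit-policy-file entry in the K3s configuration, depends on your setup):

  # audit-policy.yaml -- illustrative minimal policy
  apiVersion: audit.k8s.io/v1
  kind: Policy
  rules:
    # Record only metadata for sensitive objects so their contents are not logged.
    - level: Metadata
      resources:
        - group: ""
          resources: ["secrets", "configmaps"]
        - group: "authentication.k8s.io"
          resources: ["tokenreviews"]
    # Baseline for everything else.
    - level: Metadata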
144
cfg/k3s-cis-1.8/etcd.yaml
Normal file
@ -0,0 +1,144 @@
|
|||||||
|
---
|
||||||
|
controls:
|
||||||
|
version: "k3s-cis-1.8"
|
||||||
|
id: 2
|
||||||
|
text: "Etcd Node Configuration"
|
||||||
|
type: "etcd"
|
||||||
|
groups:
|
||||||
|
- id: 2
|
||||||
|
text: "Etcd Node Configuration"
|
||||||
|
checks:
|
||||||
|
- id: 2.1
|
||||||
|
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
|
||||||
|
audit_config: "cat $etcdconf"
|
||||||
|
tests:
|
||||||
|
bin_op: and
|
||||||
|
test_items:
|
||||||
|
- path: "{.client-transport-security.cert-file}"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt"
|
||||||
|
- path: "{.client-transport-security.key-file}"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key"
|
||||||
|
remediation: |
|
||||||
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
|
When running with embedded-etcd, K3s generates cert and key files for etcd.
|
||||||
|
These are located in /var/lib/rancher/k3s/server/tls/etcd/.
|
||||||
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
|
has not been modified to use custom cert and key files.
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 2.2
|
||||||
|
text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
|
||||||
|
audit_config: "cat $etcdconf"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- path: "{.client-transport-security.client-cert-auth}"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: true
|
||||||
|
remediation: |
|
||||||
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
|
When running with embedded-etcd, K3s sets the --client-cert-auth parameter to true.
|
||||||
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
|
has not been modified to disable client certificate authentication.
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 2.3
|
||||||
|
text: "Ensure that the --auto-tls argument is not set to true (Automated)"
|
||||||
|
audit_config: "cat $etcdconf"
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- path: "{.client-transport-security.auto-tls}"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: false
|
||||||
|
- path: "{.client-transport-security.auto-tls}"
|
||||||
|
set: false
|
||||||
|
remediation: |
|
||||||
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
|
When running with embedded-etcd, K3s does not set the --auto-tls parameter.
|
||||||
|
If this check fails, edit the etcd pod specification file $etcdconf on the master
|
||||||
|
node and either remove the --auto-tls parameter or set it to false.
|
||||||
|
client-transport-security:
|
||||||
|
auto-tls: false
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 2.4
|
||||||
|
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
|
||||||
|
audit_config: "cat $etcdconf"
|
||||||
|
tests:
|
||||||
|
bin_op: and
|
||||||
|
test_items:
|
||||||
|
- path: "{.peer-transport-security.cert-file}"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt"
|
||||||
|
- path: "{.peer-transport-security.key-file}"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key"
|
||||||
|
remediation: |
|
||||||
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
|
When running with embedded-etcd, K3s generates peer cert and key files for etcd.
|
||||||
|
These are located in /var/lib/rancher/k3s/server/tls/etcd/.
|
||||||
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
|
has not been modified to use custom peer cert and key files.
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 2.5
|
||||||
|
text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
|
||||||
|
audit_config: "cat $etcdconf"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- path: "{.peer-transport-security.client-cert-auth}"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: true
|
||||||
|
remediation: |
|
||||||
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
|
When running with embedded-etcd, K3s sets the --peer-cert-auth parameter to true.
|
||||||
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
|
has not been modified to disable peer client certificate authentication.
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 2.6
|
||||||
|
text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
|
||||||
|
audit_config: "cat $etcdconf"
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- path: "{.peer-transport-security.auto-tls}"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: false
|
||||||
|
- path: "{.peer-transport-security.auto-tls}"
|
||||||
|
set: false
|
||||||
|
remediation: |
|
||||||
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
|
When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter.
|
||||||
|
If this check fails, edit the etcd pod specification file $etcdconf on the master
|
||||||
|
node and either remove the --peer-auto-tls parameter or set it to false.
|
||||||
|
peer-transport-security:
|
||||||
|
auto-tls: false
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 2.7
|
||||||
|
text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
|
||||||
|
audit_config: "cat $etcdconf"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- path: "{.peer-transport-security.trusted-ca-file}"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt"
|
||||||
|
remediation: |
|
||||||
|
If running with sqlite or an external DB, etcd checks are Not Applicable.
|
||||||
|
When running with embedded-etcd, K3s generates a unique certificate authority for etcd.
|
||||||
|
This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt.
|
||||||
|
If this check fails, ensure that the configuration file $etcdconf
|
||||||
|
has not been modified to use a shared certificate authority.
|
||||||
|
scored: false
|
985
cfg/k3s-cis-1.8/master.yaml
Normal file
@ -0,0 +1,985 @@
|
|||||||
|
---
|
||||||
|
controls:
|
||||||
|
version: "k3s-cis-1.8"
|
||||||
|
id: 1
|
||||||
|
text: "Control Plane Security Configuration"
|
||||||
|
type: "master"
|
||||||
|
groups:
|
||||||
|
- id: 1.1
|
||||||
|
text: "Control Plane Node Configuration Files"
|
||||||
|
checks:
|
||||||
|
- id: 1.1.1
|
||||||
|
text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)"
|
||||||
|
type: "skip"
|
||||||
|
audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
By default, K3s embeds the api server within the k3s process. There is no API server pod specification file.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.2
|
||||||
|
text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
|
||||||
|
type: "skip"
|
||||||
|
audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "root:root"
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
By default, K3s embeds the api server within the k3s process. There is no API server pod specification file.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.3
|
||||||
|
text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)"
|
||||||
|
type: "skip"
|
||||||
|
audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.4
|
||||||
|
text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)"
|
||||||
|
type: "skip"
|
||||||
|
audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "root:root"
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.5
|
||||||
|
text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)"
|
||||||
|
type: "skip"
|
||||||
|
audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.6
|
||||||
|
text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)"
|
||||||
|
type: "skip"
|
||||||
|
audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "root:root"
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.7
|
||||||
|
text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)"
|
||||||
|
type: "skip"
|
||||||
|
audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'"
|
||||||
|
use_multiple_values: true
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.8
|
||||||
|
text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
|
||||||
|
type: "skip"
|
||||||
|
audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'"
|
||||||
|
use_multiple_values: true
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "root:root"
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.9
|
||||||
|
text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Automated)"
|
||||||
|
audit: find /var/lib/cni/networks -type f ! -name lock 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a
|
||||||
|
use_multiple_values: true
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the CNI file permissions to 600.
|
||||||
|
Note that for many CNIs, a lock file is created with permissions 750. This is expected and can be ignored.
|
||||||
|
If you modify your CNI configuration, ensure that the permissions are set to 600.
|
||||||
|
For example, chmod 600 /var/lib/cni/networks/<filename>
|
||||||
|
scored: true
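
As an aside on how these permission checks evaluate: the sketch below is an illustration only, assuming that the bitmask comparison passes when the audited file has no permission bits set outside the expected value.

# Illustration only (not part of the benchmark): how a bitmask comparison against "600"
# would be expected to treat the output of stat -c permissions=%a on a CNI file.
# permissions=600   rw-------   every set bit is within 600         -> pass
# permissions=400   r--------   more restrictive than 600           -> pass
# permissions=640   rw-r-----   group-read (040) falls outside 600  -> fail
# permissions=750   rwxr-x---   typical lock file; excluded above by ! -name lock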
|
||||||
|
|
||||||
|
- id: 1.1.10
|
||||||
|
text: "Ensure that the Container Network Interface file ownership is set to root:root (Automated)"
|
||||||
|
audit: find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
|
||||||
|
use_multiple_values: true
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "root:root"
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on the control plane node.
|
||||||
|
For example,
|
||||||
|
chown root:root /var/lib/cni/networks/<filename>
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.11
|
||||||
|
text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
|
||||||
|
audit: |
|
||||||
|
if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then
|
||||||
|
stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd
|
||||||
|
else
|
||||||
|
echo "permissions=700"
|
||||||
|
fi
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "700"
|
||||||
|
remediation: |
|
||||||
|
On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
|
||||||
|
from the command 'ps -ef | grep etcd'.
|
||||||
|
Run the below command (based on the etcd data directory found above). For example,
|
||||||
|
chmod 700 /var/lib/etcd
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.12
|
||||||
|
text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)"
|
||||||
|
audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
|
||||||
|
type: "skip"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "etcd:etcd"
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
For K3s, etcd is embedded within the k3s process. There is no separate etcd process.
|
||||||
|
Therefore the etcd data directory ownership is managed by the k3s process and should be root:root.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.13
|
||||||
|
text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)"
|
||||||
|
audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on the control plane node.
|
||||||
|
For example, chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.14
|
||||||
|
text: "Ensure that the admin.conf file ownership is set to root:root (Automated)"
|
||||||
|
audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "root:root"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "root:root"
|
||||||
|
set: true
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on the control plane node.
|
||||||
|
For example, chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.15
|
||||||
|
text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)"
|
||||||
|
audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on the control plane node.
|
||||||
|
For example,
|
||||||
|
chmod 600 $schedulerkubeconfig
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.16
|
||||||
|
text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)"
|
||||||
|
audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "root:root"
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on the control plane node.
|
||||||
|
For example,
|
||||||
|
chown root:root $schedulerkubeconfig
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.17
|
||||||
|
text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)"
|
||||||
|
audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on the control plane node.
|
||||||
|
For example,
|
||||||
|
chmod 600 $controllermanagerkubeconfig
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.18
|
||||||
|
text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)"
|
||||||
|
audit: "stat -c %U:%G $controllermanagerkubeconfig"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "root:root"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "root:root"
|
||||||
|
set: true
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on the control plane node.
|
||||||
|
For example,
|
||||||
|
chown root:root $controllermanagerkubeconfig
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.19
|
||||||
|
text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
|
||||||
|
audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls"
|
||||||
|
use_multiple_values: true
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "root:root"
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on the control plane node.
|
||||||
|
For example,
|
||||||
|
chown -R root:root /var/lib/rancher/k3s/server/tls
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.1.20
|
||||||
|
text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)"
|
||||||
|
audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.crt'"
|
||||||
|
use_multiple_values: true
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on the master node.
|
||||||
|
For example,
|
||||||
|
chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 1.1.21
|
||||||
|
text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)"
|
||||||
|
audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.key'"
|
||||||
|
use_multiple_values: true
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on the master node.
|
||||||
|
For example,
|
||||||
|
chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2
|
||||||
|
text: "API Server"
|
||||||
|
checks:
|
||||||
|
- id: 1.2.1
|
||||||
|
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--anonymous-auth"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the --anonymous-auth argument to false.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "anonymous-auth=true"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.2
|
||||||
|
text: "Ensure that the --token-auth-file parameter is not set (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--token-auth-file"
|
||||||
|
set: false
|
||||||
|
remediation: |
|
||||||
|
Follow the documentation and configure alternate mechanisms for authentication.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "token-auth-file=<path>"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.3
|
||||||
|
text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- flag: "--enable-admission-plugins"
|
||||||
|
compare:
|
||||||
|
op: nothave
|
||||||
|
value: "DenyServiceExternalIPs"
|
||||||
|
set: true
|
||||||
|
- flag: "--enable-admission-plugins"
|
||||||
|
set: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s does not set DenyServiceExternalIPs.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "enable-admission-plugins=DenyServiceExternalIPs"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.4
|
||||||
|
text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
|
||||||
|
tests:
|
||||||
|
bin_op: and
|
||||||
|
test_items:
|
||||||
|
- flag: "--kubelet-client-certificate"
|
||||||
|
- flag: "--kubelet-client-key"
|
||||||
|
remediation: |
|
||||||
|
By default, K3s automatically provides the kubelet client certificate and key.
|
||||||
|
They are generated and located at /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/client-kube-apiserver.key
|
||||||
|
If for some reason you need to provide your own certificate and key, you can set the
|
||||||
|
below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "kubelet-client-certificate=<path/to/client-cert-file>"
|
||||||
|
- "kubelet-client-key=<path/to/client-key-file>"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.5
|
||||||
|
text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--kubelet-certificate-authority"
|
||||||
|
remediation: |
|
||||||
|
By default, K3s automatically provides the kubelet CA cert file, at /var/lib/rancher/k3s/server/tls/server-ca.crt.
|
||||||
|
If for some reason you need to provide your own ca certificate, look at using the k3s certificate command line tool.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "kubelet-certificate-authority=<path/to/ca-cert-file>"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.6
|
||||||
|
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--authorization-mode"
|
||||||
|
compare:
|
||||||
|
op: nothave
|
||||||
|
value: "AlwaysAllow"
|
||||||
|
remediation: |
|
||||||
|
By default, K3s does not set the --authorization-mode to AlwaysAllow.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "authorization-mode=AlwaysAllow"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.7
|
||||||
|
text: "Ensure that the --authorization-mode argument includes Node (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--authorization-mode"
|
||||||
|
compare:
|
||||||
|
op: has
|
||||||
|
value: "Node"
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the --authorization-mode to Node and RBAC.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml,
|
||||||
|
ensure that you are not overriding authorization-mode.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.8
|
||||||
|
text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--authorization-mode"
|
||||||
|
compare:
|
||||||
|
op: has
|
||||||
|
value: "RBAC"
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the --authorization-mode to Node and RBAC.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml,
|
||||||
|
ensure that you are not overriding authorization-mode.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.9
|
||||||
|
text: "Ensure that the admission control plugin EventRateLimit is set (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--enable-admission-plugins"
|
||||||
|
compare:
|
||||||
|
op: has
|
||||||
|
value: "EventRateLimit"
|
||||||
|
remediation: |
|
||||||
|
Follow the Kubernetes documentation and set the desired limits in a configuration file.
|
||||||
|
Then, edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameters.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "enable-admission-plugins=...,EventRateLimit,..."
|
||||||
|
- "admission-control-config-file=<path/to/configuration/file>"
|
||||||
|
scored: false
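
To make the remediation above more concrete, here is a minimal sketch of an EventRateLimit admission configuration; the file paths and the qps/burst numbers are assumptions chosen for illustration, not K3s defaults.

# /var/lib/rancher/k3s/server/admission-control.yaml (example path, an assumption)
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
  - name: EventRateLimit
    path: /var/lib/rancher/k3s/server/eventconfig.yaml

# /var/lib/rancher/k3s/server/eventconfig.yaml (example path, an assumption)
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
kind: Configuration
limits:
  - type: Server      # example limits only; tune qps/burst for your cluster
    qps: 50
    burst: 100

# /etc/rancher/k3s/config.yaml would then wire both in, for example:
kube-apiserver-arg:
  - "enable-admission-plugins=NodeRestriction,EventRateLimit"
  - "admission-control-config-file=/var/lib/rancher/k3s/server/admission-control.yaml"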
|
||||||
|
|
||||||
|
- id: 1.2.10
|
||||||
|
text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- flag: "--enable-admission-plugins"
|
||||||
|
compare:
|
||||||
|
op: nothave
|
||||||
|
value: AlwaysAdmit
|
||||||
|
- flag: "--enable-admission-plugins"
|
||||||
|
set: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s does not set the --enable-admission-plugins to AlwaysAdmit.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "enable-admission-plugins=AlwaysAdmit"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.11
|
||||||
|
text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--enable-admission-plugins"
|
||||||
|
compare:
|
||||||
|
op: has
|
||||||
|
value: "AlwaysPullImages"
|
||||||
|
remediation: |
|
||||||
|
Permissive, per CIS guidelines,
|
||||||
|
"This setting could impact offline or isolated clusters, which have images pre-loaded and
|
||||||
|
do not have access to a registry to pull in-use images. This setting is not appropriate for
|
||||||
|
clusters which use this configuration."
|
||||||
|
Edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameter.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "enable-admission-plugins=...,AlwaysPullImages,..."
|
||||||
|
scored: false
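
If you do decide to enable it, a minimal sketch of the config file change follows; the plugin list is an assumption, the point being that AlwaysPullImages is appended to whatever plugins the cluster already enables (K3s enables NodeRestriction by default, see 1.2.15).

# /etc/rancher/k3s/config.yaml (sketch)
kube-apiserver-arg:
  - "enable-admission-plugins=NodeRestriction,AlwaysPullImages"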
|
||||||
|
|
||||||
|
- id: 1.2.12
|
||||||
|
text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)"
|
||||||
|
type: "skip"
|
||||||
|
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- flag: "--enable-admission-plugins"
|
||||||
|
compare:
|
||||||
|
op: has
|
||||||
|
value: "SecurityContextDeny"
|
||||||
|
- flag: "--enable-admission-plugins"
|
||||||
|
compare:
|
||||||
|
op: has
|
||||||
|
value: "PodSecurityPolicy"
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
Enabling Pod Security Policy is no longer supported on K3s v1.25+ and will cause applications to unexpectedly fail.
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 1.2.13
|
||||||
|
text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- flag: "--disable-admission-plugins"
|
||||||
|
compare:
|
||||||
|
op: nothave
|
||||||
|
value: "ServiceAccount"
|
||||||
|
- flag: "--disable-admission-plugins"
|
||||||
|
set: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s does not set the --disable-admission-plugins to anything.
|
||||||
|
Follow the documentation and create ServiceAccount objects as per your environment.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "disable-admission-plugins=ServiceAccount"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.14
|
||||||
|
text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- flag: "--disable-admission-plugins"
|
||||||
|
compare:
|
||||||
|
op: nothave
|
||||||
|
value: "NamespaceLifecycle"
|
||||||
|
- flag: "--disable-admission-plugins"
|
||||||
|
set: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s does not set the --disable-admission-plugins to anything.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "disable-admission-plugins=...,NamespaceLifecycle,..."
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.15
|
||||||
|
text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--enable-admission-plugins"
|
||||||
|
compare:
|
||||||
|
op: has
|
||||||
|
value: "NodeRestriction"
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the --enable-admission-plugins to NodeRestriction.
|
||||||
|
If using the K3s config file /etc/rancher/k3s/config.yaml, check that you are not overriding the admission plugins.
|
||||||
|
If you are, include NodeRestriction in the list.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "enable-admission-plugins=...,NodeRestriction,..."
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.16
|
||||||
|
text: "Ensure that the --profiling argument is set to false (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--profiling"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the --profiling argument to false.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "profiling=true"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.17
|
||||||
|
text: "Ensure that the --audit-log-path argument is set (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--audit-log-path"
|
||||||
|
remediation: |
|
||||||
|
Edit the K3s config file /etc/rancher/k3s/config.yaml and set the audit-log-path parameter to a suitable path and
|
||||||
|
file where you would like audit logs to be written, for example,
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log"
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 1.2.18
|
||||||
|
text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--audit-log-maxage"
|
||||||
|
compare:
|
||||||
|
op: gte
|
||||||
|
value: 30
|
||||||
|
remediation: |
|
||||||
|
Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and
|
||||||
|
set the audit-log-maxage parameter to 30 or as an appropriate number of days, for example,
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "audit-log-maxage=30"
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 1.2.19
|
||||||
|
text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--audit-log-maxbackup"
|
||||||
|
compare:
|
||||||
|
op: gte
|
||||||
|
value: 10
|
||||||
|
remediation: |
|
||||||
|
Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and
|
||||||
|
set the audit-log-maxbackup parameter to 10 or to an appropriate value. For example,
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "audit-log-maxbackup=10"
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 1.2.20
|
||||||
|
text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--audit-log-maxsize"
|
||||||
|
compare:
|
||||||
|
op: gte
|
||||||
|
value: 100
|
||||||
|
remediation: |
|
||||||
|
Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and
|
||||||
|
set the audit-log-maxsize parameter to an appropriate size in MB. For example,
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "audit-log-maxsize=100"
|
||||||
|
scored: false
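
Checks 1.2.17 through 1.2.20 can be satisfied together from the K3s config file; a minimal sketch using the path from 1.2.17 and the values recommended above:

# /etc/rancher/k3s/config.yaml (sketch combining 1.2.17-1.2.20)
kube-apiserver-arg:
  - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log"
  - "audit-log-maxage=30"
  - "audit-log-maxbackup=10"
  - "audit-log-maxsize=100"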
|
||||||
|
|
||||||
|
- id: 1.2.21
|
||||||
|
text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--request-timeout"
|
||||||
|
remediation: |
|
||||||
|
Permissive, per CIS guidelines,
|
||||||
|
"it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed".
|
||||||
|
Edit the K3s config file /etc/rancher/k3s/config.yaml
|
||||||
|
and set the below parameter if needed. For example,
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "request-timeout=300s"
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 1.2.22
|
||||||
|
text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- flag: "--service-account-lookup"
|
||||||
|
set: false
|
||||||
|
- flag: "--service-account-lookup"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: true
|
||||||
|
remediation: |
|
||||||
|
By default, K3s does not set the --service-account-lookup argument.
|
||||||
|
Edit the K3s config file /etc/rancher/k3s/config.yaml and set the service-account-lookup. For example,
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "service-account-lookup=true"
|
||||||
|
Alternatively, you can delete the service-account-lookup parameter from this file so
|
||||||
|
that the default takes effect.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.23
|
||||||
|
text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--service-account-key-file"
|
||||||
|
remediation: |
|
||||||
|
K3s automatically generates and sets the service account key file.
|
||||||
|
It is located at /var/lib/rancher/k3s/server/tls/service.key.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "service-account-key-file=<path>"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.24
|
||||||
|
text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
|
||||||
|
audit: |
|
||||||
|
if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then
|
||||||
|
journalctl -m -u k3s | grep -m1 'Running kube-apiserver' | tail -n1
|
||||||
|
else
|
||||||
|
echo "--etcd-certfile AND --etcd-keyfile"
|
||||||
|
fi
|
||||||
|
tests:
|
||||||
|
bin_op: and
|
||||||
|
test_items:
|
||||||
|
- flag: "--etcd-certfile"
|
||||||
|
set: true
|
||||||
|
- flag: "--etcd-keyfile"
|
||||||
|
set: true
|
||||||
|
remediation: |
|
||||||
|
K3s automatically generates and sets the etcd certificate and key files.
|
||||||
|
They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "etcd-certfile=<path>"
|
||||||
|
- "etcd-keyfile=<path>"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.25
|
||||||
|
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2"
|
||||||
|
tests:
|
||||||
|
bin_op: and
|
||||||
|
test_items:
|
||||||
|
- flag: "--tls-cert-file"
|
||||||
|
set: true
|
||||||
|
- flag: "--tls-private-key-file"
|
||||||
|
set: true
|
||||||
|
remediation: |
|
||||||
|
By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver.
|
||||||
|
They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "tls-cert-file=<path>"
|
||||||
|
- "tls-private-key-file=<path>"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.26
|
||||||
|
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--client-ca-file"
|
||||||
|
remediation: |
|
||||||
|
By default, K3s automatically provides the client certificate authority file.
|
||||||
|
It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt.
|
||||||
|
If for some reason you need to provide your own ca certificate, look at using the k3s certificate command line tool.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "client-ca-file=<path>"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.27
|
||||||
|
text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--etcd-cafile"
|
||||||
|
remediation: |
|
||||||
|
By default, K3s automatically provides the etcd certificate authority file.
|
||||||
|
It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt.
|
||||||
|
If for some reason you need to provide your own ca certificate, look at using the k3s certificate command line tool.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "etcd-cafile=<path>"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.2.28
|
||||||
|
text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--encryption-provider-config"
|
||||||
|
remediation: |
|
||||||
|
K3s can be configured to use encryption providers to encrypt secrets at rest.
|
||||||
|
Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter.
|
||||||
|
secrets-encryption: true
|
||||||
|
Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
|
||||||
|
If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json.
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 1.2.29
|
||||||
|
text: "Ensure that encryption providers are appropriately configured (Manual)"
|
||||||
|
audit: |
|
||||||
|
ENCRYPTION_PROVIDER_CONFIG=$(journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
|
||||||
|
if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "provider"
|
||||||
|
compare:
|
||||||
|
op: valid_elements
|
||||||
|
value: "aescbc,kms,secretbox"
|
||||||
|
remediation: |
|
||||||
|
K3s can be configured to use encryption providers to encrypt secrets at rest. K3s will utilize the aescbc provider.
|
||||||
|
Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter.
|
||||||
|
secrets-encryption: true
|
||||||
|
Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
|
||||||
|
If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json
|
||||||
|
scored: false
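
A minimal sketch of the secrets-at-rest configuration described in 1.2.28 and 1.2.29 follows; the shape shown for the generated encryption config is an assumption for orientation only and may differ between K3s releases.

# /etc/rancher/k3s/config.yaml
secrets-encryption: true

# The generated /var/lib/rancher/k3s/server/cred/encryption-config.json is then expected
# to declare an aescbc provider roughly of this shape (illustrative, not verbatim):
# resources:
#   - resources: ["secrets"]
#     providers:
#       - aescbc:
#           keys:
#             - name: aescbckey
#               secret: <base64-encoded key>
#       - identity: {}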
|
||||||
|
|
||||||
|
- id: 1.2.30
|
||||||
|
text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--tls-cipher-suites"
|
||||||
|
compare:
|
||||||
|
op: valid_elements
|
||||||
|
value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
|
||||||
|
remediation: |
|
||||||
|
By default, the K3s kube-apiserver complies with this test. Changes to these values may cause regressions; therefore, ensure that all apiserver clients support the new TLS configuration before applying it in production deployments.
|
||||||
|
If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements.
|
||||||
|
If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/k3s/config.yaml file to match the default by adding the following:
|
||||||
|
kube-apiserver-arg:
|
||||||
|
- "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.3
|
||||||
|
text: "Controller Manager"
|
||||||
|
checks:
|
||||||
|
- id: 1.3.1
|
||||||
|
text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--terminated-pod-gc-threshold"
|
||||||
|
remediation: |
|
||||||
|
Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node
|
||||||
|
and set the --terminated-pod-gc-threshold to an appropriate threshold,
|
||||||
|
kube-controller-manager-arg:
|
||||||
|
- "terminated-pod-gc-threshold=10"
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 1.3.2
|
||||||
|
text: "Ensure that the --profiling argument is set to false (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--profiling"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the --profiling argument to false.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-controller-manager-arg:
|
||||||
|
- "profiling=true"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.3.3
|
||||||
|
text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--use-service-account-credentials"
|
||||||
|
compare:
|
||||||
|
op: noteq
|
||||||
|
value: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the --use-service-account-credentials argument to true.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-controller-manager-arg:
|
||||||
|
- "use-service-account-credentials=false"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.3.4
|
||||||
|
text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--service-account-private-key-file"
|
||||||
|
remediation: |
|
||||||
|
By default, K3s automatically provides the service account private key file.
|
||||||
|
It is generated and located at /var/lib/rancher/k3s/server/tls/service.current.key.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-controller-manager-arg:
|
||||||
|
- "service-account-private-key-file=<path>"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.3.5
|
||||||
|
text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--root-ca-file"
|
||||||
|
remediation: |
|
||||||
|
By default, K3s automatically provides the root CA file.
|
||||||
|
It is generated and located at /var/lib/rancher/k3s/server/tls/server-ca.crt.
|
||||||
|
If for some reason you need to provide your own ca certificate, look at using the k3s certificate command line tool.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-controller-manager-arg:
|
||||||
|
- "root-ca-file=<path>"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.3.6
|
||||||
|
text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1"
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- flag: "--feature-gates"
|
||||||
|
compare:
|
||||||
|
op: nothave
|
||||||
|
value: "RotateKubeletServerCertificate=false"
|
||||||
|
set: true
|
||||||
|
- flag: "--feature-gates"
|
||||||
|
set: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s does not set the RotateKubeletServerCertificate feature gate.
|
||||||
|
If you have set this feature gate to false, you should remove that setting.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-controller-manager-arg:
|
||||||
|
- "feature-gate=RotateKubeletServerCertificate"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.3.7
|
||||||
|
text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1"
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- flag: "--bind-address"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "127.0.0.1"
|
||||||
|
set: true
|
||||||
|
- flag: "--bind-address"
|
||||||
|
set: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the --bind-address argument to 127.0.0.1
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-controller-manager-arg:
|
||||||
|
- "bind-address=<IP>"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.4
|
||||||
|
text: "Scheduler"
|
||||||
|
checks:
|
||||||
|
- id: 1.4.1
|
||||||
|
text: "Ensure that the --profiling argument is set to false (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'profiling'"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--profiling"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: false
|
||||||
|
set: true
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the --profiling argument to false.
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-scheduler-arg:
|
||||||
|
- "profiling=true"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 1.4.2
|
||||||
|
text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'"
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- flag: "--bind-address"
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: "127.0.0.1"
|
||||||
|
set: true
|
||||||
|
- flag: "--bind-address"
|
||||||
|
set: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the --bind-address argument to 127.0.0.1
|
||||||
|
If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
|
||||||
|
kube-scheduler-arg:
|
||||||
|
- "bind-address=<IP>"
|
||||||
|
scored: true
|
422 cfg/k3s-cis-1.8/node.yaml Normal file
@@ -0,0 +1,422 @@
|
|||||||
|
---
|
||||||
|
controls:
|
||||||
|
version: "k3s-cis-1.8"
|
||||||
|
id: 4
|
||||||
|
text: "Worker Node Security Configuration"
|
||||||
|
type: "node"
|
||||||
|
groups:
|
||||||
|
- id: 4.1
|
||||||
|
text: "Worker Node Configuration Files"
|
||||||
|
checks:
|
||||||
|
- id: 4.1.1
|
||||||
|
text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
|
||||||
|
type: "skip"
|
||||||
|
audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.1.2
|
||||||
|
text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
|
||||||
|
type: "skip"
|
||||||
|
audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: root:root
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.1.3
|
||||||
|
text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
|
||||||
|
audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on each worker node.
|
||||||
|
For example,
|
||||||
|
chmod 600 $proxykubeconfig
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.1.4
|
||||||
|
text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
|
||||||
|
audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- flag: root:root
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on each worker node.
|
||||||
|
For example, chown root:root $proxykubeconfig
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.1.5
|
||||||
|
text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
|
||||||
|
audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on each worker node.
|
||||||
|
For example,
|
||||||
|
chmod 600 $kubeletkubeconfig
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.1.6
|
||||||
|
text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
|
||||||
|
audit: 'stat -c %U:%G $kubeletkubeconfig'
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: root:root
|
||||||
|
remediation: |
|
||||||
|
Run the below command (based on the file location on your system) on each worker node.
|
||||||
|
For example,
|
||||||
|
chown root:root $kubeletkubeconfig
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.1.7
|
||||||
|
text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
|
||||||
|
audit: "stat -c permissions=%a $kubeletcafile"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Run the following command to modify the file permissions of the --client-ca-file. For example,
|
||||||
|
chmod 600 $kubeletcafile
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.1.8
|
||||||
|
text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
|
||||||
|
audit: "stat -c %U:%G $kubeletcafile"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: root:root
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: root:root
|
||||||
|
remediation: |
|
||||||
|
Run the following command to modify the ownership of the --client-ca-file.
|
||||||
|
chown root:root $kubeletcafile
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.1.9
|
||||||
|
text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)"
|
||||||
|
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
|
||||||
|
type: "skip"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "permissions"
|
||||||
|
compare:
|
||||||
|
op: bitmask
|
||||||
|
value: "600"
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.1.10
|
||||||
|
text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)"
|
||||||
|
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
|
||||||
|
type: "skip"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: root:root
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.2
|
||||||
|
text: "Kubelet"
|
||||||
|
checks:
|
||||||
|
- id: 4.2.1
|
||||||
|
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
|
||||||
|
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: "--anonymous-auth"
|
||||||
|
path: '{.authentication.anonymous.enabled}'
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the --anonymous-auth to false. If you have set this to a different value, you
|
||||||
|
should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
|
||||||
|
kubelet-arg:
|
||||||
|
- "anonymous-auth=true"
|
||||||
|
If using the command line, edit the K3s service file and remove the below argument.
|
||||||
|
--kubelet-arg="anonymous-auth=true"
|
||||||
|
Based on your system, restart the k3s service. For example,
|
||||||
|
systemctl daemon-reload
|
||||||
|
systemctl restart k3s.service
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.2.2
|
||||||
|
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
|
||||||
|
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' '
|
||||||
|
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: --authorization-mode
|
||||||
|
path: '{.authorization.mode}'
|
||||||
|
compare:
|
||||||
|
op: nothave
|
||||||
|
value: AlwaysAllow
|
||||||
|
remediation: |
|
||||||
|
By default, K3s does not set the --authorization-mode to AlwaysAllow.
|
||||||
|
If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
|
||||||
|
kubelet-arg:
|
||||||
|
- "authorization-mode=AlwaysAllow"
|
||||||
|
If using the command line, edit the K3s service file and remove the below argument.
|
||||||
|
--kubelet-arg="authorization-mode=AlwaysAllow"
|
||||||
|
Based on your system, restart the k3s service. For example,
|
||||||
|
systemctl daemon-reload
|
||||||
|
systemctl restart k3s.service
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.2.3
|
||||||
|
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
|
||||||
|
audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: --client-ca-file
|
||||||
|
path: '{.authentication.x509.clientCAFile}'
|
||||||
|
remediation: |
|
||||||
|
By default, K3s automatically provides the client ca certificate for the Kubelet.
|
||||||
|
It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.2.4
|
||||||
|
text: "Verify that the --read-only-port argument is set to 0 (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
|
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- flag: "--read-only-port"
|
||||||
|
path: '{.readOnlyPort}'
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: 0
|
||||||
|
- flag: "--read-only-port"
|
||||||
|
path: '{.readOnlyPort}'
|
||||||
|
set: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the --read-only-port to 0. If you have set this to a different value, you
|
||||||
|
should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
|
||||||
|
kubelet-arg:
|
||||||
|
- "read-only-port=XXXX"
|
||||||
|
If using the command line, edit the K3s service file and remove the below argument.
|
||||||
|
--kubelet-arg="read-only-port=XXXX"
|
||||||
|
Based on your system, restart the k3s service. For example,
|
||||||
|
systemctl daemon-reload
|
||||||
|
systemctl restart k3s.service
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.2.5
|
||||||
|
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: --streaming-connection-idle-timeout
|
||||||
|
path: '{.streamingConnectionIdleTimeout}'
|
||||||
|
compare:
|
||||||
|
op: noteq
|
||||||
|
value: 0
|
||||||
|
- flag: --streaming-connection-idle-timeout
|
||||||
|
path: '{.streamingConnectionIdleTimeout}'
|
||||||
|
set: false
|
||||||
|
bin_op: or
|
||||||
|
remediation: |
|
||||||
|
If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
|
||||||
|
kubelet-arg:
|
||||||
|
- "streaming-connection-idle-timeout=5m"
|
||||||
|
If using the command line, run K3s with --kubelet-arg="streaming-connection-idle-timeout=5m".
|
||||||
|
Based on your system, restart the k3s service. For example,
|
||||||
|
systemctl restart k3s.service
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 4.2.6
|
||||||
|
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: --make-iptables-util-chains
|
||||||
|
path: '{.makeIPTablesUtilChains}'
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: true
|
||||||
|
- flag: --make-iptables-util-chains
|
||||||
|
path: '{.makeIPTablesUtilChains}'
|
||||||
|
set: false
|
||||||
|
bin_op: or
|
||||||
|
remediation: |
|
||||||
|
If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter.
|
||||||
|
kubelet-arg:
|
||||||
|
- "make-iptables-util-chains=true"
|
||||||
|
If using the command line, run K3s with --kubelet-arg="make-iptables-util-chains=true".
|
||||||
|
Based on your system, restart the k3s service. For example,
|
||||||
|
systemctl restart k3s.service
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.2.7
|
||||||
|
text: "Ensure that the --hostname-override argument is not set (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
|
type: "skip"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: --hostname-override
|
||||||
|
set: false
|
||||||
|
remediation: |
|
||||||
|
Not Applicable.
|
||||||
|
By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is to comply
|
||||||
|
with cloud providers that require this flag to ensure that hostname matches node names.
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.2.8
|
||||||
|
text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
|
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: --event-qps
|
||||||
|
path: '{.eventRecordQPS}'
|
||||||
|
compare:
|
||||||
|
op: gte
|
||||||
|
value: 0
|
||||||
|
- flag: --event-qps
|
||||||
|
path: '{.eventRecordQPS}'
|
||||||
|
set: false
|
||||||
|
bin_op: or
|
||||||
|
remediation: |
|
||||||
|
By default, K3s sets the event-qps to 0. Should you wish to change this,
|
||||||
|
If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
|
||||||
|
kubelet-arg:
|
||||||
|
- "event-qps=<value>"
|
||||||
|
If using the command line, run K3s with --kubelet-arg="event-qps=<value>".
|
||||||
|
Based on your system, restart the k3s service. For example,
|
||||||
|
systemctl restart k3s.service
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 4.2.9
|
||||||
|
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: --tls-cert-file
|
||||||
|
path: '/var/lib/rancher/k3s/agent/serving-kubelet.crt'
|
||||||
|
- flag: --tls-private-key-file
|
||||||
|
path: '/var/lib/rancher/k3s/agent/serving-kubelet.key'
|
||||||
|
remediation: |
|
||||||
|
By default, K3s automatically provides the TLS certificate and private key for the Kubelet.
|
||||||
|
They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key
|
||||||
|
If for some reason you need to provide your own certificate and key, you can set the
|
||||||
|
below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
|
||||||
|
kubelet-arg:
|
||||||
|
- "tls-cert-file=<path/to/tls-cert-file>"
|
||||||
|
- "tls-private-key-file=<path/to/tls-private-key-file>"
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.2.10
|
||||||
|
text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
|
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: --rotate-certificates
|
||||||
|
path: '{.rotateCertificates}'
|
||||||
|
compare:
|
||||||
|
op: eq
|
||||||
|
value: true
|
||||||
|
- flag: --rotate-certificates
|
||||||
|
path: '{.rotateCertificates}'
|
||||||
|
set: false
|
||||||
|
bin_op: or
|
||||||
|
remediation: |
|
||||||
|
By default, K3s does not set the --rotate-certificates argument. If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag.
|
||||||
|
If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter.
|
||||||
|
If using the command line, remove the K3s flag --kubelet-arg="rotate-certificates".
|
||||||
|
Based on your system, restart the k3s service. For example,
|
||||||
|
systemctl restart k3s.service
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.2.11
|
||||||
|
text: "Verify that the RotateKubeletServerCertificate argument is set to true (Automated)"
|
||||||
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
|
audit_config: "/bin/cat $kubeletconf"
|
||||||
|
tests:
|
||||||
|
bin_op: or
|
||||||
|
test_items:
|
||||||
|
- flag: RotateKubeletServerCertificate
|
||||||
|
path: '{.featureGates.RotateKubeletServerCertificate}'
|
||||||
|
compare:
|
||||||
|
op: nothave
|
||||||
|
value: false
|
||||||
|
- flag: RotateKubeletServerCertificate
|
||||||
|
path: '{.featureGates.RotateKubeletServerCertificate}'
|
||||||
|
set: false
|
||||||
|
remediation: |
|
||||||
|
By default, K3s does not set the RotateKubeletServerCertificate feature gate.
|
||||||
|
If you have enabled this feature gate, you should remove it.
|
||||||
|
If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gate=RotateKubeletServerCertificate parameter.
|
||||||
|
If using the command line, remove the K3s flag --kubelet-arg="feature-gate=RotateKubeletServerCertificate".
|
||||||
|
Based on your system, restart the k3s service. For example,
|
||||||
|
systemctl restart k3s.service
|
||||||
|
scored: true
|
||||||
|
|
||||||
|
- id: 4.2.12
|
||||||
|
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
|
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: --tls-cipher-suites
|
||||||
|
path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
|
||||||
|
compare:
|
||||||
|
op: valid_elements
|
||||||
|
value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
|
||||||
|
remediation: |
|
||||||
|
If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `TLSCipherSuites` to
|
||||||
|
kubelet-arg:
|
||||||
|
- "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
|
||||||
|
or to a subset of these values.
|
||||||
|
If using the command line, add the K3s flag --kubelet-arg="tls-cipher-suites=<same values as above>"
|
||||||
|
Based on your system, restart the k3s service. For example,
|
||||||
|
systemctl restart k3s.service
|
||||||
|
scored: false
|
||||||
|
|
||||||
|
- id: 4.2.13
|
||||||
|
text: "Ensure that a limit is set on pod PIDs (Manual)"
|
||||||
|
audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
|
||||||
|
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
|
||||||
|
tests:
|
||||||
|
test_items:
|
||||||
|
- flag: --pod-max-pids
|
||||||
|
path: '{.podPidsLimit}'
|
||||||
|
remediation: |
|
||||||
|
Decide on an appropriate level for this parameter and set it,
|
||||||
|
If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `podPidsLimit` to
|
||||||
|
kubelet-arg:
|
||||||
|
- "pod-max-pids=<value>"
|
||||||
|
scored: false
|
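The kubelet checks above (4.2.5, 4.2.6, 4.2.8, 4.2.12 and 4.2.13) all converge on the same remediation path: `kubelet-arg` entries in /etc/rancher/k3s/config.yaml followed by a restart of the k3s service. A minimal sketch of what such a config file could look like is shown below; the PID limit and the cipher subset are illustrative placeholders, not values prescribed by this benchmark.

```yaml
# /etc/rancher/k3s/config.yaml -- illustrative hardening values only.
kubelet-arg:
  - "streaming-connection-idle-timeout=5m"   # 4.2.5
  - "make-iptables-util-chains=true"         # 4.2.6
  - "event-qps=0"                            # 4.2.8 (the K3s default per the remediation)
  - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"  # 4.2.12, a subset of the allowed list
  - "pod-max-pids=4096"                      # 4.2.13 (placeholder limit)
```

After editing the file, apply the change with `systemctl restart k3s.service`, as the remediations note.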
300 cfg/k3s-cis-1.8/policies.yaml Normal file
@@ -0,0 +1,300 @@
---
controls:
version: "k3s-cis-1.8"
id: 5
text: "Kubernetes Policies"
type: "policies"
groups:
  - id: 5.1
    text: "RBAC and Service Accounts"
    checks:
      - id: 5.1.1
        text: "Ensure that the cluster-admin role is only used where required (Manual)"
        type: "manual"
        remediation: |
          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
          if they need this role or if they could use a role with fewer privileges.
          Where possible, first bind users to a lower privileged role and then remove the
          clusterrolebinding to the cluster-admin role:
          kubectl delete clusterrolebinding [name]
        scored: false

      - id: 5.1.2
        text: "Minimize access to secrets (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove get, list and watch access to Secret objects in the cluster.
        scored: false

      - id: 5.1.3
        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
        type: "manual"
        remediation: |
          Where possible replace any use of wildcards in clusterroles and roles with specific
          objects or actions.
        scored: false

      - id: 5.1.4
        text: "Minimize access to create pods (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove create access to pod objects in the cluster.
        scored: false

      - id: 5.1.5
        text: "Ensure that default service accounts are not actively used. (Manual)"
        type: "manual"
        remediation: |
          Create explicit service accounts wherever a Kubernetes workload requires specific access
          to the Kubernetes API server.
          Modify the configuration of each default service account to include this value
          automountServiceAccountToken: false
        scored: false

      - id: 5.1.6
        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
        type: "manual"
        remediation: |
          Modify the definition of pods and service accounts which do not need to mount service
          account tokens to disable it.
        scored: false

      - id: 5.1.7
        text: "Avoid use of system:masters group (Manual)"
        type: "manual"
        remediation: |
          Remove the system:masters group from all users in the cluster.
        scored: false

      - id: 5.1.8
        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove the impersonate, bind and escalate rights from subjects.
        scored: false

      - id: 5.1.9
        text: "Minimize access to create persistent volumes (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove create access to PersistentVolume objects in the cluster.
        scored: false

      - id: 5.1.10
        text: "Minimize access to the proxy sub-resource of nodes (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the proxy sub-resource of node objects.
        scored: false

      - id: 5.1.11
        text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the approval sub-resource of certificatesigningrequest objects.
        scored: false

      - id: 5.1.12
        text: "Minimize access to webhook configuration objects (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects.
        scored: false

      - id: 5.1.13
        text: "Minimize access to the service account token creation (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the token sub-resource of serviceaccount objects.
        scored: false

  - id: 5.2
    text: "Pod Security Standards"
    checks:
      - id: 5.2.1
        text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
        type: "manual"
        remediation: |
          Ensure that either Pod Security Admission or an external policy control system is in place
          for every namespace which contains user workloads.
        scored: false

      - id: 5.2.2
        text: "Minimize the admission of privileged containers (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of privileged containers.
        scored: false

      - id: 5.2.3
        text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostPID` containers.
        scored: false

      - id: 5.2.4
        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostIPC` containers.
        scored: false

      - id: 5.2.5
        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostNetwork` containers.
        scored: false

      - id: 5.2.6
        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
        scored: true

      - id: 5.2.7
        text: "Minimize the admission of root containers (Automated)"
        type: "manual"
        remediation: |
          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
          or `MustRunAs` with the range of UIDs not including 0, is set.
        scored: false

      - id: 5.2.8
        text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with the `NET_RAW` capability.
        scored: false

      - id: 5.2.9
        text: "Minimize the admission of containers with added capabilities (Automated)"
        type: "manual"
        remediation: |
          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
          it is set to an empty array.
        scored: false

      - id: 5.2.10
        text: "Minimize the admission of containers with capabilities assigned (Manual)"
        type: "manual"
        remediation: |
          Review the use of capabilities in applications running on your cluster. Where a namespace
          contains applications which do not require any Linux capabilities to operate, consider adding
          a PSP which forbids the admission of containers which do not drop all capabilities.
        scored: false

      - id: 5.2.11
        text: "Minimize the admission of Windows HostProcess containers (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
        scored: false

      - id: 5.2.12
        text: "Minimize the admission of HostPath volumes (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with `hostPath` volumes.
        scored: false

      - id: 5.2.13
        text: "Minimize the admission of containers which use HostPorts (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers which use `hostPort` sections.
        scored: false

  - id: 5.3
    text: "Network Policies and CNI"
    checks:
      - id: 5.3.1
        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
        type: "manual"
        remediation: |
          If the CNI plugin in use does not support network policies, consideration should be given to
          making use of a different plugin, or finding an alternate mechanism for restricting traffic
          in the Kubernetes cluster.
        scored: false

      - id: 5.3.2
        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
        remediation: |
          Follow the documentation and create NetworkPolicy objects as you need them.
        scored: false

  - id: 5.4
    text: "Secrets Management"
    checks:
      - id: 5.4.1
        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
        type: "manual"
        remediation: |
          If possible, rewrite application code to read Secrets from mounted secret files, rather than
          from environment variables.
        scored: false

      - id: 5.4.2
        text: "Consider external secret storage (Manual)"
        type: "manual"
        remediation: |
          Refer to the Secrets management options offered by your cloud provider or a third-party
          secrets management solution.
        scored: false

  - id: 5.5
    text: "Extensible Admission Control"
    checks:
      - id: 5.5.1
        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and setup image provenance.
        scored: false

  - id: 5.7
    text: "General Policies"
    checks:
      - id: 5.7.1
        text: "Create administrative boundaries between resources using namespaces (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation and create namespaces for objects in your deployment as you need
          them.
        scored: false

      - id: 5.7.2
        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
        type: "manual"
        remediation: |
          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
          An example is as below:
          securityContext:
            seccompProfile:
              type: RuntimeDefault
        scored: false

      - id: 5.7.3
        text: "Apply SecurityContext to your Pods and Containers (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
          suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
          Containers.
        scored: false

      - id: 5.7.4
        text: "The default namespace should not be used (Manual)"
        remediation: |
          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
          resources and that all new resources are created in a specific namespace.
        scored: false
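The Pod Security Standards checks above (5.2.6, 5.2.7, 5.2.9) and the general policies (5.7.2, 5.7.3) all describe settings that ultimately surface in the pod spec, assuming Pod Security Admission or an equivalent policy engine enforces them per 5.2.1. A minimal sketch of a pod fragment that would satisfy those checks, extending the `securityContext` snippet from 5.7.2; the names, namespace and image below are placeholders.

```yaml
# Illustrative pod fragment only; adapt names and image to your workload.
apiVersion: v1
kind: Pod
metadata:
  name: example-app
  namespace: user-workloads
spec:
  securityContext:
    runAsNonRoot: true            # 5.2.7: do not run as root
    seccompProfile:
      type: RuntimeDefault        # 5.7.2: default seccomp profile
  containers:
    - name: app
      image: registry.example.com/app:1.0
      securityContext:
        allowPrivilegeEscalation: false   # 5.2.6
        capabilities:
          drop: ["ALL"]                   # 5.2.9 / 5.2.10: drop all capabilities
```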
@@ -48,7 +48,7 @@ groups:
             echo "No matching pods found on the current node."
           else
             # Execute the stat command
-            oc exec -n openshift-sdn "$POD_NAME" - stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null
           fi
         tests:
           bin_op: or
@@ -42,16 +42,8 @@ groups:
         scored: false
 
       - id: 5.1.5
-        text: "Ensure that default service accounts are not actively used. (Automated)"
-        type: "skip"
-        audit: check_for_default_sa.sh
-        tests:
-          test_items:
-            - flag: "true"
-              compare:
-                op: eq
-                value: "true"
-              set: true
+        text: "Ensure that default service accounts are not actively used. (Manual)"
+        type: "manual"
         remediation: |
           Create explicit service accounts wherever a Kubernetes workload requires specific access
           to the Kubernetes API server.
@@ -44,14 +44,6 @@ groups:
       - id: 5.1.5
         text: "Ensure that default service accounts are not actively used. (Manual)"
         type: "manual"
-        audit: check_for_default_sa.sh
-        tests:
-          test_items:
-            - flag: "true"
-              compare:
-                op: eq
-                value: "true"
-              set: true
         remediation: |
           Create explicit service accounts wherever a Kubernetes workload requires specific access
           to the Kubernetes API server.
@@ -43,21 +43,12 @@ groups:
 
       - id: 5.1.5
         text: "Ensure that default service accounts are not actively used. (Manual)"
-        type: "skip"
-        audit: check_for_default_sa.sh
-        tests:
-          test_items:
-            - flag: "true"
-              compare:
-                op: eq
-                value: "true"
-              set: true
+        type: "manual"
         remediation: |
           Create explicit service accounts wherever a Kubernetes workload requires specific access
           to the Kubernetes API server.
           Modify the configuration of each default service account to include this value
           automountServiceAccountToken: false
-          Permissive - Kubernetes provides default service accounts to be used.
         scored: false
 
       - id: 5.1.6
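The three hunks above drop the scripted `check_for_default_sa.sh` audit for check 5.1.5 and fall back to manual review; the remediation they keep still comes down to setting `automountServiceAccountToken: false` on each default service account. A minimal sketch of what that looks like for one namespace (the namespace name is a placeholder):

```yaml
# Illustrative only: stop the default ServiceAccount from automounting API tokens.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: default
  namespace: user-workloads   # placeholder namespace
automountServiceAccountToken: false
```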
@@ -148,12 +148,18 @@ groups:
       - id: 1.1.10
         text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
         audit: |
-          ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
+          '/bin/sh -c "if [[ -e /etc/cni/net.d ]]; then
+            ps -fC "${kubeletbin:-kubelet}" | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
           find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
+          else
+            echo "File not found"
+          fi'
         use_multiple_values: true
         tests:
+          bin_op: or
           test_items:
             - flag: "root:root"
+            - flag: "File not found"
         remediation: |
           Run the below command (based on the file location on your system) on the control plane node.
           For example,
@@ -321,11 +327,18 @@ groups:
 
       - id: 1.1.21
         text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)"
-        audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.key"
+        audit: |
+          '/bin/sh -c if test -e "/var/lib/rancher/rke2/server/tls/*.key"; then
+            stat -c "%a" "/var/lib/rancher/rke2/server/tls/*.key"
+          else
+            echo "File not found"
+          fi'
         use_multiple_values: true
         tests:
+          bin_op: or
           test_items:
             - flag: "permissions"
+            - flag: "File not found"
               compare:
                 op: eq
                 value: "600"
@@ -979,7 +992,7 @@ groups:
           Edit the Controller Manager pod specification file $controllermanagerconf
           on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
           --feature-gates=RotateKubeletServerCertificate=true
-        scored: true
+        scored: false
         type: skip
 
       - id: 1.3.7
@@ -440,7 +440,7 @@ groups:
           systemctl daemon-reload
           systemctl restart kubelet.service
         scored: false
-
+        type: skip
       - id: 4.2.13
         text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
         audit: "/bin/ps -fC $kubeletbin"
@@ -252,7 +252,7 @@ func (controls *Controls) ASFF() ([]types.AwsSecurityFinding, error) {
 
 		f := types.AwsSecurityFinding{
 			AwsAccountId: aws.String(account),
-			Confidence:   *aws.Int32(100),
+			Confidence:   aws.Int32(100),
 			GeneratorId:  aws.String(fmt.Sprintf("%s/cis-kubernetes-benchmark/%s/%s", arn, controls.Version, check.ID)),
 			Id:           id,
 			CreatedAt:    aws.String(tf),
@@ -407,7 +407,7 @@ func TestControls_ASFF(t *testing.T) {
 			want: []types.AwsSecurityFinding{
 				{
 					AwsAccountId: aws.String("foo account"),
-					Confidence:   *aws.Int32(100),
+					Confidence:   aws.Int32(100),
 					GeneratorId:  aws.String(fmt.Sprintf("%s/cis-kubernetes-benchmark/%s/%s", fmt.Sprintf(ARN, "somewhere"), "1", "check1id")),
 					Description:  aws.String("check1text"),
 					ProductArn:   aws.String(fmt.Sprintf(ARN, "somewhere")),
@@ -92,7 +92,7 @@ aws ecr create-repository --repository-name k8s/kube-bench --image-tag-mutabilit
 git clone https://github.com/aquasecurity/kube-bench.git
 cd kube-bench
 aws ecr get-login-password --region <AWS_REGION> | docker login --username AWS --password-stdin <AWS_ACCT_NUMBER>.dkr.ecr.<AWS_REGION>.amazonaws.com
-docker build -t k8s/kube-bench .
+make build-docker IMAGE_NAME=k8s/kube-bench
 docker tag k8s/kube-bench:latest <AWS_ACCT_NUMBER>.dkr.ecr.<AWS_REGION>.amazonaws.com/k8s/kube-bench:latest
 docker push <AWS_ACCT_NUMBER>.dkr.ecr.<AWS_REGION>.amazonaws.com/k8s/kube-bench:latest
 ```
38 go.mod
@@ -1,11 +1,11 @@
 module github.com/aquasecurity/kube-bench
 
-go 1.21
+go 1.22
 
 require (
-	github.com/aws/aws-sdk-go-v2 v1.26.0
-	github.com/aws/aws-sdk-go-v2/config v1.27.4
-	github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1
+	github.com/aws/aws-sdk-go-v2 v1.31.0
+	github.com/aws/aws-sdk-go-v2/config v1.27.37
+	github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.3
 	github.com/fatih/color v1.16.0
 	github.com/golang/glog v1.2.0
 	github.com/magiconair/properties v1.8.7
@@ -22,17 +22,17 @@ require (
 )
 
 require (
-	github.com/aws/aws-sdk-go-v2/credentials v1.17.4 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 // indirect
-	github.com/aws/smithy-go v1.20.1 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.35 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.23.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.31.1 // indirect
+	github.com/aws/smithy-go v1.21.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
@@ -74,13 +74,13 @@ require (
 	github.com/subosito/gotenv v1.6.0 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/multierr v1.9.0 // indirect
-	golang.org/x/crypto v0.17.0 // indirect
+	golang.org/x/crypto v0.21.0 // indirect
 	golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
-	golang.org/x/net v0.19.0 // indirect
+	golang.org/x/net v0.23.0 // indirect
 	golang.org/x/oauth2 v0.15.0 // indirect
 	golang.org/x/sync v0.5.0 // indirect
-	golang.org/x/sys v0.15.0 // indirect
-	golang.org/x/term v0.15.0 // indirect
+	golang.org/x/sys v0.18.0 // indirect
+	golang.org/x/term v0.18.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
79 go.sum
@@ -1,35 +1,31 @@
-github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
-github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA=
-github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I=
-github.com/aws/aws-sdk-go-v2/config v1.27.4 h1:AhfWb5ZwimdsYTgP7Od8E9L1u4sKmDW2ZVeLcf2O42M=
-github.com/aws/aws-sdk-go-v2/config v1.27.4/go.mod h1:zq2FFXK3A416kiukwpsd+rD4ny6JC7QSkp4QdN1Mp2g=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.4 h1:h5Vztbd8qLppiPwX+y0Q6WiwMZgpd9keKe2EAENgAuI=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.4/go.mod h1:+30tpwrkOgvkJL1rUZuRLoxcJwtI/OkeBLYnHxJtVe0=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 h1:AK0J8iYBFeUk2Ax7O8YpLtFsfhdOByh2QIkHmigpRYk=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2/go.mod h1:iRlGzMix0SExQEviAyptRWRGdYNo3+ufW/lCzvKVTUc=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30/go.mod h1:LUBAO3zNXQjoONBKn/kR1y0Q4cj/D02Ts0uHYjcCQLM=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 h1:bNo4LagzUKbjdxE0tIcR9pMzLR2U/Tgie1Hq1HQ3iH8=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2/go.mod h1:wRQv0nN6v9wDXuWThpovGQjqF1HFdcgWjporw14lS8k=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24/go.mod h1:gAuCezX/gob6BSMbItsSlMb6WZGV7K2+fWOvk8xBSto=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 h1:EtOU5jsPdIQNP+6Q2C5e3d65NKT1PeCiQk+9OdzO12Q=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2/go.mod h1:tyF5sKccmDz0Bv4NrstEr+/9YkSPJHrcO7UsUKf7pWM=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 h1:5ffmXjPtwRExp1zc7gENLgCPyHFbhEPwVTkTiH9niSk=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2/go.mod h1:Ru7vg1iQ7cR4i7SZ/JTLYN9kaXtbL69UdgG0OQWQxW0=
-github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 h1:+lpa31bGPPvgpZwUJ4ldKRCsPukzJ0PqoO5AQ9S79oQ=
-github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1/go.mod h1:vKGWzDG4Ytw3hgv/FvNy0HX/XEoJ6k/e7KAANzXWP8Y=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 h1:utEGkfdQ4L6YW/ietH7111ZYglLJvS+sLriHJ1NBJEQ=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.1/go.mod h1:RsYqzYr2F2oPDdpy+PdhephuZxTfjHQe7SOBcZGoAU8=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 h1:9/GylMS45hGGFCcMrUZDVayQE1jYSIN6da9jo7RAYIw=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1/go.mod h1:YjAPFn4kGFqKC54VsHs5fn5B6d+PCY2tziEa3U/GB5Y=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 h1:3I2cBEYgKhrWlwyZgfpSO2BpaMY1LHPqXYk/QGlu2ew=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.1/go.mod h1:uQ7YYKZt3adCRrdCBREm1CD3efFLOUNH77MrUCvx5oA=
-github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
-github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw=
-github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/aws/aws-sdk-go-v2 v1.31.0 h1:3V05LbxTSItI5kUqNwhJrrrY1BAXxXt0sN0l72QmG5U=
+github.com/aws/aws-sdk-go-v2 v1.31.0/go.mod h1:ztolYtaEUtdpf9Wftr31CJfLVjOnD/CVRkKOOYgF8hA=
+github.com/aws/aws-sdk-go-v2/config v1.27.37 h1:xaoIwzHVuRWRHFI0jhgEdEGc8xE1l91KaeRDsWEIncU=
+github.com/aws/aws-sdk-go-v2/config v1.27.37/go.mod h1:S2e3ax9/8KnMSyRVNd3sWTKs+1clJ2f1U6nE0lpvQRg=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.35 h1:7QknrZhYySEB1lEXJxGAmuD5sWwys5ZXNr4m5oEz0IE=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.35/go.mod h1:8Vy4kk7at4aPSmibr7K+nLTzG6qUQAUO4tW49fzUV4E=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 h1:C/d03NAmh8C4BZXhuRNboF/DqhBkBCeDiJDcaqIT5pA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14/go.mod h1:7I0Ju7p9mCIdlrfS+JCgqcYD0VXz/N4yozsox+0o078=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 h1:kYQ3H1u0ANr9KEKlGs/jTLrBFPo8P8NaH/w7A01NeeM=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18/go.mod h1:r506HmK5JDUh9+Mw4CfGJGSSoqIiLCndAuqXuhbv67Y=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 h1:Z7IdFUONvTcvS7YuhtVxN99v2cCoHRXOS4mTr0B/pUc=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18/go.mod h1:DkKMmksZVVyat+Y+r1dEOgJEfUeA7UngIHWeKsi0yNc=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 h1:QFASJGfT8wMXtuP3D5CRmMjARHv9ZmzFUMJznHDOY3w=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5/go.mod h1:QdZ3OmoIjSX+8D1OPAzPxDfjXASbBMDsz9qvtyIhtik=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 h1:Xbwbmk44URTiHNx6PNo0ujDE6ERlsCKJD3u1zfnzAPg=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20/go.mod h1:oAfOFzUB14ltPZj1rWwRc3d/6OgD76R8KlvU3EqM9Fg=
+github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.3 h1:YSmEnPSWj74eOtbXG4Z2J+GTQjBrz7w2wP01isHFZwU=
+github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.3/go.mod h1:QFtYEC35t39ftJ6emZgapzdtBjGZsuR4bAd73SiG23I=
+github.com/aws/aws-sdk-go-v2/service/sso v1.23.1 h1:2jrVsMHqdLD1+PA4BA6Nh1eZp0Gsy3mFSB5MxDvcJtU=
+github.com/aws/aws-sdk-go-v2/service/sso v1.23.1/go.mod h1:XRlMvmad0ZNL+75C5FYdMvbbLkd6qiqz6foR1nA1PXY=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1 h1:0L7yGCg3Hb3YQqnSgBTZM5wepougtL1aEccdcdYhHME=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1/go.mod h1:FnvDM4sfa+isJ3kDXIzAB9GAwVSzFzSy97uZ3IsHo4E=
+github.com/aws/aws-sdk-go-v2/service/sts v1.31.1 h1:8K0UNOkZiK9Uh3HIF6Bx0rcNCftqGCeKmOaR7Gp5BSo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.31.1/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI=
+github.com/aws/smithy-go v1.21.0 h1:H7L8dtDRk0P1Qm6y0ji7MCYMQObJ5R9CRpyPhRUkLYA=
+github.com/aws/smithy-go v1.21.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -76,7 +72,6 @@ github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYu
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -104,8 +99,6 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
 github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
 github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
 github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -199,8 +192,8 @@ go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTV
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
-golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
 golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
 golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -212,8 +205,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
-golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
 golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
 golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -233,10 +226,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -54,11 +54,11 @@ func (p *Publisher) PublishFinding(finding []types.AwsSecurityFinding) (*Publish
 			errs = errors.Wrap(err, "finding publish failed")
 		}
 		if r != nil {
-			if r.FailedCount != 0 {
-				o.FailedCount += r.FailedCount
+			if *r.FailedCount != 0 {
+				o.FailedCount += *r.FailedCount
 			}
-			if r.SuccessCount != 0 {
-				o.SuccessCount += r.SuccessCount
+			if *r.SuccessCount != 0 {
+				o.SuccessCount += *r.SuccessCount
 			}
 			o.FailedFindings = append(o.FailedFindings, r.FailedFindings...)
 		}
2 makefile
@@ -11,7 +11,7 @@ uname := $(shell uname -s)
 BUILDX_PLATFORM ?= linux/amd64,linux/arm64,linux/arm,linux/ppc64le,linux/s390x
 DOCKER_ORGS ?= aquasec public.ecr.aws/aquasecurity
 GOARCH ?= $@
-KUBECTL_VERSION ?= 1.28.7
+KUBECTL_VERSION ?= 1.31.0
 ARCH ?= $(shell go env GOARCH)
 
 ifneq ($(findstring Microsoft,$(shell uname -r)),)