Mirror of https://github.com/aquasecurity/kube-bench.git

Update all TCs in node.yaml

This commit is contained in:
Deepanshu Bhatia 2024-09-02 00:57:32 +05:30 committed by Md Safiyat Reza
parent 291074eecb
commit b7b566d634

node.yaml

@@ -37,7 +37,7 @@ groups:
scored: true
- id: 4.1.3
text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)"
text: "If proxy kube proxy configuration file exists ensure permissions are set to 644 or more restrictive (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
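All of the 4.1.x file checks follow the same debug-pod pattern: resolve the node the kube-bench pod landed on, then stat the target file through a chroot into the host filesystem. A minimal sketch of that pattern (the kube-proxy path here is illustrative; the full audit resolves the real path at runtime):

# Resolve the node backing this pod, then stat a file on the host
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc debug node/"$NODE_NAME" -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /config/kube-proxy-config.yaml 2> /dev/null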
@@ -63,7 +63,7 @@ groups:
scored: false
- id: 4.1.4
text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)"
text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
@@ -86,7 +86,7 @@ groups:
scored: false
- id: 4.1.5
text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)"
text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)"
audit: |
# Check permissions
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
@@ -100,10 +100,10 @@ groups:
value: "644"
remediation: |
None required.
scored: false
scored: true
- id: 4.1.6
text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)"
text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null
@@ -113,13 +113,13 @@ groups:
- flag: root:root
remediation: |
None required.
scored: false
scored: true
- id: 4.1.7
text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/cert/ca.pem 2> /dev/null
use_multiple_values: true
tests:
test_items:
@@ -135,7 +135,7 @@ groups:
text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/cert/ca.pem 2> /dev/null
use_multiple_values: true
tests:
test_items:
@@ -145,17 +145,17 @@ groups:
scored: true
- id: 4.1.9
text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)"
text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/kubeconfig 2> /dev/null
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/data/kubelet/config.json 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
value: "600"
remediation: |
None required.
scored: true
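The bitmask comparison passes when the file mode sets no bit outside the target mask, i.e. mode & ~0600 == 0 for the 600 case, so 400 passes but 644 fails. A quick shell rendering of the same logic (mode is assumed to be a plain three-digit octal string parsed from the stat output above):

mode=600
# leading 0 makes bash treat the value as octal; mask off anything beyond rwx bits
if [ $(( 0$mode & ~0600 & 0777 )) -eq 0 ]; then
  echo "PASS: $mode is 600 or more restrictive"
fi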
@@ -164,7 +164,7 @@ groups:
text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/kubeconfig 2> /dev/null
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/data/kubelet/config.json 2> /dev/null
use_multiple_values: true
tests:
test_items:
@@ -177,7 +177,7 @@ groups:
text: "Kubelet"
checks:
- id: 4.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
text: "Activate Garbage collection in OpenShift Container Platform 4, as appropriate (Manual)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf 2> /dev/null
@@ -187,170 +187,133 @@ groups:
- flag: "enabled: true"
set: false
remediation: |
Follow the instructions in the documentation to create a Kubelet config CRD
and set anonymous-auth to false.
To configure, follow the directions in Garbage Collection Remediation https://docs.openshift.com/container-platform/latest/nodes/nodes/nodes-nodes-garbage-collection.html.
scored: true
- id: 4.2.2
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)"
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.anonymous.enabled' 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "true"
set: false
remediation: |
Create a kubeletconfig to explicitly disable anonymous authentication. Examples of how
to do this can be found in the OpenShift documentation.
scored: true
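The reworked 4.2.x audits read the kubelet's live configuration through the API server's node proxy instead of grepping files on the host; only the jq path varies from check to check. The same query can be run by hand:

NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# anonymous auth must come back false for this check to pass
oc get --raw "/api/v1/nodes/$NODE_NAME/proxy/configz" | jq '.kubeletconfig.authentication.anonymous.enabled'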
- id: 4.2.3
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
type: manual
# Takes a lot of time for the connection to fail and time out
audit: |
POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
TOKEN=$(oc whoami -t)
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$NODE_NAME/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode' 2> /dev/null
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authorization' 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "Connection timed out"
- flag: mode
compare:
op: noteq
value: AlwaysAllow
remediation: |
None required. Unauthenticated/Unauthorized users have no access to OpenShift nodes.
scored: false
scored: true
- id: 4.2.3
- id: 4.2.4
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf 2> /dev/null
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.x509' 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: '"clientCAFile": "/etc/kubernetes/kubelet-ca.crt"'
- flag: clientCAFile
compare:
op: eq
value: /etc/kubernetes/kubelet-ca.crt
remediation: |
None required. Changing the clientCAFile value is unsupported.
scored: true
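On a stock cluster the x509 stanza queried above should contain the pinned CA path the test compares against; roughly (output shape assumed from the test expectations):

oc get --raw "/api/v1/nodes/$NODE_NAME/proxy/configz" | jq '.kubeletconfig.authentication.x509'
# expected: { "clientCAFile": "/etc/kubernetes/kubelet-ca.crt" }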
- id: 4.2.4
- id: 4.2.5
text: "Verify that the read only port is not used or is set to 0 (Automated)"
audit: |
oc -n openshift-kube-apiserver get cm config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments' 2> /dev/null
echo `oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o yaml | grep --color read-only-port` 2> /dev/null
echo `oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "read-only-port"` 2> /dev/null
tests:
bin_op: or
test_items:
- flag: "read-only-port"
- flag: kubelet-read-only-port
compare:
op: has
value: "[\"0\"]"
- flag: "read-only-port"
set: false
remediation: |
In earlier versions of OpenShift 4, the read-only-port argument is not used.
Follow the instructions in the documentation to create a Kubelet config CRD
and set --read-only-port to 0.
Follow the instructions in the documentation https://docs.openshift.com/container-platform/latest/post_installation_configuration/machine-configuration-tasks.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-machine-configuration-tasks
to create a kubeletconfig CRD and set kubelet-read-only-port to 0.
scored: true
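An equivalent one-liner for the first audit command, pulling the argument straight out of the API server config (the check passes when kubelet-read-only-port is ["0"] or absent):

oc -n openshift-kube-apiserver get cm config -o json \
  | jq -r '.data."config.yaml"' \
  | jq '.apiServerArguments["kubelet-read-only-port"]'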
- id: 4.2.5
- id: 4.2.6
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
audit: |
# Should return 1 for node
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/${NODE_NAME} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout 2> /dev/null
echo exit_code=$?
# Should return 1 for node
oc debug node/${NODE_NAME} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf 2> /dev/null
echo exit_code=$?
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig' 2> /dev/null
use_multiple_values: true
tests:
bin_op: or
test_items:
- flag: --streaming-connection-idle-timeout
compare:
op: noteq
value: 0
- flag: streamingConnectionIdleTimeout
compare:
op: noteq
value: 0s
- flag: "exit_code"
compare:
op: eq
value: 1
remediation: |
Follow the instructions in the documentation to create a Kubelet config CRD and set
the --streaming-connection-idle-timeout to the desired value. Do not set the value to 0.
Follow the instructions in the documentation https://docs.openshift.com/container-platform/latest/post_installation_configuration/machine-configuration-tasks.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-machine-configuration-tasks
to create a kubeletconfig CRD and set streamingConnectionIdleTimeout to the desired value. Do not set the value to 0.
scored: true
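The kubeletconfig CRD route referenced in the remediation looks roughly like this; the custom-kubelet pool label is an assumption and must match a label on the target MachineConfigPool:

oc apply -f - <<'EOF'
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
  name: set-streaming-timeout
spec:
  machineConfigPoolSelector:
    matchLabels:
      custom-kubelet: enabled   # hypothetical label
  kubeletConfig:
    streamingConnectionIdleTimeout: 4h0m0s
EOF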
- id: 4.2.6
text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host more /etc/kubernetes/kubelet.conf 2> /dev/null
tests:
test_items:
- flag: protectKernelDefaults
set: false
remediation: |
None required. The OpenShift 4 kubelet modifies the system tunables;
using the protect-kernel-defaults flag will cause the kubelet to fail on start if the tunables
don't match the kubelet configuration and the OpenShift node will fail to start.
scored: false
- id: 4.2.7
text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual)"
audit: |
/bin/bash
flag=make-iptables-util-chains
opt=makeIPTablesUtilChains
# look at each machineconfigpool
while read -r pool nodeconfig; do
# true by default
value='true'
# first look for the flag
oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.systemd[][] | select(.name=="kubelet.service") | .contents' | sed -n "/^ExecStart=/,/^\$/ { /^\\s*--$flag=false/ q 100 }"
# if the above command exited with 100, the flag was false
[ $? == 100 ] && value='false'
# now look in the yaml KubeletConfig
yamlconfig=$(oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.storage.files[] | select(.path=="/etc/kubernetes/kubelet.conf") | .contents.source ' | sed 's/^data:,//' | while read; do echo -e ${REPLY//%/\\x}; done)
echo "$yamlconfig" | sed -n "/^$opt:\\s*false\\s*$/ q 100"
[ $? == 100 ] && value='false'
echo "Pool $pool has $flag ($opt) set to $value"
done < <(oc get machineconfigpools -o json | jq -r '.items[] | select(.status.machineCount>0) | .metadata.name + " " + .spec.configuration.name')
# Should return 1 for node
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig' 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "set to true"
- flag: makeIPTablesUtilChains
compare:
op: eq
value: true
remediation: |
None required. The --make-iptables-util-chains argument is set to true by default.
None required. The makeIPTablesUtilChains argument is set to true by default.
scored: false
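The deleted bash loop decoded each pool's rendered machineconfig by hand; the configz endpoint yields the same answer per node. To spot-check one node:

NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc get --raw "/api/v1/nodes/$NODE_NAME/proxy/configz" | jq '.kubeletconfig.makeIPTablesUtilChains'   # expect: true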
- id: 4.2.8
text: "Ensure that the --hostname-override argument is not set (Manual)"
audit: |
echo `oc get machineconfig 01-worker-kubelet -o yaml | grep hostname-override`
echo `oc get machineconfig 01-master-kubelet -o yaml | grep hostname-override`
tests:
test_items:
- flag: hostname-override
set: false
remediation: |
By default, --hostname-override argument is not set.
scored: false
- id: 4.2.9
text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf;
oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
type: "manual"
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig' 2> /dev/null
tests:
test_items:
- flag: kubeAPIQPS
compare:
op: gte
value: 0
remediation: |
Follow the documentation to edit kubelet parameters
https://docs.openshift.com/container-platform/4.15/scalability_and_performance/recommended-host-practices.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters
KubeAPIQPS: <QPS>
None required by default. Follow the documentation to edit kubeletconfig parameters
https://docs.openshift.com/container-platform/4.15/post_installation_configuration/machine-configuration-tasks.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-machine-configuration-tasks
scored: false
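To inspect just the QPS value rather than the whole kubeletconfig blob (the old grep expected kubeAPIQPS: 50, the shipped default):

oc get --raw "/api/v1/nodes/$NODE_NAME/proxy/configz" | jq '.kubeletconfig.kubeAPIQPS'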
- id: 4.2.10
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
- id: 4.2.9
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
audit: |
oc get configmap config -n openshift-kube-apiserver -o json \
| jq -r '.data["config.yaml"]' \
| jq -r '.apiServerArguments |
.["kubelet-client-certificate"][0],
.["kubelet-client-key"][0]
'
oc get configmap config -n openshift-kube-apiserver -ojson | \
jq -r '.data["config.yaml"]' | \
jq -r '.apiServerArguments | ."kubelet-client-certificate"[0], ."kubelet-client-key"[0]' 2> /dev/null
tests:
bin_op: and
test_items:
@@ -361,40 +324,30 @@ groups:
This is not configurable.
scored: true
- id: 4.2.11
- id: 4.2.10
text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
audit: |
#Verify the rotateKubeletClientCertificate feature gate is not set to false
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate 2> /dev/null
# Verify the rotateCertificates argument is set to true
oc debug node/${NODE_NAME} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq -r '.kubeletconfig' 2> /dev/null
use_multiple_values: true
tests:
bin_op: or
test_items:
- flag: rotateCertificates
compare:
op: eq
value: true
- flag: rotateKubeletClientCertificates
compare:
op: noteq
value: false
- flag: rotateKubeletClientCertificates
set: false
remediation: |
None required.
scored: false
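A focused spot-check for the two rotation settings this test accepts:

oc get --raw "/api/v1/nodes/$NODE_NAME/proxy/configz" \
  | jq '{rotateCertificates: .kubeletconfig.rotateCertificates, featureGates: .kubeletconfig.featureGates}'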
- id: 4.2.12
- id: 4.2.11
text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
audit: |
#Verify the rotateKubeletServerCertificate feature gate is on
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/${NODE_NAME} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf 2> /dev/null
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.featureGates' 2> /dev/null
# Verify the rotateCertificates argument is set to true
oc debug node/${NODE_NAME} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq -r '.kubeletconfig' 2> /dev/null
use_multiple_values: true
tests:
bin_op: or
@@ -408,22 +361,24 @@ groups:
op: eq
value: true
remediation: |
By default, kubelet server certificate rotation is disabled.
None required. By default, kubelet server certificate rotation is enabled.
scored: false
- id: 4.2.13
- id: 4.2.12
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
audit: |
# needs verification
# verify cipher suites
oc describe --namespace=openshift-ingress-operator ingresscontroller/default
oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
oc get openshiftapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
oc get --namespace=openshift-ingress-operator ingresscontroller/default -o json | jq '.status.tlsProfile.ciphers' 2> /dev/null
oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.servingInfo.cipherSuites' 2> /dev/null
oc get openshiftapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.servingInfo.cipherSuites' 2> /dev/null
oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq '.servingInfo.cipherSuites' 2> /dev/null
#check value for tlsSecurityProfile; null is returned if default is used
oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.tlsSecurityProfile
oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.tlsSecurityProfile 2> /dev/null
type: manual
remediation: |
Follow the directions above and in the OpenShift documentation to configure the tlsSecurityProfile.
Configuring Ingress
See Configuring Ingress: https://docs.openshift.com/container-platform/4.15/networking/ingress-operator.html#nw-ingress-controller-configuration-parameters_configuring-ingress
Please reference the OpenShift TLS security profile documentation for more detail on each profile.
https://docs.openshift.com/container-platform/4.15/security/tls-security-profiles.html
scored: false
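To read the resolved ingress cipher list, and to move the cluster off the default profile if the audit shows weak ciphers (Intermediate is shown as an illustrative choice, not a requirement):

oc get ingresscontroller/default -n openshift-ingress-operator -o jsonpath='{.status.tlsProfile.ciphers}'
oc patch ingresscontroller/default -n openshift-ingress-operator --type=merge \
  -p '{"spec":{"tlsSecurityProfile":{"type":"Intermediate","intermediate":{}}}}'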