Currently, certain commands involve retrieving all node names or pods and then executing additional commands in a loop, resulting in a time complexity linearly proportional to the number of nodes. (#1597)

This approach becomes time-consuming for larger clusters.

As kube-bench is executed as a job on every node in the cluster, the commands have been streamlined to execute directly on the current node where kube-bench operates, improving performance.
This change ensures that the time complexity remains constant, regardless of the cluster size.
By running the necessary commands only once per node, regardless of how many nodes are in the cluster, this approach significantly boosts performance and efficiency.
pull/1587/head^2
mjshastha 4 weeks ago committed by GitHub
parent 73e1377ce0
commit d2d3e72271
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -11,16 +11,17 @@ groups:
- id: 2.1 - id: 2.1
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)" text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)"
audit: | audit: |
# For --cert-file # Get the node name where the pod is running
for i in $(oc get pods -oname -n openshift-etcd) NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
do # Get the pod name in the openshift-etcd namespace
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
done 2>/dev/null if [ -z "$POD_NAME" ]; then
# For --key-file echo "No matching file found on the current node."
for i in $(oc get pods -oname -n openshift-etcd) else
do # Execute the stat command
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/'
done 2>/dev/null oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/'
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -36,10 +37,16 @@ groups:
- id: 2.2 - id: 2.2
text: "Ensure that the --client-cert-auth argument is set to true (Manual)" text: "Ensure that the --client-cert-auth argument is set to true (Manual)"
audit: | audit: |
for i in $(oc get pods -oname -n openshift-etcd) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' # Get the pod name in the openshift-etcd namespace
done 2>/dev/null POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Inspect the etcd process command line
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/'
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -55,10 +62,16 @@ groups:
text: "Ensure that the --auto-tls argument is not set to true (Manual)" text: "Ensure that the --auto-tls argument is not set to true (Manual)"
audit: | audit: |
# Returns 0 if found, 1 if not found # Returns 0 if found, 1 if not found
for i in $(oc get pods -oname -n openshift-etcd) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? # Get the pod name in the openshift-etcd namespace
done 2>/dev/null POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Inspect the etcd process command line
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$?
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -73,16 +86,17 @@ groups:
- id: 2.4 - id: 2.4
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)" text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)"
audit: | audit: |
# For --peer-cert-file # Get the node name where the pod is running
for i in $(oc get pods -oname -n openshift-etcd) NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
do # Get the pod name in the openshift-etcd namespace
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
done 2>/dev/null if [ -z "$POD_NAME" ]; then
# For --peer-key-file echo "No matching file found on the current node."
for i in $(oc get pods -oname -n openshift-etcd) else
do # Execute the stat command
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/'
done 2>/dev/null oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/'
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -97,10 +111,16 @@ groups:
- id: 2.5 - id: 2.5
text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)" text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)"
audit: | audit: |
for i in $(oc get pods -oname -n openshift-etcd) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' # Get the pod name in the openshift-etcd namespace
done 2>/dev/null POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Inspect the etcd process command line
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/'
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -116,10 +136,16 @@ groups:
text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)" text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)"
audit: | audit: |
# Returns 0 if found, 1 if not found # Returns 0 if found, 1 if not found
for i in $(oc get pods -oname -n openshift-etcd) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? # Get the pod name in the openshift-etcd namespace
done 2>/dev/null POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Inspect the etcd process command line
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$?
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -134,14 +160,17 @@ groups:
- id: 2.7 - id: 2.7
text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
audit: | audit: |
for i in $(oc get pods -oname -n openshift-etcd) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' # Get the pod name in the openshift-etcd namespace
done 2>/dev/null POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
for i in $(oc get pods -oname -n openshift-etcd) if [ -z "$POD_NAME" ]; then
do echo "No matching file found on the current node."
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' else
done 2>/dev/null # Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/'
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/'
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:

@ -11,10 +11,18 @@ groups:
- id: 1.1.1 - id: 1.1.1
text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Manual)" text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Manual)"
audit: | audit: |
for i in $( oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o name ) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-kube-apiserver $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml;
done 2>/dev/null # Get the pod name in the openshift-kube-apiserver namespace
POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -29,11 +37,18 @@ groups:
- id: 1.1.2 - id: 1.1.2
text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)" text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)"
audit: | audit: |
for i in $( oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o name ) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-kube-apiserver $i -- \
stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml # Get the pod name in the openshift-kube-apiserver namespace
done 2>/dev/null POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -45,10 +60,18 @@ groups:
- id: 1.1.3 - id: 1.1.3
text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Manual)" text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Manual)"
audit: | audit: |
for i in $( oc get pods -n openshift-kube-controller-manager -o name -l app=kube-controller-manager) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml;
done 2>/dev/null # Get the pod name in the openshift-kube-controller-manager namespace
POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -63,11 +86,18 @@ groups:
- id: 1.1.4 - id: 1.1.4
text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)" text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)"
audit: | audit: |
for i in $( oc get pods -n openshift-kube-controller-manager -o name -l app=kube-controller-manager) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-kube-controller-manager $i -- \
stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml # Get the pod name in the openshift-kube-controller-manager namespace
done 2>/dev/null POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -79,10 +109,18 @@ groups:
- id: 1.1.5 - id: 1.1.5
text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Manual)" text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Manual)"
audit: | audit: |
for i in $( oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name ) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml;
done 2>/dev/null # Get the pod name in the openshift-kube-scheduler namespace
POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -97,11 +135,18 @@ groups:
- id: 1.1.6 - id: 1.1.6
text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual))" text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual))"
audit: | audit: |
for i in $( oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name ) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-kube-scheduler $i -- \
stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml # Get the pod name in the openshift-kube-scheduler namespace
done 2>/dev/null POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -113,10 +158,18 @@ groups:
- id: 1.1.7 - id: 1.1.7
text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Manual))" text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Manual))"
audit: | audit: |
for i in $( oc get pods -n openshift-etcd -l app=etcd -o name | grep etcd ) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc rsh -n openshift-etcd $i stat -c "$i %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml
done 2>/dev/null # Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -131,10 +184,18 @@ groups:
- id: 1.1.8 - id: 1.1.8
text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)" text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)"
audit: | audit: |
for i in $( oc get pods -n openshift-etcd -l app=etcd -o name | grep etcd ) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc rsh -n openshift-etcd $i stat -c "$i %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml
done 2>/dev/null # Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -146,16 +207,41 @@ groups:
- id: 1.1.9 - id: 1.1.9
text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)"
audit: | audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# For CNI multus # For CNI multus
for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; done 2>/dev/null # Get the pod name in the openshift-multus namespace
for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; done 2>/dev/null POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$POD_NAME %n permissions=%a\" /host/etc/cni/net.d/*.conf" 2>/dev/null
oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$POD_NAME %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null
fi
# For SDN pods # For SDN pods
for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
fi
# For OVS pods # For OVS pods
for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -170,17 +256,40 @@ groups:
- id: 1.1.10 - id: 1.1.10
text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
audit: | audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# For CNI multus # For CNI multus
for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \""$i %n %U:%G\" /host/etc/cni/net.d/*.conf"; done 2>/dev/null # Get the pod name in the openshift-multus namespace
for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \""$i %n %U:%G\" /host/var/run/multus/cni/net.d/*.conf"; done 2>/dev/null POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$POD_NAME %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null
oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$POD_NAME %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null
fi
# For SDN pods # For SDN pods
for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G"{} \;; done 2>/dev/null
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
fi
# For OVS pods in 4.5 # For OVS pods in 4.5
for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null if [ -z "$POD_NAME" ]; then
# For OVS pods in 4.6 TBD echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -192,7 +301,18 @@ groups:
- id: 1.1.11 - id: 1.1.11
text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)" text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)"
audit: | audit: |
for i in $(oc get pods -n openshift-etcd -l app=etcd -oname); do oc exec -n openshift-etcd -c etcd $i -- stat -c "$i %n permissions=%a" /var/lib/etcd/member; done # Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /var/lib/etcd/member
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -207,7 +327,18 @@ groups:
- id: 1.1.12 - id: 1.1.12
text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)" text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)"
audit: | audit: |
for i in $(oc get pods -n openshift-etcd -l app=etcd -oname); do oc exec -n openshift-etcd -c etcd $i -- stat -c "$i %n %U:%G" /var/lib/etcd/member; done # Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /var/lib/etcd/member
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -219,10 +350,8 @@ groups:
- id: 1.1.13 - id: 1.1.13
text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Manual))" text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Manual))"
audit: | audit: |
for i in $(oc get nodes -o name) NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubeconfig 2> /dev/null
oc debug $i -- chroot /host stat -c "$i %n permissions=%a" /etc/kubernetes/kubeconfig
done 2>/dev/null
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -237,10 +366,8 @@ groups:
- id: 1.1.14 - id: 1.1.14
text: "Ensure that the admin.conf file ownership is set to root:root (Manual)" text: "Ensure that the admin.conf file ownership is set to root:root (Manual)"
audit: | audit: |
for i in $(oc get nodes -o name) NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubeconfig 2> /dev/null
oc debug $i -- chroot /host stat -c "$i %n %U:%G" /etc/kubernetes/kubeconfig
done 2>/dev/null
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -252,10 +379,18 @@ groups:
- id: 1.1.15 - id: 1.1.15
text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Manual)" text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Manual)"
audit: | audit: |
for i in $(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
done 2>/dev/null # Get the pod name in the openshift-kube-scheduler namespace
POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -270,10 +405,18 @@ groups:
- id: 1.1.16 - id: 1.1.16
text: "Ensure that the scheduler.conf file ownership is set to root:root (Manual)" text: "Ensure that the scheduler.conf file ownership is set to root:root (Manual)"
audit: | audit: |
for i in $(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
done 2>/dev/null # Get the pod name in the openshift-kube-scheduler namespace
POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -285,10 +428,18 @@ groups:
- id: 1.1.17 - id: 1.1.17
text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Manual)" text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Manual)"
audit: | audit: |
for i in $(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o name) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
done 2>/dev/null # Get the pod name in the openshift-kube-controller-manager namespace
POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -303,10 +454,18 @@ groups:
- id: 1.1.18 - id: 1.1.18
text: "Ensure that the controller-manager.conf file ownership is set to root:root (Manual)" text: "Ensure that the controller-manager.conf file ownership is set to root:root (Manual)"
audit: | audit: |
for i in $(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o name) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
done 2>/dev/null # Get the pod name in the openshift-kube-controller-manager namespace
POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -319,15 +478,22 @@ groups:
text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Manual)" text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Manual)"
audit: | audit: |
# Should return root:root for all files and directories # Should return root:root for all files and directories
for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}') # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# echo $i static-pod-certs
oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; # Get the pod name in the openshift-kube-controller-manager namespace
oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
# echo $i static-pod-resources
oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; if [ -z "$POD_NAME" ]; then
oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; echo "No matching pods found on the current node."
done else
# echo $i static-pod-certs
oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
# echo $i static-pod-resources
oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -339,11 +505,18 @@ groups:
- id: 1.1.20 - id: 1.1.20
text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)" text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)"
audit: | audit: |
for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}') # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# echo $i static-pod-certs
oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$i %n permissions=%a" {} \; # Get the pod name in the openshift-kube-apiserver namespace
done POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$POD_NAME %n permissions=%a" {} \;
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -358,11 +531,18 @@ groups:
- id: 1.1.21 - id: 1.1.21
text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)" text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)"
audit: | audit: |
for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}') # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# echo $i static-pod-certs
oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$i %n permissions=%a" {} \; # Get the pod name in the openshift-kube-apiserver namespace
done POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$POD_NAME %n permissions=%a" {} \;
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -532,11 +712,9 @@ groups:
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments'
# For OCP 4.5 and earlier verify that authorization-mode is not used # For OCP 4.5 and earlier verify that authorization-mode is not used
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode 2> /dev/null
oc debug node/${node} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode oc debug node/$NODE_NAME -- chroot /host ps -aux | grep kubelet | grep authorization-mode 2> /dev/null
oc debug node/${node} -- chroot /host ps -aux | grep kubelet | grep authorization-mode
done
#Check that no overrides are configured #Check that no overrides are configured
oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
audit_config: | audit_config: |

@ -11,11 +11,8 @@ groups:
- id: 4.1.1 - id: 4.1.1
text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)"
audit: | audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null
oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/systemd/system/kubelet.service
done 2> /dev/null
use_multiple_values: true
tests: tests:
test_items: test_items:
- flag: "permissions" - flag: "permissions"
@ -30,11 +27,8 @@ groups:
text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
audit: | audit: |
# Should return root:root for each node # Should return root:root for each node
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null
oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/systemd/system/kubelet.service
done 2> /dev/null
use_multiple_values: true
tests: tests:
test_items: test_items:
- flag: root:root - flag: root:root
@ -45,11 +39,17 @@ groups:
- id: 4.1.3 - id: 4.1.3
text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)" text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)"
audit: | audit: |
for i in $(oc get pods -n openshift-sdn -l app=sdn -oname) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-sdn $i -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml # Get the pod name in the openshift-sdn namespace
done 2> /dev/null POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
use_multiple_values: true
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$POD_NAME %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null
fi
tests: tests:
bin_op: or bin_op: or
test_items: test_items:
@ -65,10 +65,17 @@ groups:
- id: 4.1.4 - id: 4.1.4
text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)" text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)"
audit: | audit: |
for i in $(oc get pods -n openshift-sdn -l app=sdn -oname) # Get the node name where the pod is running
do NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
oc exec -n openshift-sdn $i -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml # Get the pod name in the openshift-sdn namespace
done 2> /dev/null POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$POD_NAME %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null
fi
use_multiple_values: true use_multiple_values: true
tests: tests:
bin_op: or bin_op: or
@ -82,10 +89,8 @@ groups:
text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)" text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)"
audit: | audit: |
# Check permissions # Check permissions
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null
oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/kubernetes/kubelet.conf
done 2> /dev/null
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -100,10 +105,8 @@ groups:
- id: 4.1.6 - id: 4.1.6
text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)" text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)"
audit: | audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null
oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/kubernetes/kubelet.conf
done 2> /dev/null
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -115,10 +118,8 @@ groups:
- id: 4.1.7 - id: 4.1.7
text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)" text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)"
audit: | audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/kubernetes/kubelet-ca.crt
done 2> /dev/null
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -133,10 +134,8 @@ groups:
- id: 4.1.8 - id: 4.1.8
text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
audit: | audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/kubernetes/kubelet-ca.crt
done 2> /dev/null
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -148,10 +147,8 @@ groups:
- id: 4.1.9 - id: 4.1.9
text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)"
audit: | audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/kubeconfig 2> /dev/null
oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /var/lib/kubelet/kubeconfig
done 2> /dev/null
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -166,10 +163,8 @@ groups:
- id: 4.1.10 - id: 4.1.10
text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)" text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
audit: | audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/kubeconfig 2> /dev/null
oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /var/lib/kubelet/kubeconfig
done 2> /dev/null
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -184,10 +179,8 @@ groups:
- id: 4.2.1 - id: 4.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Automated)" text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
audit: | audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf 2> /dev/null
oc debug node/${node} -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf
done
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -205,10 +198,8 @@ groups:
audit: | audit: |
POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
TOKEN=$(oc whoami -t) TOKEN=$(oc whoami -t)
for name in $(oc get nodes -ojsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$NODE_NAME/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode' 2> /dev/null
oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$name/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode'
done
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
@ -220,14 +211,12 @@ groups:
- id: 4.2.3 - id: 4.2.3
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: | audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf 2> /dev/null
oc debug node/${node} -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf | awk -F': ' '{ print "clientCAFile=" $2 }'
done 2> /dev/null
use_multiple_values: true use_multiple_values: true
tests: tests:
test_items: test_items:
- flag: clientCAFile="/etc/kubernetes/kubelet-ca.crt" - flag: '"clientCAFile": "/etc/kubernetes/kubelet-ca.crt"'
remediation: | remediation: |
None required. Changing the clientCAFile value is unsupported. None required. Changing the clientCAFile value is unsupported.
scored: true scored: true
@ -255,18 +244,13 @@ groups:
- id: 4.2.5 - id: 4.2.5
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
audit: | audit: |
# Should return 1 for each node # Should return 1 for node
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/${NODE_NAME} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout 2> /dev/null
oc debug node/${node} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout echo exit_code=$?
echo exit_code=$? # Should return 1 for node
done 2>/dev/null oc debug node/${NODE_NAME} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf 2> /dev/null
# Should return 1 for each node echo exit_code=$?
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf
echo exit_code=$?
done 2>/dev/null
use_multiple_values: true use_multiple_values: true
tests: tests:
bin_op: or bin_op: or
@ -291,10 +275,8 @@ groups:
- id: 4.2.6 - id: 4.2.6
text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)" text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)"
audit: | audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/$NODE_NAME -- chroot /host more /etc/kubernetes/kubelet.conf 2> /dev/null
oc debug node/${node} -- chroot /host more /etc/kubernetes/kubelet.conf;
done
tests: tests:
test_items: test_items:
- flag: protectKernelDefaults - flag: protectKernelDefaults
@ -349,10 +331,8 @@ groups:
- id: 4.2.9 - id: 4.2.9
text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)" text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)"
audit: | audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf;
oc debug node/${node} -- chroot /host more /etc/kubernetes/kubelet.conf;
done
oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
type: "manual" type: "manual"
@ -365,7 +345,12 @@ groups:
- id: 4.2.10 - id: 4.2.10
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
audit: | audit: |
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' oc get configmap config -n openshift-kube-apiserver -o json \
| jq -r '.data["config.yaml"]' \
| jq -r '.apiServerArguments |
.["kubelet-client-certificate"][0],
.["kubelet-client-key"][0]
'
tests: tests:
bin_op: and bin_op: and
test_items: test_items:
@ -380,15 +365,10 @@ groups:
text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
audit: | audit: |
#Verify the rotateKubeletClientCertificate feature gate is not set to false #Verify the rotateKubeletClientCertificate feature gate is not set to false
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate 2> /dev/null
oc debug node/${node} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate
done 2> /dev/null
# Verify the rotateCertificates argument is set to true # Verify the rotateCertificates argument is set to true
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') oc debug node/${NODE_NAME} -- chroot host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null
do
oc debug node/${node} -- chroot host grep rotate /etc/kubernetes/kubelet.conf;
done 2> /dev/null
use_multiple_values: true use_multiple_values: true
tests: tests:
bin_op: or bin_op: or
@ -411,24 +391,19 @@ groups:
text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
audit: | audit: |
#Verify the rotateKubeletServerCertificate feature gate is on #Verify the rotateKubeletServerCertificate feature gate is on
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
do oc debug node/${NODE_NAME} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf 2> /dev/null
oc debug node/${node} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf;
done 2> /dev/null
# Verify the rotateCertificates argument is set to true # Verify the rotateCertificates argument is set to true
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') oc debug node/${NODE_NAME} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null
do
oc debug node/${node} -- chroot host grep rotate /etc/kubernetes/kubelet.conf;
done 2> /dev/null
use_multiple_values: true use_multiple_values: true
tests: tests:
bin_op: or bin_op: or
test_items: test_items:
- flag: RotateKubeletServerCertificate - flag: rotateCertificates
compare: compare:
op: eq op: eq
value: true value: true
- flag: rotateCertificates - flag: RotateKubeletServerCertificate
compare: compare:
op: eq op: eq
value: true value: true

@ -78,10 +78,7 @@ groups:
text: "Minimize the admission of privileged containers (Manual)" text: "Minimize the admission of privileged containers (Manual)"
audit: | audit: |
# needs verification # needs verification
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegedContainer:.allowPrivilegedContainer
do
echo "$i"; oc describe scc $i | grep "Allow Privileged";
done
tests: tests:
test_items: test_items:
- flag: "false" - flag: "false"
@ -93,10 +90,7 @@ groups:
- id: 5.2.2 - id: 5.2.2
text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
audit: | audit: |
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; oc get scc -o=custom-columns=NAME:.metadata.name,allowHostPID:.allowHostPID
do
echo "$i"; oc describe scc $i | grep "Allow Host PID";
done
tests: tests:
test_items: test_items:
- flag: "false" - flag: "false"
@ -108,10 +102,7 @@ groups:
- id: 5.2.3 - id: 5.2.3
text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
audit: | audit: |
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; oc get scc -o=custom-columns=NAME:.metadata.name,allowHostIPC:.allowHostIPC
do
echo "$i"; oc describe scc $i | grep "Allow Host IPC";
done
tests: tests:
test_items: test_items:
- flag: "false" - flag: "false"
@ -123,10 +114,7 @@ groups:
- id: 5.2.4 - id: 5.2.4
text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
audit: | audit: |
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; oc get scc -o=custom-columns=NAME:.metadata.name,allowHostNetwork:.allowHostNetwork
do
echo "$i"; oc describe scc $i | grep "Allow Host Network";
done
tests: tests:
test_items: test_items:
- flag: "false" - flag: "false"
@ -138,10 +126,7 @@ groups:
- id: 5.2.5 - id: 5.2.5
text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
audit: | audit: |
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegeEscalation:.allowPrivilegeEscalation
do
echo "$i"; oc describe scc $i | grep "Allow Privilege Escalation";
done
tests: tests:
test_items: test_items:
- flag: "false" - flag: "false"
@ -153,18 +138,10 @@ groups:
- id: 5.2.6 - id: 5.2.6
text: "Minimize the admission of root containers (Manual)" text: "Minimize the admission of root containers (Manual)"
audit: | audit: |
# needs verification # needs verification # | awk 'NR>1 {gsub("map\\[type:", "", $2); gsub("\\]$", "", $2); print $1 ":" $2}'
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; oc get scc -o=custom-columns=NAME:.metadata.name,runAsUser:.runAsUser.type
do
echo "$i";
oc describe scc $i | grep "Run As User Strategy";
done
#For SCCs with MustRunAs verify that the range of UIDs does not include 0 #For SCCs with MustRunAs verify that the range of UIDs does not include 0
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; oc get scc -o=custom-columns=NAME:.metadata.name,uidRangeMin:.runAsUser.uidRangeMin,uidRangeMax:.runAsUser.uidRangeMax
do
echo "$i";
oc describe scc $i | grep "\sUID";
done
tests: tests:
bin_op: or bin_op: or
test_items: test_items:
@ -183,11 +160,7 @@ groups:
text: "Minimize the admission of containers with the NET_RAW capability (Manual)" text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
audit: | audit: |
# needs verification # needs verification
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; oc get scc -o=custom-columns=NAME:.metadata.name,requiredDropCapabilities:.requiredDropCapabilities
do
echo "$i";
oc describe scc $i | grep "Required Drop Capabilities";
done
tests: tests:
bin_op: or bin_op: or
test_items: test_items:

Loading…
Cancel
Save