From d2d3e722710555dde14eebe25c098a6281acc701 Mon Sep 17 00:00:00 2001
From: mjshastha <61929310+mjshastha@users.noreply.github.com>
Date: Thu, 18 Apr 2024 11:31:17 +0530
Subject: [PATCH] Run audit commands only on the node where kube-bench is
 executing, instead of looping over every node or pod in the cluster (#1597)

Previously, several audit commands retrieved all node names or pods and then
ran additional commands in a loop, so their running time grew linearly with
the number of nodes. This becomes time-consuming on larger clusters. Since
kube-bench already executes as a job on every node in the cluster, the
commands are now streamlined to run directly on the node where kube-bench
operates. Each check therefore runs its commands only once per node, no
matter how many nodes the cluster has, which keeps the cost constant
regardless of cluster size and significantly improves performance and
efficiency.
---
 cfg/rh-1.0/etcd.yaml     | 117 +++++++-----
 cfg/rh-1.0/master.yaml   | 378 ++++++++++++++++++++++++++++-----------
 cfg/rh-1.0/node.yaml     | 165 ++++++++---------
 cfg/rh-1.0/policies.yaml |  45 +----
 4 files changed, 430 insertions(+), 275 deletions(-)

diff --git a/cfg/rh-1.0/etcd.yaml b/cfg/rh-1.0/etcd.yaml
index 2fa7898..4398d9c 100644
--- a/cfg/rh-1.0/etcd.yaml
+++ b/cfg/rh-1.0/etcd.yaml
@@ -11,16 +11,17 @@ groups:
       - id: 2.1
         text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)"
         audit: |
-          # For --cert-file
-          for i in $(oc get pods -oname -n openshift-etcd)
-          do
-            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/'
-          done 2>/dev/null
-          # For --key-file
-          for i in $(oc get pods -oname -n openshift-etcd)
-          do
-            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/'
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Extract the --cert-file and --key-file values
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/'
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/'
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -36,10 +37,16 @@
       - id: 2.2
         text: "Ensure that the --client-cert-auth argument is set to true (Manual)"
         audit: |
-          for i in $(oc get pods -oname -n openshift-etcd)
-          do
-            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/'
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
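+            # etcd pods exist only on control-plane nodes, so on any other node this
+            # check has nothing to inspect and the message above is the expected output.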
+          else
+            # Extract the --client-cert-auth value
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/'
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -55,10 +62,16 @@
         text: "Ensure that the --auto-tls argument is not set to true (Manual)"
         audit: |
           # Returns 0 if found, 1 if not found
-          for i in $(oc get pods -oname -n openshift-etcd)
-          do
-            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$?
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Check whether --auto-tls=true is present
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$?
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -73,16 +86,17 @@
       - id: 2.4
         text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)"
         audit: |
-          # For --peer-cert-file
-          for i in $(oc get pods -oname -n openshift-etcd)
-          do
-            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/'
-          done 2>/dev/null
-          # For --peer-key-file
-          for i in $(oc get pods -oname -n openshift-etcd)
-          do
-            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/'
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Extract the --peer-cert-file and --peer-key-file values
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/'
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/'
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -97,10 +111,16 @@
       - id: 2.5
         text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)"
         audit: |
-          for i in $(oc get pods -oname -n openshift-etcd)
-          do
-            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/'
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Extract the --peer-client-cert-auth value
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/'
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -116,10 +136,16 @@
         text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)"
         audit: |
           # Returns 0 if found, 1 if not found
-          for i in $(oc get pods -oname -n openshift-etcd)
-          do
-            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$?
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Check whether --peer-auto-tls=true is present
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$?
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -134,14 +160,17 @@
       - id: 2.7
         text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
         audit: |
-          for i in $(oc get pods -oname -n openshift-etcd)
-          do
-            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/'
-          done 2>/dev/null
-          for i in $(oc get pods -oname -n openshift-etcd)
-          do
-            oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/'
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Extract the --trusted-ca-file and --peer-trusted-ca-file values
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/'
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/'
+          fi
         use_multiple_values: true
         tests:
           test_items:
diff --git a/cfg/rh-1.0/master.yaml b/cfg/rh-1.0/master.yaml
index 8858908..37b50f0 100644
--- a/cfg/rh-1.0/master.yaml
+++ b/cfg/rh-1.0/master.yaml
@@ -11,10 +11,18 @@
       - id: 1.1.1
         text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Manual)"
         audit: |
-          for i in $( oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o name )
-          do
-            oc exec -n openshift-kube-apiserver $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml;
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-apiserver namespace
+          POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
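+            # $HOSTNAME is assumed to be the kube-bench pod's own name (the default
+            # pod hostname); that is what makes the node lookup above possible.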
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -29,11 +37,18 @@
       - id: 1.1.2
         text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)"
         audit: |
-          for i in $( oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o name )
-          do
-            oc exec -n openshift-kube-apiserver $i -- \
-            stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-apiserver namespace
+          POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -45,10 +60,18 @@
       - id: 1.1.3
         text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Manual)"
         audit: |
-          for i in $( oc get pods -n openshift-kube-controller-manager -o name -l app=kube-controller-manager)
-          do
-            oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml;
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-controller-manager namespace
+          POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -63,11 +86,18 @@
       - id: 1.1.4
         text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)"
         audit: |
-          for i in $( oc get pods -n openshift-kube-controller-manager -o name -l app=kube-controller-manager)
-          do
-            oc exec -n openshift-kube-controller-manager $i -- \
-            stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-controller-manager namespace
+          POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
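+            # The 2>/dev/null on the pod lookup hides oc errors, so an empty
+            # $POD_NAME is the only signal that no matching pod runs on this node.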
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -79,10 +109,18 @@
       - id: 1.1.5
         text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Manual)"
         audit: |
-          for i in $( oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name )
-          do
-            oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml;
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-scheduler namespace
+          POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -97,11 +135,18 @@
       - id: 1.1.6
         text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual))"
         audit: |
-          for i in $( oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name )
-          do
-            oc exec -n openshift-kube-scheduler $i -- \
-            stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-scheduler namespace
+          POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -113,10 +158,18 @@
       - id: 1.1.7
         text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Manual))"
         audit: |
-          for i in $( oc get pods -n openshift-etcd -l app=etcd -o name | grep etcd )
-          do
-            oc rsh -n openshift-etcd $i stat -c "$i %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
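+            # jsonpath '{.items[0]...}' takes the first match; a single static etcd
+            # pod per control-plane node is assumed here.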
+          else
+            # Execute the stat command
+            oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -131,10 +184,18 @@
       - id: 1.1.8
         text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)"
         audit: |
-          for i in $( oc get pods -n openshift-etcd -l app=etcd -o name | grep etcd )
-          do
-            oc rsh -n openshift-etcd $i stat -c "$i %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -146,16 +207,41 @@
       - id: 1.1.9
         text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)"
         audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
           # For CNI multus
-          for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; done 2>/dev/null
-          for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; done 2>/dev/null
+          # Get the pod name in the openshift-multus namespace
+          POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$POD_NAME %n permissions=%a\" /host/etc/cni/net.d/*.conf" 2>/dev/null
+            oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$POD_NAME %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null
+          fi
           # For SDN pods
-          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
-          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
+          fi
+
           # For OVS pods
-          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
-          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
-          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -170,17 +256,40 @@
       - id: 1.1.10
         text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
         audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
           # For CNI multus
-          for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \""$i %n %U:%G\" /host/etc/cni/net.d/*.conf"; done 2>/dev/null
-          for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \""$i %n %U:%G\" /host/var/run/multus/cni/net.d/*.conf"; done 2>/dev/null
+          # Get the pod name in the openshift-multus namespace
+          POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$POD_NAME %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null
+            oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$POD_NAME %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null
+          fi
          # For SDN pods
-          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
-          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G"{} \;; done 2>/dev/null
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
+          fi
           # For OVS pods in 4.5
-          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
-          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
-          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
-          # For OVS pods in 4.6 TBD
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -192,7 +301,18 @@
       - id: 1.1.11
         text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)"
         audit: |
-          for i in $(oc get pods -n openshift-etcd -l app=etcd -oname); do oc exec -n openshift-etcd -c etcd $i -- stat -c "$i %n permissions=%a" /var/lib/etcd/member; done
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /var/lib/etcd/member
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -207,7 +327,18 @@
       - id: 1.1.12
         text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)"
         audit: |
-          for i in $(oc get pods -n openshift-etcd -l app=etcd -oname); do oc exec -n openshift-etcd -c etcd $i -- stat -c "$i %n %U:%G" /var/lib/etcd/member; done
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
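+            # --field-selector spec.nodeName=... limits the listing to this node's
+            # pods, which is what keeps each check O(1) as the cluster grows.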
+          else
+            # Execute the stat command
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /var/lib/etcd/member
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -219,10 +350,8 @@
       - id: 1.1.13
         text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Manual))"
         audit: |
-          for i in $(oc get nodes -o name)
-          do
-            oc debug $i -- chroot /host stat -c "$i %n permissions=%a" /etc/kubernetes/kubeconfig
-          done 2>/dev/null
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubeconfig 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
@@ -237,10 +366,8 @@
       - id: 1.1.14
         text: "Ensure that the admin.conf file ownership is set to root:root (Manual)"
         audit: |
-          for i in $(oc get nodes -o name)
-          do
-            oc debug $i -- chroot /host stat -c "$i %n %U:%G" /etc/kubernetes/kubeconfig
-          done 2>/dev/null
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubeconfig 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
@@ -252,10 +379,18 @@
       - id: 1.1.15
         text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Manual)"
         audit: |
-          for i in $(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name)
-          do
-            oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-scheduler namespace
+          POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -270,10 +405,18 @@
       - id: 1.1.16
         text: "Ensure that the scheduler.conf file ownership is set to root:root (Manual)"
         audit: |
-          for i in $(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name)
-          do
-            oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-scheduler namespace
+          POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -285,10 +428,18 @@
       - id: 1.1.17
         text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Manual)"
         audit: |
-          for i in $(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o name)
-          do
-            oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-controller-manager namespace
+          POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -303,10 +454,18 @@
       - id: 1.1.18
         text: "Ensure that the controller-manager.conf file ownership is set to root:root (Manual)"
         audit: |
-          for i in $(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o name)
-          do
-            oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
-          done 2>/dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-controller-manager namespace
+          POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -319,15 +478,22 @@
         text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Manual)"
         audit: |
           # Should return root:root for all files and directories
-          for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}')
-          do
-            # echo $i static-pod-certs
-            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \;
-            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \;
-            # echo $i static-pod-resources
-            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \;
-            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \;
-          done
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-apiserver namespace
+          POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # static-pod-certs
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
+            # static-pod-resources
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -339,11 +505,18 @@
       - id: 1.1.20
         text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)"
         audit: |
-          for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}')
-          do
-            # echo $i static-pod-certs
-            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$i %n permissions=%a" {} \;
-          done
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-apiserver namespace
+          POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$POD_NAME %n permissions=%a" {} \;
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -358,11 +531,18 @@
       - id: 1.1.21
         text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)"
         audit: |
-          for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}')
-          do
-            # echo $i static-pod-certs
-            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$i %n permissions=%a" {} \;
-          done
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-apiserver namespace
+          POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$POD_NAME %n permissions=%a" {} \;
+          fi
         use_multiple_values: true
         tests:
           test_items:
@@ -532,11 +712,9 @@
           oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
           oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments'
           # For OCP 4.5 and earlier verify that authorization-mode is not used
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode
-            oc debug node/${node} -- chroot /host ps -aux | grep kubelet | grep authorization-mode
-          done
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode 2> /dev/null
+          oc debug node/$NODE_NAME -- chroot /host ps -aux | grep kubelet | grep authorization-mode 2> /dev/null
          #Check that no overrides are configured
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
        audit_config: |
diff --git a/cfg/rh-1.0/node.yaml b/cfg/rh-1.0/node.yaml
index 0ea5682..fb982d6 100644
--- a/cfg/rh-1.0/node.yaml
+++ b/cfg/rh-1.0/node.yaml
@@ -11,11 +11,8 @@
       - id: 4.1.1
         text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)"
         audit: |
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/systemd/system/kubelet.service
-          done 2> /dev/null
-        use_multiple_values: true
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null
         tests:
           test_items:
             - flag: "permissions"
@@ -30,11 +27,8 @@
         text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
         audit: |
           # Should return root:root for each node
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/systemd/system/kubelet.service
-          done 2> /dev/null
-        use_multiple_values: true
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null
         tests:
           test_items:
             - flag: root:root
@@ -45,11 +39,17 @@
       - id: 4.1.3
         text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)"
         audit: |
-          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname)
-          do
-            oc exec -n openshift-sdn $i -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml
-          done 2> /dev/null
-        use_multiple_values: true
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-sdn namespace
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
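+            # SDN pods are only present when the cluster uses the openshift-sdn
+            # network plugin; with other plugins this branch is the expected path.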
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$POD_NAME %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null
+          fi
         tests:
           bin_op: or
           test_items:
@@ -65,10 +65,17 @@
       - id: 4.1.4
         text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)"
         audit: |
-          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname)
-          do
-            oc exec -n openshift-sdn $i -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml
-          done 2> /dev/null
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-sdn namespace
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$POD_NAME %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null
+          fi
         use_multiple_values: true
         tests:
           bin_op: or
@@ -82,10 +89,8 @@
         text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)"
         audit: |
           # Check permissions
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/kubernetes/kubelet.conf
-          done 2> /dev/null
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
@@ -100,10 +105,8 @@
       - id: 4.1.6
         text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)"
         audit: |
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/kubernetes/kubelet.conf
-          done 2> /dev/null
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
@@ -115,10 +118,8 @@
       - id: 4.1.7
         text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)"
         audit: |
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/kubernetes/kubelet-ca.crt
-          done 2> /dev/null
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
@@ -133,10 +134,8 @@
       - id: 4.1.8
         text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
         audit: |
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/kubernetes/kubelet-ca.crt
-          done 2> /dev/null
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
@@ -148,10 +147,8 @@
       - id: 4.1.9
         text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)"
         audit: |
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /var/lib/kubelet/kubeconfig
-          done 2> /dev/null
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/kubeconfig 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
@@ -166,10 +163,8 @@
       - id: 4.1.10
         text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
         audit: |
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /var/lib/kubelet/kubeconfig
-          done 2> /dev/null
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/kubeconfig 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
@@ -184,10 +179,8 @@
       - id: 4.2.1
         text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
         audit: |
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf
-          done
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
@@ -205,10 +198,8 @@
         audit: |
           POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
           TOKEN=$(oc whoami -t)
-          for name in $(oc get nodes -ojsonpath='{.items[*].metadata.name}')
-          do
-            oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$name/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode'
-          done
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$NODE_NAME/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode' 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
@@ -220,14 +211,12 @@
       - id: 4.2.3
         text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
         audit: |
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf | awk -F': ' '{ print "clientCAFile=" $2 }'
-          done 2> /dev/null
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
-            - flag: clientCAFile="/etc/kubernetes/kubelet-ca.crt"
+            - flag: '"clientCAFile": "/etc/kubernetes/kubelet-ca.crt"'
         remediation: |
           None required. Changing the clientCAFile value is unsupported.
         scored: true
@@ -255,18 +244,13 @@
       - id: 4.2.5
         text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
         audit: |
-          # Should return 1 for each node
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout
-            echo exit_code=$?
-          done 2>/dev/null
-          # Should return 1 for each node
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf
-            echo exit_code=$?
-          done 2>/dev/null
+          # Should return 1 for the node
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/${NODE_NAME} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout 2> /dev/null
+          echo exit_code=$?
+          # Should return 1 for the node
+          oc debug node/${NODE_NAME} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf 2> /dev/null
+          echo exit_code=$?
         use_multiple_values: true
         tests:
           bin_op: or
@@ -291,10 +275,8 @@
       - id: 4.2.6
         text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)"
         audit: |
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}');
-          do
-            oc debug node/${node} -- chroot /host more /etc/kubernetes/kubelet.conf;
-          done
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host more /etc/kubernetes/kubelet.conf 2> /dev/null
         tests:
           test_items:
             - flag: protectKernelDefaults
@@ -349,10 +331,8 @@
       - id: 4.2.9
         text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)"
         audit: |
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}');
-          do
-            oc debug node/${node} -- chroot /host more /etc/kubernetes/kubelet.conf;
-          done
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf
           oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
           oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
         type: "manual"
@@ -365,7 +345,12 @@
       - id: 4.2.10
         text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
         audit: |
-          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo'
+          oc get configmap config -n openshift-kube-apiserver -o json \
+            | jq -r '.data["config.yaml"]' \
+            | jq -r '.apiServerArguments |
+                .["kubelet-client-certificate"][0],
+                .["kubelet-client-key"][0]
+              '
         tests:
           bin_op: and
           test_items:
@@ -380,15 +365,10 @@
         text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
         audit: |
           #Verify the rotateKubeletClientCertificate feature gate is not set to false
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate
-          done 2> /dev/null
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate 2> /dev/null
           # Verify the rotateCertificates argument is set to true
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot host grep rotate /etc/kubernetes/kubelet.conf;
-          done 2> /dev/null
+          oc debug node/${NODE_NAME} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null
         use_multiple_values: true
         tests:
           bin_op: or
@@ -411,24 +391,19 @@
         text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
         audit: |
           #Verify the rotateKubeletServerCertificate feature gate is on
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}');
-          do
-            oc debug node/${node} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf;
-          done 2> /dev/null
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/${NODE_NAME} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf 2> /dev/null
           # Verify the rotateCertificates argument is set to true
-          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
-          do
-            oc debug node/${node} -- chroot host grep rotate /etc/kubernetes/kubelet.conf;
-          done 2> /dev/null
+          oc debug node/${NODE_NAME} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null
         use_multiple_values: true
         tests:
           bin_op: or
           test_items:
-            - flag: RotateKubeletServerCertificate
+            - flag: rotateCertificates
              compare:
                op: eq
                value: true
-            - flag: rotateCertificates
+            - flag: RotateKubeletServerCertificate
              compare:
                op: eq
                value: true
diff --git a/cfg/rh-1.0/policies.yaml b/cfg/rh-1.0/policies.yaml
index 95de04e..e90cd87 100644
--- a/cfg/rh-1.0/policies.yaml
+++ b/cfg/rh-1.0/policies.yaml
@@ -78,10 +78,7 @@
         text: "Minimize the admission of privileged containers (Manual)"
         audit: |
           # needs verification
-          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
-          do
-            echo "$i"; oc describe scc $i | grep "Allow Privileged";
-          done
+          oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegedContainer:.allowPrivilegedContainer
         tests:
           test_items:
             - flag: "false"
@@ -93,10 +90,7 @@
       - id: 5.2.2
         text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
         audit: |
-          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
-          do
-            echo "$i"; oc describe scc $i | grep "Allow Host PID";
-          done
+          oc get scc -o=custom-columns=NAME:.metadata.name,allowHostPID:.allowHostPID
         tests:
           test_items:
             - flag: "false"
@@ -108,10 +102,7 @@
       - id: 5.2.3
         text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
         audit: |
-          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
-          do
-            echo "$i"; oc describe scc $i | grep "Allow Host IPC";
-          done
+          oc get scc -o=custom-columns=NAME:.metadata.name,allowHostIPC:.allowHostIPC
         tests:
           test_items:
             - flag: "false"
@@ -123,10 +114,7 @@
       - id: 5.2.4
         text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
         audit: |
-          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
-          do
-            echo "$i"; oc describe scc $i | grep "Allow Host Network";
-          done
+          oc get scc -o=custom-columns=NAME:.metadata.name,allowHostNetwork:.allowHostNetwork
         tests:
           test_items:
             - flag: "false"
@@ -138,10 +126,7 @@
       - id: 5.2.5
         text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
         audit: |
-          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
-          do
-            echo "$i"; oc describe scc $i | grep "Allow Privilege Escalation";
-          done
+          oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegeEscalation:.allowPrivilegeEscalation
         tests:
           test_items:
             - flag: "false"
@@ -153,18 +138,10 @@
       - id: 5.2.6
         text: "Minimize the admission of root containers (Manual)"
         audit: |
-          # needs verification
-          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
-          do
-            echo "$i";
-            oc describe scc $i | grep "Run As User Strategy";
-          done
+          # needs verification # | awk 'NR>1 {gsub("map\\[type:", "", $2); gsub("\\]$", "", $2); print $1 ":" $2}'
+          oc get scc -o=custom-columns=NAME:.metadata.name,runAsUser:.runAsUser.type
           #For SCCs with MustRunAs verify that the range of UIDs does not include 0
-          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
-          do
-            echo "$i";
-            oc describe scc $i | grep "\sUID";
-          done
+          oc get scc -o=custom-columns=NAME:.metadata.name,uidRangeMin:.runAsUser.uidRangeMin,uidRangeMax:.runAsUser.uidRangeMax
         tests:
           bin_op: or
           test_items:
@@ -183,11 +160,7 @@
         text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
         audit: |
           # needs verification
-          for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
-          do
-            echo "$i";
-            oc describe scc $i | grep "Required Drop Capabilities";
-          done
+          oc get scc -o=custom-columns=NAME:.metadata.name,requiredDropCapabilities:.requiredDropCapabilities
         tests:
           bin_op: or
           test_items: