---
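# kube-bench check definitions for the control-plane (master) components of the
# Red Hat OpenShift benchmark, profile rh-1.6 (cfg/rh-1.6/master.yaml).
# A typical invocation, assuming a kube-bench build that ships this profile and a
# working oc login on a control-plane host (flag names per the kube-bench docs):
#   kube-bench run --targets master --benchmark rh-1.6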
controls:
version: rh-1.6
id: 1
text: "Master Node Security Configuration"
type: "master"
groups:
- id: 1.1
text: "Master Node Configuration Files"
checks:
- id: 1.1.1
text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-kube-apiserver namespace
POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml
fi
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
There is no remediation for updating the permissions of kube-apiserver-pod.yaml.
The file is owned by an OpenShift operator and any changes to the file will result in a degraded cluster state.
Please do not attempt to remediate the permissions of this file.
scored: false
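# Note on evaluation (kube-bench semantics): with use_multiple_values, each line of
# audit output is tested on its own, and the bitmask comparison passes only when the
# reported mode sets no bits beyond the target value. Illustrative example:
#   stat -c 'permissions=%a' kube-apiserver-pod.yaml
#   permissions=600   -> passes against value "600"; 400 also passes, 640 or 666 fail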
- id: 1.1.2
text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-kube-apiserver namespace
POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml
fi
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
No remediation required; file ownership is managed by the operator.
scored: false
- id: 1.1.3
text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-kube-controller-manager namespace
POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml
fi
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
There is no remediation for updating the permissions of kube-controller-manager-pod.yaml.
The file is owned by an OpenShift operator and any changes to the file will result in a degraded cluster state.
Please do not attempt to remediate the permissions of this file.
scored: false
- id: 1.1.4
text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-kube-controller-manager namespace
POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml
fi
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
No remediation required; file ownership is managed by the operator.
scored: false
- id: 1.1.5
text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-kube-scheduler namespace
POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml
fi
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
There is no remediation for updating the permissions of kube-scheduler-pod.yaml.
The file is owned by an OpenShift operator and any changes to the file will result in a degraded cluster state.
Please do not attempt to remediate the permissions of this file.
scored: false
- id: 1.1.6
text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual))"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-kube-scheduler namespace
POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml
fi
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
No remediation required; file ownership is managed by the operator.
scored: false
- id: 1.1.7
text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Manual))"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml
fi
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
There is no remediation for updating the permissions of etcd-pod.yaml.
The file is owned by an OpenShift operator and any changes to the file will result in a degraded cluster state.
Please do not attempt to remediate the permissions of this file.
scored: false
- id: 1.1.8
text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml
fi
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
No remediation required; file ownership is managed by the operator.
scored: false
- id: 1.1.9
text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# For CNI multus
# Get the pod name in the openshift-multus namespace
POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -n "$POD_NAME" ]; then
# Execute the stat command
oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$POD_NAME %n permissions=%a\" /host/etc/cni/net.d/*.conf" 2>/dev/null
oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$POD_NAME %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null
fi
# For SDN pods
POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -n "$POD_NAME" ]; then
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
fi
# For OVS pods
POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -n "$POD_NAME" ]; then
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
fi
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
No remediation required; file permissions are managed by the operator.
scored: false
- id: 1.1.10
text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# For CNI multus
# Get the pod name in the openshift-multus namespace
POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -n "$POD_NAME" ]; then
# Execute the stat command
oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$POD_NAME %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null
oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$POD_NAME %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null
fi
# For SDN pods
POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -n "$POD_NAME" ]; then
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
fi
# For OVS pods in 4.5
POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -n "$POD_NAME" ]; then
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
fi
use_multiple_values: true
tests:
bin_op: or
test_items:
- flag: "root:root"
- flag: "openvswitch:openvswitch"
remediation: |
No remediation required; file ownership is managed by the operator.
scored: false
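# bin_op: or above means a line passes when any one test_item matches, so files owned
# by either root:root or openvswitch:openvswitch (the OVS daemon account) are accepted.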
- id: 1.1.11
text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /var/lib/etcd/member
fi
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "700"
remediation: |
No remediation required; file permissions are managed by the etcd operator.
scored: false
- id: 1.1.12
text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /var/lib/etcd/member
fi
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
No remediation required; directory ownership is managed by the etcd operator.
scored: false
- id: 1.1.13
text: "Ensure that the kubeconfig file ownership are set to 600 or more restrictive (Manual))"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubeconfig 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
There is no remediation for updating the permissions of kubeconfig.
The file is owned by an OpenShift operator and any changes to the file will result in a degraded cluster state.
Please do not attempt to remediate the permissions of this file.
scored: false
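# The audit above uses the oc debug pattern: it schedules a short-lived debug pod on the
# node and chroots into /host, so stat reads the file from the node's own filesystem.
# Equivalent manual spot-check (illustrative; <node-name> is a placeholder):
#   oc debug node/<node-name> -- chroot /host stat -c '%n %a %U:%G' /etc/kubernetes/kubeconfig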
- id: 1.1.14
text: "Ensure that the kubeconfig file ownership is set to root:root (Manual)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubeconfig 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
No remediation required; file ownership is managed by the operator.
scored: false
- id: 1.1.15
text: "Ensure that the Scheduler kubeconfig file permissions are set to 600 or more restrictive (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-kube-scheduler namespace
POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
fi
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
There is no remediation for updating the permissions of the kubeconfig file.
The file is owned by an OpenShift operator and any changes to the file will result in a degraded cluster state.
Please do not attempt to remediate the permissions of this file.
scored: false
- id: 1.1.16
text: "Ensure that the scheduler.conf file ownership is set to root:root (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-kube-scheduler namespace
POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
fi
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
No remediation required; file ownership is managed by the operator.
scored: false
- id: 1.1.17
text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-kube-controller-manager namespace
POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
fi
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
No remediation required; file permissions are managed by the operator.
scored: false
- id: 1.1.18
text: "Ensure that the controller-manager.conf file ownership is set to root:root (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-kube-controller-manager namespace
POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
fi
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
No remediation required; file ownership is managed by the operator.
scored: false
- id: 1.1.19
text: "Ensure that the OpenShift PKI directory and file ownership is set to root:root (Manual)"
audit: |
# Should return root:root for all files and directories
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-kube-apiserver namespace
POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Check static-pod-certs
oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
# Check static-pod-resources
oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
fi
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
No remediation required; file ownership is managed by the operator.
scored: false
- id: 1.1.20
text: "Ensure that the OpenShift PKI certificate file permissions are set to 600 or more restrictive (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-kube-apiserver namespace
POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$POD_NAME %n permissions=%a" {} \;
fi
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
No remediation required; file permissions are managed by the operator.
scored: false
- id: 1.1.21
text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-kube-apiserver namespace
POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$POD_NAME %n permissions=%a" {} \;
fi
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
No remediation required; file permissions are managed by the operator.
scored: false
- id: 1.2
text: "API Server"
checks:
- id: 1.2.1
text: "Ensure that anonymous requests are authorized (Manual)"
audit: |
# To see what unauthenticated users are allowed to do.
oc get clusterrolebindings -o json | jq '.items[] | select(.subjects[]?.kind == "Group" and .subjects[]?.name == "system:unauthenticated") | .metadata.name' | uniq
tests:
bin_op: or
test_items:
- flag: "self-access-reviewers"
- flag: "system:oauth-token-deleters"
- flag: "system:openshift:public-info-viewer"
- flag: "system:public-info-viewer"
- flag: "system:scope-impersonation"
- flag: "system:webhooks"
remediation: |
None required. The default configuration should not be modified.
scored: false
- id: 1.2.2
text: "Ensure that the --basic-auth-file argument is not set (Manual)"
audit: |
oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "basic-auth"
oc -n openshift-apiserver get cm config -o yaml | grep --color "basic-auth"
# The awk below collapses the clusteroperator AVAILABLE column into available=true/false for the flag test
oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }'
tests:
bin_op: and
test_items:
- flag: "basic-auth-file"
set: false
- flag: "available"
compare:
op: eq
value: true
remediation: |
None required. --basic-auth-file cannot be configured on OpenShift.
scored: false
- id: 1.2.3
text: "Ensure that the --token-auth-file parameter is not set (Manual)"
audit: |
# Verify that the token-auth-file flag is not present
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' | grep --color "token-auth-file"
oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' | grep --color "token-auth-file"
oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' | grep --color "token-auth-file"
#Verify that the authentication operator is running
oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }'
tests:
bin_op: and
test_items:
- flag: "token-auth-file"
set: false
- flag: "available"
compare:
op: eq
value: true
remediation: |
None required.
scored: false
- id: 1.2.4
text: "Use https for kubelet connections (Manual)"
audit: |
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
oc -n openshift-apiserver describe secret serving-cert
tests:
bin_op: and
test_items:
- flag: "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/kubelet-client/tls.crt"
- flag: "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/kubelet-client/tls.key"
remediation: |
No remediation is required.
OpenShift platform components use X.509 certificates for authentication.
OpenShift manages the CAs and certificates for platform components. This is not configurable.
scored: false
- id: 1.2.5
text: "Ensure that the kubelet uses certificates to authenticate (Manual)"
audit: |
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
oc -n openshift-apiserver describe secret serving-cert
tests:
bin_op: and
test_items:
- flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt"
- flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key"
remediation: |
No remediation is required.
OpenShift platform components use X.509 certificates for authentication.
OpenShift manages the CAs and certificates for platform components.
This is not configurable.
scored: false
- id: 1.2.6
text: "Verify that the kubelet certificate authority is set as appropriate (Manual)"
audit: |
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
tests:
test_items:
- flag: "/etc/kubernetes/static-pod-resources/configmaps/kubelet-serving-ca/ca-bundle.crt"
remediation: |
No remediation is required.
OpenShift platform components use X.509 certificates for authentication.
OpenShift manages the CAs and certificates for platform components.
This is not configurable.
scored: false
- id: 1.2.7
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)"
audit: |
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments."authorization-mode"[]'
tests:
test_items:
- flag: "AlwaysAllow"
set: false
remediation: |
None. RBAC is always on and the OpenShift API server does not use the values assigned to the flag authorization-mode.
scored: false
- id: 1.2.8
text: "Verify that RBAC is enabled (Manual)"
audit: |
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments."authorization-mode"[]'
tests:
test_items:
- flag: "RBAC"
remediation: |
None. It is not possible to disable RBAC.
scored: false
- id: 1.2.9
text: "Ensure that the APIPriorityAndFairness feature gate is enabled (Manual)"
audit: |
#Verify the APIPriorityAndFairness feature-gate
oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments'
tests:
test_items:
- flag: "APIPriorityAndFairness=true"
remediation: |
No remediation is required. By default, the OpenShift kubelet has been fixed to send fewer requests.
scored: false
- id: 1.2.10
text: "Ensure that the admission control plugin AlwaysAdmit is not set (Manual)"
audit: |
#Verify the set of admission-plugins for OCP 4.6 and higher
oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
tests:
test_items:
- flag: "AlwaysAdmit"
set: false
remediation: |
No remediation is required. The AlwaysAdmit admission controller cannot be enabled in OpenShift.
scored: false
- id: 1.2.11
text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
audit: |
#Verify the set of admission-plugins for OCP 4.6 and higher
oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
tests:
test_items:
- flag: "AlwaysPullImages"
set: false
remediation: |
None required.
scored: false
- id: 1.2.12
text: "Ensure that the admission control plugin ServiceAccount is set (Manual)"
audit: |
oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
tests:
test_items:
- flag: "ServiceAccount"
set: true
remediation: |
None required. By default, OpenShift configures the ServiceAccount admission controller.
scored: false
- id: 1.2.13
text: "Ensure that the admission control plugin NamespaceLifecycle is set (Manual)"
audit: |
oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
tests:
test_items:
- flag: "NamespaceLifecycle"
remediation: |
None required. OpenShift configures NamespaceLifecycle admission controller by default.
scored: false
- id: 1.2.14
text: "Ensure that the admission control plugin SecurityContextConstraint is set (Manual)"
audit: |
oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
tests:
test_items:
- flag: "security.openshift.io/SecurityContextConstraint"
remediation: |
None required. By default, the SecurityContextConstraints admission controller is configured and cannot be disabled.
scored: false
- id: 1.2.15
text: "Ensure that the admission control plugin NodeRestriction is set (Manual)"
audit: |
oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
tests:
test_items:
- flag: "NodeRestriction"
remediation: |
None required. In OpenShift, the NodeRestriction admission plugin is enabled by default and cannot be disabled.
scored: false
- id: 1.2.16
text: "Ensure that the --insecure-bind-address argument is not set (Manual)"
audit: |
# InsecureBindAddress=true should not be in the results
oc get kubeapiservers.operator.openshift.io cluster -ojson | jq '.spec.observedConfig.apiServerArguments."feature-gates"'
# Result should be only 6443
oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}'
# Result should be only 8443
oc -n openshift-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}'
tests:
bin_op: and
test_items:
- flag: "InsecureBindAddress=true"
set: false
- flag: 6443
- flag: 8443
remediation: |
None required. By default, the openshift-kube-apiserver is served over HTTPS with authentication and authorization.
scored: false
- id: 1.2.17
text: "Ensure that the --insecure-port argument is set to 0 (Manual)"
audit: |
# Should return 6443
oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}'
tests:
test_items:
- flag: "6443"
remediation: |
None required. By default, the openshift-kube-apiserver is served over HTTPS with authentication and authorization.
scored: false
- id: 1.2.18
text: "Ensure that the --secure-port argument is not set to 0 (Manual)"
audit: |
echo bindAddress=$(oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.servingInfo.bindAddress')
# Should return only 6443
echo ports=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[*].spec.containers[?(@.name=="kube-apiserver")].ports[*].containerPort}')
tests:
bin_op: and
test_items:
- flag: 'bindAddress'
compare:
op: eq
value: '"0.0.0.0:6443"'
- flag: "ports"
compare:
op: eq
value: '6443'
remediation: |
None required. By default, the openshift-kube-apiserver is served over HTTPS with authentication and authorization;
the secure API endpoint is bound to 0.0.0.0:6443.
scored: false
- id: 1.2.19
text: "Ensure that the healthz endpoint is protected by RBAC (Manual)"
type: manual
audit: |
# Verify endpoints
oc -n openshift-kube-apiserver describe endpoints
# Check config for ports, livenessProbe, readinessProbe, healthz
oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
# Test to validate RBAC enabled on the apiserver endpoint; check with non-admin role
# oc project openshift-kube-apiserver
POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
PORT=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}')
# Following should return 403 Forbidden
oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -k
# Create a service account to test RBAC
oc create -n openshift-kube-apiserver sa permission-test-sa
# Should return 403 Forbidden
export SA_TOKEN=$(oc create token -n openshift-kube-apiserver permission-test-sa)
oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
# As cluster admin, should succeed
export CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
# Cleanup
unset CLUSTER_ADMIN_TOKEN SA_TOKEN
oc delete -n openshift-kube-apiserver sa permission-test-sa
remediation: |
None required as the healthz and metrics endpoints are protected by RBAC.
scored: false
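# Note: oc create token (used above to mint the test token) requires OpenShift 4.11+
# (kubectl 1.24+); on older clusters, read the token from the service account's token
# secret instead. Two 403 responses followed by a successful cluster-admin request
# demonstrate that RBAC gates the endpoint.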
- id: 1.2.20
text: "Ensure that the --audit-log-path argument is set (Manual)"
audit: |
# Should return "/var/log/kube-apiserver/audit.log"
output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?')
[ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
oc rsh -n openshift-kube-apiserver -c kube-apiserver $POD ls /var/log/kube-apiserver/audit.log 2>/dev/null
# Should return 0
echo kube_apiserver_exit_code=$?
# Should return "/var/log/openshift-apiserver/audit.log"
output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?')
[ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
POD=$(oc get pods -n openshift-apiserver -l apiserver=true -o jsonpath='{.items[0].metadata.name}')
oc rsh -n openshift-apiserver $POD ls /var/log/openshift-apiserver/audit.log 2>/dev/null
# Should return 0
echo apiserver_exit_code=$?
tests:
bin_op: and
test_items:
- flag: "/var/log/kube-apiserver/audit.log"
- flag: "/var/log/kube-apiserver/audit.log" # This is needed for second printing in ls command.
- flag: "kube_apiserver_exit_code=0"
- flag: "/var/log/openshift-apiserver/audit.log"
- flag: "/var/log/openshift-apiserver/audit.log" # This is needed for second printing in ls command.
- flag: "apiserver_exit_code=0"
remediation: |
None required. This is managed by the cluster apiserver operator. By default, auditing is enabled.
scored: false
- id: 1.2.21
text: "Ensure that the audit logs are forwarded off the cluster for retention (Manual)"
type: "manual"
remediation: |
Follow the documentation for log forwarding. Forwarding logs to third party systems
https://docs.openshift.com/container-platform/4.15/observability/logging/log_collection_forwarding/configuring-log-forwarding.html
scored: false
- id: 1.2.22
text: "Ensure that the maximumRetainedFiles argument is set to 10 or as appropriate (Manual)"
audit: |
output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?')
[ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true
tests:
test_items:
- flag: "audit-log-maxbackup"
compare:
op: gte
value: 10
remediation: |
None required. By default, auditing is enabled and the maximum audit log backup is set to 10.
scored: false
- id: 1.2.23
text: "Ensure that the maximumFileSizeMegabytes argument is set to 100 (Manual)"
audit: |
output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?')
[ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true
tests:
test_items:
- flag: "audit-log-maxsize"
compare:
op: gte
value: 100
remediation: |
None required. The audit-log-maxsize parameter defaults to 100; changing it is not supported.
maximumFileSizeMegabytes: 100
scored: false
- id: 1.2.24
text: "Ensure that the --request-timeout argument is set (Manual)"
audit: |
echo requestTimeoutSeconds=`oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["min-request-timeout"][]?'`
tests:
test_items:
- flag: "requestTimeoutSeconds"
compare:
op: gte
value: 100
remediation: |
None required. By default, min-request-timeout is set to 3600 seconds in OpenShift.
scored: false
- id: 1.2.25
text: "Ensure that the --service-account-lookup argument is set to true (Manual)"
audit: |
output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"][0]')
[ "$output" == "null" ] && echo "ocp 4.5 has service-account-lookup=true compiled" || echo service-account-lookup=$output
tests:
test_items:
- flag: "service-account-lookup=true"
remediation: |
None required. Service account lookup is enabled by default.
scored: false
- id: 1.2.26
text: "Ensure that the --service-account-key-file argument is set as appropriate (Manual)"
audit: |
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .serviceAccountPublicKeyFiles[]
tests:
bin_op: and
test_items:
- flag: "/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs"
- flag: "/etc/kubernetes/static-pod-resources/configmaps/bound-sa-token-signing-certs"
remediation: |
The OpenShift API server does not use the service-account-key-file argument.
The ServiceAccount token authenticator is configured with serviceAccountConfig.publicKeyFiles.
OpenShift does not reuse the apiserver TLS key. This is not configurable.
scored: false
- id: 1.2.27
text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Manual)"
audit: |
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-certfile"]'
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-keyfile"]'
tests:
bin_op: and
test_items:
- flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.crt"
- flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.key"
remediation: |
OpenShift automatically manages TLS and client certificate authentication for etcd.
This is not configurable.
scored: false
- id: 1.2.28
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
audit: |
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-cert-file"][]'
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-private-key-file"][]'
tests:
bin_op: and
test_items:
- flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt"
- flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key"
remediation: |
None. By default, OpenShift uses X.509 certificates to provide secure connections between the API server and
node/kubelet. OpenShift does not use values assigned to the tls-cert-file or tls-private-key-file flags.
You may optionally set a custom default certificate to be used by the API server when serving content in
order to enable clients to access the API server at a different host name or without the need to distribute
the cluster-managed certificate authority (CA) certificates to the clients.
Follow the directions in the OpenShift documentation
https://docs.openshift.com/container-platform/4.15/security/certificates/api-server.html
scored: false
- id: 1.2.29
text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)"
audit: |
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["client-ca-file"][]'
tests:
test_items:
- flag: "/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt"
remediation: |
None required. By default, OpenShift configures the client-ca-file and automatically manages the certificate.
It does not use the value assigned to the client-ca-file flag.
You may optionally set a custom default certificate to be used by the API server when serving content in
order to enable clients to access the API server at a different host name or without the need to distribute
the cluster-managed certificate authority (CA) certificates to the clients.
Please follow the OpenShift documentation for providing certificates for OpenShift to use.
https://docs.openshift.com/container-platform/4.15/security/certificate_types_descriptions/user-provided-certificates-for-api-server.html#location
scored: false
- id: 1.2.30
text: "Ensure that the --etcd-cafile argument is set as appropriate (Manual)"
audit: |
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-cafile"][]'
tests:
test_items:
- flag: "/etc/kubernetes/static-pod-resources/configmaps/etcd-serving-ca/ca-bundle.crt"
remediation: |
None required. By default, OpenShift uses X.509 certificates to provide secure communication to etcd.
OpenShift does not use values assigned to etcd-cafile. OpenShift generates the etcd-cafile and sets the
arguments appropriately in the API server. Communication with etcd is secured by the etcd serving CA.
scored: false
- id: 1.2.31
text: "Ensure that encryption providers are appropriately configured (Manual)"
audit: |
# encrypt the etcd datastore
oc get openshiftapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}{end}'
tests:
test_items:
- flag: "EncryptionCompleted"
remediation: |
Follow the OpenShift documentation for encrypting etcd data.
https://docs.openshift.com/container-platform/4.15/security/encrypting-etcd.html
scored: false
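# When encryption has finished, the Encrypted condition reports reason=EncryptionCompleted
# and a message listing the encrypted resource types (assumed typical condition output).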
- id: 1.2.32
text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
type: manual
audit: |
# verify cipher suites
oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
oc get kubeapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
oc get openshiftapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
oc get -n openshift-ingress-operator ingresscontroller/default -o json | jq .status.tlsProfile
remediation: |
None required. By default, OpenShift uses the Intermediate TLS profile, which requires a minimum of TLS 1.2.
You can configure TLS security profiles by following the OpenShift TLS documentation.
https://docs.openshift.com/container-platform/4.15/security/tls-security-profiles.html
Note: The HAProxy Ingress controller image does not support TLS 1.3 and because the Modern profile requires
TLS 1.3, it is not supported. The Ingress Operator converts the Modern profile to Intermediate.
The Ingress Operator also converts the TLS 1.0 of an Old or Custom profile to 1.1, and TLS 1.3 of a Custom
profile to 1.2.
scored: false
- id: 1.2.33
text: "Ensure unsupported configuration overrides are not used (Manual)"
audit: |
oc get kubeapiserver/cluster -o jsonpath='{.spec.unsupportedConfigOverrides}'
tests:
test_items:
- flag: "null"
remediation: |
None required. By default, OpenShift sets this value to null and doesn't support overriding configuration
with unsupported features.
scored: false
- id: 1.3
text: "Controller Manager"
checks:
- id: 1.3.1
text: "Ensure that controller manager healthz endpoints are protected by RBAC (Manual)"
type: manual
audit: |
# Verify configuration for ports, livenessProbe, readinessProbe, healthz
oc -n openshift-kube-controller-manager get cm kube-controller-manager-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers[].livenessProbe'
oc -n openshift-kube-controller-manager get cm kube-controller-manager-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers[].readinessProbe'
# Verify endpoints
oc -n openshift-kube-controller-manager describe endpoints
# Test to validate RBAC enabled on the controller endpoint; check with non-admin role
POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}')
PORT=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}')
# Following should return 403 Forbidden
oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -k
# Create a service account to test RBAC
oc create -n openshift-kube-controller-manager sa permission-test-sa
# Should return 403 Forbidden
export SA_TOKEN=$(oc create token -n openshift-kube-controller-manager permission-test-sa)
oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
# As cluster admin, should succeed
CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
# Cleanup
unset CLUSTER_ADMIN_TOKEN POD PORT SA_TOKEN
oc delete -n openshift-kube-controller-manager sa permission-test-sa
remediation: |
None required. By default, the operator exposes metrics via metrics service. The metrics are collected
from the OpenShift Controller Manager and the Kubernetes Controller Manager and protected by RBAC.
scored: false
- id: 1.3.2
text: "Ensure that the --use-service-account-credentials argument is set to true (Manual)"
audit: |
echo use-service-account-credentials=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["use-service-account-credentials"][]'`
tests:
test_items:
- flag: "use-service-account-credentials"
compare:
op: eq
value: true
remediation: |
The OpenShift Controller Manager operator manages and updates the OpenShift Controller Manager.
The Kubernetes Controller Manager operator manages and updates the Kubernetes Controller Manager deployed on top of OpenShift.
This operator is configured via KubeControllerManager custom resource.
scored: false
- id: 1.3.3
text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Manual)"
audit: |
oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["service-account-private-key-file"][]'
tests:
test_items:
- flag: "/etc/kubernetes/static-pod-resources/secrets/service-account-private-key/service-account.key"
remediation: |
None required.
OpenShift manages the service account credentials for the scheduler automatically.
scored: false
- id: 1.3.4
text: "Ensure that the --root-ca-file argument is set as appropriate (Manual)"
audit: |
oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["root-ca-file"][]'
tests:
test_items:
- flag: "/etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt"
remediation: |
None required.
Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform.
scored: false
- id: 1.3.5
text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Manual)"
audit: |
echo secure-port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["secure-port"][]'`
#Following should fail with a http code 403
POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}')
oc rsh -n openshift-kube-controller-manager -c kube-controller-manager $POD curl https://localhost:10257/metrics -k
# Cleanup
unset POD
tests:
bin_op: and
test_items:
- flag: "secure-port"
compare:
op: eq
value: "\"10257\""
- flag: "\"code\": 403"
remediation: |
Edit the Controller Manager pod specification file $controllermanagerconf
on the master node and ensure the correct value for the --bind-address parameter.
scored: false
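# $controllermanagerconf in the remediation above is a kube-bench config variable,
# substituted at runtime from the file paths declared in kube-bench's cfg/config.yaml.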
- id: 1.4
text: "Scheduler"
checks:
- id: 1.4.1
text: "Ensure that the healthz endpoints for the scheduler are protected by RBAC (Manual)"
type: manual
audit: |
# check configuration for ports, livenessProbe, readinessProbe, healthz
oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers[].livenessProbe'
oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers[].readinessProbe'
# Test to verify endpoints
oc -n openshift-kube-scheduler describe endpoints
# Test to validate RBAC enabled on the scheduler endpoint; check with non-admin role
# oc project openshift-kube-scheduler
export POD=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}')
export PORT=$(oc get pod -n openshift-kube-scheduler $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}')
# Should return 403 Forbidden
oc rsh -n openshift-kube-scheduler ${POD} curl https://localhost:${PORT}/metrics -k
# Create a service account to test RBAC
oc create sa -n openshift-kube-scheduler permission-test-sa
# Should return 403 Forbidden
export SA_TOKEN=$(oc create token -n openshift-kube-scheduler permission-test-sa)
oc rsh -n openshift-kube-scheduler ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
# As cluster admin, should succeed
export CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
oc rsh -n openshift-kube-scheduler ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
# Cleanup
unset CLUSTER_ADMIN_TOKEN POD PORT SA_TOKEN
oc delete sa -n openshift-kube-scheduler permission-test-sa
remediation: |
None required. By default, the healthz and metrics endpoints are protected by RBAC.
scored: false
- id: 1.4.2
text: "Verify that the scheduler API service is protected by RBAC (Manual)"
type: manual
audit: |
# To verify endpoints
oc -n openshift-kube-scheduler describe endpoints
# To verify that bind-address is not used in the configuration and that the port is set to 0
oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers[]|select(.name=="kube-scheduler")|.args'
# To test for RBAC:
# oc project openshift-kube-scheduler
export POD=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}')
export POD_IP=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o jsonpath='{.items[0].status.podIP}')
export PORT=$(oc get pod -n openshift-kube-scheduler $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}')
# Should return a 403
oc rsh -n openshift-kube-scheduler ${POD} curl https://${POD_IP}:${PORT}/metrics -k
# Create a service account to test RBAC
oc create sa -n openshift-kube-scheduler permission-test-sa
# Should return 403 Forbidden
export SA_TOKEN=$(oc create token -n openshift-kube-scheduler permission-test-sa)
oc rsh -n openshift-kube-scheduler ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
# As cluster admin, should succeed
export CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
oc rsh -n openshift-kube-scheduler ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
# Cleanup
unset CLUSTER_ADMIN_TOKEN POD POD_IP PORT SA_TOKEN
oc delete sa -n openshift-kube-scheduler permission-test-sa
remediation: |
By default, the --bind-address argument is not used and the metrics endpoint is protected by RBAC when using the pod IP address.
scored: false