Deepanshu Bhatia, 2025-06-17 20:44:19 +05:30, committed via GitHub
commit fd9614e220
15 changed files with 2432 additions and 6 deletions


@@ -297,6 +297,7 @@ version_mapping:
"ocp-3.10": "rh-0.7"
"ocp-3.11": "rh-0.7"
"ocp-4.0": "rh-1.0"
"ocp-4.15": "rh-1.6"
"aks-1.0": "aks-1.0"
"aks-1.7": "aks-1.7"
"ack-1.0": "ack-1.0"
@@ -451,6 +452,12 @@ target_mapping:
- "controlplane"
- "policies"
- "etcd"
"rh-1.6":
- "master"
- "node"
- "controlplane"
- "policies"
- "etcd"
"eks-stig-kubernetes-v1r6":
- "node"
- "controlplane"


@@ -13,7 +13,7 @@ groups:
type: "manual"
audit: |
#To get a list of users and service accounts with the cluster-admin role
oc get clusterrolebindings -o=customcolumns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind |
oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind |
grep cluster-admin
#To verify that kubeadmin is removed, no results should be returned
oc get secrets kubeadmin -n kube-system

cfg/rh-1.6/config.yaml (new file, 2 lines)

@@ -0,0 +1,2 @@
---
## Version-specific settings that override the values in cfg/config.yaml


@@ -0,0 +1,67 @@
---
controls:
version: rh-1.6
id: 3
text: "Control Plane Configuration"
type: "controlplane"
groups:
- id: 3.1
text: "Authentication and Authorization"
checks:
- id: 3.1.1
text: "Client certificate authentication should not be used for users (Manual)"
audit: |
# To verify user authentication is enabled
oc describe authentication
# To verify that an identity provider is configured
oc get oauth -o json | jq '.items[].spec.identityProviders'
# To verify that a custom cluster-admin user exists
oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User
# To verify that kubeadmin is removed, no results should be returned
oc get secrets kubeadmin -n kube-system
type: manual
remediation: |
Configure an identity provider for the OpenShift cluster.
Understanding identity provider configuration | Authentication | OpenShift
Container Platform 4.15. Once an identity provider has been defined,
you can use RBAC to define and apply permissions.
After you define an identity provider and create a new cluster-admin user,
remove the kubeadmin user to improve cluster security.
scored: false
- id: 3.2
text: "Logging"
checks:
- id: 3.2.1
text: "Ensure that a minimal audit policy is created (Manual)"
audit: |
#View the audit log profile
oc get apiserver cluster -o json | jq .spec.audit.profile
#To verify kube apiserver audit config
oc get cm -n openshift-kube-apiserver config -o json | jq -r '.data."config.yaml"' | jq .apiServerArguments
#To verify openshift apiserver audit config
oc get cm -n openshift-apiserver config -o json | jq -r '.data."config.yaml"' | jq .apiServerArguments
#Review the audit policies of openshift apiserver
oc get cm -n openshift-apiserver audit -o json | jq -r '.data."policy.yaml"'
#Review the audit policies of kube apiserver
oc get cm -n openshift-kube-apiserver kube-apiserver-audit-policies -o json | jq -r '.data."policy.yaml"'
#To view kube apiserver log files
oc adm node-logs --role=master --path=kube-apiserver/
#To view openshift apiserver log files
oc adm node-logs --role=master --path=openshift-apiserver/
type: manual
remediation: |
No remediation required.
scored: false
- id: 3.2.2
text: "Ensure that the audit policy covers key security concerns (Manual)"
audit: |
#To verify openshift apiserver audit config
oc get configmap -n openshift-kube-apiserver kube-apiserver-audit-policies -ojson | jq -r '.data."policy.yaml"'
#To verify kube apiserver audit config
oc get configmap -n openshift-apiserver audit -o json | jq -r '.data."policy.yaml"'
type: manual
remediation: |
Update the audit log policy profile to use WriteRequestBodies.
scored: false
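
Two hedged sketches related to the remediations above. For 3.1.1, the documented OpenShift procedure for dropping the temporary kubeadmin user once another cluster-admin exists:

# Confirm a non-kubeadmin cluster-admin exists first
oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User
# Remove the kubeadmin credentials (irreversible)
oc delete secrets kubeadmin -n kube-system

For 3.2.2, the audit profile lives on the cluster APIServer resource; switching it to WriteRequestBodies could look like the following (verify the field path against your cluster version's documentation):

# Set the audit log policy profile
oc patch apiserver cluster --type=merge -p '{"spec":{"audit":{"profile":"WriteRequestBodies"}}}'
# Confirm, using the same query as the 3.2.1 audit
oc get apiserver cluster -o json | jq .spec.audit.profile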

cfg/rh-1.6/etcd.yaml (new file, 185 lines)

@@ -0,0 +1,185 @@
---
controls:
version: rh-1.6
id: 2
text: "Etcd Node Configuration"
type: "etcd"
groups:
- id: 2
text: "Etcd Node Configuration Files"
checks:
- id: 2.1
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/'
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/'
fi
use_multiple_values: true
tests:
test_items:
- flag: "file"
compare:
op: regex
# some systems have certs in directory '/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs'
value: \/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-(?:serving|certs)\/etcd-serving-.*\.(?:crt|key)
remediation: |
OpenShift does not use the etcd-certfile or etcd-keyfile flags.
Certificates for etcd are managed by the etcd cluster operator.
scored: false
- id: 2.2
text: "Ensure that the --client-cert-auth argument is set to true (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/'
fi
use_multiple_values: true
tests:
test_items:
- flag: "--client-cert-auth"
compare:
op: eq
value: true
remediation: |
This setting is managed by the cluster etcd operator. No remediation required.
scored: false
- id: 2.3
text: "Ensure that the --auto-tls argument is not set to true (Manual)"
audit: |
# Returns 0 if found, 1 if not found
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$?
fi
use_multiple_values: true
tests:
test_items:
- flag: "exit_code"
compare:
op: eq
value: "1"
remediation: |
This setting is managed by the cluster etcd operator. No remediation required.
scored: false
- id: 2.4
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/'
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/'
fi
use_multiple_values: true
tests:
test_items:
- flag: "file"
compare:
op: regex
# some systems have certs in directory '/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs'
value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-(?:peer|certs)\/etcd-peer-.*\.(?:crt|key)'
remediation: |
None. This configuration is managed by the etcd operator.
scored: false
- id: 2.5
text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/'
fi
use_multiple_values: true
tests:
test_items:
- flag: "--peer-client-cert-auth"
compare:
op: eq
value: true
remediation: |
This setting is managed by the cluster etcd operator. No remediation required.
scored: false
- id: 2.6
text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)"
audit: |
# Returns 0 if found, 1 if not found
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$?
fi
use_multiple_values: true
tests:
test_items:
- flag: "exit_code"
compare:
op: eq
value: "1"
remediation: |
This setting is managed by the cluster etcd operator. No remediation required.
scored: false
- id: 2.7
text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/'
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/'
fi
use_multiple_values: true
tests:
test_items:
- flag: "file"
compare:
op: regex
value: '\/etc\/kubernetes\/static-pod-certs\/configmaps\/etcd-(?:serving|peer-client)-ca\/ca-bundle\.(?:crt|key)'
remediation: |
None required. Certificates for etcd are managed by the OpenShift cluster etcd operator.
scored: false
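
Every check in this file reads flags off the same etcd process, so a single hedged helper can list all of the TLS-related flags in one pass (it reuses the pod lookup from the audits above; the grep pattern is an assumption about which flags matter here):

NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
# Print every certificate/TLS flag on the etcd command line
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | tr ' ' '\n' | grep -E '^--(cert-file|key-file|client-cert-auth|auto-tls|peer-|trusted-ca-file)'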

cfg/rh-1.6/master.yaml (new file, 1229 lines)

File diff suppressed because it is too large.

cfg/rh-1.6/node.yaml (new file, 384 lines)

@@ -0,0 +1,384 @@
---
controls:
version: rh-1.6
id: 4
text: "Worker Node Security Configuration"
type: "node"
groups:
- id: 4.1
text: "Worker Node Configuration Files"
checks:
- id: 4.1.1
text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
By default, the kubelet service file has permissions of 644.
scored: true
- id: 4.1.2
text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
audit: |
# Should return root:root for each node
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null
tests:
test_items:
- flag: root:root
remediation: |
By default, the kubelet service file has ownership of root:root.
scored: true
- id: 4.1.3
text: "If proxy kube proxy configuration file exists ensure permissions are set to 644 or more restrictive (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-sdn namespace
POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null
fi
tests:
bin_op: or
test_items:
- flag: "permissions"
set: true
compare:
op: bitmask
value: "644"
remediation: |
None needed.
scored: false
- id: 4.1.4
text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-sdn namespace
POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null
fi
use_multiple_values: true
tests:
bin_op: or
test_items:
- flag: root:root
remediation: |
None required. The configuration is managed by OpenShift operators.
scored: false
- id: 4.1.5
text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)"
audit: |
# Check permissions
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
None required.
scored: true
- id: 4.1.6
text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: root:root
remediation: |
None required.
scored: true
- id: 4.1.7
text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
None required.
scored: true
- id: 4.1.8
text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: root:root
remediation: |
None required.
scored: true
- id: 4.1.9
text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
# default setups have the file present at /var/lib/kubelet only. Custom setup is present at /var/data/kubelet/config.json.
oc debug node/$NODE_NAME -- /bin/sh -c '
if [ -f /var/data/kubelet/config.json ]; then
chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/data/kubelet/config.json;
else
chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/config.json;
fi' 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
None required.
scored: true
- id: 4.1.10
text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
# default setups have the file present at /var/lib/kubelet only. Custom setup is present at /var/data/kubelet/config.json.
oc debug node/$NODE_NAME -- /bin/sh -c '
if [ -f /var/data/kubelet/config.json ]; then
chroot /host stat -c "$NODE_NAME %n %U:%G" /var/data/kubelet/config.json;
else
chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/config.json;
fi' 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: root:root
remediation: |
None required.
scored: true
- id: 4.2
text: "Kubelet"
checks:
- id: 4.2.1
text: "Activate Garbage collection in OpenShift Container Platform 4, as appropriate (Manual)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "enabled: true"
set: false
remediation: |
To configure, follow the directions in Garbage Collection Remediation https://docs.openshift.com/container-platform/latest/nodes/nodes/nodes-nodes-garbage-collection.html.
scored: true
- id: 4.2.2
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.anonymous.enabled' 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "true"
set: false
remediation: |
Create a kubeletconfig to explicitly disable anonymous authentication. Examples of how
to do this can be found in the OpenShift documentation.
scored: true
- id: 4.2.3
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
type: manual
# Takes a lot of time for connection to fail and
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authorization' 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: mode
compare:
op: noteq
value: AlwaysAllow
remediation: |
None required. Unauthenticated/Unauthorized users have no access to OpenShift nodes.
scored: true
- id: 4.2.4
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq -r '.kubeletconfig.authentication.x509.clientCAFile' 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: '/etc/kubernetes/kubelet-ca.crt'
remediation: |
None required. Changing the clientCAFile value is unsupported.
scored: true
- id: 4.2.5
text: "Verify that the read only port is not used or is set to 0 (Automated)"
audit: |
oc -n openshift-kube-apiserver get cm config -o json | jq -r '.data."config.yaml"' | jq -r '.apiServerArguments."kubelet-read-only-port"[]' 2> /dev/null
tests:
test_items:
- flag: '0'
remediation: |
In earlier versions of OpenShift 4, the read-only-port argument is not used.
Follow the instructions in the documentation https://docs.openshift.com/container-platform/latest/post_installation_configuration/machine-configuration-tasks.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-machine-configuration-tasks
to create a kubeletconfig CRD and ensure that kubelet-read-only-port is set to 0.
scored: true
- id: 4.2.6
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
audit: |
# Should return 1 for node
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
echo streamingConnectionIdleTimeout=$(oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.streamingConnectionIdleTimeout' 2> /dev/null)
use_multiple_values: true
tests:
test_items:
- flag: streamingConnectionIdleTimeout
compare:
op: noteq
value: 0s
remediation: |
Follow the instructions https://docs.openshift.com/container-platform/latest/post_installation_configuration/machine-configuration-tasks.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-machine-configuration-tasks in the documentation to create a kubeletconfig CRD and set
the streamingConnectionIdleTimeout to the desired value. Do not set the value to 0.
scored: true
- id: 4.2.7
text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual)"
audit: |
# Should return 1 for node
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.makeIPTablesUtilChains' 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "true"
remediation: |
None required. The makeIPTablesUtilChains argument is set to true by default.
scored: false
- id: 4.2.8
text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
echo kubeAPIQPS=$(oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.kubeAPIQPS' 2> /dev/null)
tests:
test_items:
- flag: kubeAPIQPS
compare:
op: gte
value: 0
remediation: |
None required by default. Follow the documentation to edit kubeletconfig parameters
https://docs.openshift.com/container-platform/4.15/post_installation_configuration/machine-configuration-tasks.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-machine-configuration-tasks
scored: false
- id: 4.2.9
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
audit: |
oc get configmap config -n openshift-kube-apiserver -ojson | \
jq -r '.data["config.yaml"]' | \
jq -r '.apiServerArguments | ."kubelet-client-certificate"[0], ."kubelet-client-key"[0]' 2> /dev/null
tests:
bin_op: and
test_items:
- flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt"
- flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key"
remediation: |
OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
This is not configurable.
scored: true
- id: 4.2.10
text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
echo rotateCertificates=$(oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq -r '.kubeletconfig.rotateCertificates' 2> /dev/null)
use_multiple_values: true
tests:
test_items:
- flag: rotateCertificates
compare:
op: eq
value: true
remediation: |
None required.
scored: false
- id: 4.2.11
text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
audit: |
#Verify the rotateKubeletServerCertificate feature gate is on
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
echo RotateKubeletServerCertificate=$(oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.featureGates.RotateKubeletServerCertificate' 2> /dev/null)
# Verify the rotateCertificates argument is set to true
echo rotateCertificates=$(oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq -r '.kubeletconfig.rotateCertificates' 2> /dev/null)
tests:
bin_op: and
test_items:
- flag: RotateKubeletServerCertificate
compare:
op: eq
value: true
- flag: rotateCertificates
compare:
op: eq
value: true
remediation: |
None required. By default, kubelet server certificate rotation is enabled.
scored: false
- id: 4.2.12
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
audit: |
# needs verification
# verify cipher suites
oc get --namespace=openshift-ingress-operator ingresscontroller/default -o json | jq '.status.tlsProfile.ciphers' 2> /dev/null
oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.servingInfo.cipherSuites' 2> /dev/null
oc get openshiftapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.servingInfo.cipherSuites' 2> /dev/null
oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq '.servingInfo.cipherSuites' 2> /dev/null
#check value for tlsSecurityProfile; null is returned if default is used
oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.tlsSecurityProfile 2> /dev/null
type: manual
remediation: |
Follow the directions above and in the OpenShift documentation to configure the tlsSecurityProfile.
Configuring Ingress. https://docs.openshift.com/container-platform/4.15/networking/ingress-operator.html#nw-ingress-controller-configuration-parameters_configuring-ingress
Please reference the OpenShift TLS security profile documentation for more detail on each profile.
https://docs.openshift.com/container-platform/4.15/security/tls-security-profiles.html
scored: false
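
Two hedged sketches for the kubelet remediations above. Several checks (4.2.2, 4.2.5, 4.2.6, 4.2.8) point at creating a KubeletConfig CRD; a minimal example for disabling anonymous authentication might look like the following, assuming the worker MachineConfigPool carries the standard pools.operator label (otherwise label the pool and adjust the selector as the linked OpenShift docs describe):

oc apply -f - <<'EOF'
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
  name: disable-anonymous-auth   # name is an arbitrary choice
spec:
  machineConfigPoolSelector:
    matchLabels:
      pools.operator.machineconfiguration.openshift.io/worker: ""
  kubeletConfig:
    authentication:
      anonymous:
        enabled: false
EOF

For 4.2.12, the tlsSecurityProfile mentioned in the remediation can be set on the APIServer and IngressController resources; the field names follow the OpenShift TLS profile docs cited above, so confirm them for your version:

# Apply the predefined Intermediate profile to the API server
oc patch apiserver cluster --type=merge -p '{"spec":{"tlsSecurityProfile":{"type":"Intermediate","intermediate":{}}}}'
# And to the default ingress controller
oc patch ingresscontroller/default -n openshift-ingress-operator --type=merge -p '{"spec":{"tlsSecurityProfile":{"type":"Intermediate","intermediate":{}}}}'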

cfg/rh-1.6/policies.yaml (new file, 347 lines)

@@ -0,0 +1,347 @@
---
controls:
version: rh-1.6
id: 5
text: "Kubernetes Policies"
type: "policies"
groups:
- id: 5.1
text: "RBAC and Service Accounts"
checks:
- id: 5.1.1
text: "Ensure that the cluster-admin role is only used where required (Manual)"
type: "manual"
audit: |
#To get a list of users and service accounts with the cluster-admin role
oc get clusterrolebindings -o=custom-columns="NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind" | grep cluster-admin
#To verify that kubeadmin is removed, no results should be returned
oc get secrets kubeadmin -n kube-system
remediation: |
Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
if they need this role or if they could use a role with fewer privileges.
Where possible, first bind users to a lower privileged role and then remove the
clusterrolebinding to the cluster-admin role :
oc delete clusterrolebinding [name]
scored: false
- id: 5.1.2
text: "Minimize access to secrets (Manual)"
type: "manual"
remediation: |
Where possible, remove get, list and watch access to secret objects in the cluster.
scored: false
- id: 5.1.3
text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
type: "manual"
audit: |
# needs verification
# Run the command below to describe each cluster role and inspect it for wildcard usage
oc describe clusterrole
# Run the command below to describe each role and inspect it for wildcard usage
oc describe role -A
remediation: |
Where possible replace any use of wildcards in clusterroles and roles with specific
objects or actions.
scored: false
- id: 5.1.4
text: "Minimize access to create pods (Manual)"
type: "manual"
audit: |
# needs verification
# Review the users who have create access to pod objects in the Kubernetes API
oc adm policy who-can create pod
remediation: |
Where possible, remove create access to pod objects in the cluster.
scored: false
- id: 5.1.5
text: "Ensure that default service accounts are not actively used. (Manual)"
type: "manual"
remediation: |
None required.
scored: false
- id: 5.1.6
text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
type: "manual"
audit: |
# needs verification
# Find all pods that automatically mount service account tokens
oc get pods -A -o json | jq '.items[] | select(.spec.automountServiceAccountToken) | .metadata.name'
# Find all service accounts that automatically mount service tokens
oc get serviceaccounts -A -o json | jq '.items[] | select(.automountServiceAccountToken) | .metadata.name'
remediation: |
Modify the definition of pods and service accounts which do not need to mount service
account tokens to disable it.
scored: false
- id: 5.2
text: "Pod Security Policies"
checks:
- id: 5.2.1
text: "Minimize the admission of privileged containers (Manual)"
audit: |
# needs verification
oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegedContainer:.allowPrivilegedContainer
tests:
test_items:
- flag: "false"
remediation: |
Create an SCC that sets allowPrivilegedContainer to false and take it into use by
assigning it to applicable users and groups.
scored: false
- id: 5.2.2
text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
audit: |
oc get scc -o=custom-columns=NAME:.metadata.name,allowHostPID:.allowHostPID
tests:
test_items:
- flag: "false"
remediation: |
Create an SCC that sets allowHostPID to false and take it into use by assigning it to
applicable users and groups.
scored: false
- id: 5.2.3
text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
audit: |
oc get scc -o=custom-columns=NAME:.metadata.name,allowHostIPC:.allowHostIPC
tests:
test_items:
- flag: "false"
remediation: |
Create an SCC that sets allowHostIPC to false and take it into use by assigning it to
applicable users and groups.
scored: false
- id: 5.2.4
text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
audit: |
oc get scc -o=custom-columns=NAME:.metadata.name,allowHostNetwork:.allowHostNetwork
tests:
test_items:
- flag: "false"
remediation: |
Create an SCC that sets allowHostNetwork to false and take it into use by assigning
it to applicable users and groups.
scored: false
- id: 5.2.5
text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
audit: |
oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegeEscalation:.allowPrivilegeEscalation
tests:
test_items:
- flag: "false"
remediation: |
Create an SCC that sets allowPrivilegeEscalation to false and take it into use by
assigning it to applicable users and groups.
scored: false
- id: 5.2.6
text: "Minimize the admission of root containers (Manual)"
audit: |
# needs verification
oc get scc -o=custom-columns=NAME:.metadata.name,runAsUser:.runAsUser.type
#For SCCs with MustRunAs verify that the range of UIDs does not include 0
oc get scc -o=custom-columns=NAME:.metadata.name,uidRangeMin:.runAsUser.uidRangeMin,uidRangeMax:.runAsUser.uidRangeMax
tests:
bin_op: or
test_items:
- flag: "MustRunAsNonRoot"
- flag: "MustRunAs"
compare:
op: nothave
value: 0
remediation: |
None required. By default, OpenShift includes the nonroot and nonroot-v2 SCCs that
restrict the ability to run as nonroot. If additional SCCs are appropriate, follow the
OpenShift documentation to create custom SCCs.
scored: false
- id: 5.2.7
text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
audit: |
# needs verification
oc get scc -o=custom-columns=NAME:.metadata.name,requiredDropCapabilities:.requiredDropCapabilities
tests:
bin_op: or
test_items:
- flag: "ALL"
- flag: "NET_RAW"
remediation: |
Create an SCC that sets requiredDropCapabilities to include ALL or at least
NET_RAW and take it into use by assigning it to applicable users and groups.
scored: false
- id: 5.2.8
text: "Minimize the admission of containers with added capabilities (Manual)"
type: "manual"
audit: |
# needs verification
# List all SCCs that prohibit users from defining container capabilities
oc get scc -A -o json | jq '.items[] | select(.allowedCapabilities==null) | .metadata.name'
# List all SCCs that do not set default container capabilities
oc get scc -A -o json | jq '.items[] | select(.defaultAddCapabilities==null) | .metadata.name'
tests:
test_items:
- flag: "false"
remediation: |
Utilize the restricted-v2 SCC or create an SCC that sets allowedCapabilities and
defaultAddCapabilities to an empty list and take it into use by assigning it to
applicable users and groups.
scored: false
- id: 5.2.9
text: "Minimize the admission of containers with capabilities assigned (Manual)"
type: "manual"
audit: |
# needs verification
# List all SCCs that drop all capabilities from containers
oc get scc -A -o json | jq '.items[] | select(.requiredDropCapabilities[]?|any(. == "ALL"; .)) | .metadata.name'
tests:
test_items:
- flag: "false"
remediation: |
Review the use of capabilities in applications running on your cluster. Where a namespace
contains applications which do not require any Linux capabilities to operate, consider
adding a SCC which forbids the admission of containers which do not drop all capabilities.
scored: false
- id: 5.2.10
text: "Minimize access to privileged Security Context Constraints (Manual)"
type: "manual"
audit: |
# needs verification
# All users and groups with access to SCCs that include privileged or elevated capabilities.
oc get scc -ojson | jq '.items[]|select(.allowHostIPC or .allowHostPID or .allowHostPorts
or .allowHostNetwork or .allowHostDirVolumePlugin
or .allowPrivilegedContainer or .runAsUser.type != "MustRunAsRange") |
.metadata.name,{"Group:":.groups},{"User":.users}'
tests:
test_items:
- flag: "false"
remediation: |
Remove any users and groups who do not need access to an SCC, following the principle of least privilege.
You can remove users and groups from an SCC using the oc edit scc $NAME command.
Additionally, you can create your own SCCs that contain the container functionality you
need for a particular use case and assign that SCC to users and groups if the default
SCCs are not appropriate for your use case.
scored: false
- id: 5.3
text: "Network Policies and CNI"
checks:
- id: 5.3.1
text: "Ensure that the CNI in use supports Network Policies (Manual)"
type: "manual"
remediation: |
None required. This will depend on the CNI plugin in use.
scored: false
- id: 5.3.2
text: "Ensure that all Namespaces have Network Policies defined (Manual)"
type: "manual"
audit: |
#Run the following command and review the NetworkPolicy objects created in the cluster.
oc get networkpolicy -A
remediation: |
Follow the documentation and create NetworkPolicy objects as you need them.
scored: false
- id: 5.4
text: "Secrets Management"
checks:
- id: 5.4.1
text: "Prefer using secrets as files over secrets as environment variables (Manual)"
type: "manual"
audit: |
#Run the following command to find references to objects which use environment variables defined from secrets.
oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}' -A
remediation: |
If possible, rewrite application code to read secrets from mounted secret files, rather than
from environment variables.
scored: false
- id: 5.4.2
text: "Consider external secret storage (Manual)"
type: "manual"
remediation: |
Refer to the secrets management options offered by your cloud provider or a third-party
secrets management solution.
scored: false
- id: 5.5
text: "Extensible Admission Control"
checks:
- id: 5.5.1
text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
type: "manual"
audit: |
# needs verification
oc get image.config.openshift.io/cluster -o json | jq .spec.registrySources
remediation: |
Follow the OpenShift documentation for Image Configuration resources: https://docs.openshift.com/container-platform/4.15/openshift_images/image-configuration.html
scored: false
- id: 5.7
text: "General Policies"
checks:
- id: 5.7.1
text: "Create administrative boundaries between resources using namespaces (Manual)"
type: "manual"
audit: |
#Run the following command and review the namespaces created in the cluster.
oc get namespaces
#Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.
oc get namespaces -o json | jq '.items[] | select(.metadata.name|test("(?!default|kube-.|openshift|openshift-.)^.*")) | .metadata.name'
remediation: |
Follow the documentation and create namespaces for objects in your deployment as you need them.
scored: false
- id: 5.7.2
text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)"
type: "manual"
audit: |
oc get pods -A -o json | jq '.items[] | select( (.metadata.namespace | test("^kube*|^openshift*") | not)
and .spec.securityContext.seccompProfile.type==null) |
(.metadata.namespace + "/" + .metadata.name)'
remediation: |
For any non-privileged pods or containers that do not have seccomp profiles, consider
using the RuntimeDefault or creating a custom seccomp profile specifically for the workload.
Please refer to the OpenShift documentation for working with custom seccomp profiles.
https://docs.openshift.com/container-platform/4.15/security/seccomp-profiles.html
scored: false
- id: 5.7.3
text: "Apply Security Context to Your Pods and Containers (Manual)"
type: "manual"
audit: |
# needs verification
# obtain a list of pods that are using privileged security context constraints
oc get pods -A -o json | jq '.items[] | select(.metadata.annotations."openshift.io/scc"|test("privileged"?)) | .metadata.name'
# obtain a list of pods that are not using security context constraints at all
oc get pods -A -o json | jq '.items[] | select(.metadata.annotations."openshift.io/scc" == null) | .metadata.name'
remediation: |
Follow the Kubernetes documentation and apply security contexts to your pods. For a
suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
Containers.
scored: false
- id: 5.7.4
text: "The default namespace should not be used (Manual)"
type: "manual"
audit: |
# Run the following command to list all resources in the default namespace, besides the kubernetes and
# openshift services, which are expected to be in the default namespace
oc get all -n default -o json | jq '.items[] | select((.kind|test("Service"))
and (.metadata.name|test("openshift|kubernetes"))? | not) |
(.kind + "/" + .metadata.name)'
#The only entries there should be system managed resources such as the kubernetes and openshift service
remediation: |
Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
resources and that all new resources are created in a specific namespace.
scored: false
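
Two hedged sketches tied to the remediations above. For 5.2.10, access to an SCC can be adjusted with the oc adm policy helpers (the user, service account and namespace names below are placeholders):

# Remove a user's direct access to the privileged SCC
oc adm policy remove-scc-from-user privileged example-user
# Grant a workload's service account a more restrictive SCC instead
oc adm policy add-scc-to-user restricted-v2 -z example-sa -n example-namespace

For 5.3.2, a per-namespace default-deny ingress policy is the usual starting point (the namespace name is a placeholder):

oc apply -n example-namespace -f - <<'EOF'
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
spec:
  podSelector: {}     # selects every pod in the namespace
  policyTypes:
    - Ingress         # no ingress rules, so all ingress is denied
EOF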


@@ -250,6 +250,7 @@ func TestMapToCISVersion(t *testing.T) {
{kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"},
{kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"},
{kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"},
{kubeVersion: "ocp-4.15", succeed: true, exp: "rh-1.6"},
{kubeVersion: "unknown", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: unknown"},
}
for _, c := range cases {


@@ -541,6 +541,8 @@ func getPlatformBenchmarkVersion(platform Platform) string {
return "rh-0.7"
case "4.1":
return "rh-1.0"
case "4.15":
return "rh-1.6"
}
case "vmware":
return "tkgi-1.2.53"
@@ -619,7 +621,7 @@ func getOcpValidVersion(ocpVer string) (string, error) {
for !isEmpty(ocpVer) {
glog.V(3).Info(fmt.Sprintf("getOcpBenchmarkVersion check for ocp: %q \n", ocpVer))
if ocpVer == "3.10" || ocpVer == "4.1" {
if ocpVer == "4.15" || ocpVer == "4.1" || ocpVer == "3.10" {
glog.V(1).Info(fmt.Sprintf("getOcpBenchmarkVersion found valid version for ocp: %q \n", ocpVer))
return ocpVer, nil
}
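
Because of the fallback loop above, OCP minor versions newer than 4.15 (for example 4.16) should walk back to 4.15 and therefore select rh-1.6 during platform auto-detection. A hedged sanity check from a cluster node (binary path is an assumption):

# No explicit --version/--benchmark: platform detection should land on rh-1.6 for OCP 4.15+
./kube-bench run --targets master,controlplane,node,etcd,policies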


@@ -713,6 +713,13 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
},
want: "rh-1.0",
},
{
name: "openshift4_15",
args: args{
platform: Platform{Name: "ocp", Version: "4.15"},
},
want: "rh-1.6",
},
{
name: "k3s",
args: args{
@@ -763,6 +770,7 @@ func Test_getOcpValidVersion(t *testing.T) {
{openShiftVersion: "4.1", succeed: true, exp: "4.1"},
{openShiftVersion: "4.5", succeed: true, exp: "4.1"},
{openShiftVersion: "4.6", succeed: true, exp: "4.1"},
{openShiftVersion: "4.16", succeed: true, exp: "4.15"},
{openShiftVersion: "invalid", succeed: false, exp: ""},
}
for _, c := range cases {


@@ -23,7 +23,7 @@ The following table shows the valid targets based on the CIS Benchmark version.
| cis-1.7 | master, controlplane, node, etcd, policies |
| cis-1.8 | master, controlplane, node, etcd, policies |
| cis-1.9 | master, controlplane, node, etcd, policies |
| cis-1.10 | master, controlplane, node, etcd, policies |
| cis-1.10 | master, controlplane, node, etcd, policies |
| gke-1.0 | master, controlplane, node, etcd, policies, managedservices |
| gke-1.2.0 | controlplane, node, policies, managedservices |
| gke-1.6.0 | controlplane, node, policies, managedservices |
@@ -36,6 +36,7 @@ The following table shows the valid targets based on the CIS Benchmark version.
| aks-1.7 | controlplane, node, policies, managedservices |
| rh-0.7 | master,node|
| rh-1.0 | master, controlplane, node, etcd, policies |
| rh-1.6 | master, controlplane, node, etcd, policies |
| cis-1.6-k3s | master, controlplane, node, etcd, policies |
| cis-1.24-microk8s | master, controlplane, node, etcd, policies |


@@ -32,7 +32,8 @@ Some defined by other hardening guides.
| CIS | [AKS](https://workbench.cisecurity.org/benchmarks/20359) | aks-1.7 | AKS |
| RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 |
| CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- |
| CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1-4.14 |
| CIS | [OCP4 1.6.0](https://workbench.cisecurity.org/benchmarks/16094) | rh-1.6 | OCP 4.15- |
| CIS | [1.6.0-k3s](https://docs.rancher.cn/docs/k3s/security/self-assessment/_index) | cis-1.6-k3s | k3s v1.16-v1.24 |
| DISA | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS |
| CIS | [TKGI 1.2.53](https://network.pivotal.io/products/p-compliance-scanner#/releases/1248397) | tkgi-1.2.53 | vmware |


@@ -132,9 +132,10 @@ docker push <AWS_ACCT_NUMBER>.dkr.ecr.<AWS_REGION>.amazonaws.com/k8s/kube-bench:
### Running on OpenShift
| OpenShift Hardening Guide | kube-bench config |
| ------------------------- | ----------------- |
|---------------------------|-------------------|
| ocp-3.10 + | rh-0.7 |
| ocp-4.1 + | rh-1.0 |
| ocp-4.1-4.14 | rh-1.0 |
| ocp-4.15 + | rh-1.6 |
kube-bench includes a set of test files for Red Hat's OpenShift hardening guide for OCP 3.10 and 4.1. To run this you will need to specify `--benchmark rh-0.7`, or `--version ocp-3.10`, or `--version ocp-4.5`, or `--benchmark rh-1.0`.

job-ocp.yaml (new file, 191 lines)

@@ -0,0 +1,191 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-bench
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: kube-bench
name: kube-bench-sa
namespace: kube-bench
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kube-bench
name: kube-bench-cluster-role
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- 'get'
- 'list'
- apiGroups:
- '*'
resources:
- 'pods/exec'
verbs:
- 'create'
- apiGroups:
- '*'
resources:
- 'pods'
- 'namespaces'
verbs:
- 'create'
- 'delete'
- 'watch'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: kube-bench
name: kube-bench-cluster-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-bench-cluster-role
subjects:
- kind: ServiceAccount
name: kube-bench-sa
# The namespace must be specified here; it is not taken from the kubeconfig file.
namespace: kube-bench
# For OpenShift, the oc CLI inside the kube-bench pod creates temporary namespaces to run debug pods for the CIS checks,
# so the service account needs privileged access.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: kube-bench
name: kube-bench-privileged
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:openshift:scc:privileged
subjects:
- kind: ServiceAccount
name: kube-bench-sa
namespace: kube-bench
---
apiVersion: batch/v1
kind: Job
metadata:
name: kube-bench
namespace: kube-bench
spec:
template:
metadata:
labels:
app: kube-bench
spec:
serviceAccountName: kube-bench-sa
automountServiceAccountToken: true
containers:
- command:
[
"kube-bench",
"run",
"--targets",
"master,controlplane,node,etcd,policies",
"--benchmark",
"rh-1.6",
]
image: docker.io/aquasec/kube-bench:latest
name: kube-bench
volumeMounts:
- name: var-lib-cni
mountPath: /var/lib/cni
readOnly: true
- mountPath: /var/lib/etcd
name: var-lib-etcd
readOnly: true
- mountPath: /var/lib/kubelet
name: var-lib-kubelet
readOnly: true
- mountPath: /var/lib/kube-scheduler
name: var-lib-kube-scheduler
readOnly: true
- mountPath: /var/lib/kube-controller-manager
name: var-lib-kube-controller-manager
readOnly: true
- mountPath: /etc/systemd
name: etc-systemd
readOnly: true
- mountPath: /lib/systemd/
name: lib-systemd
readOnly: true
- mountPath: /srv/kubernetes/
name: srv-kubernetes
readOnly: true
- mountPath: /etc/kubernetes
name: etc-kubernetes
readOnly: true
- mountPath: /usr/local/mount-from-host/bin
name: usr-bin
readOnly: true
- mountPath: /etc/cni/net.d/
name: etc-cni-netd
readOnly: true
- mountPath: /opt/cni/bin/
name: opt-cni-bin
readOnly: true
- name: etc-passwd
mountPath: /etc/passwd
readOnly: true
- name: etc-group
mountPath: /etc/group
readOnly: true
hostPID: true
restartPolicy: Never
volumes:
- name: var-lib-cni
hostPath:
path: /var/lib/cni
- hostPath:
path: /var/lib/etcd
name: var-lib-etcd
- hostPath:
path: /var/lib/kubelet
name: var-lib-kubelet
- hostPath:
path: /var/lib/kube-scheduler
name: var-lib-kube-scheduler
- hostPath:
path: /var/lib/kube-controller-manager
name: var-lib-kube-controller-manager
- hostPath:
path: /etc/systemd
name: etc-systemd
- hostPath:
path: /lib/systemd
name: lib-systemd
- hostPath:
path: /srv/kubernetes
name: srv-kubernetes
- hostPath:
path: /etc/kubernetes
name: etc-kubernetes
- hostPath:
path: /usr/bin
name: usr-bin
- hostPath:
path: /etc/cni/net.d/
name: etc-cni-netd
- hostPath:
path: /opt/cni/bin/
name: opt-cni-bin
- hostPath:
path: "/etc/passwd"
name: etc-passwd
- hostPath:
path: "/etc/group"
name: etc-group
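
A hedged sketch of running this job and reading the results (resource names match the manifest above; the log retrieval assumes a single successful pod):

# Create the namespace, RBAC objects and job
oc apply -f job-ocp.yaml
# Wait for the run to finish, then read the report
oc wait --for=condition=complete job/kube-bench -n kube-bench --timeout=10m
oc logs job/kube-bench -n kube-bench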