Mirror of https://github.com/aquasecurity/kube-bench.git

Add support for Redhat openshift 4.0 cis 1.1.0 (#860)

Yoav Rotem 2021-04-29 17:08:41 +03:00 committed by GitHub
parent d528400881
commit 68c2ee2ebf
14 changed files with 2267 additions and 28 deletions

cfg/rh-1.0/config.yaml (new file, 2 lines)

@@ -0,0 +1,2 @@
---
## Version-specific settings that override the values in cfg/config.yaml
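This file intentionally carries no overrides yet, so kube-bench falls back to the defaults in cfg/config.yaml. Purely as a hypothetical sketch (the keys mirror cfg/config.yaml, and the path below is an illustrative assumption, not a value from this commit), a platform-specific override could look like:

# Hypothetical override sketch, not part of this commit
node:
  kubelet:
    confs:
      - /etc/kubernetes/kubelet.conf   # assumed OpenShift kubelet config path
    defaultconf: /etc/kubernetes/kubelet.conf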

cfg/rh-1.0/controlplane.yaml (new file, 62 lines)

@@ -0,0 +1,62 @@
---
controls:
version: rh-1.0
id: 3
text: "Control Plane Configuration"
type: "controlplane"
groups:
- id: 3.1
text: "Authentication and Authorization"
checks:
- id: 3.1.1
text: "Client certificate authentication should not be used for users (Manual)"
audit: |
# To verify user authentication is enabled
oc describe authentication
# To verify that an identity provider is configured
oc get identity
# To verify that a custom cluster-admin user exists
oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User
# To verify that kubeadmin is removed, no results should be returned
oc get secrets kubeadmin -n kube-system
type: manual
remediation: |
Configure an identity provider for the OpenShift cluster.
See "Understanding identity provider configuration" in the OpenShift Container
Platform 4.5 Authentication documentation. Once an identity provider has been defined,
you can use RBAC to define and apply permissions.
After you define an identity provider and create a new cluster-admin user,
remove the kubeadmin user to improve cluster security.
scored: false
- id: 3.2
text: "Logging"
checks:
- id: 3.2.1
text: "Ensure that a minimal audit policy is created (Manual)"
audit: |
#To view kube apiserver log files
oc adm node-logs --role=master --path=kube-apiserver/
#To view openshift apiserver log files
oc adm node-logs --role=master --path=openshift-apiserver/
#To verify kube apiserver audit config
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]'
#To verify openshift apiserver audit config
oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]'
type: manual
remediation: |
No remediation required.
scored: false
- id: 3.2.2
text: "Ensure that the audit policy covers key security concerns (Manual)"
audit: |
#To verify kube apiserver audit config
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]'
#To verify openshift apiserver audit config
oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]'
type: manual
remediation: |
In OpenShift 4.6 and higher, if appropriate for your needs,
modify the audit policy.
scored: false
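As background for the 3.1.1 remediation above: defining an identity provider means updating the cluster OAuth resource, after which the kubeadmin user can be removed. A minimal sketch, assuming an htpasswd Secret named htpass-secret already exists in the openshift-config namespace (that name is an assumption, not part of this commit):

# Illustrative sketch only; the Secret name is assumed
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
    - name: local_htpasswd
      mappingMethod: claim
      type: HTPasswd
      htpasswd:
        fileData:
          name: htpass-secret   # Secret in openshift-config holding the htpasswd file

Once a cluster-admin user authenticates through the new provider and kubeadmin is deleted, the audit's final oc get secrets kubeadmin -n kube-system probe returns no results.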

cfg/rh-1.0/etcd.yaml (new file, 154 lines)

@@ -0,0 +1,154 @@
---
controls:
version: rh-1.0
id: 2
text: "Etcd Node Configuration"
type: "etcd"
groups:
- id: 2
text: "Etcd Node Configuration Files"
checks:
- id: 2.1
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)"
audit: |
# For --cert-file
for i in $(oc get pods -oname -n openshift-etcd)
do
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/'
done 2>/dev/null
# For --key-file
for i in $(oc get pods -oname -n openshift-etcd)
do
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/'
done 2>/dev/null
use_multiple_values: true
tests:
test_items:
- flag: "file"
compare:
op: regex
value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-serving\/etcd-serving-.*\.(?:crt|key)'
remediation: |
OpenShift does not use the etcd-certfile or etcd-keyfile flags.
Certificates for etcd are managed by the etcd cluster operator.
scored: false
- id: 2.2
text: "Ensure that the --client-cert-auth argument is set to true (Manual)"
audit: |
for i in $(oc get pods -oname -n openshift-etcd)
do
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/'
done 2>/dev/null
use_multiple_values: true
tests:
test_items:
- flag: "--client-cert-auth"
compare:
op: eq
value: true
remediation: |
This setting is managed by the cluster etcd operator. No remediation required."
scored: false
- id: 2.3
text: "Ensure that the --auto-tls argument is not set to true (Manual)"
audit: |
# Returns 0 if found, 1 if not found
for i in $(oc get pods -oname -n openshift-etcd)
do
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --auto-tls=true 2>&1>/dev/null ; echo exit_code=$?
done 2>/dev/null
use_multiple_values: true
tests:
test_items:
- flag: "exit_code"
compare:
op: eq
value: "1"
remediation: |
This setting is managed by the cluster etcd operator. No remediation required.
scored: false
- id: 2.4
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)"
audit: |
# For --peer-cert-file
for i in $(oc get pods -oname -n openshift-etcd)
do
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/'
done 2>/dev/null
# For --peer-key-file
for i in $(oc get pods -oname -n openshift-etcd)
do
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/'
done 2>/dev/null
use_multiple_values: true
tests:
test_items:
- flag: "file"
compare:
op: regex
value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-peer\/etcd-peer-.*\.(?:crt|key)'
remediation: |
None. This configuration is managed by the etcd operator.
scored: false
- id: 2.5
text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)"
audit: |
for i in $(oc get pods -oname -n openshift-etcd)
do
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/'
done 2>/dev/null
use_multiple_values: true
tests:
test_items:
- flag: "--peer-client-cert-auth"
compare:
op: eq
value: true
remediation: |
This setting is managed by the cluster etcd operator. No remediation required.
scored: false
- id: 2.6
text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)"
audit: |
# Returns 0 if found, 1 if not found
for i in $(oc get pods -oname -n openshift-etcd)
do
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>&1>/dev/null ; echo exit_code=$?
done 2>/dev/null
use_multiple_values: true
tests:
test_items:
- flag: "exit_code"
compare:
op: eq
value: "1"
remediation: |
This setting is managed by the cluster etcd operator. No remediation required.
scored: false
- id: 2.7
text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
audit: |
for i in $(oc get pods -oname -n openshift-etcd)
do
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/'
done 2>/dev/null
for i in $(oc get pods -oname -n openshift-etcd)
do
oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/'
done 2>/dev/null
use_multiple_values: true
tests:
test_items:
- flag: "file"
compare:
op: regex
value: '\/etc\/kubernetes\/static-pod-certs\/configmaps\/etcd-(?:serving|peer-client)-ca\/ca-bundle\.(?:crt|key)'
remediation: |
None required. Certificates for etcd are managed by the OpenShift cluster etcd operator.
scored: false

cfg/rh-1.0/master.yaml (new file, 1262 lines; diff not shown because it is too large)

cfg/rh-1.0/node.yaml (new file, 453 lines)

@@ -0,0 +1,453 @@
---
controls:
version: rh-1.0
id: 4
text: "Worker Node Security Configuration"
type: "node"
groups:
- id: 4.1
text: "Worker Node Configuration Files"
checks:
- id: 4.1.1
text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)"
audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/systemd/system/kubelet.service
done 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
By default, the kubelet service file has permissions of 644.
scored: true
- id: 4.1.2
text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
audit: |
# Should return root:root for each node
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/systemd/system/kubelet.service
done 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: root:root
remediation: |
By default, the kubelet service file has ownership of root:root.
scored: true
- id: 4.1.3
text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)"
audit: |
for i in $(oc get pods -n openshift-sdn -l app=sdn -oname)
do
oc exec -n openshift-sdn $i -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml
done 2> /dev/null
use_multiple_values: true
tests:
bin_op: or
test_items:
- flag: "permissions"
set: true
compare:
op: bitmask
value: "644"
remediation: |
None needed.
scored: false
- id: 4.1.4
text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)"
audit: |
for i in $(oc get pods -n openshift-sdn -l app=sdn -oname)
do
oc exec -n openshift-sdn $i -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml
done 2> /dev/null
use_multiple_values: true
tests:
bin_op: or
test_items:
- flag: root:root
remediation: |
None required. The configuration is managed by OpenShift operators.
scored: false
- id: 4.1.5
text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)"
audit: |
# Check permissions
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/kubernetes/kubelet.conf
done 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
None required.
scored: false
- id: 4.1.6
text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)"
audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/kubernetes/kubelet.conf
done 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: root:root
remediation: |
None required.
scored: false
- id: 4.1.7
text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)"
audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/kubernetes/kubelet-ca.crt
done 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
None required.
scored: true
- id: 4.1.8
text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/kubernetes/kubelet-ca.crt
done 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: root:root
remediation: |
None required.
scored: true
- id: 4.1.9
text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)"
audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /var/lib/kubelet/kubeconfig
done 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
None required.
scored: true
- id: 4.1.10
text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /var/lib/kubelet/kubeconfig
done 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: root:root
remediation: |
None required.
scored: true
- id: 4.2
text: "Kubelet"
checks:
- id: 4.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host grep -B4 -A1 anonymous: /etc/kubernetes/kubelet.conf
done
use_multiple_values: true
tests:
test_items:
- flag: "enabled: true"
set: false
remediation: |
Follow the instructions in the documentation to create a Kubelet config CRD
and set anonymous-auth to false.
scored: true
- id: 4.2.2
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
type: manual
# Takes a lot of time for the connection to fail and time out
audit: |
POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
TOKEN=$(oc whoami -t)
for name in $(oc get nodes -ojsonpath='{.items[*].metadata.name}')
do
oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$name/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode'
done
use_multiple_values: true
tests:
test_items:
- flag: "Connection timed out"
remediation: |
None required. Unauthenticated/Unauthorized users have no access to OpenShift nodes.
scored: true
- id: 4.2.3
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host grep clientCAFile: /etc/kubernetes/kubelet.conf
done 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "clientCAFile"
compare:
op: eq
value: "/etc/kubernetes/kubelet-ca.crt"
remediation: |
None required. Changing the clientCAFile value is unsupported.
scored: true
- id: 4.2.4
text: "Verify that the read only port is not used or is set to 0 (Automated)"
audit: |
echo `oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o yaml | grep --color read-only-port` 2> /dev/null
echo `oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "read-only-port"` 2> /dev/null
tests:
bin_op: or
test_items:
- flag: "read-only-port"
compare:
op: has
value: "[\"0\"]"
- flag: "read-only-port"
set: false
remediation: |
In earlier versions of OpenShift 4, the read-only-port argument is not used.
Follow the instructions in the documentation to create a Kubelet config CRD
and set --read-only-port to 0.
scored: true
- id: 4.2.5
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
audit: |
# Should return 1 for each node
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout
echo exit_code=$?
done 2>/dev/null
# Should return 1 for each node
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf
echo exit_code=$?
done 2>/dev/null
use_multiple_values: true
tests:
bin_op: or
test_items:
- flag: --streaming-connection-idle-timeout
compare:
op: noteq
value: 0
- flag: "exit_code"
compare:
op: eq
value: 1
remediation: |
Follow the instructions in the documentation to create a Kubelet config CRD and set
the --streaming-connection-idle-timeout to the desired value. Do not set the value to 0.
scored: true
- id: 4.2.6
text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)"
audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}');
do
oc debug node/${node} -- chroot /host more /etc/kubernetes/kubelet.conf;
done
tests:
test_items:
- flag: protectKernelDefaults
set: false
remediation: |
None required. The OpenShift 4 kubelet modifies the system tunables;
using the protect-kernel-defaults flag will cause the kubelet to fail on start if the tunables
don't match the kubelet configuration, and the OpenShift node will fail to start.
scored: false
- id: 4.2.7
text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual)"
audit: |
/bin/bash
flag=make-iptables-util-chains
opt=makeIPTablesUtilChains
# look at each machineconfigpool
while read -r pool nodeconfig; do
# true by default
value='true'
# first look for the flag
oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.systemd[][] | select(.name=="kubelet.service") | .contents' | sed -n "/^ExecStart=/,/^\$/ { /^\\s*--$flag=false/ q 100 }"
# if the above command exited with 100, the flag was false
[ $? == 100 ] && value='false'
# now look in the yaml KubeletConfig
yamlconfig=$(oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.storage.files[] | select(.path=="/etc/kubernetes/kubelet.conf") | .contents.source ' | sed 's/^data:,//' | while read; do echo -e ${REPLY//%/\\x}; done)
echo "$yamlconfig" | sed -n "/^$opt:\\s*false\\s*$/ q 100"
[ $? == 100 ] && value='false'
echo "Pool $pool has $flag ($opt) set to $value"
done < <(oc get machineconfigpools -o json | jq -r '.items[] | select(.status.machineCount>0) | .metadata.name + " " + .spec.configuration.name')
use_multiple_values: true
tests:
test_items:
- flag: "set to true"
remediation: |
None required. The --make-iptables-util-chains argument is set to true by default.
scored: false
- id: 4.2.8
text: "Ensure that the --hostname-override argument is not set (Manual)"
audit: |
echo `oc get machineconfig 01-worker-kubelet -o yaml | grep hostname-override`
echo `oc get machineconfig 01-master-kubelet -o yaml | grep hostname-override`
tests:
test_items:
- flag: hostname-override
set: false
remediation: |
By default, the --hostname-override argument is not set.
scored: false
- id: 4.2.9
text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Automated)"
audit: |
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}');
do
oc debug node/${node} -- chroot /host more /etc/kubernetes/kubelet.conf;
done
oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
type: "manual"
remediation: |
Follow the documentation to edit kubelet parameters
https://docs.openshift.com/container-platform/4.5/scalability_and_performance/recommended-host-practices.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters
KubeAPIQPS: <QPS>
scored: true
- id: 4.2.10
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
audit: |
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo'
tests:
bin_op: and
test_items:
- flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt"
- flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key"
remediation: |
OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
This is not configurable.
scored: true
- id: 4.2.11
text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
audit: |
#Verify the rotateKubeletClientCertificate feature gate is not set to false
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate
done 2> /dev/null
# Verify the rotateCertificates argument is set to true
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf;
done 2> /dev/null
use_multiple_values: true
tests:
bin_op: or
test_items:
- flag: rotateCertificates
compare:
op: eq
value: true
- flag: rotateKubeletClientCertificates
compare:
op: noteq
value: false
- flag: rotateKubeletClientCertificates
set: false
remediation: |
None required.
scored: false
- id: 4.2.12
text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
audit: |
#Verify the rotateKubeletServerCertificate feature gate is on
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}');
do
oc debug node/${node} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf;
done 2> /dev/null
# Verify the rotateCertificates argument is set to true
for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
do
oc debug node/${node} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf;
done 2> /dev/null
use_multiple_values: true
tests:
bin_op: or
test_items:
- flag: RotateKubeletServerCertificate
compare:
op: eq
value: true
- flag: rotateCertificates
compare:
op: eq
value: true
remediation: |
By default, kubelet server certificate rotation is disabled.
scored: false
- id: 4.2.13
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
audit: |
# needs verification
# verify cipher suites
oc describe --namespace=openshift-ingress-operator ingresscontroller/default
oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
oc get openshiftapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
#check value for tlsSecurityProfile; null is returned if default is used
oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.tlsSecurityProfile
type: manual
remediation: |
Follow the directions above and the "Configuring Ingress" section of the OpenShift
documentation to configure the tlsSecurityProfile.
scored: false
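Several of the 4.2.x remediations above refer to creating a Kubelet config CRD. As a hedged sketch (the object name, pool label, and values are assumptions, not part of this commit), such a KubeletConfig could look like:

# Illustrative KubeletConfig sketch; selector label and values are assumed
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
  name: custom-kubelet
spec:
  machineConfigPoolSelector:
    matchLabels:
      custom-kubelet: enabled              # the target MachineConfigPool must carry this label
  kubeletConfig:
    streamingConnectionIdleTimeout: 4h0m0s # 4.2.5: keep this non-zero
    kubeAPIQPS: 50                         # 4.2.9: event/API QPS

The Machine Config Operator should then roll these settings out to the nodes in pools matching the selector.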

cfg/rh-1.0/policies.yaml (new file, 283 lines)

@@ -0,0 +1,283 @@
---
controls:
version: rh-1.0
id: 5
text: "Kubernetes Policies"
type: "policies"
groups:
- id: 5.1
text: "RBAC and Service Accounts"
checks:
- id: 5.1.1
text: "Ensure that the cluster-admin role is only used where required (Manual)"
type: "manual"
remediation: |
Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
if they need this role or if they could use a role with fewer privileges.
Where possible, first bind users to a lower privileged role and then remove the
clusterrolebinding to the cluster-admin role:
kubectl delete clusterrolebinding [name]
scored: false
- id: 5.1.2
text: "Minimize access to secrets (Manual)"
type: "manual"
remediation: |
Where possible, remove get, list and watch access to secret objects in the cluster.
scored: false
- id: 5.1.3
text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
type: "manual"
remediation: |
Where possible replace any use of wildcards in clusterroles and roles with specific
objects or actions.
scored: false
- id: 5.1.4
text: "Minimize access to create pods (Manual)"
type: "manual"
remediation: |
Where possible, remove create access to pod objects in the cluster.
scored: false
- id: 5.1.5
text: "Ensure that default service accounts are not actively used. (Manual)"
type: "manual"
remediation: |
None required.
scored: false
- id: 5.1.6
text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
type: "manual"
remediation: |
Modify the definition of pods and service accounts which do not need to mount service
account tokens to disable it.
scored: false
- id: 5.2
text: "Pod Security Policies"
checks:
- id: 5.2.1
text: "Minimize the admission of privileged containers (Manual)"
audit: |
# needs verification
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
do
echo "$i"; oc describe scc $i | grep "Allow Privileged";
done
tests:
test_items:
- flag: "false"
remediation: |
Create a SCC as described in the OpenShift documentation, ensuring that the Allow
Privileged field is set to false.
scored: false
- id: 5.2.2
text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
audit: |
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
do
echo "$i"; oc describe scc $i | grep "Allow Host PID";
done
tests:
test_items:
- flag: "false"
remediation: |
Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host
PID field is set to false.
scored: false
- id: 5.2.3
text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
audit: |
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
do
echo "$i"; oc describe scc $i | grep "Allow Host IPC";
done
tests:
test_items:
- flag: "false"
remediation: |
Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host
IPC field is set to false.
scored: false
- id: 5.2.4
text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
audit: |
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
do
echo "$i"; oc describe scc $i | grep "Allow Host Network";
done
tests:
test_items:
- flag: "false"
remediation: |
Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host
Network field is omitted or set to false.
scored: false
- id: 5.2.5
text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
audit: |
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
do
echo "$i"; oc describe scc $i | grep "Allow Privilege Escalation";
done
tests:
test_items:
- flag: "false"
remediation: |
Create a SCC as described in the OpenShift documentation, ensuring that the Allow
Privilege Escalation field is omitted or set to false.
scored: false
- id: 5.2.6
text: "Minimize the admission of root containers (Manual)"
audit: |
# needs verification
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
do
echo "$i";
oc describe scc $i | grep "Run As User Strategy";
done
#For SCCs with MustRunAs verify that the range of UIDs does not include 0
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
do
echo "$i";
oc describe scc $i | grep "\sUID";
done
tests:
bin_op: or
test_items:
- flag: "MustRunAsNonRoot"
- flag: "MustRunAs"
compare:
op: nothave
value: 0
remediation: |
None required. By default, OpenShift includes the nonroot SCC, whose Run As User
Strategy is set to MustRunAsNonRoot. If additional SCCs are appropriate, follow the
OpenShift documentation to create custom SCCs.
scored: false
- id: 5.2.7
text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
audit: |
# needs verification
for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`;
do
echo "$i";
oc describe scc $i | grep "Required Drop Capabilities";
done
tests:
bin_op: or
test_items:
- flag: "ALL"
- flag: "NET_RAW"
remediation: |
Create a SCC as described in the OpenShift documentation, ensuring that the Required
Drop Capabilities is set to include either NET_RAW or ALL.
scored: false
- id: 5.2.8
text: "Minimize the admission of containers with added capabilities (Manual)"
type: "manual"
remediation: |
Ensure that Allowed Capabilities is set to an empty array for every SCC in the cluster
except for the privileged SCC.
scored: false
- id: 5.2.9
text: "Minimize the admission of containers with capabilities assigned (Manual)"
type: "manual"
remediation: |
Review the use of capabilities in applications running on your cluster. Where a namespace
contains applications which do not require any Linux capabilities to operate, consider
adding an SCC which forbids the admission of containers which do not drop all capabilities.
scored: false
- id: 5.3
text: "Network Policies and CNI"
checks:
- id: 5.3.1
text: "Ensure that the CNI in use supports Network Policies (Manual)"
type: "manual"
remediation: |
None required.
scored: false
- id: 5.3.2
text: "Ensure that all Namespaces have Network Policies defined (Manual)"
type: "manual"
remediation: |
Follow the documentation and create NetworkPolicy objects as you need them.
scored: false
- id: 5.4
text: "Secrets Management"
checks:
- id: 5.4.1
text: "Prefer using secrets as files over secrets as environment variables (Manual)"
type: "manual"
remediation: |
If possible, rewrite application code to read secrets from mounted secret files, rather than
from environment variables.
scored: false
- id: 5.4.2
text: "Consider external secret storage (Manual)"
type: "manual"
remediation: |
Refer to the secrets management options offered by your cloud provider or a third-party
secrets management solution.
scored: false
- id: 5.5
text: "Extensible Admission Control"
checks:
- id: 5.5.1
text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
type: "manual"
remediation: |
Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.5/openshift_images/image-configuration.html)
scored: false
- id: 5.7
text: "General Policies"
checks:
- id: 5.7.1
text: "Create administrative boundaries between resources using namespaces (Manual)"
type: "manual"
remediation: |
Follow the documentation and create namespaces for objects in your deployment as you need
them.
scored: false
- id: 5.7.2
text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)"
type: "manual"
remediation: |
To enable the default seccomp profile, use the reserved value /runtime/default that will
make sure that the pod uses the default policy available on the host.
scored: false
- id: 5.7.3
text: "Apply Security Context to Your Pods and Containers (Manual)"
type: "manual"
remediation: |
Follow the Kubernetes documentation and apply security contexts to your pods. For a
suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
Containers.
scored: false
- id: 5.7.4
text: "The default namespace should not be used (Manual)"
type: "manual"
remediation: |
Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
resources and that all new resources are created in a specific namespace.
scored: false
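For 5.3.2 above, a common starting point is a default-deny ingress policy in each namespace. A minimal sketch (the namespace name is an assumption, not part of this commit):

# Illustrative default-deny policy; apply one per namespace to be isolated
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
  namespace: my-app            # assumed target namespace
spec:
  podSelector: {}              # selects every pod in the namespace
  policyTypes:
    - Ingress                  # no ingress rules are listed, so all ingress is denied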

cmd/common.go

@@ -313,12 +313,12 @@ func loadTargetMapping(v *viper.Viper) (map[string][]string, error) {
return benchmarkVersionToTargetsMap, nil
}
-func getBenchmarkVersion(kubeVersion, benchmarkVersion string, v *viper.Viper) (bv string, err error) {
+func getBenchmarkVersion(kubeVersion, benchmarkVersion, platformName string, v *viper.Viper) (bv string, err error) {
if !isEmpty(kubeVersion) && !isEmpty(benchmarkVersion) {
return "", fmt.Errorf("It is an error to specify both --version and --benchmark flags")
}
-if isEmpty(benchmarkVersion) && isEmpty(kubeVersion) {
-benchmarkVersion = getPlatformBenchmarkVersion(getPlatformName())
+if isEmpty(benchmarkVersion) && isEmpty(kubeVersion) && !isEmpty(platformName){
+benchmarkVersion = getPlatformBenchmarkVersion(platformName)
}
if isEmpty(benchmarkVersion) {

cmd/common_test.go

@@ -322,11 +322,11 @@ func TestGetBenchmarkVersion(t *testing.T) {
t.Fatalf("Unable to load config file %v", err)
}
-type getBenchmarkVersionFnToTest func(kubeVersion, benchmarkVersion string, v *viper.Viper) (string, error)
+type getBenchmarkVersionFnToTest func(kubeVersion, benchmarkVersion, platformName string, v *viper.Viper) (string, error)
-withFakeKubectl := func(kubeVersion, benchmarkVersion string, v *viper.Viper, fn getBenchmarkVersionFnToTest) (string, error) {
+withFakeKubectl := func(kubeVersion, benchmarkVersion, platformName string, v *viper.Viper, fn getBenchmarkVersionFnToTest) (string, error) {
execCode := `#!/bin/sh
-echo '{"serverVersion": {"major": "1", "minor": "15", "gitVersion": "v1.15.10"}}'
+echo '{"serverVersion": {"major": "1", "minor": "18", "gitVersion": "v1.18.10"}}'
`
restore, err := fakeExecutableInPath("kubectl", execCode)
if err != nil {
@@ -334,39 +334,40 @@ func TestGetBenchmarkVersion(t *testing.T) {
}
defer restore()
-return fn(kubeVersion, benchmarkVersion, v)
+return fn(kubeVersion, benchmarkVersion, platformName, v)
}
-withNoPath := func(kubeVersion, benchmarkVersion string, v *viper.Viper, fn getBenchmarkVersionFnToTest) (string, error) {
+withNoPath := func(kubeVersion, benchmarkVersion, platformName string, v *viper.Viper, fn getBenchmarkVersionFnToTest) (string, error) {
restore, err := prunePath()
if err != nil {
t.Fatal("Failed when calling prunePath ", err)
}
defer restore()
-return fn(kubeVersion, benchmarkVersion, v)
+return fn(kubeVersion, benchmarkVersion, platformName, v)
}
-type getBenchmarkVersionFn func(string, string, *viper.Viper, getBenchmarkVersionFnToTest) (string, error)
+type getBenchmarkVersionFn func(string, string, string, *viper.Viper, getBenchmarkVersionFnToTest) (string, error)
cases := []struct {
n string
kubeVersion string
benchmarkVersion string
+platformName string
v *viper.Viper
callFn getBenchmarkVersionFn
exp string
succeed bool
}{
{n: "both versions", kubeVersion: "1.11", benchmarkVersion: "cis-1.3", exp: "cis-1.3", callFn: withNoPath, v: viper.New(), succeed: false},
{n: "no version-missing-kubectl", kubeVersion: "", benchmarkVersion: "", v: viperWithData, exp: "cis-1.6", callFn: withNoPath, succeed: true},
{n: "no version-fakeKubectl", kubeVersion: "", benchmarkVersion: "", v: viperWithData, exp: "cis-1.5", callFn: withFakeKubectl, succeed: true},
{n: "kubeVersion", kubeVersion: "1.15", benchmarkVersion: "", v: viperWithData, exp: "cis-1.5", callFn: withNoPath, succeed: true},
{n: "ocpVersion310", kubeVersion: "ocp-3.10", benchmarkVersion: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
{n: "ocpVersion311", kubeVersion: "ocp-3.11", benchmarkVersion: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
{n: "gke10", kubeVersion: "gke-1.0", benchmarkVersion: "", v: viperWithData, exp: "gke-1.0", callFn: withNoPath, succeed: true},
{n: "both versions", kubeVersion: "1.11", benchmarkVersion: "cis-1.3", platformName: "", exp: "cis-1.3", callFn: withNoPath, v: viper.New(), succeed: false},
{n: "no version-missing-kubectl", kubeVersion: "", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "cis-1.6", callFn: withNoPath, succeed: true},
{n: "no version-fakeKubectl", kubeVersion: "", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "cis-1.6", callFn: withFakeKubectl, succeed: true},
{n: "kubeVersion", kubeVersion: "1.15", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "cis-1.5", callFn: withNoPath, succeed: true},
{n: "ocpVersion310", kubeVersion: "ocp-3.10", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
{n: "ocpVersion311", kubeVersion: "ocp-3.11", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true},
{n: "gke10", kubeVersion: "gke-1.0", benchmarkVersion: "", platformName: "", v: viperWithData, exp: "gke-1.0", callFn: withNoPath, succeed: true},
}
for _, c := range cases {
-rv, err := c.callFn(c.kubeVersion, c.benchmarkVersion, c.v, getBenchmarkVersion)
+rv, err := c.callFn(c.kubeVersion, c.benchmarkVersion, c.platformName, c.v, getBenchmarkVersion)
if c.succeed {
if err != nil {
t.Errorf("[%q]-Unexpected error: %v", c.n, err)

cmd/master.go

@@ -28,7 +28,7 @@ var masterCmd = &cobra.Command{
Short: "Run Kubernetes benchmark checks from the master.yaml file.",
Long: `Run Kubernetes benchmark checks from the master.yaml file in cfg/<version>.`,
Run: func(cmd *cobra.Command, args []string) {
-bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
+bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformName(), viper.GetViper())
if err != nil {
exitWithError(fmt.Errorf("unable to determine benchmark version: %v", err))
}

cmd/node.go

@@ -28,7 +28,7 @@ var nodeCmd = &cobra.Command{
Short: "Run Kubernetes benchmark checks from the node.yaml file.",
Long: `Run Kubernetes benchmark checks from the node.yaml file in cfg/<version>.`,
Run: func(cmd *cobra.Command, args []string) {
-bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
+bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformName(), viper.GetViper())
if err != nil {
exitWithError(fmt.Errorf("unable to determine benchmark version: %v", err))
}

cmd/root.go

@@ -68,7 +68,7 @@ var RootCmd = &cobra.Command{
Short: "Run CIS Benchmarks checks against a Kubernetes deployment",
Long: `This tool runs the CIS Kubernetes Benchmark (https://www.cisecurity.org/benchmark/kubernetes/)`,
Run: func(cmd *cobra.Command, args []string) {
-bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
+bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformName(), viper.GetViper())
if err != nil {
exitWithError(fmt.Errorf("unable to determine benchmark version: %v", err))
}

cmd/run.go

@@ -32,7 +32,7 @@ var runCmd = &cobra.Command{
exitWithError(fmt.Errorf("unable to get `targets` from command line :%v", err))
}
-bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
+bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformName(), viper.GetViper())
if err != nil {
exitWithError(fmt.Errorf("unable to get benchmark version. error: %v", err))
}

cmd/util.go

@@ -460,6 +460,8 @@ func getPlatformBenchmarkVersion(platform string) string {
return "gke-1.0"
case "ocp-3.10":
return "rh-0.7"
case "ocp-4.1":
return "rh-1.0"
}
return ""
}
@@ -475,15 +477,26 @@ func getOpenShiftVersion() string{
if err == nil {
versionRe := regexp.MustCompile(`oc v(\d+\.\d+)`)
subs := versionRe.FindStringSubmatch(string(out))
if len(subs) < 1 {
versionRe = regexp.MustCompile(`Client Version:\s*(\d+\.\d+)`)
subs = versionRe.FindStringSubmatch(string(out))
}
if len(subs) > 1 {
glog.V(2).Infof("OCP output '%s' \nplatform is %s \nocp %v",string(out),getPlatformNameFromVersion(string(out)),subs[1])
ocpBenchmarkVersion, err := getOcpValidVersion(subs[1])
if err == nil{
return fmt.Sprintf("ocp-%s", ocpBenchmarkVersion)
} else {
glog.V(1).Infof("Can't get getOcpValidVersion: %v", err)
}
} else {
glog.V(1).Infof("Can't parse version output: %v", subs)
}
} else {
glog.V(1).Infof("Can't use oc command: %v", err)
}
} else {
glog.V(1).Infof("Can't find oc command: %v", err)
}
return ""
}
@@ -493,7 +506,7 @@ func getOcpValidVersion(ocpVer string) (string, error) {
for (!isEmpty(ocpVer)) {
glog.V(3).Info(fmt.Sprintf("getOcpBenchmarkVersion check for ocp: %q \n", ocpVer))
if ocpVer == "3.10"{
if ocpVer == "3.10" || ocpVer == "4.1"{
glog.V(1).Info(fmt.Sprintf("getOcpBenchmarkVersion found valid version for ocp: %q \n", ocpVer))
return ocpVer, nil
}

cmd/util_test.go

@@ -594,12 +594,19 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
want: "",
},
{
name: "open shift",
name: "openshift3",
args: args{
platform: "ocp-3.10",
},
want: "rh-0.7",
},
+{
+name: "openshift4",
+args: args{
+platform: "ocp-4.1",
+},
+want: "rh-1.0",
+},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -620,18 +627,20 @@ func Test_getOcpValidVersion(t *testing.T) {
{openShiftVersion: "3.11", succeed: true, exp: "3.10"},
{openShiftVersion: "3.10", succeed: true, exp: "3.10"},
{openShiftVersion: "2.9", succeed: false, exp: ""},
{openShiftVersion: "4.1", succeed: false, exp: ""},
{openShiftVersion: "4.1", succeed: true, exp: "4.1"},
{openShiftVersion: "4.5", succeed: true, exp: "4.1"},
{openShiftVersion: "4.6", succeed: true, exp: "4.1"},
{openShiftVersion: "invalid", succeed: false, exp: ""},
}
for _, c := range cases {
ocpVer,_ := getOcpValidVersion(c.openShiftVersion)
if c.succeed {
if c.exp != ocpVer {
t.Fatalf("getOcpValidVersion(%q) - Got %q expected %s", c.openShiftVersion, ocpVer, c.exp)
t.Errorf("getOcpValidVersion(%q) - Got %q expected %s", c.openShiftVersion, ocpVer, c.exp)
}
} else {
if len(ocpVer) > 0 {
t.Fatalf("getOcpValidVersion(%q) - Expected empty string but Got %s", c.openShiftVersion, ocpVer)
t.Errorf("getOcpValidVersion(%q) - Expected empty string but Got %s", c.openShiftVersion, ocpVer)
}
}
}