1
0
mirror of https://github.com/aquasecurity/kube-bench.git synced 2024-12-24 07:28:06 +00:00

Merge branch 'master' into no-master-binaries

This commit is contained in:
Liz Rice 2019-04-24 10:02:32 +01:00 committed by GitHub
commit e5b6603da5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 2709 additions and 1914 deletions

11
Gopkg.lock generated
View File

@ -189,6 +189,17 @@
pruneopts = "UT"
revision = "c95af922eae69f190717a0b7148960af8c55a072"
[[projects]]
digest = "1:e8e3acc03397f71fad44385631e665c639a8d55bd187bcfa6e70b695e3705edd"
name = "k8s.io/client-go"
packages = [
"third_party/forked/golang/template",
"util/jsonpath",
]
pruneopts = "UT"
revision = "e64494209f554a6723674bd494d69445fb76a1d4"
version = "v10.0.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1

View File

@ -25,6 +25,8 @@ kube-bench supports the tests for Kubernetes as defined in the CIS Benchmarks 1.
By default kube-bench will determine the test set to run based on the Kubernetes version running on the machine.
There is also preliminary support for Red Hat's Openshift Hardening Guide for 3.10 and 3.11. Please note that kube-bench does not automatically detect Openshift - see below.
## Installation
You can choose to
@ -47,14 +49,14 @@ You can even use your own configs by mounting them over the default ones in `/op
docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -t -v path/to/my-config.yaml:/opt/kube-bench/cfg/config.yaml aquasec/kube-bench:latest [master|node]
```
> Note: the tests require either the kubelet or kubectl binary in the path in order to know the Kubernetes version. You can pass `-v $(which kubectl):/usr/bin/kubectl` to the above invocations to resolve this.
> Note: the tests require either the kubelet or kubectl binary in the path in order to auto-detect the Kubernetes version. You can pass `-v $(which kubectl):/usr/bin/kubectl` to the above invocations to resolve this.
### Running in a kubernetes cluster
You can run kube-bench inside a pod, but it will need access to the host's PID namespace in order to check the running processes, as well as access to some directories on the host where config files and other files are stored.
Master nodes are automatically detected by kube-bench and will run master checks when possible.
The detection is done by verifying that mandatory components for master are running. (see [config file](#configuration).
The detection is done by verifying that mandatory components for master, as defined in the config files, are running (see [Configuration](#configuration)).
The supplied `job.yaml` file can be applied to run the tests as a job. For example:
@ -72,7 +74,7 @@ NAME READY STATUS RESTARTS AGE
kube-bench-j76s9 0/1 Completed 0 11s
# The results are held in the pod's logs
k logs kube-bench-j76s9
kubectl logs kube-bench-j76s9
[INFO] 1 Master Node Security Configuration
[INFO] 1.1 API Server
...
@ -84,6 +86,15 @@ To run the tests on the master node, the pod needs to be scheduled on that node.
The default labels applied to master nodes has changed since Kubernetes 1.11, so if you are using an older version you may need to modify the nodeSelector and tolerations to run the job on the master node.
### Running in an EKS cluster
There is a `job-eks.yaml` file for running the kube-bench node checks on an EKS cluster. **Note that you must update the image reference in `job-eks.yaml`.** Typically you will push the container image for kube-bench to ECR and refer to it there in the YAML file.
There are two significant differences on EKS:
* It uses [config files in JSON format](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/)
* It's not possible to schedule jobs onto the master node, so master checks can't be performed
### Installing from a container
This command copies the kube-bench binary and configuration files to your host from the Docker container:
@ -112,6 +123,9 @@ go build -o kube-bench .
./kube-bench
```
## Running on OpenShift
kube-bench includes a set of test files for Red Hat's OpenShift hardening guide for OCP 3.10 and 3.11. To run this you will need to specify `--version ocp-3.10` when you run the `kube-bench` command (either directly or through YAML). This config version is valid for OCP 3.10 and 3.11.
## Configuration
@ -190,6 +204,19 @@ tests:
value:
...
```
You can also define jsonpath and yamlpath tests using the following syntax:
```
tests:
- path:
set:
compare:
op:
value:
...
```
Tests have various `operations` which are used to compare the output of audit commands for success.
These operations are:

20
cfg/1.11-json/config.yaml Normal file
View File

@ -0,0 +1,20 @@
---
# Config file for systems such as EKS where config is in JSON files
# Master nodes are controlled by EKS and not user-accessible
node:
kubernetes:
confs:
- "/var/lib/kubelet/kubeconfig"
kubeconfig:
- "/var/lib/kubelet/kubeconfig"
kubelet:
bins:
- "hyperkube kubelet"
- "kubelet"
defaultconf: "/etc/kubernetes/kubelet/kubelet-config.json"
defaultsvc: "/etc/systemd/system/kubelet.service"
defaultkubeconfig: "/var/lib/kubelet/kubeconfig"
proxy:
defaultkubeconfig: "/var/lib/kubelet/kubeconfig"

508
cfg/1.11-json/node.yaml Normal file
View File

@ -0,0 +1,508 @@
---
controls:
version: 1.11
id: 2
text: "Worker Node Security Configuration"
type: "node"
groups:
- id: 2.1
text: "Kubelet"
checks:
- id: 2.1.1
text: "Ensure that the --allow-privileged argument is set to false (Scored)"
audit: "ps -fC $kubeletbin"
tests:
test_items:
- flag: "--allow-privileged"
compare:
op: eq
value: false
set: true
remediation: |
Edit the kubelet service file $kubeletsvc
on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--allow-privileged=false
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.2
text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
audit: "cat $kubeletconf"
tests:
test_items:
- path: "{.authentication.anonymous.enabled}"
compare:
op: eq
value: false
set: true
remediation: |
If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
false .
If using executable arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--anonymous-auth=false
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.3
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
audit: "cat $kubeletconf"
tests:
test_items:
- path: "{.authorization.mode}"
compare:
op: noteq
value: "AlwaysAllow"
set: true
remediation: |
If using a Kubelet config file, edit the file to set authorization: mode to Webhook.
If using executable arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_AUTHZ_ARGS variable.
--authorization-mode=Webhook
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.4
text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
audit: "cat $kubeletconf"
tests:
test_items:
- path: "{.authentication.x509.clientCAFile}"
set: true
remediation: |
If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
the location of the client CA file.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_AUTHZ_ARGS variable.
--client-ca-file=<path/to/client-ca-file>
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.5
text: "Ensure that the --read-only-port argument is set to 0 (Scored)"
audit: "cat $kubeletconf"
tests:
bin_op: or
test_items:
- path: "{.readOnlyPort}"
set: false
- path: "{.readOnlyPort}"
compare:
op: eq
value: "0"
set: true
remediation: |
If using a Kubelet config file, edit the file to set readOnlyPort to 0 .
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--read-only-port=0
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.6
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)"
audit: "cat $kubeletconf"
tests:
bin_op: or
test_items:
- path: "{.streamingConnectionIdleTimeout}"
set: false
- path: "{.streamingConnectionIdleTimeout}"
compare:
op: noteq
value: 0
set: true
remediation: |
If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
value other than 0.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--streaming-connection-idle-timeout=5m
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.7
text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)"
audit: "cat $kubeletconf"
tests:
test_items:
- path: "{.protectKernelDefaults}"
compare:
op: eq
value: true
set: true
remediation: |
If using a Kubelet config file, edit the file to set protectKernelDefaults: true .
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--protect-kernel-defaults=true
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.8
text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored)"
audit: "cat $kubeletconf"
tests:
bin_op: or
test_items:
- path: "{.makeIPTablesUtilChains}"
set: false
- path: "{.makeIPTablesUtilChains}"
compare:
op: eq
value: true
set: true
remediation: |
If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true .
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
remove the --make-iptables-util-chains argument from the
KUBELET_SYSTEM_PODS_ARGS variable.
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.9
text: "Ensure that the --hostname-override argument is not set (Scored)"
audit: "cat $kubeletconf"
tests:
test_items:
- path: "{.hostnameOverride}"
set: false
remediation: |
Edit the kubelet service file $kubeletsvc
on each worker node and remove the --hostname-override argument from the
KUBELET_SYSTEM_PODS_ARGS variable.
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.10
text: "Ensure that the --event-qps argument is set to 0 (Scored)"
audit: "cat $kubeletconf"
tests:
test_items:
- path: "{.eventRecordQPS}"
compare:
op: eq
value: 0
set: true
remediation: |
If using a Kubelet config file, edit the file to set eventRecordQPS: 0 .
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--event-qps=0
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.11
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
audit: "cat $kubeletconf"
tests:
bin_op: and
test_items:
- path: "{.tlsCertFile}"
set: true
- path: "{.tlsPrivateKeyFile}"
set: true
remediation: |
If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate
file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the
corresponding private key file.
If using command line arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
--tls-cert-file=<path/to/tls-certificate-file>
--tls-private-key-file=<path/to/tls-key-file>
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.12
text: "Ensure that the --cadvisor-port argument is set to 0 (Scored)"
audit: "cat $kubeletconf"
tests:
bin_op: or
test_items:
- path: "{.cadvisorPort}"
compare:
op: eq
value: 0
set: true
- path: "{.cadvisorPort}"
set: false
remediation: |
Edit the kubelet service file $kubeletsvc
on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable.
--cadvisor-port=0
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.13
text: "Ensure that the --rotate-certificates argument is not set to false (Scored)"
audit: "cat $kubeletconf"
tests:
bin_op: or
test_items:
- path: "{.rotateCertificates}"
set: false
- path: "{.rotateCertificates}"
compare:
op: noteq
value: "false"
set: true
remediation: |
If using a Kubelet config file, edit the file to add the line rotateCertificates: true.
If using command line arguments, edit the kubelet service file $kubeletsvc
on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable.
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.14
text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
audit: "cat $kubeletconf"
tests:
test_items:
- path: "{.featureGates.RotateKubeletServerCertificate}"
compare:
op: eq
value: true
set: true
remediation: |
Edit the kubelet service file $kubeletsvc
on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
--feature-gates=RotateKubeletServerCertificate=true
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
- id: 2.1.15
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)"
audit: "cat $kubeletconf"
tests:
test_items:
- path: "{.tlsCipherSuites}"
compare:
op: eq
value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
set: true
remediation: |
If using a Kubelet config file, edit the file to set TLSCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
If using executable arguments, edit the kubelet service file $kubeletsvc on each worker node and set the below parameter.
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
scored: false
- id: 2.2
text: "Configuration Files"
checks:
- id: 2.2.1
text: "Ensure that the kubelet.conf file permissions are set to 644 or
more restrictive (Scored)"
audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'"
tests:
bin_op: or
test_items:
- flag: "644"
compare:
op: eq
value: "644"
set: true
- flag: "640"
compare:
op: eq
value: "640"
set: true
- flag: "600"
compare:
op: eq
value: "600"
set: true
remediation: |
Run the below command (based on the file location on your system) on each worker
node. For example,
chmod 644 $kubeletkubeconfig
scored: true
- id: 2.2.2
text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)"
audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'"
tests:
test_items:
- flag: "root:root"
compare:
op: eq
value: root:root
set: true
remediation: |
Run the below command (based on the file location on your system) on each worker
node. For example,
chown root:root $kubeletkubeconfig
scored: true
- id: 2.2.3
text: "Ensure that the kubelet service file permissions are set to 644 or
more restrictive (Scored)"
audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'"
tests:
bin_op: or
test_items:
- flag: "644"
compare:
op: eq
value: 644
set: true
- flag: "640"
compare:
op: eq
value: "640"
set: true
- flag: "600"
compare:
op: eq
value: "600"
set: true
remediation: |
Run the below command (based on the file location on your system) on each worker
node. For example,
chmod 644 $kubeletsvc
scored: true
- id: 2.2.4
text: "Ensure that the kubelet service file ownership is set to root:root (Scored)"
audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'"
tests:
test_items:
- flag: "root:root"
set: true
remediation: |
Run the below command (based on the file location on your system) on each worker
node. For example,
chown root:root $kubeletsvc
scored: true
- id: 2.2.5
text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)"
audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'"
tests:
bin_op: or
test_items:
- flag: "644"
compare:
op: eq
value: "644"
set: true
- flag: "640"
compare:
op: eq
value: "640"
set: true
- flag: "600"
compare:
op: eq
value: "600"
set: true
remediation: |
Run the below command (based on the file location on your system) on each worker
node. For example,
chmod 644 $proxykubeconfig
scored: true
- id: 2.2.6
text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)"
audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'"
tests:
test_items:
- flag: "root:root"
set: true
remediation: |
Run the below command (based on the file location on your system) on each worker
node. For example,
chown root:root $proxykubeconfig
scored: true
- id: 2.2.7
text: "Ensure that the certificate authorities file permissions are set to
644 or more restrictive (Scored)"
type: manual
remediation: |
Run the following command to modify the file permissions of the --client-ca-file
chmod 644 <filename>
scored: true
- id: 2.2.8
text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)"
audit: "/bin/sh -c 'if test -e $ca-file; then stat -c %U:%G $ca-file; fi'"
type: manual
remediation: |
Run the following command to modify the ownership of the --client-ca-file .
chown root:root <filename>
scored: true
- id: 2.2.9
text: "Ensure that the kubelet configuration file ownership is set to root:root (Scored)"
audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'"
tests:
test_items:
- flag: "root:root"
set: true
remediation: |
Run the following command (using the config file location identified in the Audit step)
chown root:root $kubeletconf
scored: true
- id: 2.2.10
text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)"
audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'"
tests:
bin_op: or
test_items:
- flag: "644"
compare:
op: eq
value: "644"
set: true
- flag: "640"
compare:
op: eq
value: "640"
set: true
- flag: "600"
compare:
op: eq
value: "600"
set: true
remediation: |
Run the following command (using the config file location identified in the Audit step)
chmod 644 $kubeletconf
scored: true

View File

@ -1,20 +1,21 @@
---
controls:
version: 1.6
version: 3.10
id: 1
text: "Master Node Security Configuration"
text: "Securing the OpenShift Master"
type: "master"
groups:
- id: 1.1
text: "API Server"
- id: 1
text: "Protecting the API Server"
checks:
- id: 1.1.1
text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
- id: 1.1
text: "Maintain default behavior for anonymous access"
type: "skip"
scored: true
- id: 1.1.2
text: "Ensure that the --basic-auth-file argument is not set (Scored)"
- id: 1.2
text: "Verify that the basic-auth-file method is not enabled"
audit: "grep -A2 basic-auth-file /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -33,13 +34,13 @@ groups:
   - /path/to/any/file
scored: true
- id: 1.1.3
text: "Ensure that the --insecure-allow-any-token argument is not set (Scored)"
- id: 1.3
text: "Insecure Tokens"
type: "skip"
scored: true
- id: 1.1.4
text: "Ensure that the --kubelet-https argument is set to true (Scored)"
- id: 1.4
text: "Secure communications between the API server and master nodes"
audit: "grep -A4 kubeletClientInfo /etc/origin/master/master-config.yaml"
tests:
bin_op: and
@ -80,8 +81,8 @@ groups:
 port: 10250
scored: true
- id: 1.1.5
text: "Ensure that the --insecure-bind-address argument is not set (Scored)"
- id: 1.5
text: "Prevent insecure bindings"
audit: "grep -A2 insecure-bind-address /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -97,8 +98,8 @@ groups:
   - 127.0.0.1
scored: true
- id: 1.1.6
text: "Ensure that the --insecure-port argument is set to 0 (Scored)"
- id: 1.6
text: "Prevent insecure port access"
audit: "grep -A2 insecure-port /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -114,8 +115,8 @@ groups:
 - 0
scored: true
- id: 1.1.7
text: "Ensure that the --secure-port argument is not set to 0 (Scored)"
- id: 1.7
text: "Use Secure Ports for API Server Traffic"
audit: "grep -A2 secure-port /etc/origin/master/master-config.yaml"
tests:
bin_op: or
@ -138,13 +139,13 @@ groups:
 - 8443
scored: true
- id: 1.1.8
text: "Ensure that the --profiling argument is set to false (Scored)"
- id: 1.8
text: "Do not expose API server profiling data"
type: "skip"
scored: true
- id: 1.1.9
text: "Ensure that the --repair-malformed-updates argument is set to false (Scored)"
- id: 1.9
text: "Verify repair-malformed-updates argument for API compatibility"
audit: "grep -A2 repair-malformed-updates /etc/origin/master/master-config.yaml"
tests:
bin_op: or
@ -161,8 +162,8 @@ groups:
and remove the repair-malformed-updates entry or set repair-malformed-updates=true.
scored: true
- id: 1.1.10
text: "Ensure that the admission control plugin AlwaysAdmit is not set (Scored)"
- id: 1.10
text: "Verify that the AlwaysAdmit admission controller is disabled"
audit: "grep -A4 AlwaysAdmit /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -179,8 +180,8 @@ groups:
disable: false
scored: true
- id: 1.1.11
text: "Ensure that the admission control plugin AlwaysPullImages is set (Scored)"
- id: 1.11
text: "Manage the AlwaysPullImages admission controller"
audit: "grep -A4 AlwaysPullImages /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -202,18 +203,18 @@ groups:
disable: false
scored: true
- id: 1.1.12
text: "Ensure that the admission control plugin DenyEscalatingExec is set (Scored)"
- id: 1.12
text: "Use Security Context Constraints instead of DenyEscalatingExec admission"
type: "skip"
scored: true
- id: 1.1.13
text: "Ensure that the admission control plugin SecurityContextDeny is set (Scored)"
- id: 1.13
text: "Use Security Context Constraints instead of the SecurityContextDeny admission controller"
type: "skip"
scored: true
- id: 1.1.14
text: "Ensure that the admission control plugin NamespaceLifecycle is set (Scored)"
- id: 1.14
text: "Manage the NamespaceLifecycle admission controller"
audit: "grep -A4 NamespaceLifecycle /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -230,8 +231,8 @@ groups:
disable: true
scored: true
- id: 1.1.15
text: "Ensure that the --audit-log-path argument is set as appropriate (Scored)"
- id: 1.15
text: "Configure API server auditing - audit log file path"
audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -244,22 +245,22 @@ groups:
Edit the Openshift master config file /etc/origin/master/master-config.yaml, update the following entry and restart the API server.
auditConfig:
auditFilePath: "/var/log/audit-ocp.log"
auditFilePath: "/etc/origin/master/audit-ocp.log"
enabled: true
maximumFileRetentionDays: 10
maximumFileSizeMegabytes: 100
maximumFileRetentionDays: 30
maximumFileSizeMegabytes: 10
maximumRetainedFiles: 10
Make the same changes in the inventory/ansible variables so the changes are not
lost when an upgrade occurs.
scored: true
- id: 1.1.16
text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Scored)"
- id: 1.16
text: "Configure API server auditing - audit log retention"
audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml"
tests:
test_items:
- flag: "maximumFileRetentionDays: 10"
- flag: "maximumFileRetentionDays: 30"
compare:
op: has
value: "maximumFileRetentionDays"
@ -269,18 +270,18 @@ groups:
update the maximumFileRetentionDays entry and restart the API server.
auditConfig:
auditFilePath: "/var/log/audit-ocp.log"
auditFilePath: "/etc/origin/master/audit-ocp.log"
enabled: true
maximumFileRetentionDays: 10
maximumFileSizeMegabytes: 100
maximumFileRetentionDays: 30
maximumFileSizeMegabytes: 10
maximumRetainedFiles: 10
Make the same changes in the inventory/ansible variables so the changes are not
lost when an upgrade occurs.
scored: true
- id: 1.1.17
text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Scored)"
- id: 1.17
text: "Configure API server auditing - audit log backup retention"
audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -294,22 +295,22 @@ groups:
set enabled to true and restart the API server.
auditConfig:
auditFilePath: "/var/log/audit-ocp.log"
auditFilePath: "/etc/origin/master/audit-ocp.log"
enabled: true
maximumFileRetentionDays: 10
maximumFileSizeMegabytes: 100
maximumFileRetentionDays: 30
maximumFileSizeMegabytes: 10
maximumRetainedFiles: 10
Make the same changes in the inventory/ansible variables so the changes are not
lost when an upgrade occurs.
scored: true
- id: 1.1.18
text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Scored)"
- id: 1.18
text: "Configure audit log file size"
audit: "grep -A5 auditConfig /etc/origin/master/master-config.yaml"
tests:
test_items:
- flag: "maximumFileSizeMegabytes: 100"
- flag: "maximumFileSizeMegabytes: 30"
compare:
op: has
value: "maximumFileSizeMegabytes"
@ -319,18 +320,18 @@ groups:
set enabled to true and restart the API server.
auditConfig:
auditFilePath: "/var/log/audit-ocp.log"
auditFilePath: "/etc/origin/master/audit-ocp.log"
enabled: true
maximumFileRetentionDays: 10
maximumFileSizeMegabytes: 100
maximumFileRetentionDays: 30
maximumFileSizeMegabytes: 10
maximumRetainedFiles: 10
Make the same changes in the inventory/ansible variables so the changes are not
lost when an upgrade occurs.
scored: true
- id: 1.1.19
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
- id: 1.19
text: "Verify that authorization-mode is not set to AlwaysAllow"
audit: "grep -A1 authorization-mode /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -346,8 +347,8 @@ groups:
   - AllowAll
scored: true
- id: 1.1.20
text: "Ensure that the --token-auth-file parameter is not set (Scored)"
- id: 1.20
text: "Verify that the token-auth-file flag is not set"
audit: "grep token-auth-file /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -363,8 +364,8 @@ groups:
   - /path/to/file
scored: true
- id: 1.1.21
text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)"
- id: 1.21
text: "Verify the API server certificate authority"
audit: "grep -A1 kubelet-certificate-authority /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -380,8 +381,8 @@ groups:
   - /path/to/ca
scored: true
- id: 1.1.22
text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Scored)"
- id: 1.22
text: "Verify the API server client certificate and client key"
audit: "grep -A4 kubeletClientInfo /etc/origin/master/master-config.yaml"
tests:
bin_op: and
@ -407,18 +408,18 @@ groups:
port: 10250
scored: true
- id: 1.1.23
text: "Ensure that the --service-account-lookup argument is set to true"
- id: 1.23
text: "Verify that the service account lookup flag is not set"
type: skip
scored: true
- id: 1.1.24
text: "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)"
- id: 1.24
text: "Verify the PodSecurityPolicy is disabled to ensure use of SecurityContextConstraints"
type: "skip"
scored: true
- id: 1.1.25
text: "Ensure that the --service-account-key-file argument is set as appropriate (Scored)"
- id: 1.25
text: "Verify that the service account key file argument is not set"
audit: "grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml"
tests:
bin_op: and
@ -457,8 +458,8 @@ groups:
Verify that privateKeyFile and publicKeyFile exist and set.
scored: true
- id: 1.1.26
text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Scored)"
- id: 1.26
text: "Verify the certificate and key used for communication with etcd"
audit: "grep -A3 etcdClientInfo /etc/origin/master/master-config.yaml"
tests:
bin_op: and
@ -483,8 +484,8 @@ groups:
keyFile: master.etcd-client.key
scored: true
- id: 1.1.27
text: "Ensure that the admission control plugin ServiceAccount is set (Scored)"
- id: 1.27
text: "Verify that the ServiceAccount admission controller is enabled"
audit: "grep -A4 ServiceAccount /etc/origin/master/master-config.yaml"
tests:
bin_op: or
@ -507,8 +508,8 @@ groups:
disable: false
scored: true
- id: 1.1.28
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
- id: 1.28
text: "Verify the certificate and key used to encrypt API server traffic"
audit: "grep -A7 servingInfo /etc/origin/master/master-config.yaml"
tests:
bin_op: and
@ -536,16 +537,13 @@ groups:
requestTimeoutSeconds: 3600
scored: true
- id: 1.1.29
text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
audit: "grep -A7 servingInfo /etc/origin/master/master-config.yaml"
- id: 1.29
text: "Verify that the --client-ca-file argument is not set"
audit: "grep client-ca-file /etc/origin/master/master-config.yaml"
tests:
test_items:
- flag: "clientCA: ca.crt"
compare:
op: has
value: "clientCA: ca.crt"
set: true
set: false
remediation: |
Edit the Openshift master config file /etc/origin/master/master-config.yaml and set clientCA under servingInfo.
@ -559,8 +557,8 @@ groups:
requestTimeoutSeconds: 3600
scored: true
- id: 1.1.30
text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
- id: 1.30
text: "Verify the CA used for communication with etcd"
audit: "grep -A3 etcdClientInfo /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -578,13 +576,13 @@ groups:
keyFile: master.etcd-client.key
scored: true
- id: 1.1.31
text: "Ensure that the --etcd-cafile argument is set as appropriate (Scored)"
- id: 1.31
text: "Verify that the authorization-mode argument is not set"
type: "skip"
scored: true
- id: 1.1.32
text: "Ensure that the --authorization-mode argument is set to Node (Scored)"
- id: 1.32
text: "Verify that the NodeRestriction admission controller is enabled"
audit: "grep -A4 NodeRestriction /etc/origin/master/master-config.yaml"
tests:
bin_op: or
@ -606,8 +604,8 @@ groups:
disable: false
scored: true
- id: 1.1.33
text: "Ensure that the --experimental-encryption-provider-config argument is set as appropriate (Scored)"
- id: 1.33
text: "Configure encryption of data at rest in etcd datastore"
audit: "grep -A1 experimental-encryption-provider-config /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -621,8 +619,8 @@ groups:
https://docs.openshift.com/container-platform/3.10/admin_guide/encrypting_data.html
scored: true
- id: 1.1.34
text: "Ensure that the encryption provider is set to aescbc (Scored)"
- id: 1.34
text: "Set the encryption provider to aescbc for etcd data at rest"
audit: "grep -A1 experimental-encryption-provider-config /etc/origin/master/master-config.yaml | sed -n '2p' | awk '{ print $2 }' | xargs grep -A1 providers"
tests:
test_items:
@ -636,8 +634,8 @@ groups:
See https://docs.openshift.com/container-platform/3.10/admin_guide/encrypting_data.html.
scored: true
- id: 1.1.35
text: "Ensure that the admission control policy is set to EventRateLimit (Scored)"
- id: 1.35
text: "Enable the EventRateLimit plugin"
audit: "grep -A4 EventRateLimit /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -651,8 +649,8 @@ groups:
https://docs.openshift.com/container-platform/3.10/architecture/additional_concepts/admission_controllers.html#admission-controllers-general-admission-rules
scored: true
- id: 1.1.36
text: "Ensure that the AdvancedAuditing argument is not set to false (Scored)"
- id: 1.36
text: "Configure advanced auditing"
audit: "grep AdvancedAuditing /etc/origin/master/master-config.yaml"
tests:
bin_op: or
@ -674,8 +672,8 @@ groups:
scored: true
# Review 1.1.37 in Aquasec shared doc, the tests are net zero.
- id: 1.1.37
text: "Ensure that the --request-timeout argument is set as appropriate (Scored)"
- id: 1.37
text: "Adjust the request timeout argument for your cluster resources"
audit: "grep request-timeout /etc/origin/master/master-config.yaml"
type: manual
remediation: |
@ -683,27 +681,27 @@ groups:
scored: true
- id: 1.2
- id: 2
text: "Scheduler"
checks:
- id: 1.2.1
text: "Ensure that the --profiling argument is set to false (Scored)"
- id: 2.1
text: "Verify that Scheduler profiling is not exposed to the web"
type: "skip"
scored: true
- id: 1.3
- id: 3
text: "Controller Manager"
checks:
- id: 1.3.1
text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Scored)"
- id: 3.1
text: "Adjust the terminated-pod-gc-threshold argument as needed"
audit: "grep terminated-pod-gc-threshold -A1 /etc/origin/master/master-config.yaml"
tests:
test_items:
- flag: "true"
- flag: "terminated-pod-gc-threshold:"
compare:
op: has
value: "true"
value: "12500"
set: true
remediation: |
Edit the Openshift master config file /etc/origin/master/master-config.yaml and enable terminated-pod-gc-threshold.
@ -716,13 +714,13 @@ groups:
Enabling the "terminated-pod-gc-threshold" settings is optional.
scored: true
- id: 1.3.2
text: "Ensure that the --profiling argument is set to false (Scored)"
- id: 3.2
text: "Verify that Controller profiling is not exposed to the web"
type: "skip"
scored: true
- id: 1.3.3
text: "Ensure that the --use-service-account-credentials argument is set to true (Scored)"
- id: 3.3
text: "Verify that the --use-service-account-credentials argument is set to true"
audit: "grep -A2 use-service-account-credentials /etc/origin/master/master-config.yaml"
tests:
bin_op: or
@ -744,9 +742,9 @@ groups:
    - true
scored: true
# Review 1.3.4
- id: 1.3.4
text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Scored)"
# Review 3.4
- id: 3.4
text: "Verify that the --service-account-private-key-file argument is set as appropriate"
audit: |
grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml | grep privateKeyFile;
grep -A2 service-account-private-key-file /etc/origin/master/master-config.yaml
@ -763,9 +761,9 @@ groups:
Edit the Openshift master config file /etc/origin/master/master-config.yaml and remove service-account-private-key-file
scored: true
# Review 1.3.5
- id: 1.3.5
text: "Ensure that the --root-ca-file argument is set as appropriate (Scored)"
# Review 3.5
- id: 3.5
text: "Verify that the --root-ca-file argument is set as appropriate"
audit: "/bin/sh -c 'grep root-ca-file /etc/origin/master/master-config.yaml; grep -A9 serviceAccountConfig /etc/origin/master/master-config.yaml'"
tests:
bin_op: and
@ -790,13 +788,13 @@ groups:
https://docs.openshift.com/container-platform/3.10/admin_guide/service_accounts.html
scored: true
- id: 1.3.6
text: "Apply Security Context to Your Pods and Containers (Not Scored)"
- id: 3.6
text: "Verify that Security Context Constraints are applied to Your Pods and Containers"
type: "skip"
scored: false
- id: 1.3.7
text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
- id: 3.7
text: "Manage certificate rotation"
audit: "grep -B3 RotateKubeletServerCertificate=true /etc/origin/master/master-config.yaml"
tests:
test_items:
@ -812,25 +810,14 @@ groups:
scored: true
- id: 1.4
- id: 4
text: "Configuration Files"
checks:
- id: 1.4.1
text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored)"
- id: 4.1
text: "Verify the OpenShift default permissions for the API server pod specification file"
audit: "stat -c %a /etc/origin/node/pods/apiserver.yaml"
tests:
bin_op: or
test_items:
- flag: "644"
compare:
op: eq
value: "644"
set: true
- flag: "640"
compare:
op: eq
value: "640"
set: true
- flag: "600"
compare:
op: eq
@ -839,11 +826,11 @@ groups:
remediation: |
Run the below command.
chmod 644 /etc/origin/node/pods/apiserver.yaml
chmod 600 /etc/origin/node/pods/apiserver.yaml
scored: true
- id: 1.4.2
text: "Ensure that the API server pod specification file ownership is set to root:root (Scored)"
- id: 4.2
text: "Verify the OpenShift default file ownership for the API server pod specification file"
audit: "stat -c %U:%G /etc/origin/node/pods/apiserver.yaml"
tests:
test_items:
@ -858,22 +845,11 @@ groups:
chown root:root /etc/origin/node/pods/apiserver.yaml
scored: true
- id: 1.4.3
text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Scored)"
- id: 4.3
text: "Verify the OpenShift default file permissions for the controller manager pod specification file"
audit: "stat -c %a /etc/origin/node/pods/controller.yaml"
tests:
bin_op: or
test_items:
- flag: "644"
compare:
op: eq
value: "644"
set: true
- flag: "640"
compare:
op: eq
value: "640"
set: true
- flag: "600"
compare:
op: eq
@ -882,11 +858,11 @@ groups:
remediation: |
Run the below command on the master node.
chmod 644 /etc/origin/node/pods/controllermanager.yaml
chmod 600 /etc/origin/node/pods/controller.yaml
scored: true
- id: 1.4.4
text: "Ensure that the controller manager pod specification file ownership is set to root:root (Scored)"
- id: 4.4
text: "Verify the OpenShift default ownership for the controller manager pod specification file"
audit: "stat -c %U:%G /etc/origin/node/pods/controller.yaml"
tests:
test_items:
@ -898,25 +874,14 @@ groups:
remediation: |
Run the below command on the master node.
chown root:root /etc/origin/node/pods/controllermanager.yaml
chown root:root /etc/origin/node/pods/controller.yaml
scored: true
- id: 1.4.5
text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Scored)"
audit: "stat -c %a /etc/origin/node/pods/apiserver.yaml"
- id: 4.5
text: "Verify the OpenShift default permissions for the scheduler pod specification file"
audit: "stat -c %a /etc/origin/node/pods/controller.yaml"
tests:
bin_op: or
test_items:
- flag: "644"
compare:
op: eq
value: "644"
set: true
- flag: "640"
compare:
op: eq
value: "640"
set: true
- flag: "600"
compare:
op: eq
@ -925,12 +890,12 @@ groups:
remediation: |
Run the below command.
chmod 644 /etc/origin/node/pods/apiserver.yaml
chmod 600 /etc/origin/node/pods/controller.yaml
scored: true
- id: 1.4.6
text: "Ensure that the scheduler pod specification file ownership is set to root:root (Scored)"
audit: "stat -c %U:%G /etc/origin/node/pods/apiserver.yaml"
- id: 4.6
text: "Verify the scheduler pod specification file ownership set by OpenShift"
audit: "stat -c %U:%G /etc/origin/node/pods/controller.yaml"
tests:
test_items:
- flag: "root:root"
@ -941,25 +906,14 @@ groups:
remediation: |
Run the below command on the master node.
chown root:root /etc/origin/node/pods/apiserver.yaml
chown root:root /etc/origin/node/pods/controller.yaml
scored: true
- id: 1.4.7
text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Scored)"
- id: 4.7
text: "Verify the OpenShift default etcd pod specification file permissions"
audit: "stat -c %a /etc/origin/node/pods/etcd.yaml"
tests:
bin_op: or
test_items:
- flag: "644"
compare:
op: eq
value: "644"
set: true
- flag: "640"
compare:
op: eq
value: "640"
set: true
- flag: "600"
compare:
op: eq
@ -968,11 +922,11 @@ groups:
remediation: |
Run the below command.
chmod 644 /etc/origin/node/pods/etcd.yaml
chmod 600 /etc/origin/node/pods/etcd.yaml
scored: true
- id: 1.4.8
text: "Ensure that the etcd pod specification file ownership is set to root:root (Scored)"
- id: 4.8
text: "Verify the OpenShift default etcd pod specification file ownership"
audit: "stat -c %U:%G /etc/origin/node/pods/etcd.yaml"
tests:
test_items:
@ -987,9 +941,9 @@ groups:
chown root:root /etc/origin/node/pods/etcd.yaml
scored: true
- id: 1.4.9
text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Scored)"
audit: "stat -c %a /etc/origin/openvswitch/"
- id: 4.9
text: "Verify the default OpenShift Container Network Interface file permissions"
audit: "stat -c %a /etc/origin/openvswitch/ /etc/cni/net.d/"
tests:
bin_op: or
test_items:
@ -1011,12 +965,12 @@ groups:
remediation: |
Run the below command.
chmod 644 /etc/origin/openvswitch/
chmod 644 -R /etc/origin/openvswitch/ /etc/cni/net.d/
scored: true
- id: 1.4.10
text: "Ensure that the Container Network Interface file ownership is set to root:root (Scored)"
audit: "stat -c %U:%G /etc/origin/openvswitch/"
- id: 4.10
text: "Verify the default OpenShift Container Network Interface file ownership"
audit: "stat -c %U:%G /etc/origin/openvswitch/ /etc/cni/net.d/"
tests:
test_items:
- flag: "root:root"
@ -1027,11 +981,11 @@ groups:
remediation: |
Run the below command on the master node.
chown root:root /etc/origin/openvswitch/
chown root:root /etc/origin/openvswitch/ /etc/cni/net.d/
scored: true
- id: 1.4.11
text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
- id: 4.11
text: "Verify the default OpenShift etcd data directory permissions"
audit: "stat -c %a /var/lib/etcd"
tests:
test_items:
@ -1048,8 +1002,8 @@ groups:
chmod 700 /var/lib/etcd
scored: true
- id: 1.4.12
text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
- id: 4.12
text: "Verify the default OpenShift etcd data directory ownership"
audit: "stat -c %U:%G /var/lib/etcd"
tests:
test_items:
@ -1064,8 +1018,8 @@ groups:
chown etcd:etcd /var/lib/etcd
scored: true
- id: 1.4.13
text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Scored)"
- id: 4.13
text: "Verify the default OpenShift admin.conf file permissions"
audit: "stat -c %a /etc/origin/master/admin.kubeconfig"
tests:
bin_op: or
@ -1091,8 +1045,8 @@ groups:
chmod 644 /etc/origin/master/admin.kubeconfig
scored: true
- id: 1.4.14
text: "Ensure that the admin.conf file ownership is set to root:root (Scored)"
- id: 4.14
text: "Verify the default OpenShift admin.conf file ownership"
audit: "stat -c %U:%G /etc/origin/master/admin.kubeconfig"
tests:
test_items:
@ -1107,8 +1061,8 @@ groups:
chown root:root /etc/origin/master/admin.kubeconfig
scored: true
- id: 1.4.15
text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Scored)"
- id: 4.15
text: "Verify the default OpenShift scheduler.conf file permissions"
audit: "stat -c %a /etc/origin/master/openshift-master.kubeconfig"
tests:
bin_op: or
@ -1134,8 +1088,8 @@ groups:
chmod 644 /etc/origin/master/openshift-master.kubeconfig
scored: true
- id: 1.4.16
text: "Ensure that the scheduler.conf file ownership is set to root:root (Scored)"
- id: 4.16
text: "Verify the default OpenShift scheduler.conf file ownership"
audit: "stat -c %U:%G /etc/origin/master/openshift-master.kubeconfig"
tests:
test_items:
@ -1150,8 +1104,8 @@ groups:
chown root:root /etc/origin/master/openshift-master.kubeconfig
scored: true
- id: 1.4.17
text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Scored)"
- id: 4.17
text: "Verify the default Openshift controller-manager.conf file permissions"
audit: "stat -c %a /etc/origin/master/openshift-master.kubeconfig"
tests:
bin_op: or
@ -1177,7 +1131,7 @@ groups:
chmod 644 /etc/origin/master/openshift-master.kubeconfig
scored: true
- id: 1.4.18
- id: 4.18
text: "Ensure that the controller-manager.conf file ownership is set to root:root (Scored)"
audit: "stat -c %U:%G /etc/origin/master/openshift-master.kubeconfig"
tests:
@ -1194,11 +1148,11 @@ groups:
scored: true
- id: 1.5
- id: 5
text: "Etcd"
checks:
- id: 1.5.1
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
- id: 5.1
text: "Verify the default OpenShift cert-file and key-file configuration"
audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_CERT_FILE=/etc/etcd/server.crt /proc/1/environ; /usr/local/bin/master-exec etcd etcd grep etcd_key_file=/etc/etcd/server.key /proc/1/environ; grep ETCD_CERT_FILE=/etc/etcd/server.crt /etc/etcd/etcd.conf; grep ETCD_KEY_FILE=/etc/etcd/server.key /etc/etcd/etcd.conf'"
tests:
bin_op: and
@ -1222,8 +1176,8 @@ groups:
Reset to the OpenShift default configuration.
scored: true
- id: 1.5.2
text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
- id: 5.2
text: "Verify the default OpenShift setting for the client-cert-auth argument"
audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_CLIENT_CERT_AUTH=true /proc/1/environ; grep ETCD_CLIENT_CERT_AUTH /etc/etcd/etcd.conf'"
tests:
bin_op: and
@ -1242,8 +1196,8 @@ groups:
Reset to the OpenShift default configuration.
scored: true
- id: 1.5.3
text: "Ensure that the --auto-tls argument is not set to true (Scored)"
- id: 5.3
text: "Verify the OpenShift default values for etcd_auto_tls"
audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_AUTO_TLS /proc/1/environ; grep ETCD_AUTO_TLS /etc/etcd/etcd.conf'"
tests:
bin_op: or
@ -1262,8 +1216,8 @@ groups:
Reset to the OpenShift default configuration.
scored: true
- id: 1.5.4
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Scored)"
- id: 5.4
text: "Verify the OpenShift default peer-cert-file and peer-key-file arguments for etcd"
audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_CERT_FILE=/etc/etcd/peer.crt /proc/1/environ; /usr/local/bin/master-exec etcd etcd grep ETCD_PEER_KEY_FILE=/etc/etcd/peer.key /proc/1/environ; grep ETCD_PEER_CERT_FILE /etc/etcd/etcd.conf; grep ETCD_PEER_KEY_FILE /etc/etcd/etcd.conf'"
tests:
bin_op: and
@ -1287,8 +1241,8 @@ groups:
Reset to the OpenShift default configuration.
scored: true
- id: 1.5.5
text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
- id: 5.5
text: "Verify the OpenShift default configuration for the peer-client-cert-auth"
audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_CLIENT_CERT_AUTH=true /proc/1/environ; grep ETCD_PEER_CLIENT_CERT_AUTH /etc/etcd/etcd.conf'"
tests:
bin_op: and
@ -1307,8 +1261,8 @@ groups:
Reset to the OpenShift default configuration.
scored: true
- id: 1.5.6
text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
- id: 5.6
text: "Verify the OpenShift default configuration for the peer-auto-tls argument"
audit: "/bin/sh -c '/usr/local/bin/master-exec etcd etcd grep ETCD_PEER_AUTO_TLS /proc/1/environ; grep ETCD_PEER_AUTO_TLS /etc/etcd/etcd.conf'"
tests:
bin_op: and
@ -1327,18 +1281,18 @@ groups:
Reset to the OpenShift default configuration.
scored: true
- id: 1.5.7
text: "Ensure that the --wal-dir argument is set as appropriate (Scored)"
- id: 5.7
text: "Optionally modify the wal-dir argument"
type: "skip"
scored: true
- id: 1.5.8
text: "Ensure that the --max-wals argument is set to 0 (Scored)"
- id: 5.8
text: "Optionally modify the max-wals argument"
type: "skip"
scored: true
- id: 1.5.9
text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
- id: 5.9
text: "Verify the OpenShift default configuration for the etcd Certificate Authority"
audit: "openssl x509 -in /etc/origin/master/master.etcd-ca.crt -subject -issuer -noout | sed 's/@/ /'"
tests:
test_items:
@ -1352,11 +1306,11 @@ groups:
scored: false
- id: 1.6
- id: 6
text: "General Security Primitives"
checks:
- id: 1.6.1
text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
- id: 6.1
text: "Ensure that the cluster-admin role is only used where required"
type: "manual"
remediation: |
Review users, groups, serviceaccounts bound to cluster-admin:
@ -1366,8 +1320,8 @@ groups:
such access. Consider creating least-privilege roles for users and service accounts
scored: false
- id: 1.6.2
text: "Create Pod Security Policies for your cluster (Not Scored)"
- id: 6.2
text: "Verify Security Context Constraints as in use"
type: "manual"
remediation: |
Review Security Context Constraints:
@ -1383,16 +1337,16 @@ groups:
https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html
scored: false
- id: 1.6.3
text: "Create administrative boundaries between resources using namespaces (Not Scored)"
- id: 6.3
text: "Use OpenShift projects to maintain boundaries between resources"
type: "manual"
remediation: |
Review projects:
oc get projects
scored: false
- id: 1.6.4
text: "Create network segmentation using Network Policies (Not Scored)"
- id: 6.4
text: "Create network segmentation using the Multi-tenant plugin or Network Policies"
type: "manual"
remediation: |
Verify on masters the plugin being used:
@ -1414,8 +1368,8 @@ groups:
https://docs.openshift.com/container-platform/3.10/install/configuring_inventory_file.html
scored: false
- id: 1.6.5
text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Not Scored)"
- id: 6.5
text: "Enable seccomp and configure custom Security Context Constraints"
type: "manual"
remediation: |
Verify SCCs that have been configured with seccomp:
@ -1428,8 +1382,8 @@ groups:
https://docs.openshift.com/container-platform/3.9/admin_guide/seccomp.html#admin-guide-seccomp
scored: false
- id: 1.6.6
text: "Apply Security Context to Your Pods and Containers (Not Scored)"
- id: 6.6
text: "Review Security Context Constraints"
type: "manual"
remediation: |
Review SCCs:
@ -1452,15 +1406,15 @@ groups:
https://docs.openshift.com/container-platform/3.10/admin_guide/manage_scc.html
scored: false
- id: 1.6.7
text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
- id: 6.7
text: "Manage Image Provenance using ImagePolicyWebhook admission controller"
type: "manual"
remediation: |
Review imagePolicyConfig in /etc/origin/master/master-config.yaml.
scored: false
- id: 1.6.8
text: "Configure Network policies as appropriate (Not Scored)"
- id: 6.8
text: "Configure Network policies as appropriate"
type: "manual"
remediation: |
If ovs-networkplugin is used, review network policies:
@ -1471,8 +1425,8 @@ groups:
netnamespace by default.
scored: false
- id: 1.6.9
text: "Place compensating controls in the form of PSP and RBAC for privileged containers usage (Not Scored)"
- id: 6.9
text: "Use Security Context Constraints as compensating controls for privileged containers"
type: "manual"
remediation: |
1) Determine all sccs allowing privileged containers:

View File

@ -4,21 +4,21 @@ id: 2
text: "Worker Node Security Configuration"
type: "node"
groups:
- id: 2.1
- id: 7
text: "Kubelet"
checks:
- id: 2.1.1
text: "Ensure that the --allow-privileged argument is set to false (Scored)"
- id: 7.1
text: "Use Security Context Constraints to manage privileged containers as needed"
type: "skip"
scored: true
- id: 2.1.2
text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
- id: 7.2
text: "Ensure anonymous-auth is not disabled"
type: "skip"
scored: true
- id: 2.1.3
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
- id: 7.3
text: "Verify that the --authorization-mode argument is set to WebHook"
audit: "grep -A1 authorization-mode /etc/origin/node/node-config.yaml"
tests:
bin_op: or
@ -35,8 +35,8 @@ groups:
kubeletArguments in /etc/origin/node/node-config.yaml or set it to "Webhook".
scored: true
- id: 2.1.4
text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
- id: 7.4
text: "Verify the OpenShift default for the client-ca-file argument"
audit: "grep -A1 client-ca-file /etc/origin/node/node-config.yaml"
tests:
test_items:
@ -51,8 +51,8 @@ groups:
The config file does not have this defined in kubeletArgument, but in PodManifestConfig.
scored: true
- id: 2.1.5
text: "Ensure that the --read-only-port argument is set to 0 (Scored)"
- id: 7.5
text: "Verify the OpenShift default setting for the read-only-port argument"
audit: "grep -A1 read-only-port /etc/origin/node/node-config.yaml"
tests:
bin_op: or
@ -68,15 +68,15 @@ groups:
Edit the Openshift node config file /etc/origin/node/node-config.yaml and removed so that the OpenShift default is applied.
scored: true
- id: 2.1.6
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)"
- id: 7.6
text: "Adjust the streaming-connection-idle-timeout argument"
audit: "grep -A1 streaming-connection-idle-timeout /etc/origin/node/node-config.yaml"
tests:
bin_op: or
test_items:
- flag: "streaming-connection-idle-timeout"
set: false
- flag: "0"
- flag: "5m"
set: false
remediation: |
Edit the Openshift node config file /etc/origin/node/node-config.yaml and set the streaming-connection-timeout
@ -87,13 +87,13 @@ groups:
   - "5m"
scored: true
- id: 2.1.7
text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)"
- id: 7.7
text: "Verify the OpenShift defaults for the protect-kernel-defaults argument"
type: "skip"
scored: true
- id: 2.1.8
text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored)"
- id: 7.8
text: "Verify the OpenShift default value of true for the make-iptables-util-chains argument"
audit: "grep -A1 make-iptables-util-chains /etc/origin/node/node-config.yaml"
tests:
bin_op: or
@ -110,8 +110,8 @@ groups:
default value of true.
scored: true
- id: 2.1.9
text: "Ensure that the --keep-terminated-pod-volumes argument is set to false (Scored)"
- id: 7.9
text: "Verify that the --keep-terminated-pod-volumes argument is set to false"
audit: "grep -A1 keep-terminated-pod-volumes /etc/origin/node/node-config.yaml"
tests:
test_items:
@ -124,13 +124,13 @@ groups:
Reset to the OpenShift defaults
scored: true
- id: 2.1.10
text: "Ensure that the --hostname-override argument is not set (Scored)"
- id: 7.10
text: "Verify the OpenShift defaults for the hostname-override argument"
type: "skip"
scored: true
- id: 2.1.11
text: "Ensure that the --event-qps argument is set to 0 (Scored)"
- id: 7.11
text: "Set the --event-qps argument to 0"
audit: "grep -A1 event-qps /etc/origin/node/node-config.yaml"
tests:
bin_op: or
@ -147,8 +147,8 @@ groups:
the kubeletArguments section of.
scored: true
- id: 2.1.12
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
- id: 7.12
text: "Verify the OpenShift cert-dir flag for HTTPS traffic"
audit: "grep -A1 cert-dir /etc/origin/node/node-config.yaml"
tests:
test_items:
@ -161,8 +161,8 @@ groups:
Reset to the OpenShift default values.
scored: true
- id: 2.1.13
text: "Ensure that the --cadvisor-port argument is set to 0 (Scored)"
- id: 7.13
text: "Verify the OpenShift default of 0 for the cadvisor-port argument"
audit: "grep -A1 cadvisor-port /etc/origin/node/node-config.yaml"
tests:
bin_op: or
@ -179,8 +179,8 @@ groups:
if it is set in the kubeletArguments section.
scored: true
- id: 2.1.14
text: "Ensure that the RotateKubeletClientCertificate argument is not set to false (Scored)"
- id: 7.14
text: "Verify that the RotateKubeletClientCertificate argument is set to true"
audit: "grep -B1 RotateKubeletClientCertificate=true /etc/origin/node/node-config.yaml"
tests:
test_items:
@ -193,8 +193,8 @@ groups:
Edit the Openshift node config file /etc/origin/node/node-config.yaml and set RotateKubeletClientCertificate to true.
scored: true
- id: 2.1.15
text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
- id: 7.15
text: "Verify that the RotateKubeletServerCertificate argument is set to true"
audit: "grep -B1 RotateKubeletServerCertificate=true /etc/origin/node/node-config.yaml"
tests:
test_items:
@ -208,11 +208,11 @@ groups:
scored: true
- id: 2.2
- id: 8
text: "Configuration Files"
checks:
- id: 2.2.1
text: "Ensure that the kubelet.conf file permissions are set to 644 or more restrictive (Scored)"
- id: 8.1
text: "Verify the OpenShift default permissions for the kubelet.conf file"
audit: "stat -c %a /etc/origin/node/node.kubeconfig"
tests:
bin_op: or
@ -237,8 +237,8 @@ groups:
chmod 644 /etc/origin/node/node.kubeconfig
scored: true
- id: 2.2.2
text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)"
- id: 8.2
text: "Verify the kubeconfig file ownership of root:root"
audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig"
tests:
test_items:
@ -252,8 +252,8 @@ groups:
chown root:root /etc/origin/node/node.kubeconfig
scored: true
- id: 2.2.3
text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)"
- id: 8.3
text: "Verify the kubelet service file permissions of 644"
audit: "stat -c %a /etc/systemd/system/atomic-openshift-node.service"
tests:
bin_op: or
@ -278,8 +278,8 @@ groups:
chmod 644 /etc/systemd/system/atomic-openshift-node.service
scored: true
- id: 2.2.4
text: "Ensure that the kubelet service file ownership is set to root:root (Scored)"
- id: 8.4
text: "Verify the kubelet service file ownership of root:root"
audit: "stat -c %U:%G /etc/systemd/system/atomic-openshift-node.service"
tests:
test_items:
@ -293,8 +293,8 @@ groups:
chown root:root /etc/systemd/system/atomic-openshift-node.service
scored: true
- id: 2.2.5
text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)"
- id: 8.5
text: "Verify the OpenShift default permissions for the proxy kubeconfig file"
audit: "stat -c %a /etc/origin/node/node.kubeconfig"
tests:
bin_op: or
@ -319,8 +319,8 @@ groups:
chmod 644 /etc/origin/node/node.kubeconfig
scored: true
- id: 2.2.6
text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)"
- id: 8.6
text: "Verify the proxy kubeconfig file ownership of root:root"
audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig"
tests:
test_items:
@ -334,8 +334,8 @@ groups:
chown root:root /etc/origin/node/node.kubeconfig
scored: true
- id: 2.2.7
text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Scored)"
- id: 8.7
text: "Verify the OpenShift default permissions for the certificate authorities file."
audit: "stat -c %a /etc/origin/node/client-ca.crt"
tests:
bin_op: or
@ -360,8 +360,8 @@ groups:
chmod 644 /etc/origin/node/client-ca.crt
scored: true
- id: 2.2.8
text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)"
- id: 8.8
text: "Verify the client certificate authorities file ownership of root:root"
audit: "stat -c %U:%G /etc/origin/node/client-ca.crt"
tests:
test_items:

View File

@ -2,6 +2,8 @@ package check
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
yaml "gopkg.in/yaml.v2"
@ -11,31 +13,28 @@ const cfgDir = "../cfg/"
// validate that the files we're shipping are valid YAML
func TestYamlFiles(t *testing.T) {
// TODO: make this list dynamic
dirs := []string{"1.6/", "1.7/"}
for _, dir := range dirs {
dir = cfgDir + dir
files, err := ioutil.ReadDir(dir)
err := filepath.Walk(cfgDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
t.Fatalf("error reading %s directory: %v", dir, err)
t.Fatalf("failure accessing path %q: %v\n", path, err)
}
for _, file := range files {
fileName := file.Name()
in, err := ioutil.ReadFile(dir + fileName)
if !info.IsDir() {
t.Logf("reading file: %s", path)
in, err := ioutil.ReadFile(path)
if err != nil {
t.Fatalf("error opening file %s: %v", fileName, err)
t.Fatalf("error opening file %s: %v", path, err)
}
c := new(Controls)
err = yaml.Unmarshal(in, c)
if err == nil {
t.Logf("YAML file successfully unmarshalled: %s", path)
} else {
t.Fatalf("failed to load YAML from %s: %v", path, err)
}
}
return nil
})
if err != nil {
t.Fatalf("failed to load YAML from %s: %v", fileName, err)
}
}
t.Fatalf("failure walking cfg dir: %v\n", err)
}
}

View File

@ -157,7 +157,6 @@ groups:
value: Something
set: true
- id: 14
text: "check that flag some-arg is set to some-val with ':' separator"
tests:
@ -167,3 +166,134 @@ groups:
op: eq
value: some-val
set: true
- id: 15
text: "jsonpath correct value on field"
tests:
test_items:
- path: "{.readOnlyPort}"
compare:
op: eq
value: 15000
set: true
- path: "{.readOnlyPort}"
compare:
op: gte
value: 15000
set: true
- path: "{.readOnlyPort}"
compare:
op: lte
value: 15000
set: true
- id: 16
text: "jsonpath correct case-sensitive value on string field"
tests:
test_items:
- path: "{.stringValue}"
compare:
op: noteq
value: "None"
set: true
- path: "{.stringValue}"
compare:
op: noteq
value: "webhook,Something,RBAC"
set: true
- path: "{.stringValue}"
compare:
op: eq
value: "WebHook,Something,RBAC"
set: true
- id: 17
text: "jsonpath correct value on boolean field"
tests:
test_items:
- path: "{.trueValue}"
compare:
op: noteq
value: somethingElse
set: true
- path: "{.trueValue}"
compare:
op: noteq
value: false
set: true
- path: "{.trueValue}"
compare:
op: eq
value: true
set: true
- id: 18
text: "jsonpath field absent"
tests:
test_items:
- path: "{.notARealField}"
set: false
- id: 19
text: "jsonpath correct value on nested field"
tests:
test_items:
- path: "{.authentication.anonymous.enabled}"
compare:
op: eq
value: "false"
set: true
- id: 20
text: "yamlpath correct value on field"
tests:
test_items:
- path: "{.readOnlyPort}"
compare:
op: gt
value: 14999
set: true
- id: 21
text: "yamlpath field absent"
tests:
test_items:
- path: "{.fieldThatIsUnset}"
set: false
- id: 22
text: "yamlpath correct value on nested field"
tests:
test_items:
- path: "{.authentication.anonymous.enabled}"
compare:
op: eq
value: "false"
set: true
- id: 23
text: "path on invalid json"
tests:
test_items:
- path: "{.authentication.anonymous.enabled}"
compare:
op: eq
value: "false"
set: true
- id: 24
text: "path with broken expression"
tests:
test_items:
- path: "{.missingClosingBrace"
set: true
- id: 25
text: "yamlpath on invalid yaml"
tests:
test_items:
- path: "{.authentication.anonymous.enabled}"
compare:
op: eq
value: "false"
set: true

View File

@ -15,11 +15,16 @@
package check
import (
"bytes"
"encoding/json"
"fmt"
"os"
"regexp"
"strconv"
"strings"
yaml "gopkg.in/yaml.v2"
"k8s.io/client-go/util/jsonpath"
)
// test:
@ -38,6 +43,7 @@ const (
type testItem struct {
Flag string
Path string
Output string
Value string
Set bool
@ -54,20 +60,65 @@ type testOutput struct {
actualResult string
}
func failTestItem(s string) *testOutput {
return &testOutput{testResult: false, actualResult: s}
}
func (t *testItem) execute(s string) *testOutput {
result := &testOutput{}
match := strings.Contains(s, t.Flag)
var match bool
var flagVal string
if t.Flag != "" {
// Flag comparison: check if the flag is present in the input
match = strings.Contains(s, t.Flag)
} else {
// Path != "" - we don't know whether it's YAML or JSON but
// we can just try one then the other
buf := new(bytes.Buffer)
var jsonInterface interface{}
if t.Path != "" {
err := json.Unmarshal([]byte(s), &jsonInterface)
if err != nil {
err := yaml.Unmarshal([]byte(s), &jsonInterface)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to load YAML or JSON from provided input \"%s\": %v\n", s, err)
return failTestItem("failed to load YAML or JSON")
}
}
}
// Parse the jsonpath/yamlpath expression...
j := jsonpath.New("jsonpath")
j.AllowMissingKeys(true)
err := j.Parse(t.Path)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to parse path expression \"%s\": %v\n", t.Path, err)
return failTestItem("unable to parse path expression")
}
err = j.Execute(buf, jsonInterface)
if err != nil {
fmt.Fprintf(os.Stderr, "error executing path expression \"%s\": %v\n", t.Path, err)
return failTestItem("error executing path expression")
}
jsonpathResult := fmt.Sprintf("%s", buf)
match = (jsonpathResult != "")
flagVal = jsonpathResult
}
if t.Set {
var flagVal string
isset := match
if isset && t.Compare.Op != "" {
if t.Flag != "" {
// Expects flags in the form;
// --flag=somevalue
// flag: somevalue
// --flag
// somevalue
//pttn := `(` + t.Flag + `)(=)*([^\s,]*) *`
pttn := `(` + t.Flag + `)(=|: *)*([^\s]*) *`
flagRe := regexp.MustCompile(pttn)
vals := flagRe.FindStringSubmatch(s)
@ -82,6 +133,7 @@ func (t *testItem) execute(s string) *testOutput {
fmt.Fprintf(os.Stderr, "invalid flag in testitem definition")
os.Exit(1)
}
}
result.actualResult = strings.ToLower(flagVal)
switch t.Compare.Op {

View File

@ -120,6 +120,38 @@ func TestTestExecute(t *testing.T) {
controls.Groups[0].Checks[14],
"2:45 kube-apiserver some-arg:some-val --admission-control=Something ---audit-log-maxage=40",
},
{
controls.Groups[0].Checks[15],
"{\"readOnlyPort\": 15000}",
},
{
controls.Groups[0].Checks[16],
"{\"stringValue\": \"WebHook,Something,RBAC\"}",
},
{
controls.Groups[0].Checks[17],
"{\"trueValue\": true}",
},
{
controls.Groups[0].Checks[18],
"{\"readOnlyPort\": 15000}",
},
{
controls.Groups[0].Checks[19],
"{\"authentication\": { \"anonymous\": {\"enabled\": false}}}",
},
{
controls.Groups[0].Checks[20],
"readOnlyPort: 15000",
},
{
controls.Groups[0].Checks[21],
"readOnlyPort: 15000",
},
{
controls.Groups[0].Checks[22],
"authentication:\n anonymous:\n enabled: false",
},
}
for _, c := range cases {
@ -129,3 +161,31 @@ func TestTestExecute(t *testing.T) {
}
}
}
// TestTestExecuteExceptions verifies that checks fed malformed input
// (invalid JSON, a broken path expression, invalid YAML) report a
// failing test result instead of passing.
func TestTestExecuteExceptions(t *testing.T) {
	testCases := []struct {
		*Check
		input string
	}{
		{controls.Groups[0].Checks[23], "this is not valid json {} at all"},
		{controls.Groups[0].Checks[24], "{\"key\": \"value\"}"},
		{controls.Groups[0].Checks[25], "broken } yaml\nenabled: true"},
	}

	for _, tc := range testCases {
		if got := tc.Tests.execute(tc.input).testResult; got {
			t.Errorf("%s, expected:%v, got:%v\n", tc.Text, false, got)
		}
	}
}

34
job-eks.yaml Normal file
View File

@ -0,0 +1,34 @@
# Kubernetes Job that runs kube-bench once against an EKS worker node,
# mounting the host directories the checks need to read.
apiVersion: batch/v1
kind: Job
metadata:
  name: kube-bench
spec:
  template:
    spec:
      # Share the host PID namespace so kube-bench can inspect the
      # command-line arguments of processes running on the node.
      hostPID: true
      containers:
      - name: kube-bench
        # Push the image to your ECR and then refer to it here
        image: <ID.dkr.ecr.region.amazonaws.com/aquasec/kube-bench:ref>
        # --version pins the benchmark config instead of auto-detecting;
        # NOTE(review): presumably "1.11-json" selects an EKS-specific
        # cfg/ directory (JSON kubelet config) -- confirm it exists.
        command: ["kube-bench", "--version", "1.11-json"]
        volumeMounts:
        - name: var-lib-kubelet
          mountPath: /var/lib/kubelet
        - name: etc-systemd
          mountPath: /etc/systemd
        - name: etc-kubernetes
          mountPath: /etc/kubernetes
      # Run once; do not restart the pod after the benchmark completes.
      restartPolicy: Never
      volumes:
      - name: var-lib-kubelet
        hostPath:
          path: "/var/lib/kubelet"
      - name: etc-systemd
        hostPath:
          path: "/etc/systemd"
      - name: etc-kubernetes
        hostPath:
          path: "/etc/kubernetes"
      # NOTE(review): this volume is declared but never referenced by a
      # volumeMount in the container above -- confirm whether a mount
      # (e.g. for host binaries used in version detection) is missing.
      - name: usr-bin
        hostPath:
          path: "/usr/bin"