diff --git a/Gopkg.lock b/Gopkg.lock index 4f431e3..e74b52e 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -189,6 +189,17 @@ pruneopts = "UT" revision = "c95af922eae69f190717a0b7148960af8c55a072" +[[projects]] + digest = "1:e8e3acc03397f71fad44385631e665c639a8d55bd187bcfa6e70b695e3705edd" + name = "k8s.io/client-go" + packages = [ + "third_party/forked/golang/template", + "util/jsonpath", + ] + pruneopts = "UT" + revision = "e64494209f554a6723674bd494d69445fb76a1d4" + version = "v10.0.0" + [solve-meta] analyzer-name = "dep" analyzer-version = 1 diff --git a/README.md b/README.md index 56e815d..8eb2b98 100644 --- a/README.md +++ b/README.md @@ -74,7 +74,7 @@ NAME READY STATUS RESTARTS AGE kube-bench-j76s9 0/1 Completed 0 11s # The results are held in the pod's logs -k logs kube-bench-j76s9 +kubectl logs kube-bench-j76s9 [INFO] 1 Master Node Security Configuration [INFO] 1.1 API Server ... @@ -86,6 +86,15 @@ To run the tests on the master node, the pod needs to be scheduled on that node. The default labels applied to master nodes has changed since Kubernetes 1.11, so if you are using an older version you may need to modify the nodeSelector and tolerations to run the job on the master node. +### Running in an EKS cluster + +There is a `job-eks.yaml` file for running the kube-bench node checks on an EKS cluster. **Note that you must update the image reference in `job-eks.yaml`.** Typically you will push the container image for kube-bench to ECR and refer to it there in the YAML file. + +There are two significant differences on EKS: + +* It uses [config files in JSON format](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/) +* It's not possible to schedule jobs onto the master node, so master checks can't be performed + ### Installing from a container This command copies the kube-bench binary and configuration files to your host from the Docker container: @@ -195,6 +204,19 @@ tests: value: ... ``` + +You can also define jsonpath and yamlpath tests using the following syntax: + +``` +tests: +- path: + set: + compare: + op: + value: +... +``` + Tests have various `operations` which are used to compare the output of audit commands for success. 
These operations are: diff --git a/cfg/1.11-json/config.yaml b/cfg/1.11-json/config.yaml new file mode 100644 index 0000000..ce3c054 --- /dev/null +++ b/cfg/1.11-json/config.yaml @@ -0,0 +1,20 @@ +--- +# Config file for systems such as EKS where config is in JSON files +# Master nodes are controlled by EKS and not user-accessible +node: + kubernetes: + confs: + - "/var/lib/kubelet/kubeconfig" + kubeconfig: + - "/var/lib/kubelet/kubeconfig" + + kubelet: + bins: + - "hyperkube kubelet" + - "kubelet" + defaultconf: "/etc/kubernetes/kubelet/kubelet-config.json" + defaultsvc: "/etc/systemd/system/kubelet.service" + defaultkubeconfig: "/var/lib/kubelet/kubeconfig" + + proxy: + defaultkubeconfig: "/var/lib/kubelet/kubeconfig" diff --git a/cfg/1.11-json/node.yaml b/cfg/1.11-json/node.yaml new file mode 100644 index 0000000..88ae739 --- /dev/null +++ b/cfg/1.11-json/node.yaml @@ -0,0 +1,508 @@ +--- +controls: +version: 1.11 +id: 2 +text: "Worker Node Security Configuration" +type: "node" +groups: +- id: 2.1 + text: "Kubelet" + checks: + - id: 2.1.1 + text: "Ensure that the --allow-privileged argument is set to false (Scored)" + audit: "ps -fC $kubeletbin" + tests: + test_items: + - flag: "--allow-privileged" + compare: + op: eq + value: false + set: true + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --allow-privileged=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.2 + text: "Ensure that the --anonymous-auth argument is set to false (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - path: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: false + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to + false . + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.3 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - path: "{.authorization.mode}" + compare: + op: noteq + value: "AlwaysAllow" + set: true + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.4 + text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - path: "{.authentication.x509.clientCAFile}" + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. 
+ --client-ca-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.5 + text: "Ensure that the --read-only-port argument is set to 0 (Scored)" + audit: "cat $kubeletconf" + tests: + bin_op: or + test_items: + - path: "{.readOnlyPort}" + set: false + - path: "{.readOnlyPort}" + compare: + op: eq + value: "0" + set: true + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0 . + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.6 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)" + audit: "cat $kubeletconf" + tests: + bin_op: or + test_items: + - path: "{.streamingConnectionIdleTimeout}" + set: false + - path: "{.streamingConnectionIdleTimeout}" + compare: + op: noteq + value: 0 + set: true + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.7 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - path: "{.protectKernelDefaults}" + compare: + op: eq + value: true + set: true + remediation: | + If using a Kubelet config file, edit the file to set protectKernelDefaults: true . + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.8 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored)" + audit: "cat $kubeletconf" + tests: + bin_op: or + test_items: + - path: "{.makeIPTablesUtilChains}" + set: false + - path: "{.makeIPTablesUtilChains}" + compare: + op: eq + value: true + set: true + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true . + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.9 + text: "Ensure that the --hostname-override argument is not set (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - path: "{.hostnameOverride}" + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.10 + text: "Ensure that the --event-qps argument is set to 0 (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - path: "{.eventRecordQPS}" + compare: + op: eq + value: 0 + set: true + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: 0 . + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --event-qps=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.11 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)" + audit: "cat $kubeletconf" + tests: + bin_op: and + test_items: + - path: "{.tlsCertFile}" + set: true + - path: "{.tlsPrivateKeyFile}" + set: true + remediation: | + If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate + file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the + corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.12 + text: "Ensure that the --cadvisor-port argument is set to 0 (Scored)" + audit: "cat $kubeletconf" + tests: + bin_op: or + test_items: + - path: "{.cadvisorPort}" + compare: + op: eq + value: 0 + set: true + - path: "{.cadvisorPort}" + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable. + --cadvisor-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.13 + text: "Ensure that the --rotate-certificates argument is not set to false (Scored)" + audit: "cat $kubeletconf" + tests: + bin_op: or + test_items: + - path: "{.rotateCertificates}" + set: false + - path: "{.rotateCertificates}" + compare: + op: noteq + value: "false" + set: true + remediation: | + If using a Kubelet config file, edit the file to add the line rotateCertificates: true. + If using command line arguments, edit the kubelet service file $kubeletsvc + on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.14 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - path: "{.featureGates.RotateKubeletServerCertificate}" + compare: + op: eq + value: true + set: true + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 2.1.15 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)" + audit: "cat $kubeletconf" + tests: + test_items: + - path: "{.tlsCipherSuites}" + compare: + op: eq + value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + set: true + remediation: | + If using a Kubelet config file, edit the file to set TLSCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + If using executable arguments, edit the kubelet service file $kubeletconf on each worker node and set the below parameter. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + scored: false + +- id: 2.2 + text: "Configuration Files" + checks: + - id: 2.2.1 + text: "Ensure that the kubelet.conf file permissions are set to 644 or + more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %a $kubeletkubeconfig; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chmod 644 $kubeletkubeconfig + scored: true + + - id: 2.2.2 + text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: root:root + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 2.2.3 + text: "Ensure that the kubelet service file permissions are set to 644 or + more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %a $kubeletsvc; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: 644 + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chmod 755 $kubeletsvc + scored: true + + - id: 2.2.4 + text: "Ensure that the kubelet service file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. 
For example, + chown root:root $kubeletsvc + scored: true + + - id: 2.2.5 + text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %a $proxykubeconfig; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chmod 644 $proxykubeconfig + scored: true + + - id: 2.2.6 + text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker + node. For example, + chown root:root $proxykubeconfig + scored: true + + - id: 2.2.7 + text: "Ensure that the certificate authorities file permissions are set to + 644 or more restrictive (Scored)" + type: manual + remediation: | + Run the following command to modify the file permissions of the --client-ca-file + chmod 644 + scored: true + + - id: 2.2.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $ca-file; then stat -c %U:%G $ca-file; fi'" + type: manual + remediation: | + Run the following command to modify the ownership of the --client-ca-file . + chown root:root + scored: true + + - id: 2.2.9 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the following command (using the config file location identied in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 2.2.10 + text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)" + audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'" + tests: + bin_op: or + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the following command (using the config file location identied in the Audit step) + chmod 644 $kubeletconf + scored: true diff --git a/check/controls_test.go b/check/controls_test.go index 3cf9b60..17c62e5 100644 --- a/check/controls_test.go +++ b/check/controls_test.go @@ -2,6 +2,8 @@ package check import ( "io/ioutil" + "os" + "path/filepath" "testing" yaml "gopkg.in/yaml.v2" @@ -11,31 +13,28 @@ const cfgDir = "../cfg/" // validate that the files we're shipping are valid YAML func TestYamlFiles(t *testing.T) { - // TODO: make this list dynamic - dirs := []string{"1.6/", "1.7/"} - - for _, dir := range dirs { - dir = cfgDir + dir - - files, err := ioutil.ReadDir(dir) + err := filepath.Walk(cfgDir, func(path string, info os.FileInfo, err error) error { if err != nil { - t.Fatalf("error reading %s directory: %v", dir, err) + t.Fatalf("failure accessing path %q: %v\n", path, err) } - - for _, file := range files { - - fileName := file.Name() - in, err := ioutil.ReadFile(dir + fileName) 
+ if !info.IsDir() { + t.Logf("reading file: %s", path) + in, err := ioutil.ReadFile(path) if err != nil { - t.Fatalf("error opening file %s: %v", fileName, err) + t.Fatalf("error opening file %s: %v", path, err) } c := new(Controls) - err = yaml.Unmarshal(in, c) - if err != nil { - t.Fatalf("failed to load YAML from %s: %v", fileName, err) + if err == nil { + t.Logf("YAML file successfully unmarshalled: %s", path) + } else { + t.Fatalf("failed to load YAML from %s: %v", path, err) } } + return nil + }) + if err != nil { + t.Fatalf("failure walking cfg dir: %v\n", err) } } diff --git a/check/data b/check/data index cfc65ca..116a5f9 100644 --- a/check/data +++ b/check/data @@ -157,7 +157,6 @@ groups: value: Something set: true - - id: 14 text: "check that flag some-arg is set to some-val with ':' separator" tests: @@ -167,3 +166,134 @@ groups: op: eq value: some-val set: true + + - id: 15 + text: "jsonpath correct value on field" + tests: + test_items: + - path: "{.readOnlyPort}" + compare: + op: eq + value: 15000 + set: true + - path: "{.readOnlyPort}" + compare: + op: gte + value: 15000 + set: true + - path: "{.readOnlyPort}" + compare: + op: lte + value: 15000 + set: true + + - id: 16 + text: "jsonpath correct case-sensitive value on string field" + tests: + test_items: + - path: "{.stringValue}" + compare: + op: noteq + value: "None" + set: true + - path: "{.stringValue}" + compare: + op: noteq + value: "webhook,Something,RBAC" + set: true + - path: "{.stringValue}" + compare: + op: eq + value: "WebHook,Something,RBAC" + set: true + + - id: 17 + text: "jsonpath correct value on boolean field" + tests: + test_items: + - path: "{.trueValue}" + compare: + op: noteq + value: somethingElse + set: true + - path: "{.trueValue}" + compare: + op: noteq + value: false + set: true + - path: "{.trueValue}" + compare: + op: eq + value: true + set: true + + - id: 18 + text: "jsonpath field absent" + tests: + test_items: + - path: "{.notARealField}" + set: false + + - id: 19 + text: "jsonpath correct value on nested field" + tests: + test_items: + - path: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: "false" + set: true + + - id: 20 + text: "yamlpath correct value on field" + tests: + test_items: + - path: "{.readOnlyPort}" + compare: + op: gt + value: 14999 + set: true + + - id: 21 + text: "yamlpath field absent" + tests: + test_items: + - path: "{.fieldThatIsUnset}" + set: false + + - id: 22 + text: "yamlpath correct value on nested field" + tests: + test_items: + - path: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: "false" + set: true + + - id: 23 + text: "path on invalid json" + tests: + test_items: + - path: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: "false" + set: true + + - id: 24 + text: "path with broken expression" + tests: + test_items: + - path: "{.missingClosingBrace" + set: true + + - id: 25 + text: "yamlpath on invalid yaml" + tests: + test_items: + - path: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: "false" + set: true diff --git a/check/test.go b/check/test.go index 9289b0a..9ddb469 100644 --- a/check/test.go +++ b/check/test.go @@ -15,11 +15,16 @@ package check import ( + "bytes" + "encoding/json" "fmt" "os" "regexp" "strconv" "strings" + + yaml "gopkg.in/yaml.v2" + "k8s.io/client-go/util/jsonpath" ) // test: @@ -38,6 +43,7 @@ const ( type testItem struct { Flag string + Path string Output string Value string Set bool @@ -54,33 +60,79 @@ type testOutput struct { actualResult string } +func 
failTestItem(s string) *testOutput { + return &testOutput{testResult: false, actualResult: s} +} + func (t *testItem) execute(s string) *testOutput { result := &testOutput{} - match := strings.Contains(s, t.Flag) + var match bool + var flagVal string + + if t.Flag != "" { + // Flag comparison: check if the flag is present in the input + match = strings.Contains(s, t.Flag) + } else { + // Path != "" - we don't know whether it's YAML or JSON but + // we can just try one then the other + buf := new(bytes.Buffer) + var jsonInterface interface{} + + if t.Path != "" { + err := json.Unmarshal([]byte(s), &jsonInterface) + if err != nil { + err := yaml.Unmarshal([]byte(s), &jsonInterface) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to load YAML or JSON from provided input \"%s\": %v\n", s, err) + return failTestItem("failed to load YAML or JSON") + } + } + } + + // Parse the jsonpath/yamlpath expression... + j := jsonpath.New("jsonpath") + j.AllowMissingKeys(true) + err := j.Parse(t.Path) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to parse path expression \"%s\": %v\n", t.Path, err) + return failTestItem("unable to parse path expression") + } + + err = j.Execute(buf, jsonInterface) + if err != nil { + fmt.Fprintf(os.Stderr, "error executing path expression \"%s\": %v\n", t.Path, err) + return failTestItem("error executing path expression") + } + + jsonpathResult := fmt.Sprintf("%s", buf) + match = (jsonpathResult != "") + flagVal = jsonpathResult + } if t.Set { - var flagVal string isset := match if isset && t.Compare.Op != "" { - // Expects flags in the form; - // --flag=somevalue - // --flag - // somevalue - //pttn := `(` + t.Flag + `)(=)*([^\s,]*) *` - pttn := `(` + t.Flag + `)(=|: *)*([^\s]*) *` - flagRe := regexp.MustCompile(pttn) - vals := flagRe.FindStringSubmatch(s) - - if len(vals) > 0 { - if vals[3] != "" { - flagVal = vals[3] + if t.Flag != "" { + // Expects flags in the form; + // --flag=somevalue + // flag: somevalue + // --flag + // somevalue + pttn := `(` + t.Flag + `)(=|: *)*([^\s]*) *` + flagRe := regexp.MustCompile(pttn) + vals := flagRe.FindStringSubmatch(s) + + if len(vals) > 0 { + if vals[3] != "" { + flagVal = vals[3] + } else { + flagVal = vals[1] + } } else { - flagVal = vals[1] + fmt.Fprintf(os.Stderr, "invalid flag in testitem definition") + os.Exit(1) } - } else { - fmt.Fprintf(os.Stderr, "invalid flag in testitem definition") - os.Exit(1) } result.actualResult = strings.ToLower(flagVal) diff --git a/check/test_test.go b/check/test_test.go index 72e4f46..308dcad 100644 --- a/check/test_test.go +++ b/check/test_test.go @@ -120,6 +120,38 @@ func TestTestExecute(t *testing.T) { controls.Groups[0].Checks[14], "2:45 kube-apiserver some-arg:some-val --admission-control=Something ---audit-log-maxage=40", }, + { + controls.Groups[0].Checks[15], + "{\"readOnlyPort\": 15000}", + }, + { + controls.Groups[0].Checks[16], + "{\"stringValue\": \"WebHook,Something,RBAC\"}", + }, + { + controls.Groups[0].Checks[17], + "{\"trueValue\": true}", + }, + { + controls.Groups[0].Checks[18], + "{\"readOnlyPort\": 15000}", + }, + { + controls.Groups[0].Checks[19], + "{\"authentication\": { \"anonymous\": {\"enabled\": false}}}", + }, + { + controls.Groups[0].Checks[20], + "readOnlyPort: 15000", + }, + { + controls.Groups[0].Checks[21], + "readOnlyPort: 15000", + }, + { + controls.Groups[0].Checks[22], + "authentication:\n anonymous:\n enabled: false", + }, } for _, c := range cases { @@ -129,3 +161,31 @@ func TestTestExecute(t *testing.T) { } } } + +func TestTestExecuteExceptions(t 
*testing.T) { + + cases := []struct { + *Check + str string + }{ + { + controls.Groups[0].Checks[23], + "this is not valid json {} at all", + }, + { + controls.Groups[0].Checks[24], + "{\"key\": \"value\"}", + }, + { + controls.Groups[0].Checks[25], + "broken } yaml\nenabled: true", + }, + } + + for _, c := range cases { + res := c.Tests.execute(c.str).testResult + if res { + t.Errorf("%s, expected:%v, got:%v\n", c.Text, false, res) + } + } +} diff --git a/cmd/common.go b/cmd/common.go index 9e29ede..ed6e9b5 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -199,12 +199,17 @@ func loadConfig(nodetype check.NodeType) string { // isMaster verify if master components are running on the node. func isMaster() bool { - _ = loadConfig(check.MASTER) glog.V(2).Info("Checking if the current node is running master components") masterConf := viper.Sub(string(check.MASTER)) - if _, err := getBinaries(masterConf); err != nil { + components, err := getBinaries(masterConf) + + if err != nil { glog.V(2).Info(err) return false } + if len(components) == 0 { + glog.V(2).Info("No master binaries specified") + return false + } return true } diff --git a/job-eks.yaml b/job-eks.yaml new file mode 100644 index 0000000..d51909f --- /dev/null +++ b/job-eks.yaml @@ -0,0 +1,34 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + spec: + hostPID: true + containers: + - name: kube-bench + # Push the image to your ECR and then refer to it here + image: + command: ["kube-bench", "--version", "1.11-json"] + volumeMounts: + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + - name: etc-systemd + mountPath: /etc/systemd + - name: etc-kubernetes + mountPath: /etc/kubernetes + restartPolicy: Never + volumes: + - name: var-lib-kubelet + hostPath: + path: "/var/lib/kubelet" + - name: etc-systemd + hostPath: + path: "/etc/systemd" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" + - name: usr-bin + hostPath: + path: "/usr/bin"
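
As a worked illustration of the `path`-based test syntax documented in the README change above (a sketch only, drawn entirely from the `check/data` and `check/test_test.go` cases added in this diff — nothing here is new behaviour): a single jsonpath expression is evaluated against the kubelet config file regardless of whether that file is JSON or YAML, because `check/test.go` first tries `json.Unmarshal` and falls back to `yaml.Unmarshal` before running the expression.

```
# Test item using a path expression (cf. ids 19 and 22 in check/data)
tests:
  test_items:
  - path: "{.authentication.anonymous.enabled}"
    compare:
      op: eq
      value: "false"
    set: true

# JSON input that satisfies this test (cf. check/test_test.go):
#   {"authentication": {"anonymous": {"enabled": false}}}
# Equivalent YAML input, matched by the same expression:
#   authentication:
#     anonymous:
#       enabled: false
```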