From 82ebcac31f94e82408d364e05ec0724a70557976 Mon Sep 17 00:00:00 2001 From: Carter Williamson Date: Wed, 12 Mar 2025 17:14:37 -0700 Subject: [PATCH 01/10] Done going through k8s stig file from DOD, now to work on failure cases --- cfg/config.yaml | 5 + cfg/gke-stig-kubernetes-v2r2/config.yaml | 16 + .../controlplane.yaml | 250 +++++ .../managedservices.yaml | 987 ++++++++++++++++++ cfg/gke-stig-kubernetes-v2r2/node.yaml | 487 +++++++++ cfg/gke-stig-kubernetes-v2r2/policies.yaml | 110 ++ go.mod | 9 +- go.sum | 17 + hack/kind-stig.test.yaml | 4 +- hack/kind-stig.yaml | 2 +- makefile | 4 +- 11 files changed, 1885 insertions(+), 6 deletions(-) create mode 100644 cfg/gke-stig-kubernetes-v2r2/config.yaml create mode 100644 cfg/gke-stig-kubernetes-v2r2/controlplane.yaml create mode 100644 cfg/gke-stig-kubernetes-v2r2/managedservices.yaml create mode 100644 cfg/gke-stig-kubernetes-v2r2/node.yaml create mode 100644 cfg/gke-stig-kubernetes-v2r2/policies.yaml diff --git a/cfg/config.yaml b/cfg/config.yaml index e656166..4c8b698 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -447,6 +447,11 @@ target_mapping: - "controlplane" - "policies" - "managedservices" + "gke-stig-kubernetes-v2r2": + - "node" + - "controlplane" + - "policies" + - "managedservices" "tkgi-1.2.53": - "master" - "etcd" diff --git a/cfg/gke-stig-kubernetes-v2r2/config.yaml b/cfg/gke-stig-kubernetes-v2r2/config.yaml new file mode 100644 index 0000000..b39b29d --- /dev/null +++ b/cfg/gke-stig-kubernetes-v2r2/config.yaml @@ -0,0 +1,16 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml +## These settings are required if you are using the --asff option to report findings to AWS Security Hub +## AWS account number is required. +# AWS_ACCOUNT: "" +## AWS region is required. +# AWS_REGION: "" +## EKS Cluster ARN is required. +# CLUSTER_ARN: "" + +node: + proxy: + defaultkubeconfig: "/var/lib/kubelet/kubeconfig" + + kubelet: + defaultconf: "/etc/kubernetes/kubelet/kubelet-config.yaml" diff --git a/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml b/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml new file mode 100644 index 0000000..b6532af --- /dev/null +++ b/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml @@ -0,0 +1,250 @@ +--- +controls: +version: "gke-stig-kubernetes-v1r6" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "DISA Category Code I - API Server Security" + checks: + - id: V-242378 + text: "The Kubernetes API Server must use TLS 1.2, at a minimum, to protect the confidentiality of sensitive data during electronic dissemination." + audit: "grep -i tls-min-version /etc/kubernetes/manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "--tls-min-version" + compare: + op: nothave + value: "VersionTLS10" + - flag: "--tls-min-version" + compare: + op: nothave + value: "VersionTLS11" + remediation: | + Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--tls-min-version" to "VersionTLS12" or higher. + scored: true + - id: V-242388 + text: "The Kubernetes API server must not have the insecure bind address set." + audit: "grep -i insecure-bind-address /etc/kubernetes/manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "--insecure-bind-address" + set: false + remediation: | + Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Remove the value of "--insecure-bind-address" setting. 
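+      # Illustrative note, not part of the STIG text: on a compliant control plane the
+      # audit grep above returns nothing, because the kube-apiserver command block omits
+      # the flag entirely while pinning a modern TLS floor, e.g. (values shown only as an
+      # example):
+      #   - command:
+      #     - kube-apiserver
+      #     - --tls-min-version=VersionTLS12
+      #     # ...and no --insecure-bind-address argument anywhere in the list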
+ scored: true + + - id: V-242389 + text: "The Kubernetes API server must have the secure port set." + audit: "grep -i secure-port /etc/kubernetes/manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "--secure-port" + compare: + op: gt + value: "0" + remediation: | + Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--secure-port" to a value greater than "0". + scored: true + + + - id: V-242390 # Similar to CIS 3.2.1 + text: "The Kubernetes API server must have anonymous authentication disabled (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + set: true + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit $kubeletconf to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + + # TODO: This is pretty different from what the stig is asking for, double check + - id: V-242400 + text: "The Kubernetes API server must have Alpha APIs disabled (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "AllAlpha=true" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit any manifest files or $kubeletconf that contain the feature-gates + setting with AllAlpha set to "true". + Set the flag to "false" or remove the "AllAlpha" setting + completely. Restart the kubelet service if the kubelet config file + if the kubelet config file is changed. + scored: true + + + # - id: V-242400 + # text: "The Kubernetes API server must have Alpha APIs disabled." + # audit: "grep -i feature-gates /etc/kubernetes/manifests/kube-apiserver.yaml" + # tests: + # test_items: + # - flag: "--feature-gates" + # compare: + # op: nothave + # value: "AllAlpha=true" + # remediation: | + # Edit any manifest file that contains the "--feature-gates" setting with "AllAlpha" set to "true". + # Set the value of "AllAlpha" to "false" or remove the setting completely. + # scored: true + + - id: 2.2 + text: "DISA Category Code II - Controller Manager Security" + checks: + - id: V-242381 + text: "The Kubernetes Controller Manager must create unique service accounts for each work payload. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + - id: V-242376 + text: "The Kubernetes Controller Manager must use TLS 1.2, at a minimum, to protect the confidentiality of sensitive data during electronic dissemination." 
+ audit: "grep -i tls-min-version /etc/kubernetes/manifests/kube-controller-manager.yaml" + tests: + test_items: + - flag: "--tls-min-version" + compare: + op: nothave + value: "VersionTLS10" + - flag: "--tls-min-version" + compare: + op: nothave + value: "VersionTLS11" + remediation: | + Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--tls-min-version" to "VersionTLS12" or higher. + scored: true + - id: V-242443 + text: " Kubernetes must contain the latest updates as authorized by IAVMs, CTOs, DTMs, and STIGs. (Manual)" + type: "manual" + remediation: | + Upgrade Kubernetes to a supported version. + + # TODO: Update this ref + - id: V-242461 + text: "Kubernetes API Server audit logs must be enabled. (Manual)" + type: "manual" + remediation: | + Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. + Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + + + # TODO: Validate this one + - id: V-242462 + text: "The Kubernetes PKI directory must be owned by root." + audit: "stat -c %U:%G /etc/kubernetes/pki" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the PKI directory to root:root by executing the command: + chown -R root:root /etc/kubernetes/pki + scored: true + + # TODO: Validate this one + - id: V-242463 + text: "The Kubernetes PKI directory must have file permissions set to 644 or more restrictive." + audit: "find /etc/kubernetes/pki -type f -name '*.crt' -exec stat -c %a {} \\;" + tests: + test_items: + - flag: "644" + compare: + op: lte + value: "644" + remediation: | + Change the permissions of the PKI certificate files to 644 by executing the command: + find /etc/kubernetes/pki -type f -name '*.crt' -exec chmod 644 {} \; + scored: true + + # TODO: Validate this one + - id: V-242464 + text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive." + audit: "find /etc/kubernetes/pki -type f -name '*.key' -exec stat -c %a {} \\;" + tests: + test_items: + - flag: "600" + compare: + op: lte + value: "600" + remediation: | + Change the permissions of the PKI key files to 600 by executing the command: + find /etc/kubernetes/pki -type f -name '*.key' -exec chmod 600 {} \; + scored: true + + # TODO: Validate this one + - id: V-242465 + text: "The Kubernetes Controller Manager must have secure binding." + audit: "grep -i bind-address /etc/kubernetes/manifests/kube-controller-manager.yaml" + tests: + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + remediation: | + Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Ensure the "--bind-address" flag is set to "127.0.0.1". + scored: true + + - id: 2.3 + text: "DISA Category Code III - Scheduler Security" + checks: + - id: V-242377 + text: "The Kubernetes Scheduler must use TLS 1.2, at a minimum, to protect the confidentiality of sensitive data during electronic dissemination." + audit: "grep -i tls-min-version /etc/kubernetes/manifests/kube-scheduler.yaml" + tests: + test_items: + - flag: "--tls-min-version" + compare: + op: nothave + value: "VersionTLS10" + - flag: "--tls-min-version" + compare: + op: nothave + value: "VersionTLS11" + remediation: | + Edit the Kubernetes Scheduler manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. 
+ Set the value of "--tls-min-version" to "VersionTLS12" or higher. + scored: true + - id: V-242411 + text: "The Kubernetes Scheduler must enforce ports, protocols, and services (PPS) that adhere to the PPSM CAL." + audit: "grep -i scheduler /etc/kubernetes/manifests/kube-scheduler.yaml" + tests: + test_items: + - flag: "--secure-port" + compare: + op: gt + value: "0" + remediation: | + Amend any system documentation requiring revision to comply with the PPSM CAL. + Update Kubernetes Scheduler manifest and namespace PPS configuration to comply with the PPSM CAL. + scored: true diff --git a/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml b/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml new file mode 100644 index 0000000..f9abd6d --- /dev/null +++ b/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml @@ -0,0 +1,987 @@ +--- +controls: +version: "gke-stig-kubernetes-v2r2" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "DISA Category Code I" + checks: + # TODO: Validate this one + - id: V-242386 + text: "The Kubernetes API server must have the insecure port flag disabled." + audit: "grep -i insecure-port /etc/kubernetes/manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "--insecure-port" + compare: + op: eq + value: "0" + remediation: | + Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--insecure-port" to "0". + Note: The "--insecure-port" flag has been deprecated and can only be set to "0". This flag will be removed in Kubernetes v1.24. + scored: false + + # - id: V-242386 + # text: "The Kubernetes API server must have the insecure port flag disabled | Component of GKE Control Plane" + # type: "skip" + + # TODO: Validate this one + - id: V-242388 + text: "The Kubernetes API server must not have the insecure bind address set." + audit: "grep -i insecure-bind-address /etc/kubernetes/manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "--insecure-bind-address" + set: false + remediation: | + Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Remove the value of "--insecure-bind-address" setting. + scored: false + + # - id: V-242388 + # text: "The Kubernetes API server must have the insecure bind address not set | Component of GKE Control Plane" + # type: "skip" + + # TODO Verify this one (can't find it like on the aws side https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html) + - id: V-242436 + text: "The Kubernetes API server must have the ValidatingAdmissionWebhook enabled (manual)" + type: "manual" + remediation: GKE automatically enable ValidatingAdmissionWebhook + scored: false + + - id: V-242437 + text: "[Deprecated] Kubernetes must have a pod security policy set. policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+" + type: "skip" + + # TODO pretty sure this doesn't work + # - id: V-245542 + # text: "Kubernetes API Server must disable basic authentication to protect information in transit." + # audit: "grep -i basic-auth-file /etc/kubernetes/manifests/kube-apiserver.yaml" + # tests: + # test_items: + # - flag: "--basic-auth-file" + # set: false + # remediation: | + # Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + # Remove the setting "--basic-auth-file". 
+ # scored: false + + + - id: V-245542 + text: "Kubernetes API Server must disable basic authentication to protect information in transit | Component of EKS Control Plane" + type: "skip" + + - id: 5.2 + text: "DISA Category Code II" + checks: + - id: V-242376 + text: "The Kubernetes Controller Manager must use TLS 1.2, at a minimum | Component of GKE Control Plane" + type: "skip" + + - id: V-242377 + text: "The Kubernetes Scheduler must use TLS 1.2, at a minimum | Component of GKE Control Plane" + type: "skip" + + - id: V-242378 + text: "The Kubernetes API Server must use TLS 1.2, at a minimum | Component of GKE Control Plane" + type: "skip" + + - id: V-242379 + text: "The Kubernetes etcd must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of GKE Control Plane" + type: "skip" + + - id: V-242380 + text: "The Kubernetes API Server must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of GKE Control Plane" + type: "skip" + + - id: V-242382 + text: "The Kubernetes API Server must enable Node,RBAC as the authorization mode | Component of GKE Control Plane" + type: "skip" + + # TODO: Move to controlplane if this works in GKE + - id: V-242384 + text: "The Kubernetes Scheduler must have secure binding." + audit: "grep -i bind-address /etc/kubernetes/manifests/kube-scheduler.yaml" + tests: + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + remediation: | + Edit the Kubernetes Scheduler manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the argument "--bind-address" to "127.0.0.1". + scored: false + + # - id: V-242384 + # text: "The Kubernetes Scheduler must have secure binding | Component of GKE Control Plane" + # type: "skip" + + + # TODO: Move to controlplane if this works in GKE + - id: V-242385 + text: "The Kubernetes Controller Manager must have secure binding." + audit: "grep -i bind-address /etc/kubernetes/manifests/kube-controller-manager.yaml" + tests: + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + remediation: | + Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the argument "--bind-address" to "127.0.0.1". + scored: false + + # - id: V-242385 + # text: "The Kubernetes Controller Manager must have secure binding | Component of GKE Control Plane" + # type: "skip" + + # TODO: Move to controlplane if this works in GKE + - id: V-242389 + text: "The Kubernetes API server must have the secure port set." + audit: "grep -i secure-port /etc/kubernetes/manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "--secure-port" + compare: + op: gt + value: "0" + remediation: | + Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--secure-port" to a value greater than "0". 
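+      # Illustrative note (an assumption about kube-bench semantics, not STIG text): the
+      # "op: gt" comparison above passes only when the flag is present with a numeric
+      # value above zero, e.g. a manifest entry such as:
+      #   - --secure-port=6443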
+ scored: false + + # - id: V-242389 + # text: "The Kubernetes API server must have the secure port set | Component of EKS Control Plane" + # type: "skip" + + # TODO: Didn't actually see this one in the k8s stig file + # - id: V-242401 + # text: "The Kubernetes API Server must have an audit policy set | Component of GKE Control Plane" + # type: "skip" + + - id: V-242402 + text: "The Kubernetes API Server must have an audit log path set | Component of GKE Control Plane" + type: "skip" + + - id: V-242403 + text: "Kubernetes API Server must generate audit records | Component of GKE Control Plane" + type: "skip" + + # TODO This will need tweaks to work I think but might be automatable + # - id: V-242405 + # text: "The Kubernetes manifests must be owned by root." + # audit: "ls -l /etc/kubernetes/manifests/*" + # tests: + # test_items: + # - flag: "owner" + # compare: + # op: eq + # value: "root:root" + # remediation: | + # On the Control Plane, change to the /etc/kubernetes/manifests directory. + # Run the command: + # chown root:root * + + # To verify the change took place, run the command: + # ls -l * + + # All the manifest files should be owned by root:root. + # scored: false + + - id: V-242405 + text: "The Kubernetes manifests must be owned by root | Component of GKE Control Plane" + type: "skip" + + # TODO verify this one, I think the permissions flag just needs to be added to the ls cmd + - id: V-242408 + text: "The Kubernetes manifest files must have least privileges." + audit: "ls -l /etc/kubernetes/manifests/*" + tests: + test_items: + - flag: "permissions" + compare: + op: lte + value: "644" + remediation: | + On both Control Plane and Worker Nodes, change to the /etc/kubernetes/manifests directory. + Run the command: + chmod 644 * + To verify the change took place, run the command: + ls -l * + All the manifest files should now have privileges of "644". + scored: false + + # - id: V-242408 + # text: "The Kubernetes manifests must have least privileges | Component of GKE Control Plane" + # type: "skip" + + # TODO Pretty sure this is actually a GKE setting + # - id: V-242409 + # text: "Kubernetes Controller Manager must disable profiling." + # audit: "grep -i profiling /etc/kubernetes/manifests/kube-controller-manager.yaml" + # tests: + # test_items: + # - flag: "--profiling" + # compare: + # op: eq + # value: "false" + # remediation: | + # Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + # Set the argument "--profiling" to "false". + # scored: false + + - id: V-242409 + text: "Kubernetes Controller Manager must disable profiling | Component of GKE Control Plane" + type: "skip" + + - id: V-242410 + text: "The Kubernetes API Server must enforce PPS that adhere to PPSM CAL | Component of GKE Control Plane" + type: "skip" + + - id: V-242411 + text: "The Kubernetes Scheduler must enforce PPS that adhere to PPSM CAL | Component of GKE Control Plane" + type: "skip" + + - id: V-242412 + text: "The Kubernetes Controllers must enforce PPS that adhere to PPSM CAL | Component of GKE Control Plane" + type: "skip" + + - id: V-242413 + text: "The Kubernetes etcd must enforce PPS that adhere to PPSM CAL | Component of GKE Control Plane" + type: "skip" + + - id: V-242418 + text: "The Kubernetes API server must use approved cipher suites | Component of GKE Control Plane" + type: "skip" + + # TODO Validate this one + - id: V-242419 + text: "Kubernetes API Server must have the SSL Certificate Authority set." 
+ audit: "grep -i client-ca-file /etc/kubernetes/manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "--client-ca-file" + set: true + remediation: | + Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--client-ca-file" to the path containing an Approved Organizational Certificate. + scored: false + + # - id: V-242419 + # text: "Kubernetes API Server must have the SSL Certificate Authority set | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242420 + text: "Kubernetes Kubelet must have the SSL Certificate Authority set." + audit: "ps -ef | grep kubelet | grep -- --client-ca-file" + tests: + test_items: + - flag: "--client-ca-file" + set: true + remediation: | + On the Control Plane, run the command: + ps -ef | grep kubelet + + If the "--client-ca-file" option exists, verify its value is correctly set. + Note the path to the config file (identified by --config). + + Edit the Kubernetes Kubelet config file: + Set the value of "clientCAFile" to a path containing an Approved Organizational Certificate. + + Restart the kubelet service using the following command: + systemctl daemon-reload && systemctl restart kubelet + scored: false + + # - id: V-242420 + # text: "Kubernetes Kubelet must have the SSL Certificate Authority set | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242421 + text: "Kubernetes Controller Manager must have the SSL Certificate Authority set." + audit: "grep -i root-ca-file /etc/kubernetes/manifests/kube-controller-manager.yaml" + tests: + test_items: + - flag: "--root-ca-file" + set: true + remediation: | + Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--root-ca-file" to a path containing an Approved Organizational Certificate. + scored: false + + # - id: V-242421 + # text: "Kubernetes Controller Manager must have the SSL Certificate Authority set | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242422 + text: "Kubernetes API Server must have a certificate for communication." + audit: "grep -i tls-cert-file /etc/kubernetes/manifests/kube-apiserver.yaml && grep -i tls-private-key-file /etc/kubernetes/manifests/kube-apiserver.yaml" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Ensure the "--tls-cert-file" and "--tls-private-key-file" flags are set to paths containing an Approved Organizational Certificate and corresponding private key. + scored: false + + # - id: V-242422 + # text: "Kubernetes API Server must have a certificate for communication | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242423 + text: "Kubernetes etcd must enable client authentication to secure service." + audit: "grep -i client-cert-auth /etc/kubernetes/manifests/etcd.yaml" + tests: + test_items: + - flag: "--client-cert-auth" + compare: + op: eq + value: "true" + remediation: | + Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--client-cert-auth" to "true" for etcd. 
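+      # Illustrative example only; the certificate paths below are kubeadm defaults and
+      # are an assumption, adjust them to your cluster's PKI layout:
+      #   - --client-cert-auth=true
+      #   - --cert-file=/etc/kubernetes/pki/etcd/server.crt
+      #   - --key-file=/etc/kubernetes/pki/etcd/server.key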
+ scored: false + + # - id: V-242423 + # text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242424 + text: "Kubernetes etcd must have a certificate for communication." + audit: "grep -i cert-file /etc/kubernetes/manifests/etcd.yaml && grep -i key-file /etc/kubernetes/manifests/etcd.yaml" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + set: true + - flag: "--key-file" + set: true + remediation: | + Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Ensure the "--cert-file" and "--key-file" flags are set to paths containing an Approved Organizational Certificate and corresponding private key. + scored: false + + # - id: V-242424 + # text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242425 + text: "Kubernetes Kubelet must have a certificate for communication." + audit: "ps -ef | grep kubelet | grep -- --tls-cert-file" + tests: + test_items: + - flag: "--tls-cert-file" + set: true + remediation: | + On the Control Plane, run the command: + ps -ef | grep kubelet + + If the "--tls-cert-file" option exists, verify its value is correctly set. + Note the path to the config file (identified by --config). + + Edit the Kubernetes Kubelet config file: + Set the value of "tlsCertFile" to a path containing an Approved Organizational Certificate. + + Restart the kubelet service using the following command: + systemctl daemon-reload && systemctl restart kubelet + scored: false + + + # - id: V-242425 + # text: "Kubernetes Kubelet must enable tls-cert-file for client authentication to secure service | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242426 + text: "Kubernetes etcd must enable peer client authentication." + audit: "grep -i peer-client-cert-auth /etc/kubernetes/manifests/etcd.yaml" + tests: + test_items: + - flag: "--peer-client-cert-auth" + compare: + op: eq + value: "true" + remediation: | + Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--peer-client-cert-auth" to "true" for etcd. + scored: false + + # - id: V-242426 + # text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242427 + text: "Kubernetes etcd must have a key file for secure communication." + audit: "grep -i key-file /etc/kubernetes/manifests/etcd.yaml" + tests: + test_items: + - flag: "--key-file" + set: true + remediation: | + Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--key-file" to the Approved Organizational Certificate. + scored: false + + # - id: V-242427 + # text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242428 + text: "Kubernetes etcd must have a certificate for communication." + audit: "grep -i cert-file /etc/kubernetes/manifests/etcd.yaml" + tests: + test_items: + - flag: "--cert-file" + set: true + remediation: | + Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. 
+ Set the value of "--cert-file" to the Approved Organizational Certificate. + scored: false + + # - id: V-242428 + # text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242429 + text: "Kubernetes etcd must have the SSL Certificate Authority set." + audit: "grep -i etcd-cafile /etc/kubernetes/manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "--etcd-cafile" + set: true + remediation: | + Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--etcd-cafile" to the Certificate Authority for etcd. + scored: false + + # - id: V-242429 + # text: "Kubernetes etcd must have the SSL Certificate Authority set | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242430 + text: "Kubernetes etcd must have a certificate for communication." + audit: "grep -i etcd-certfile /etc/kubernetes/manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "--etcd-certfile" + set: true + remediation: | + Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--etcd-certfile" to the certificate to be used for communication with etcd. + scored: false + + # - id: V-242430 + # text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242431 + text: "Kubernetes etcd must have a key file for secure communication." + audit: "grep -i etcd-keyfile /etc/kubernetes/manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "--etcd-keyfile" + set: true + remediation: | + Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--etcd-keyfile" to the key file used for secure communication with etcd. + scored: false + + + # - id: V-242431 + # text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242432 + text: "Kubernetes etcd must have peer-cert-file set for secure communication." + audit: "grep -i peer-cert-file /etc/kubernetes/manifests/etcd.yaml" + tests: + test_items: + - flag: "--peer-cert-file" + set: true + remediation: | + Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--peer-cert-file" to the certificate to be used for communication with etcd. + scored: false + + # - id: V-242432 + # text: "Kubernetes etcd must have peer-cert-file set for secure communication | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242433 + text: "Kubernetes etcd must have a peer-key-file set for secure communication." + audit: "grep -i peer-key-file /etc/kubernetes/manifests/etcd.yaml" + tests: + test_items: + - flag: "--peer-key-file" + set: true + remediation: | + Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--peer-key-file" to the certificate to be used for communication with etcd. 
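+      # Illustrative example only; paths are kubeadm-style defaults and are an assumption.
+      # The peer TLS settings checked by V-242426, V-242432 and V-242433 typically appear
+      # together in the etcd command block:
+      #   - --peer-client-cert-auth=true
+      #   - --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
+      #   - --peer-key-file=/etc/kubernetes/pki/etcd/peer.key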
+ scored: false + + + # - id: V-242433 + # text: "Kubernetes etcd must have a peer-key-file set for secure communication | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242438 + text: "Kubernetes API Server must configure timeouts to limit attack surface." + audit: "grep -i request-timeout /etc/kubernetes/manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "--request-timeout" + compare: + op: gt + value: "0" + remediation: | + Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Ensure the "--request-timeout" flag is set to a value greater than "0". + scored: false + + # - id: V-242438 + # text: "Kubernetes API Server must configure timeouts to limit attack surface | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242444 + text: "The Kubernetes component manifests must be owned by root." + audit: "stat -c %U:%G /etc/kubernetes/manifests/*" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the manifest files to root:root by executing the command: + chown root:root /etc/kubernetes/manifests/* + scored: false + + # - id: V-242444 + # text: "The Kubernetes component manifests must be owned by root | Component of GKE Control Plane" + # type: "skip" + + - id: V-242445 + text: "The Kubernetes component etcd must be owned by etcd | Component of GKE Control Plane" + type: "skip" + + # TODO Validate this one + - id: V-242446 + text: "The Kubernetes conf files must be owned by root." + audit: "stat -c %U:%G /etc/kubernetes/admin.conf /etc/kubernetes/scheduler.conf /etc/kubernetes/controller-manager.conf" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the Kubernetes conf files to root:root by executing the commands: + chown root:root /etc/kubernetes/admin.conf + chown root:root /etc/kubernetes/scheduler.conf + chown root:root /etc/kubernetes/controller-manager.conf + scored: false + + + # - id: V-242446 + # text: "The Kubernetes conf files must be owned by root | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242447 + text: "The Kubernetes Kube Proxy kubeconfig must have file permissions set to 644 or more restrictive." + audit: "stat -c %a $proxykubeconfig" + tests: + test_items: + - flag: "644" + compare: + op: lte + value: "644" + remediation: | + Change the permissions of the Kube Proxy kubeconfig to 644 by executing the command: + chmod 644 $proxykubeconfig + scored: false + + + # - id: V-242447 + # text: "The Kubernetes Kube Proxy must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242448 + text: "The Kubernetes Kube Proxy kubeconfig must be owned by root." + audit: "stat -c %U:%G $proxykubeconfig" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the Kube Proxy kubeconfig to root:root by executing the command: + chown root:root $proxykubeconfig + scored: false + + # - id: V-242448 + # text: "The Kubernetes Kube Proxy must be owned by root | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242449 + text: "The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive." 
+ audit: "stat -c %a $kubeletcafile" + tests: + test_items: + - flag: "644" + compare: + op: lte + value: "644" + remediation: | + Change the permissions of the Kubernetes Kubelet certificate authority file to 644 by executing the command: + chmod 644 $kubeletcafile + scored: false + + # - id: V-242449 + # text: "The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242450 + text: "The Kubernetes Kubelet certificate authority must be owned by root." + audit: "stat -c %U:%G $kubeletcafile" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the Kubernetes Kubelet certificate authority file to root:root by executing the command: + chown root:root $kubeletcafile + scored: false + + + # - id: V-242450 + # text: "The Kubernetes Kubelet certificate authority must be owned by root | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242451 + text: "The Kubernetes component PKI must be owned by root." + audit: "stat -c %U:%G /etc/kubernetes/pki/*" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the PKI directory and its contents to root:root by executing the command: + chown -R root:root /etc/kubernetes/pki/ + scored: false + + + # - id: V-242451 + # text: "The Kubernetes component PKI must be owned by root | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242452 + text: "The Kubernetes kubelet KubeConfig must have file permissions set to 644 or more restrictive." + audit: "stat -c %a /etc/kubernetes/kubelet.conf" + tests: + test_items: + - flag: "644" + compare: + op: lte + value: "644" + remediation: | + Change the permissions of the Kubelet KubeConfig file to 644 by executing the command: + chmod 644 /etc/kubernetes/kubelet.conf + scored: false + + # - id: V-242452 + # text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242453 + text: "The Kubernetes kubelet KubeConfig file must be owned by root." + audit: "stat -c %U:%G /etc/kubernetes/kubelet.conf" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the kubelet.conf file to root:root by executing the command: + chown root:root /etc/kubernetes/kubelet.conf + scored: false + + # - id: V-242453 + # text: "The Kubernetes kubelet config must be owned by root | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242454 + text: "The Kubernetes kubeadm.conf must be owned by root." + audit: "stat -c %U:%G $kubeletdefaultsvc" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the kubeadm.conf to root:root by executing the command: + chown root:root $kubeletdefaultsvc + scored: false + + # - id: V-242454 + # text: "The Kubernetes kubeadm.conf must be owned by root | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242455 + text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive." 
+ audit: "stat -c %a $kubeletdefaultsvc" + tests: + test_items: + - flag: "644" + compare: + op: lte + value: "644" + remediation: | + Change the permissions of the kubeadm.conf to 644 by executing the command: + chmod 644 $kubeletdefaultsvc + scored: false + + # - id: V-242455 + # text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + # type: "skip" + + + # TODO Validate this one + - id: V-242456 + text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive." + audit: "stat -c %a /var/lib/kubelet/config.yaml" + tests: + test_items: + - flag: "644" + compare: + op: lte + value: "644" + remediation: | + Change the permissions of the config.yaml to 644 by executing the command: + chmod 644 /var/lib/kubelet/config.yaml + scored: false + + + # - id: V-242456 + # text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242457 + text: "The Kubernetes kubelet config must be owned by root." + audit: "stat -c %U:%G /var/lib/kubelet/config.yaml" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the kubelet config file to root:root by executing the command: + chown root:root /var/lib/kubelet/config.yaml + scored: false + + + # - id: V-242457 + # text: "The Kubernetes kubelet config must be owned by root | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one don't think it works + # - id: V-242458 + # text: "The Kubernetes etcd must have file permissions set to 644 or more restrictive." + # audit: "stat -c %a /var/lib/etcd/*" + # tests: + # test_items: + # - flag: "644" + # compare: + # op: lte + # value: "644" + # remediation: | + # Change the permissions of the etcd data directory to 644 by executing the command: + # chmod -R 644 /var/lib/etcd/* + # scored: false + + - id: V-242458 + text: "The Kubernetes API Server must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + type: "skip" + + - id: V-242459 + text: "The Kubernetes etcd must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + type: "skip" + + # TODO Validate this one + - id: V-242460 + text: "The Kubernetes admin kubeconfig must have file permissions set to 644 or more restrictive." + audit: "stat -c %a /etc/kubernetes/admin.conf" + tests: + test_items: + - flag: "644" + compare: + op: lte + value: "644" + remediation: | + Change the permissions of the admin kubeconfig file to 644 by executing the command: + chmod 644 /etc/kubernetes/admin.conf + scored: false + + # - id: V-242460 + # text: "The Kubernetes admin.conf must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242466 + text: "The Kubernetes Scheduler must have secure binding." + audit: "grep -i bind-address /etc/kubernetes/manifests/kube-scheduler.yaml" + tests: + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + remediation: | + Edit the Kubernetes Scheduler manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Ensure the "--bind-address" flag is set to "127.0.0.1". 
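+      # Illustrative note, not STIG text: a passing kube-scheduler.yaml simply carries
+      #   - --bind-address=127.0.0.1
+      # in its command block, so the audit grep returns exactly that line.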
+ scored: false + + # - id: V-242466 + # text: "The Kubernetes PKI CRT must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + # type: "skip" + + # TODO Validate this one + - id: V-242467 + text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive." + audit: "find /etc/kubernetes/pki -type f -name '*.key' -exec stat -c %a {} \\;" + tests: + test_items: + - flag: "600" + compare: + op: lte + value: "600" + remediation: | + Change the permissions of the PKI key files to 600 by executing the command: + find /etc/kubernetes/pki -type f -name '*.key' -exec chmod 600 {} \; + scored: false + + # - id: V-242467 + # text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive | Component of GKE Control Plane" + # type: "skip" + + - id: V-242468 + text: "The Kubernetes API Server must prohibit communication using TLS version 1.0 and 1.1, and SSL 2.0 and 3.0 | Component of EKS Control Plane" + type: "skip" + +#TODO Test this, pretty sure it doesn't work + # - id: V-245541 + # text: "Kubernetes Kubelet must not disable timeouts." + # audit: "ps -ef | grep kubelet | grep -- --streaming-connection-idle-timeout" + # tests: + # test_items: + # - flag: "--streaming-connection-idle-timeout" + # compare: + # op: gte + # value: "5m" + # remediation: | + # On the Control Plane, run the command: + # ps -ef | grep kubelet + + # If the "--streaming-connection-idle-timeout" option exists, verify its value. + + # Edit the Kubernetes Kubelet config file: + # Set the argument "streamingConnectionIdleTimeout" to a value of "5m" or greater. + + # Restart the kubelet service using the following command: + # systemctl daemon-reload && systemctl restart kubelet + # scored: false + + - id: V-245541 + text: "Kubernetes Kubelet must not disable timeouts | Component of GKE Control Plane" + type: "skip" + + # TODO Check this, probably doesn't work + # - id: V-245543 + # text: "Kubernetes API Server must disable token authentication to protect information in transit." + # audit: "grep -i token-auth-file /etc/kubernetes/manifests/kube-apiserver.yaml" + # tests: + # test_items: + # - flag: "--token-auth-file" + # set: false + # remediation: | + # Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + # Remove the setting "--token-auth-file". + # scored: false + + - id: V-245543 + text: "Kubernetes API Server must disable token authentication to protect information in transit | Component of GKE Control Plane" + type: "skip" + + # TODO Verify this one + - id: V-245544 + text: "Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit." + audit: "grep -i kubelet-client-certificate /etc/kubernetes/manifests/kube-apiserver.yaml && grep -i kubelet-client-key /etc/kubernetes/manifests/kube-apiserver.yaml" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-client-key" + set: true + remediation: | + Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + Set the value of "--kubelet-client-certificate" and "--kubelet-client-key" to an Approved Organizational Certificate and key pair. 
+ Restart the kubelet service using the following command: + service kubelet restart + + # - id: V-245544 + # text: "Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit | Component of GKE Control Plane" + # type: "skip" + + # TODO This one is "new" doesn't appear to work though + # - id: V-254800 + # text: "Kubernetes must have a Pod Security Admission control file configured." + # audit: "grep -i admission-control-config-file /etc/kubernetes/manifests/kube-apiserver.yaml" + # tests: + # test_items: + # - flag: "--admission-control-config-file" + # set: true + # remediation: | + # Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + # Set the value of "--admission-control-config-file" to a valid path for the file. + # Create an admission controller config file with the necessary PodSecurity configuration. + # scored: false + + # TODO This one is "new" doesn't appear to work though + # - id: V-254801 + # text: "Kubernetes must enable PodSecurity admission controller on static pods and Kubelets." + # audit: "grep -i feature-gates /etc/kubernetes/manifests/kube-apiserver.yaml" + # tests: + # test_items: + # - flag: "--feature-gates" + # compare: + # op: has + # value: "PodSecurity=true" + # remediation: | + # Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + # Ensure the argument "--feature-gates=PodSecurity=true" is present. + # scored: false \ No newline at end of file diff --git a/cfg/gke-stig-kubernetes-v2r2/node.yaml b/cfg/gke-stig-kubernetes-v2r2/node.yaml new file mode 100644 index 0000000..4a8ffde --- /dev/null +++ b/cfg/gke-stig-kubernetes-v2r2/node.yaml @@ -0,0 +1,487 @@ +--- +controls: +version: "gke-stig-kubernetes-v1r6" +id: 3 +text: "Node Configuration" +type: "node" +groups: + - id: 3.1 + text: "DISA Category Code I" + checks: + - id: V-242387 # CIS 3.2.4 + text: "The Kubernetes Kubelet must have the read-only port flag disabled (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + bin_op: or + remediation: | + If modifying the Kubelet config file, edit the kubelet-config.json file + /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to 0 + + "readOnlyPort": 0 + + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --read-only-port=0 + + For each remediation: + Based on your system, restart the kubelet service and check status + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: V-242391 # CIS 3.2.1 + text: "Ensure that the Anonymous Auth is Not Enabled (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + Remediation Method 1: + If configuring via the Kubelet config file, you first need to locate the file. 
+ To do this, SSH to each node and execute the following command to find the kubelet + process: + + ps -ef | grep kubelet + + The output of the above command provides details of the active kubelet process, from + which we can see the location of the configuration file provided to the kubelet service + with the --config argument. The file can be viewed with a command such as more or + less, like so: + + sudo less /home/kubernetes/kubelet-config.yaml + + Disable Anonymous Authentication by setting the following parameter: + + "authentication": { "anonymous": { "enabled": false } } + + Remediation Method 2: + If using executable arguments, edit the kubelet service file on each worker node and + ensure the below parameters are part of the KUBELET_ARGS variable string. + + For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or + Bottlerocket AMIs, then this file can be found at + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise, + you may need to look up documentation for your chosen operating system to determine + which service manager is configured: + + --anonymous-auth=false + + For Both Remediation Steps: + Based on your system, restart the kubelet service and check the service status. + The following example is for operating systems using systemd, such as the Amazon + EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl + command. If systemctl is not available then you will need to look up documentation for + your chosen operating system to determine which service manager is configured: + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: V-242392 # CIS 3.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + Remediation Method 1: + If configuring via the Kubelet config file, you first need to locate the file. + To do this, SSH to each node and execute the following command to find the kubelet + process: + + ps -ef | grep kubelet + + The output of the above command provides details of the active kubelet process, from + which we can see the location of the configuration file provided to the kubelet service + with the --config argument. The file can be viewed with a command such as more or + less, like so: + + sudo less /path/to/kubelet-config.json + + Enable Webhook Authentication by setting the following parameter: + + "authentication": { "webhook": { "enabled": true } } + + Next, set the Authorization Mode to Webhook by setting the following parameter: + + "authorization": { "mode": "Webhook } + + Finer detail of the authentication and authorization fields can be found in the + Kubelet Configuration documentation (https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/). + + Remediation Method 2: + If using executable arguments, edit the kubelet service file on each worker node and + ensure the below parameters are part of the KUBELET_ARGS variable string. + For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or + Bottlerocket AMIs, then this file can be found at + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. 
Otherwise, + you may need to look up documentation for your chosen operating system to determine + which service manager is configured: + + --authentication-token-webhook + --authorization-mode=Webhook + + For Both Remediation Steps: + Based on your system, restart the kubelet service and check the service status. + The following example is for operating systems using systemd, such as the Amazon + EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl + command. If systemctl is not available then you will need to look up documentation for + your chosen operating system to determine which service manager is configured: + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + # TODO Verify this, low confidence this will work + - id: V-242393 + text: "Kubernetes Worker Nodes must not have sshd service running. (Automated)" + audit: '/bin/sh -c ''systemctl show -p ActiveState sshd'' ' + tests: + test_items: + - flag: ActiveState + compare: + op: eq + value: inactive + remediation: | + To stop the sshd service, run the command: systemctl stop sshd + scored: true + # TODO Verify this, low confidence this will work + - id: V-242394 + text: "Kubernetes Worker Nodes must not have the sshd service enabled. (Automated)" + audit: "/bin/sh -c 'systemctl is-enabled sshd.service'" + tests: + test_items: + - flag: "disabled" + remediation: | + To disable the sshd service, run the command: + chkconfig sshd off + scored: true + # TODO: Verify this, probably requires rbac permissions using kubectl + - id: V-242395 + text: "Kubernetes dashboard must not be enabled." + audit: "kubectl get pods --all-namespaces -l k8s-app=kubernetes-dashboard" + tests: + test_items: + - flag: "k8s-app=kubernetes-dashboard" + set: false + remediation: | + Delete the Kubernetes dashboard deployment with the following command: + kubectl delete deployment kubernetes-dashboard --namespace=kube-system + scored: true + # TODO This could be automated, but requires a little more effort or adding jq to the docker image + # maybe test path will work + - id: V-242396 + text: "Kubernetes Kubectl cp command must give expected access and results. (Manual)" + type: "manual" + remediation: | + If any Worker nodes are not using kubectl version 1.12.9 or newer, this is a finding. + Upgrade the Master and Worker nodes to the latest version of kubectl. + scored: false + - id: V-242397 + text: "The Kubernetes kubelet static PodPath must not enable static pods (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - path: '{.staticPodPath}' + set: false + remediation: | + Edit $kubeletconf on each node to to remove the staticPodPath + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + - id: V-242398 + text: "Kubernetes DynamicAuditing must not be enabled. (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "DynamicAuditing=true" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit any manifest files or kubelet config files that contain the feature-gates + setting with DynamicAuditing set to "true". + Set the flag to "false" or remove the "DynamicAuditing" setting + completely. Restart the kubelet service if the kubelet config file + if the kubelet config file is changed. 
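+      # Illustrative note (an assumption about how kube-bench evaluates this check, not
+      # STIG text): with "bin_op: or" the check passes when --feature-gates is absent from
+      # the kubelet command line, or is present without DynamicAuditing=true, e.g.:
+      #   /home/kubernetes/bin/kubelet --config=/home/kubernetes/kubelet-config.yaml \
+      #     --feature-gates=RotateKubeletServerCertificate=true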
+ scored: true + - id: V-242399 + text: "Kubernetes DynamicKubeletConfig must not be enabled. (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "DynamicKubeletConfig=true" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit any manifest files or $kubeletconf that contain the feature-gates + setting with DynamicKubeletConfig set to "true". + Set the flag to "false" or remove the "DynamicKubeletConfig" setting + completely. Restart the kubelet service if the kubelet config file + if the kubelet config file is changed. + scored: true + - id: V-242404 # CIS 3.2.8 + text: "Ensure that the --rotate-certificates argument is not present or is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + Remediation Method 1: + If modifying the Kubelet config file, edit the kubelet-config.yaml file + /etc/kubernetes/kubelet/kubelet-config.yaml and set the below parameter to + true + + "RotateCertificate":true + + Additionally, ensure that the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate + executable argument to false because this would override the Kubelet + config file. + + Remediation Method 2: + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --RotateCertificate=true + scored: true + - id: V-242406 + text: "The Kubernetes kubelet configuration file must be owned by root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + - id: V-242407 + text: "The Kubernetes kubelet configuration files must have file permissions set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: true + - id: V-242414 + text: "The Kubernetes cluster must use non-privileged host ports for user pods. (Manual)" + type: "manual" + remediation: | + For any of the pods that are using ports below 1024, + reconfigure the pod to use a service to map a host non-privileged + port to the pod port or reconfigure the image to use non-privileged ports. 
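+      # Illustrative sketch only; the resource names and ports below are made up. A
+      # Service can expose the privileged port while the pod listens on an unprivileged one:
+      #   apiVersion: v1
+      #   kind: Service
+      #   metadata:
+      #     name: web
+      #   spec:
+      #     selector:
+      #       app: web
+      #     ports:
+      #     - port: 80          # port exposed by the Service
+      #       targetPort: 8080  # unprivileged port the container listens on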
+ scored: false + - id: V-242415 + text: "Secrets in Kubernetes must not be stored as environment variables.(Manual)" + type: "manual" + remediation: | + Run the following command: + kubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}' -A + If any of the values returned reference environment variables + rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + - id: V-242442 + text: "Kubernetes must remove old components after updated versions have been installed. (Manual)" + type: "manual" + remediation: | + To view all pods and the images used to create the pods, from the Master node, run the following command: + kubectl get pods --all-namespaces -o jsonpath="{..image}" | \ + tr -s '[[:space:]]' '\n' | \ + sort | \ + uniq -c + Review the images used for pods running within Kubernetes. + Remove any old pods that are using older images. + scored: false + - id: 3.2 + text: "DISA Category Code II - Node Security" + checks: + - id: V-242391 + text: "The Kubernetes Kubelet must have anonymous authentication disabled." + audit: "ps -ef | grep kubelet | grep -- --anonymous-auth" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: "false" + remediation: | + Edit the Kubernetes Kubelet configuration file. + Set the value of "anonymousAuth" to "false". + Restart the kubelet service using: + systemctl daemon-reload && systemctl restart kubelet + scored: true + + - id: V-242392 + text: "The Kubernetes kubelet must enable explicit authorization." + audit: "ps -ef | grep kubelet | grep -- --authorization-mode" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: eq + value: "Webhook" + remediation: | + Edit the Kubernetes Kubelet configuration file. + Set the "authorization.mode" to "Webhook". + Restart the kubelet service using: + systemctl daemon-reload && systemctl restart kubelet + scored: true + + - id: V-242393 + text: "Kubernetes Worker Nodes must not have sshd service running." + audit: "systemctl status sshd" + tests: + test_items: + - flag: "sshd" + compare: + op: eq + value: "inactive" + remediation: | + To stop the sshd service, run the command: + systemctl stop sshd + To disable the service: + systemctl disable sshd + scored: true + + - id: V-242394 + text: "Kubernetes Worker Nodes must not have the sshd service enabled." + audit: "systemctl is-enabled sshd" + tests: + test_items: + - flag: "sshd" + compare: + op: eq + value: "disabled" + remediation: | + To disable the sshd service, run the command: + systemctl disable sshd + scored: true + + - id: V-242397 + text: "The Kubernetes kubelet staticPodPath must not enable static pods." + audit: "ps -ef | grep kubelet | grep -- --config" + tests: + test_items: + - flag: "staticPodPath" + set: false + remediation: | + Edit the Kubernetes kubelet configuration file. + Remove the setting "staticPodPath". 
+          Restart the kubelet service using:
+          systemctl daemon-reload && systemctl restart kubelet
+        scored: true
+
+      - id: V-242434 # CIS 3.2.6
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          Remediation Method 1:
+          If modifying the Kubelet config file, edit the kubelet-config.json file
+          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
+          true
+
+          "makeIPTablesUtilChains": true
+
+          Ensure that /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf
+          does not set the --make-iptables-util-chains argument because that would
+          override your Kubelet config file.
+
+          Remediation Method 2:
+          If using executable arguments, edit the kubelet service file
+          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
+          worker node and add the below parameter at the end of the KUBELET_ARGS variable
+          string.
+
+          --make-iptables-util-chains=true
+
+          Remediation Method 3:
+          If using the api configz endpoint consider searching for the status of
+          "makeIPTablesUtilChains": true by extracting the live configuration from the nodes
+          running kubelet.
+
+          **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
+          Live Cluster (https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/),
+          and then rerun the curl statement from audit process to check for kubelet
+          configuration changes
+
+          kubectl proxy --port=8001 &
+          export HOSTNAME_PORT=localhost:8001 (example host and port number)
+          export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from
+          "kubectl get nodes")
+          curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"
+
+          For all three remediations:
+          Based on your system, restart the kubelet service and check status
+
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
\ No newline at end of file
diff --git a/cfg/gke-stig-kubernetes-v2r2/policies.yaml b/cfg/gke-stig-kubernetes-v2r2/policies.yaml
new file mode 100644
index 0000000..0581539
--- /dev/null
+++ b/cfg/gke-stig-kubernetes-v2r2/policies.yaml
@@ -0,0 +1,110 @@
+---
+controls:
+version: "gke-stig-kubernetes-v1r6"
+id: 4
+text: "Kubernetes Security Policies"
+type: "policies"
+groups:
+  - id: 4.1
+    text: "DISA Category Code I - Pod Security Policies"
+    checks:
+      - id: V-242381
+        text: "The Kubernetes Controller Manager must create unique service accounts for each work payload. (Manual)"
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+          automountServiceAccountToken: false
+        scored: false
+
+      - id: V-242383
+        text: "User-managed resources must be created in dedicated namespaces. (Manual)"
+        type: "manual"
+        remediation: |
+          Move any user-managed resources from the default, kube-public and kube-node-lease namespaces, to user namespaces.
+        scored: false
+      - id: V-242437
+        text: "Kubernetes must have a pod security policy set."
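+        # Note: PodSecurityPolicy was removed in Kubernetes v1.25, so on current GKE versions this
+        # check is expected to be superseded by Pod Security admission. An illustrative equivalent
+        # (an assumption, not part of the STIG text) is labelling each namespace:
+        #   kubectl label namespace <namespace> pod-security.kubernetes.io/enforce=restricted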
+ audit: "kubectl get podsecuritypolicy" + tests: + test_items: + - flag: "runAsUser" + compare: + op: eq + value: "MustRunAsNonRoot" + - flag: "supplementalGroups" + compare: + op: gt + value: "0" + - flag: "fsGroup" + compare: + op: gt + value: "0" + remediation: | + From the Control Plane, save the following policy to a file called restricted.yml: + + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI + - persistentVolumeClaim + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + + Apply the policy with: + + kubectl create -f restricted.yml + scored: true + - id: V-242417 + text: "Kubernetes must separate user functionality. (Manual)" + type: "manual" + remediation: | + Move any user pods that are present in the Kubernetes system namespaces to user specific namespaces. + scored: false + + - id: 4.2 + text: "DISA Category Code I - PodSecurity Admission Controller" + checks: + - id: V-254801 + text: "Kubernetes must enable PodSecurity admission controller on static pods and Kubelets." + audit: "grep -i feature-gates /etc/kubernetes/manifests/*" + tests: + test_items: + - flag: "--feature-gates" + compare: + op: eq + value: "PodSecurity=true" + remediation: | + On the Control Plane, change to the manifests directory: + + grep -i feature-gates /etc/kubernetes/manifests/* + + Ensure the argument "--feature-gates=PodSecurity=true" i diff --git a/go.mod b/go.mod index d79c1eb..6d94b29 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,8 @@ require ( ) require ( + al.essio.dev/pkg/shellescape v1.6.0 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.62 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect @@ -35,6 +37,7 @@ require ( github.com/aws/smithy-go v1.22.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect @@ -46,6 +49,7 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/safetext v0.0.0-20240722112252-5a72de7e7962 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -64,6 +68,8 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pborman/uuid v1.2.1 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect @@ -82,7 +88,7 @@ require ( 
golang.org/x/net v0.33.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.31.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.7.0 // indirect @@ -96,6 +102,7 @@ require ( k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/kind v0.27.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 1631703..cf2f2c6 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,7 @@ +al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= +al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= github.com/aws/aws-sdk-go-v2/config v1.29.9 h1:Kg+fAYNaJeGXp1vmjtidss8O2uXIsXwaRqsQJKXVr+0= @@ -34,6 +38,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= @@ -84,6 +90,9 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/safetext v0.0.0-20240722112252-5a72de7e7962 h1:+9C/TgFfcCmZBV7Fjb3kQCGlkpFrhtvFDgbdQHB9RaA= +github.com/google/safetext v0.0.0-20240722112252-5a72de7e7962/go.mod h1:H3K1Iu/utuCfa10JO+GsmKUYSWi7ug57Rk6GaDRHaaQ= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -147,6 +156,10 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= 
+github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= +github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -236,6 +249,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -301,6 +316,8 @@ k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6J k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA= +sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/hack/kind-stig.test.yaml b/hack/kind-stig.test.yaml index 5051277..a4ee538 100644 --- a/hack/kind-stig.test.yaml +++ b/hack/kind-stig.test.yaml @@ -12,12 +12,12 @@ spec: hostPID: true containers: - name: kube-bench - image: docker.io/aquasec/kube-bench:latest + image: docker.io/aquasec/kube-bench:152d0e7 command: [ "kube-bench", "run", "--benchmark", - "eks-stig-kubernetes-v1r6", + "gke-stig-kubernetes-v2r2", ] volumeMounts: - name: var-lib-etcd diff --git a/hack/kind-stig.yaml b/hack/kind-stig.yaml index 3b1ab69..e38c290 100644 --- a/hack/kind-stig.yaml +++ b/hack/kind-stig.yaml @@ -17,7 +17,7 @@ spec: "kube-bench", "run", "--benchmark", - "eks-stig-kubernetes-v1r6", + "gke-stig-kubernetes-v2r2", ] volumeMounts: - name: var-lib-etcd diff --git a/makefile b/makefile index 62b6d8d..bb44e26 100644 --- a/makefile +++ b/makefile @@ -104,5 +104,5 @@ kind-run-stig: kind-push KUBECONFIG=$(KUBECONFIG) \ kubectl apply -f ./hack/kind-stig.test.yaml && \ kubectl wait --for=condition=complete job.batch/kube-bench --timeout=60s && \ - kubectl logs job/kube-bench > ./test.data && \ - diff ./test.data integration/testdata/Expected_output_stig.data + kubectl logs job/kube-bench > ./test.data + cat ./test.data From db6528ab80cea8a78a178056f935aff0bdd7a73a Mon Sep 17 00:00:00 2001 From: Carter Williamson Date: Fri, 14 Mar 2025 15:43:21 -0700 Subject: [PATCH 02/10] Making lots of 
progress --- .../controlplane.yaml | 93 ++++----------- .../managedservices.yaml | 95 ++------------- cfg/gke-stig-kubernetes-v2r2/node.yaml | 108 ++++++------------ cfg/gke-stig-kubernetes-v2r2/policies.yaml | 1 + check/check.go | 2 +- cmd/root.go | 4 +- docs/controls.md | 2 +- job-gke-stig.yaml | 46 ++++++++ 8 files changed, 119 insertions(+), 232 deletions(-) create mode 100644 job-gke-stig.yaml diff --git a/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml b/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml index b6532af..37b3b70 100644 --- a/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml @@ -8,54 +8,12 @@ groups: - id: 2.1 text: "DISA Category Code I - API Server Security" checks: - - id: V-242378 - text: "The Kubernetes API Server must use TLS 1.2, at a minimum, to protect the confidentiality of sensitive data during electronic dissemination." - audit: "grep -i tls-min-version /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--tls-min-version" - compare: - op: nothave - value: "VersionTLS10" - - flag: "--tls-min-version" - compare: - op: nothave - value: "VersionTLS11" - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--tls-min-version" to "VersionTLS12" or higher. - scored: true - - id: V-242388 - text: "The Kubernetes API server must not have the insecure bind address set." - audit: "grep -i insecure-bind-address /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--insecure-bind-address" - set: false - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Remove the value of "--insecure-bind-address" setting. - scored: true - - - id: V-242389 - text: "The Kubernetes API server must have the secure port set." - audit: "grep -i secure-port /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--secure-port" - compare: - op: gt - value: "0" - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--secure-port" to a value greater than "0". - scored: true - - - id: V-242390 # Similar to CIS 3.2.1 text: "The Kubernetes API server must have anonymous authentication disabled (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" + audit: "/bin/ps -fC kubelet" + # audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /etc/kubernetes/kubelet-config.yaml" + # audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: "--anonymous-auth" @@ -116,32 +74,23 @@ groups: - id: 2.2 text: "DISA Category Code II - Controller Manager Security" checks: - - id: V-242381 - text: "The Kubernetes Controller Manager must create unique service accounts for each work payload. (Manual)" - type: "manual" - remediation: | - Create explicit service accounts wherever a Kubernetes workload requires specific access - to the Kubernetes API server. - Modify the configuration of each default service account to include this value - automountServiceAccountToken: false - scored: false - - id: V-242376 - text: "The Kubernetes Controller Manager must use TLS 1.2, at a minimum, to protect the confidentiality of sensitive data during electronic dissemination." 
- audit: "grep -i tls-min-version /etc/kubernetes/manifests/kube-controller-manager.yaml" - tests: - test_items: - - flag: "--tls-min-version" - compare: - op: nothave - value: "VersionTLS10" - - flag: "--tls-min-version" - compare: - op: nothave - value: "VersionTLS11" - remediation: | - Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--tls-min-version" to "VersionTLS12" or higher. - scored: true + # - id: V-242376 + # text: "The Kubernetes Controller Manager must use TLS 1.2, at a minimum, to protect the confidentiality of sensitive data during electronic dissemination." + # audit: "grep -i tls-min-version /etc/kubernetes/manifests/kube-controller-manager.yaml" + # tests: + # test_items: + # - flag: "--tls-min-version" + # compare: + # op: nothave + # value: "VersionTLS10" + # - flag: "--tls-min-version" + # compare: + # op: nothave + # value: "VersionTLS11" + # remediation: | + # Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. + # Set the value of "--tls-min-version" to "VersionTLS12" or higher. + - id: V-242443 text: " Kubernetes must contain the latest updates as authorized by IAVMs, CTOs, DTMs, and STIGs. (Manual)" type: "manual" diff --git a/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml b/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml index f9abd6d..1b27c74 100644 --- a/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml @@ -8,42 +8,13 @@ groups: - id: 5.1 text: "DISA Category Code I" checks: - # TODO: Validate this one - id: V-242386 - text: "The Kubernetes API server must have the insecure port flag disabled." - audit: "grep -i insecure-port /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--insecure-port" - compare: - op: eq - value: "0" - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--insecure-port" to "0". - Note: The "--insecure-port" flag has been deprecated and can only be set to "0". This flag will be removed in Kubernetes v1.24. - scored: false + text: "The Kubernetes API server must have the insecure port flag disabled | Component of GKE Control Plane" + type: "skip" - # - id: V-242386 - # text: "The Kubernetes API server must have the insecure port flag disabled | Component of GKE Control Plane" - # type: "skip" - - # TODO: Validate this one - id: V-242388 - text: "The Kubernetes API server must not have the insecure bind address set." - audit: "grep -i insecure-bind-address /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--insecure-bind-address" - set: false - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Remove the value of "--insecure-bind-address" setting. 
- scored: false - - # - id: V-242388 - # text: "The Kubernetes API server must have the insecure bind address not set | Component of GKE Control Plane" - # type: "skip" + text: "The Kubernetes API server must have the insecure bind address not set | Component of GKE Control Plane" + type: "skip" # TODO Verify this one (can't find it like on the aws side https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html) - id: V-242436 @@ -101,63 +72,17 @@ groups: text: "The Kubernetes API Server must enable Node,RBAC as the authorization mode | Component of GKE Control Plane" type: "skip" - # TODO: Move to controlplane if this works in GKE - id: V-242384 - text: "The Kubernetes Scheduler must have secure binding." - audit: "grep -i bind-address /etc/kubernetes/manifests/kube-scheduler.yaml" - tests: - test_items: - - flag: "--bind-address" - compare: - op: eq - value: "127.0.0.1" - remediation: | - Edit the Kubernetes Scheduler manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the argument "--bind-address" to "127.0.0.1". - scored: false + text: "The Kubernetes Scheduler must have secure binding | Component of GKE Control Plane" + type: "skip" - # - id: V-242384 - # text: "The Kubernetes Scheduler must have secure binding | Component of GKE Control Plane" - # type: "skip" - - - # TODO: Move to controlplane if this works in GKE - id: V-242385 - text: "The Kubernetes Controller Manager must have secure binding." - audit: "grep -i bind-address /etc/kubernetes/manifests/kube-controller-manager.yaml" - tests: - test_items: - - flag: "--bind-address" - compare: - op: eq - value: "127.0.0.1" - remediation: | - Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the argument "--bind-address" to "127.0.0.1". - scored: false + text: "The Kubernetes Controller Manager must have secure binding | Component of GKE Control Plane" + type: "skip" - # - id: V-242385 - # text: "The Kubernetes Controller Manager must have secure binding | Component of GKE Control Plane" - # type: "skip" - - # TODO: Move to controlplane if this works in GKE - id: V-242389 - text: "The Kubernetes API server must have the secure port set." - audit: "grep -i secure-port /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--secure-port" - compare: - op: gt - value: "0" - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--secure-port" to a value greater than "0". - scored: false - - # - id: V-242389 - # text: "The Kubernetes API server must have the secure port set | Component of EKS Control Plane" - # type: "skip" + text: "The Kubernetes API server must have the secure port set | Component of EKS Control Plane" + type: "skip" # TODO: Didn't actually see this one in the k8s stig file # - id: V-242401 diff --git a/cfg/gke-stig-kubernetes-v2r2/node.yaml b/cfg/gke-stig-kubernetes-v2r2/node.yaml index 4a8ffde..5802411 100644 --- a/cfg/gke-stig-kubernetes-v2r2/node.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/node.yaml @@ -157,31 +157,9 @@ groups: systemctl restart kubelet.service systemctl status kubelet -l scored: true - # TODO Verify this, low confidence this will work - - id: V-242393 - text: "Kubernetes Worker Nodes must not have sshd service running. 
(Automated)" - audit: '/bin/sh -c ''systemctl show -p ActiveState sshd'' ' - tests: - test_items: - - flag: ActiveState - compare: - op: eq - value: inactive - remediation: | - To stop the sshd service, run the command: systemctl stop sshd - scored: true - # TODO Verify this, low confidence this will work - - id: V-242394 - text: "Kubernetes Worker Nodes must not have the sshd service enabled. (Automated)" - audit: "/bin/sh -c 'systemctl is-enabled sshd.service'" - tests: - test_items: - - flag: "disabled" - remediation: | - To disable the sshd service, run the command: - chkconfig sshd off - scored: true + # TODO: Verify this, probably requires rbac permissions using kubectl + # This needs proper permissions set, TODO!! - id: V-242395 text: "Kubernetes dashboard must not be enabled." audit: "kubectl get pods --all-namespaces -l k8s-app=kubernetes-dashboard" @@ -193,7 +171,8 @@ groups: Delete the Kubernetes dashboard deployment with the following command: kubectl delete deployment kubernetes-dashboard --namespace=kube-system scored: true - # TODO This could be automated, but requires a little more effort or adding jq to the docker image + + # TODO This could be automated, but requires a little more effort or adding jq to the docker image # maybe test path will work - id: V-242396 text: "Kubernetes Kubectl cp command must give expected access and results. (Manual)" @@ -202,20 +181,24 @@ groups: If any Worker nodes are not using kubectl version 1.12.9 or newer, this is a finding. Upgrade the Master and Worker nodes to the latest version of kubectl. scored: false + - id: V-242397 - text: "The Kubernetes kubelet static PodPath must not enable static pods (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" + text: "The Kubernetes kubelet staticPodPath must not enable static pods." + audit: "ps -ef | grep $kubeletbin | grep -- --config" tests: + bin_op: or test_items: + - flag: "staticPodPath" + set: false - path: '{.staticPodPath}' set: false remediation: | - Edit $kubeletconf on each node to to remove the staticPodPath - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service + Edit the Kubernetes kubelet configuration file. + Remove the setting "staticPodPath". + Restart the kubelet service using: + systemctl daemon-reload && systemctl restart kubelet scored: true + - id: V-242398 text: "Kubernetes DynamicAuditing must not be enabled. (Automated)" audit: "/bin/ps -fC $kubeletbin" @@ -351,41 +334,25 @@ groups: - id: 3.2 text: "DISA Category Code II - Node Security" checks: - - id: V-242391 - text: "The Kubernetes Kubelet must have anonymous authentication disabled." - audit: "ps -ef | grep kubelet | grep -- --anonymous-auth" + + # TODO Verify this, low confidence this will work + # These both don't work. Might need to be a manual check. + - id: V-242393 + text: "Kubernetes Worker Nodes must not have sshd service running. (Automated)" + audit: '/bin/sh -c ''systemctl show -p ActiveState sshd'' ' tests: test_items: - - flag: "--anonymous-auth" + - flag: ActiveState compare: op: eq - value: "false" + value: inactive remediation: | - Edit the Kubernetes Kubelet configuration file. - Set the value of "anonymousAuth" to "false". - Restart the kubelet service using: - systemctl daemon-reload && systemctl restart kubelet - scored: true - - - id: V-242392 - text: "The Kubernetes kubelet must enable explicit authorization." 
- audit: "ps -ef | grep kubelet | grep -- --authorization-mode" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: eq - value: "Webhook" - remediation: | - Edit the Kubernetes Kubelet configuration file. - Set the "authorization.mode" to "Webhook". - Restart the kubelet service using: - systemctl daemon-reload && systemctl restart kubelet + To stop the sshd service, run the command: systemctl stop sshd scored: true - id: V-242393 text: "Kubernetes Worker Nodes must not have sshd service running." - audit: "systemctl status sshd" + audit: "/bin/sh -c \"systemctl status sshd\"" tests: test_items: - flag: "sshd" @@ -399,6 +366,18 @@ groups: systemctl disable sshd scored: true + # TODO Verify this, low confidence this will work + # Both of these are not working at the moment + - id: V-242394 + text: "Kubernetes Worker Nodes must not have the sshd service enabled. (Automated)" + audit: "/bin/sh -c 'systemctl is-enabled sshd.service'" + tests: + test_items: + - flag: "disabled" + remediation: | + To disable the sshd service, run the command: + chkconfig sshd off + scored: true - id: V-242394 text: "Kubernetes Worker Nodes must not have the sshd service enabled." audit: "systemctl is-enabled sshd" @@ -413,19 +392,6 @@ groups: systemctl disable sshd scored: true - - id: V-242397 - text: "The Kubernetes kubelet staticPodPath must not enable static pods." - audit: "ps -ef | grep kubelet | grep -- --config" - tests: - test_items: - - flag: "staticPodPath" - set: false - remediation: | - Edit the Kubernetes kubelet configuration file. - Remove the setting "staticPodPath". - Restart the kubelet service using: - systemctl daemon-reload && systemctl restart kubelet - scored: true - id: V-242434 # CIS 3.2.6 text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" diff --git a/cfg/gke-stig-kubernetes-v2r2/policies.yaml b/cfg/gke-stig-kubernetes-v2r2/policies.yaml index 0581539..34c59d3 100644 --- a/cfg/gke-stig-kubernetes-v2r2/policies.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/policies.yaml @@ -24,6 +24,7 @@ groups: remediation: | Move any user-managed resources from the default, kube-public and kube-node-lease namespaces, to user namespaces. scored: false + - id: V-242437 text: "Kubernetes must have a pod security policy set." audit: "kubectl get podsecuritypolicy" diff --git a/check/check.go b/check/check.go index 58ce6fb..490ec43 100644 --- a/check/check.go +++ b/check/check.go @@ -23,7 +23,7 @@ import ( "github.com/golang/glog" ) -// NodeType indicates the type of node (master, node). +// NodeType indicates the type of node (master, node, federated, etcd, controlplane, policies, managedservices). type NodeType string // State is the state of a control check. 
diff --git a/cmd/root.go b/cmd/root.go index c674aeb..bb2d2a6 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -66,8 +66,8 @@ var ( // RootCmd represents the base command when called without any subcommands var RootCmd = &cobra.Command{ Use: os.Args[0], - Short: "Run CIS Benchmarks checks against a Kubernetes deployment", - Long: `This tool runs the CIS Kubernetes Benchmark (https://www.cisecurity.org/benchmark/kubernetes/)`, + Short: "Run CIS and STIG Benchmarks checks against a Kubernetes deployment", + Long: `This tool runs the CIS and STIG Kubernetes Benchmark (https://www.cisecurity.org/benchmark/kubernetes/)`, Run: func(cmd *cobra.Command, args []string) { bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformInfo(), viper.GetViper()) if err != nil { diff --git a/docs/controls.md b/docs/controls.md index 8655f31..6de299b 100644 --- a/docs/controls.md +++ b/docs/controls.md @@ -57,7 +57,7 @@ the `controls` components have an id and a text description which are displayed in the `kube-bench` output. `type` specifies what kubernetes node type a `controls` is for. Possible values -for `type` are `master` and `node`. +for `type` are `[master, node, federated, etcd, controlplane, policies, managedservices]`. ## Groups diff --git a/job-gke-stig.yaml b/job-gke-stig.yaml new file mode 100644 index 0000000..8d8fc70 --- /dev/null +++ b/job-gke-stig.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + spec: + hostPID: true + containers: + - name: kube-bench + imagePullPolicy: Always + # Push the image to your ECR and then refer to it here + # image: + image: us-docker.pkg.dev/dev-frm-core/dev-frm/kube-bench:gke-stig + # To send findings to AWS Security Hub, refer to `job-eks-asff.yaml` instead + command: + [ + "kube-bench", + "run", + "--benchmark", + "gke-stig-kubernetes-v2r2", + "-v", + "10", + ] + volumeMounts: + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + readOnly: true + - name: etc-systemd + mountPath: /etc/systemd + readOnly: true + - name: etc-kubernetes + mountPath: /etc/kubernetes + readOnly: true + restartPolicy: Never + volumes: + - name: var-lib-kubelet + hostPath: + path: "/var/lib/kubelet" + - name: etc-systemd + hostPath: + path: "/etc/systemd" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" From d249137e7d6fb111a157c6adedd4840289bf1a3c Mon Sep 17 00:00:00 2001 From: Carter Williamson Date: Tue, 18 Mar 2025 12:47:34 -0700 Subject: [PATCH 03/10] Done with first passes over k8s stig, still a few issues --- cfg/gke-stig-kubernetes-v2r2/config.yaml | 15 +- .../controlplane.yaml | 156 +--- .../managedservices.yaml | 800 ++---------------- cfg/gke-stig-kubernetes-v2r2/node.yaml | 250 ++++-- cfg/gke-stig-kubernetes-v2r2/policies.yaml | 79 +- job-gke-stig.yaml | 44 + 6 files changed, 321 insertions(+), 1023 deletions(-) diff --git a/cfg/gke-stig-kubernetes-v2r2/config.yaml b/cfg/gke-stig-kubernetes-v2r2/config.yaml index b39b29d..d8a7090 100644 --- a/cfg/gke-stig-kubernetes-v2r2/config.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/config.yaml @@ -1,16 +1,7 @@ --- ## Version-specific settings that override the values in cfg/config.yaml -## These settings are required if you are using the --asff option to report findings to AWS Security Hub -## AWS account number is required. -# AWS_ACCOUNT: "" -## AWS region is required. -# AWS_REGION: "" -## EKS Cluster ARN is required. 
-# CLUSTER_ARN: "" - node: - proxy: - defaultkubeconfig: "/var/lib/kubelet/kubeconfig" - kubelet: - defaultconf: "/etc/kubernetes/kubelet/kubelet-config.yaml" + confs: + - "/home/kubernetes/kubelet-config.yaml" + - "/etc/kubernetes/kubelet-config.yaml" \ No newline at end of file diff --git a/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml b/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml index 37b3b70..be7f732 100644 --- a/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml @@ -33,167 +33,43 @@ groups: systemctl daemon-reload systemctl restart kubelet.service - # TODO: This is pretty different from what the stig is asking for, double check - id: V-242400 - text: "The Kubernetes API server must have Alpha APIs disabled (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - bin_op: or - test_items: - - flag: "--feature-gates" - compare: - op: nothave - value: "AllAlpha=true" - set: true - - flag: "--feature-gates" - set: false + text: "The Kubernetes API server must have Alpha APIs disabled" + type: "manual" remediation: | - Edit any manifest files or $kubeletconf that contain the feature-gates - setting with AllAlpha set to "true". - Set the flag to "false" or remove the "AllAlpha" setting - completely. Restart the kubelet service if the kubelet config file - if the kubelet config file is changed. - scored: true - - - # - id: V-242400 - # text: "The Kubernetes API server must have Alpha APIs disabled." - # audit: "grep -i feature-gates /etc/kubernetes/manifests/kube-apiserver.yaml" - # tests: - # test_items: - # - flag: "--feature-gates" - # compare: - # op: nothave - # value: "AllAlpha=true" - # remediation: | - # Edit any manifest file that contains the "--feature-gates" setting with "AllAlpha" set to "true". - # Set the value of "AllAlpha" to "false" or remove the setting completely. - # scored: true + Check the release channel using the GCP gcloud CLI. + gcloud container clusters describe --region --format json | jq -r '.releaseChannel.channel' + This should be set to "STABLE". Any "Alpha" clusters will need to be rebuilt on the STABLE release channel. - id: 2.2 text: "DISA Category Code II - Controller Manager Security" - checks: - # - id: V-242376 - # text: "The Kubernetes Controller Manager must use TLS 1.2, at a minimum, to protect the confidentiality of sensitive data during electronic dissemination." - # audit: "grep -i tls-min-version /etc/kubernetes/manifests/kube-controller-manager.yaml" - # tests: - # test_items: - # - flag: "--tls-min-version" - # compare: - # op: nothave - # value: "VersionTLS10" - # - flag: "--tls-min-version" - # compare: - # op: nothave - # value: "VersionTLS11" - # remediation: | - # Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - # Set the value of "--tls-min-version" to "VersionTLS12" or higher. - + checks: - id: V-242443 text: " Kubernetes must contain the latest updates as authorized by IAVMs, CTOs, DTMs, and STIGs. (Manual)" type: "manual" remediation: | Upgrade Kubernetes to a supported version. - # TODO: Update this ref - id: V-242461 text: "Kubernetes API Server audit logs must be enabled. (Manual)" type: "manual" remediation: | Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. 
-          Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+          Ref: https://cloud.google.com/kubernetes-engine/docs/how-to/view-logs#control-plane-access-logs
 
-
-      # TODO: Validate this one
       - id: V-242462
-        text: "The Kubernetes PKI directory must be owned by root."
-        audit: "stat -c %U:%G /etc/kubernetes/pki"
-        tests:
-          test_items:
-            - flag: "root:root"
-              set: true
-        remediation: |
-          Change the ownership of the PKI directory to root:root by executing the command:
-          chown -R root:root /etc/kubernetes/pki
-        scored: true
+        text: "The Kubernetes API Server must be set to audit log max size | Component of GKE Control Plane"
+        type: "skip"
 
-      # TODO: Validate this one
       - id: V-242463
-        text: "The Kubernetes PKI directory must have file permissions set to 644 or more restrictive."
-        audit: "find /etc/kubernetes/pki -type f -name '*.crt' -exec stat -c %a {} \\;"
-        tests:
-          test_items:
-            - flag: "644"
-              compare:
-                op: lte
-                value: "644"
-        remediation: |
-          Change the permissions of the PKI certificate files to 644 by executing the command:
-          find /etc/kubernetes/pki -type f -name '*.crt' -exec chmod 644 {} \;
-        scored: true
+        text: "The Kubernetes API Server must be set to audit log maximum backup | Component of GKE Control Plane"
+        type: "skip"
 
-      # TODO: Validate this one
       - id: V-242464
-        text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive."
-        audit: "find /etc/kubernetes/pki -type f -name '*.key' -exec stat -c %a {} \\;"
-        tests:
-          test_items:
-            - flag: "600"
-              compare:
-                op: lte
-                value: "600"
-        remediation: |
-          Change the permissions of the PKI key files to 600 by executing the command:
-          find /etc/kubernetes/pki -type f -name '*.key' -exec chmod 600 {} \;
-        scored: true
+        text: "The Kubernetes API Server audit log retention must be set | Component of GKE Control Plane"
+        type: "skip"
 
-      # TODO: Validate this one
-      - id: V-242465
-        text: "The Kubernetes Controller Manager must have secure binding."
-        audit: "grep -i bind-address /etc/kubernetes/manifests/kube-controller-manager.yaml"
-        tests:
-          test_items:
-            - flag: "--bind-address"
-              compare:
-                op: eq
-                value: "127.0.0.1"
-        remediation: |
-          Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane.
-          Ensure the "--bind-address" flag is set to "127.0.0.1".
-        scored: true
+      - id: V-242465
+        text: "The Kubernetes API Server audit log path must be set | Component of GKE Control Plane"
+        type: "skip"
 
-  - id: 2.3
-    text: "DISA Category Code III - Scheduler Security"
-    checks:
-      - id: V-242377
-        text: "The Kubernetes Scheduler must use TLS 1.2, at a minimum, to protect the confidentiality of sensitive data during electronic dissemination."
-        audit: "grep -i tls-min-version /etc/kubernetes/manifests/kube-scheduler.yaml"
-        tests:
-          test_items:
-            - flag: "--tls-min-version"
-              compare:
-                op: nothave
-                value: "VersionTLS10"
-            - flag: "--tls-min-version"
-              compare:
-                op: nothave
-                value: "VersionTLS11"
-        remediation: |
-          Edit the Kubernetes Scheduler manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane.
-          Set the value of "--tls-min-version" to "VersionTLS12" or higher.
-        scored: true
-      - id: V-242411
-        text: "The Kubernetes Scheduler must enforce ports, protocols, and services (PPS) that adhere to the PPSM CAL."
- audit: "grep -i scheduler /etc/kubernetes/manifests/kube-scheduler.yaml" - tests: - test_items: - - flag: "--secure-port" - compare: - op: gt - value: "0" - remediation: | - Amend any system documentation requiring revision to comply with the PPSM CAL. - Update Kubernetes Scheduler manifest and namespace PPS configuration to comply with the PPSM CAL. - scored: true diff --git a/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml b/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml index 1b27c74..ebd0d5f 100644 --- a/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml @@ -16,33 +16,16 @@ groups: text: "The Kubernetes API server must have the insecure bind address not set | Component of GKE Control Plane" type: "skip" - # TODO Verify this one (can't find it like on the aws side https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html) - id: V-242436 - text: "The Kubernetes API server must have the ValidatingAdmissionWebhook enabled (manual)" - type: "manual" - remediation: GKE automatically enable ValidatingAdmissionWebhook - scored: false + text: "The Kubernetes API server must have the ValidatingAdmissionWebhook enabled | Component of GKE Control Plane" + type: "skip" - id: V-242437 text: "[Deprecated] Kubernetes must have a pod security policy set. policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+" type: "skip" - # TODO pretty sure this doesn't work - # - id: V-245542 - # text: "Kubernetes API Server must disable basic authentication to protect information in transit." - # audit: "grep -i basic-auth-file /etc/kubernetes/manifests/kube-apiserver.yaml" - # tests: - # test_items: - # - flag: "--basic-auth-file" - # set: false - # remediation: | - # Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - # Remove the setting "--basic-auth-file". - # scored: false - - - id: V-245542 - text: "Kubernetes API Server must disable basic authentication to protect information in transit | Component of EKS Control Plane" + text: "Kubernetes API Server must disable basic authentication to protect information in transit | Component of GKE Control Plane" type: "skip" - id: 5.2 @@ -81,7 +64,7 @@ groups: type: "skip" - id: V-242389 - text: "The Kubernetes API server must have the secure port set | Component of EKS Control Plane" + text: "The Kubernetes API server must have the secure port set | Component of GKE Control Plane" type: "skip" # TODO: Didn't actually see this one in the k8s stig file @@ -97,68 +80,13 @@ groups: text: "Kubernetes API Server must generate audit records | Component of GKE Control Plane" type: "skip" - # TODO This will need tweaks to work I think but might be automatable - # - id: V-242405 - # text: "The Kubernetes manifests must be owned by root." - # audit: "ls -l /etc/kubernetes/manifests/*" - # tests: - # test_items: - # - flag: "owner" - # compare: - # op: eq - # value: "root:root" - # remediation: | - # On the Control Plane, change to the /etc/kubernetes/manifests directory. - # Run the command: - # chown root:root * - - # To verify the change took place, run the command: - # ls -l * - - # All the manifest files should be owned by root:root. 
- # scored: false - - id: V-242405 text: "The Kubernetes manifests must be owned by root | Component of GKE Control Plane" type: "skip" - # TODO verify this one, I think the permissions flag just needs to be added to the ls cmd - id: V-242408 - text: "The Kubernetes manifest files must have least privileges." - audit: "ls -l /etc/kubernetes/manifests/*" - tests: - test_items: - - flag: "permissions" - compare: - op: lte - value: "644" - remediation: | - On both Control Plane and Worker Nodes, change to the /etc/kubernetes/manifests directory. - Run the command: - chmod 644 * - To verify the change took place, run the command: - ls -l * - All the manifest files should now have privileges of "644". - scored: false - - # - id: V-242408 - # text: "The Kubernetes manifests must have least privileges | Component of GKE Control Plane" - # type: "skip" - - # TODO Pretty sure this is actually a GKE setting - # - id: V-242409 - # text: "Kubernetes Controller Manager must disable profiling." - # audit: "grep -i profiling /etc/kubernetes/manifests/kube-controller-manager.yaml" - # tests: - # test_items: - # - flag: "--profiling" - # compare: - # op: eq - # value: "false" - # remediation: | - # Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - # Set the argument "--profiling" to "false". - # scored: false + text: "The Kubernetes manifests must have least privileges | Component of GKE Control Plane" + type: "skip" - id: V-242409 text: "Kubernetes Controller Manager must disable profiling | Component of GKE Control Plane" @@ -184,729 +112,135 @@ groups: text: "The Kubernetes API server must use approved cipher suites | Component of GKE Control Plane" type: "skip" - # TODO Validate this one - id: V-242419 - text: "Kubernetes API Server must have the SSL Certificate Authority set." - audit: "grep -i client-ca-file /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--client-ca-file" - set: true - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--client-ca-file" to the path containing an Approved Organizational Certificate. - scored: false + text: "Kubernetes API Server must have the SSL Certificate Authority set | Component of GKE Control Plane" + type: "skip" - # - id: V-242419 - # text: "Kubernetes API Server must have the SSL Certificate Authority set | Component of GKE Control Plane" - # type: "skip" - # TODO Validate this one - - id: V-242420 - text: "Kubernetes Kubelet must have the SSL Certificate Authority set." - audit: "ps -ef | grep kubelet | grep -- --client-ca-file" - tests: - test_items: - - flag: "--client-ca-file" - set: true - remediation: | - On the Control Plane, run the command: - ps -ef | grep kubelet - - If the "--client-ca-file" option exists, verify its value is correctly set. - Note the path to the config file (identified by --config). - - Edit the Kubernetes Kubelet config file: - Set the value of "clientCAFile" to a path containing an Approved Organizational Certificate. 
- - Restart the kubelet service using the following command: - systemctl daemon-reload && systemctl restart kubelet - scored: false - - # - id: V-242420 - # text: "Kubernetes Kubelet must have the SSL Certificate Authority set | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242421 - text: "Kubernetes Controller Manager must have the SSL Certificate Authority set." - audit: "grep -i root-ca-file /etc/kubernetes/manifests/kube-controller-manager.yaml" - tests: - test_items: - - flag: "--root-ca-file" - set: true - remediation: | - Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--root-ca-file" to a path containing an Approved Organizational Certificate. - scored: false + text: "Kubernetes Controller Manager must have the SSL Certificate Authority set | Component of GKE Control Plane" + type: "skip" - # - id: V-242421 - # text: "Kubernetes Controller Manager must have the SSL Certificate Authority set | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242422 - text: "Kubernetes API Server must have a certificate for communication." - audit: "grep -i tls-cert-file /etc/kubernetes/manifests/kube-apiserver.yaml && grep -i tls-private-key-file /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - bin_op: and - test_items: - - flag: "--tls-cert-file" - set: true - - flag: "--tls-private-key-file" - set: true - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Ensure the "--tls-cert-file" and "--tls-private-key-file" flags are set to paths containing an Approved Organizational Certificate and corresponding private key. - scored: false + text: "Kubernetes API Server must have a certificate for communication | Component of GKE Control Plane" + type: "skip" - # - id: V-242422 - # text: "Kubernetes API Server must have a certificate for communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242423 - text: "Kubernetes etcd must enable client authentication to secure service." - audit: "grep -i client-cert-auth /etc/kubernetes/manifests/etcd.yaml" - tests: - test_items: - - flag: "--client-cert-auth" - compare: - op: eq - value: "true" - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--client-cert-auth" to "true" for etcd. - scored: false + text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" + type: "skip" - # - id: V-242423 - # text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242424 - text: "Kubernetes etcd must have a certificate for communication." - audit: "grep -i cert-file /etc/kubernetes/manifests/etcd.yaml && grep -i key-file /etc/kubernetes/manifests/etcd.yaml" - tests: - bin_op: and - test_items: - - flag: "--cert-file" - set: true - - flag: "--key-file" - set: true - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Ensure the "--cert-file" and "--key-file" flags are set to paths containing an Approved Organizational Certificate and corresponding private key. 
- scored: false + text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" + type: "skip" - # - id: V-242424 - # text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242425 - text: "Kubernetes Kubelet must have a certificate for communication." - audit: "ps -ef | grep kubelet | grep -- --tls-cert-file" - tests: - test_items: - - flag: "--tls-cert-file" - set: true - remediation: | - On the Control Plane, run the command: - ps -ef | grep kubelet + text: "Kubernetes Kubelet must enable tls-cert-file for client authentication to secure service | Component of GKE Control Plane" + type: "skip" - If the "--tls-cert-file" option exists, verify its value is correctly set. - Note the path to the config file (identified by --config). - - Edit the Kubernetes Kubelet config file: - Set the value of "tlsCertFile" to a path containing an Approved Organizational Certificate. - - Restart the kubelet service using the following command: - systemctl daemon-reload && systemctl restart kubelet - scored: false - - - # - id: V-242425 - # text: "Kubernetes Kubelet must enable tls-cert-file for client authentication to secure service | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242426 - text: "Kubernetes etcd must enable peer client authentication." - audit: "grep -i peer-client-cert-auth /etc/kubernetes/manifests/etcd.yaml" - tests: - test_items: - - flag: "--peer-client-cert-auth" - compare: - op: eq - value: "true" - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--peer-client-cert-auth" to "true" for etcd. - scored: false + text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" + type: "skip" - # - id: V-242426 - # text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242427 - text: "Kubernetes etcd must have a key file for secure communication." - audit: "grep -i key-file /etc/kubernetes/manifests/etcd.yaml" - tests: - test_items: - - flag: "--key-file" - set: true - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--key-file" to the Approved Organizational Certificate. - scored: false + text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane" + type: "skip" - # - id: V-242427 - # text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242428 - text: "Kubernetes etcd must have a certificate for communication." - audit: "grep -i cert-file /etc/kubernetes/manifests/etcd.yaml" - tests: - test_items: - - flag: "--cert-file" - set: true - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--cert-file" to the Approved Organizational Certificate. 
- scored: false + text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane" + type: "skip" - # - id: V-242428 - # text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242429 - text: "Kubernetes etcd must have the SSL Certificate Authority set." - audit: "grep -i etcd-cafile /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--etcd-cafile" - set: true - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--etcd-cafile" to the Certificate Authority for etcd. - scored: false + text: "Kubernetes etcd must have the SSL Certificate Authority set | Component of GKE Control Plane" + type: "skip" - # - id: V-242429 - # text: "Kubernetes etcd must have the SSL Certificate Authority set | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242430 - text: "Kubernetes etcd must have a certificate for communication." - audit: "grep -i etcd-certfile /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--etcd-certfile" - set: true - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--etcd-certfile" to the certificate to be used for communication with etcd. - scored: false + text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane" + type: "skip" - # - id: V-242430 - # text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242431 - text: "Kubernetes etcd must have a key file for secure communication." - audit: "grep -i etcd-keyfile /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--etcd-keyfile" - set: true - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--etcd-keyfile" to the key file used for secure communication with etcd. - scored: false + text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane" + type: "skip" - - # - id: V-242431 - # text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242432 - text: "Kubernetes etcd must have peer-cert-file set for secure communication." - audit: "grep -i peer-cert-file /etc/kubernetes/manifests/etcd.yaml" - tests: - test_items: - - flag: "--peer-cert-file" - set: true - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--peer-cert-file" to the certificate to be used for communication with etcd. - scored: false + text: "Kubernetes etcd must have peer-cert-file set for secure communication | Component of GKE Control Plane" + type: "skip" - # - id: V-242432 - # text: "Kubernetes etcd must have peer-cert-file set for secure communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242433 - text: "Kubernetes etcd must have a peer-key-file set for secure communication." 
- audit: "grep -i peer-key-file /etc/kubernetes/manifests/etcd.yaml" - tests: - test_items: - - flag: "--peer-key-file" - set: true - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--peer-key-file" to the certificate to be used for communication with etcd. - scored: false + text: "Kubernetes etcd must have a peer-key-file set for secure communication | Component of GKE Control Plane" + type: "skip" - - # - id: V-242433 - # text: "Kubernetes etcd must have a peer-key-file set for secure communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242438 - text: "Kubernetes API Server must configure timeouts to limit attack surface." - audit: "grep -i request-timeout /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--request-timeout" - compare: - op: gt - value: "0" - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Ensure the "--request-timeout" flag is set to a value greater than "0". - scored: false + text: "Kubernetes API Server must configure timeouts to limit attack surface | Component of GKE Control Plane" + type: "skip" - # - id: V-242438 - # text: "Kubernetes API Server must configure timeouts to limit attack surface | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242444 - text: "The Kubernetes component manifests must be owned by root." - audit: "stat -c %U:%G /etc/kubernetes/manifests/*" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the manifest files to root:root by executing the command: - chown root:root /etc/kubernetes/manifests/* - scored: false - - # - id: V-242444 - # text: "The Kubernetes component manifests must be owned by root | Component of GKE Control Plane" - # type: "skip" + text: "The Kubernetes component manifests must be owned by root | Component of GKE Control Plane" + type: "skip" - id: V-242445 text: "The Kubernetes component etcd must be owned by etcd | Component of GKE Control Plane" type: "skip" - # TODO Validate this one - id: V-242446 - text: "The Kubernetes conf files must be owned by root." - audit: "stat -c %U:%G /etc/kubernetes/admin.conf /etc/kubernetes/scheduler.conf /etc/kubernetes/controller-manager.conf" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the Kubernetes conf files to root:root by executing the commands: - chown root:root /etc/kubernetes/admin.conf - chown root:root /etc/kubernetes/scheduler.conf - chown root:root /etc/kubernetes/controller-manager.conf - scored: false + text: "The Kubernetes conf files must be owned by root | Component of GKE Control Plane" + type: "skip" - - # - id: V-242446 - # text: "The Kubernetes conf files must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242447 - text: "The Kubernetes Kube Proxy kubeconfig must have file permissions set to 644 or more restrictive." 
- audit: "stat -c %a $proxykubeconfig" - tests: - test_items: - - flag: "644" - compare: - op: lte - value: "644" - remediation: | - Change the permissions of the Kube Proxy kubeconfig to 644 by executing the command: - chmod 644 $proxykubeconfig - scored: false + text: "The Kubernetes Kube Proxy must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + type: "skip" - - # - id: V-242447 - # text: "The Kubernetes Kube Proxy must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242448 - text: "The Kubernetes Kube Proxy kubeconfig must be owned by root." - audit: "stat -c %U:%G $proxykubeconfig" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the Kube Proxy kubeconfig to root:root by executing the command: - chown root:root $proxykubeconfig - scored: false + text: "The Kubernetes Kube Proxy must be owned by root | Component of GKE Control Plane" + type: "skip" - # - id: V-242448 - # text: "The Kubernetes Kube Proxy must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242449 - text: "The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive." - audit: "stat -c %a $kubeletcafile" - tests: - test_items: - - flag: "644" - compare: - op: lte - value: "644" - remediation: | - Change the permissions of the Kubernetes Kubelet certificate authority file to 644 by executing the command: - chmod 644 $kubeletcafile - scored: false + text: "The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + type: "skip" - # - id: V-242449 - # text: "The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242450 - text: "The Kubernetes Kubelet certificate authority must be owned by root." - audit: "stat -c %U:%G $kubeletcafile" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the Kubernetes Kubelet certificate authority file to root:root by executing the command: - chown root:root $kubeletcafile - scored: false + text: "The Kubernetes Kubelet certificate authority must be owned by root | Component of GKE Control Plane" + type: "skip" - - # - id: V-242450 - # text: "The Kubernetes Kubelet certificate authority must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242451 - text: "The Kubernetes component PKI must be owned by root." - audit: "stat -c %U:%G /etc/kubernetes/pki/*" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the PKI directory and its contents to root:root by executing the command: - chown -R root:root /etc/kubernetes/pki/ - scored: false - - - # - id: V-242451 - # text: "The Kubernetes component PKI must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - - id: V-242452 - text: "The Kubernetes kubelet KubeConfig must have file permissions set to 644 or more restrictive." 
- audit: "stat -c %a /etc/kubernetes/kubelet.conf" - tests: - test_items: - - flag: "644" - compare: - op: lte - value: "644" - remediation: | - Change the permissions of the Kubelet KubeConfig file to 644 by executing the command: - chmod 644 /etc/kubernetes/kubelet.conf - scored: false - - # - id: V-242452 - # text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - - id: V-242453 - text: "The Kubernetes kubelet KubeConfig file must be owned by root." - audit: "stat -c %U:%G /etc/kubernetes/kubelet.conf" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the kubelet.conf file to root:root by executing the command: - chown root:root /etc/kubernetes/kubelet.conf - scored: false - - # - id: V-242453 - # text: "The Kubernetes kubelet config must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - - id: V-242454 - text: "The Kubernetes kubeadm.conf must be owned by root." - audit: "stat -c %U:%G $kubeletdefaultsvc" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the kubeadm.conf to root:root by executing the command: - chown root:root $kubeletdefaultsvc - scored: false - - # - id: V-242454 - # text: "The Kubernetes kubeadm.conf must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - - id: V-242455 - text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive." - audit: "stat -c %a $kubeletdefaultsvc" - tests: - test_items: - - flag: "644" - compare: - op: lte - value: "644" - remediation: | - Change the permissions of the kubeadm.conf to 644 by executing the command: - chmod 644 $kubeletdefaultsvc - scored: false - - # - id: V-242455 - # text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - - # TODO Validate this one - - id: V-242456 - text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive." - audit: "stat -c %a /var/lib/kubelet/config.yaml" - tests: - test_items: - - flag: "644" - compare: - op: lte - value: "644" - remediation: | - Change the permissions of the config.yaml to 644 by executing the command: - chmod 644 /var/lib/kubelet/config.yaml - scored: false - - - # - id: V-242456 - # text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - - id: V-242457 - text: "The Kubernetes kubelet config must be owned by root." - audit: "stat -c %U:%G /var/lib/kubelet/config.yaml" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the kubelet config file to root:root by executing the command: - chown root:root /var/lib/kubelet/config.yaml - scored: false - - - # - id: V-242457 - # text: "The Kubernetes kubelet config must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one don't think it works - # - id: V-242458 - # text: "The Kubernetes etcd must have file permissions set to 644 or more restrictive." 
- # audit: "stat -c %a /var/lib/etcd/*" - # tests: - # test_items: - # - flag: "644" - # compare: - # op: lte - # value: "644" - # remediation: | - # Change the permissions of the etcd data directory to 644 by executing the command: - # chmod -R 644 /var/lib/etcd/* - # scored: false - - - id: V-242458 - text: "The Kubernetes API Server must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + text: "The Kubernetes component PKI must be owned by root | Component of GKE Control Plane" type: "skip" - id: V-242459 text: "The Kubernetes etcd must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" type: "skip" - # TODO Validate this one - id: V-242460 - text: "The Kubernetes admin kubeconfig must have file permissions set to 644 or more restrictive." - audit: "stat -c %a /etc/kubernetes/admin.conf" - tests: - test_items: - - flag: "644" - compare: - op: lte - value: "644" - remediation: | - Change the permissions of the admin kubeconfig file to 644 by executing the command: - chmod 644 /etc/kubernetes/admin.conf - scored: false + text: "The Kubernetes admin.conf must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + type: "skip" - # - id: V-242460 - # text: "The Kubernetes admin.conf must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242466 - text: "The Kubernetes Scheduler must have secure binding." - audit: "grep -i bind-address /etc/kubernetes/manifests/kube-scheduler.yaml" - tests: - test_items: - - flag: "--bind-address" - compare: - op: eq - value: "127.0.0.1" - remediation: | - Edit the Kubernetes Scheduler manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Ensure the "--bind-address" flag is set to "127.0.0.1". - scored: false + text: "The Kubernetes PKI CRT must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + type: "skip" - # - id: V-242466 - # text: "The Kubernetes PKI CRT must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242467 - text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive." - audit: "find /etc/kubernetes/pki -type f -name '*.key' -exec stat -c %a {} \\;" - tests: - test_items: - - flag: "600" - compare: - op: lte - value: "600" - remediation: | - Change the permissions of the PKI key files to 600 by executing the command: - find /etc/kubernetes/pki -type f -name '*.key' -exec chmod 600 {} \; - scored: false - - # - id: V-242467 - # text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive | Component of GKE Control Plane" - # type: "skip" + text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive | Component of GKE Control Plane" + type: "skip" - id: V-242468 - text: "The Kubernetes API Server must prohibit communication using TLS version 1.0 and 1.1, and SSL 2.0 and 3.0 | Component of EKS Control Plane" + text: "The Kubernetes API Server must prohibit communication using TLS version 1.0 and 1.1, and SSL 2.0 and 3.0 | Component of GKE Control Plane" type: "skip" -#TODO Test this, pretty sure it doesn't work - # - id: V-245541 - # text: "Kubernetes Kubelet must not disable timeouts." 
- # audit: "ps -ef | grep kubelet | grep -- --streaming-connection-idle-timeout" - # tests: - # test_items: - # - flag: "--streaming-connection-idle-timeout" - # compare: - # op: gte - # value: "5m" - # remediation: | - # On the Control Plane, run the command: - # ps -ef | grep kubelet - - # If the "--streaming-connection-idle-timeout" option exists, verify its value. - - # Edit the Kubernetes Kubelet config file: - # Set the argument "streamingConnectionIdleTimeout" to a value of "5m" or greater. - - # Restart the kubelet service using the following command: - # systemctl daemon-reload && systemctl restart kubelet - # scored: false - - - id: V-245541 - text: "Kubernetes Kubelet must not disable timeouts | Component of GKE Control Plane" - type: "skip" - - # TODO Check this, probably doesn't work - # - id: V-245543 - # text: "Kubernetes API Server must disable token authentication to protect information in transit." - # audit: "grep -i token-auth-file /etc/kubernetes/manifests/kube-apiserver.yaml" - # tests: - # test_items: - # - flag: "--token-auth-file" - # set: false - # remediation: | - # Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - # Remove the setting "--token-auth-file". - # scored: false - - id: V-245543 text: "Kubernetes API Server must disable token authentication to protect information in transit | Component of GKE Control Plane" type: "skip" - # TODO Verify this one - id: V-245544 - text: "Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit." - audit: "grep -i kubelet-client-certificate /etc/kubernetes/manifests/kube-apiserver.yaml && grep -i kubelet-client-key /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - bin_op: and - test_items: - - flag: "--kubelet-client-certificate" - set: true - - flag: "--kubelet-client-key" - set: true - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--kubelet-client-certificate" and "--kubelet-client-key" to an Approved Organizational Certificate and key pair. - Restart the kubelet service using the following command: - service kubelet restart + text: "Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit | Component of GKE Control Plane" + type: "skip" - # - id: V-245544 - # text: "Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit | Component of GKE Control Plane" - # type: "skip" + - id: V-254800 + text: "Kubernetes must have a Pod Security Admission control file configured. | Component of GKE Control Plane" + type: "skip" - # TODO This one is "new" doesn't appear to work though - # - id: V-254800 - # text: "Kubernetes must have a Pod Security Admission control file configured." - # audit: "grep -i admission-control-config-file /etc/kubernetes/manifests/kube-apiserver.yaml" - # tests: - # test_items: - # - flag: "--admission-control-config-file" - # set: true - # remediation: | - # Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - # Set the value of "--admission-control-config-file" to a valid path for the file. - # Create an admission controller config file with the necessary PodSecurity configuration. 
- # scored: false + - id: V-254801 + text: "Kubernetes must enable PodSecurity admission controller on static pods and Kubelets. | Component of GKE Control Plane" + type: "skip" - # TODO This one is "new" doesn't appear to work though - # - id: V-254801 - # text: "Kubernetes must enable PodSecurity admission controller on static pods and Kubelets." - # audit: "grep -i feature-gates /etc/kubernetes/manifests/kube-apiserver.yaml" - # tests: - # test_items: - # - flag: "--feature-gates" - # compare: - # op: has - # value: "PodSecurity=true" - # remediation: | - # Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - # Ensure the argument "--feature-gates=PodSecurity=true" is present. - # scored: false \ No newline at end of file + - id: V-242394 + text: "Kubernetes Worker Nodes must not have the sshd service enabled | Component of GKE Control Plane" + type: "skip" \ No newline at end of file diff --git a/cfg/gke-stig-kubernetes-v2r2/node.yaml b/cfg/gke-stig-kubernetes-v2r2/node.yaml index 5802411..ddbb58b 100644 --- a/cfg/gke-stig-kubernetes-v2r2/node.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/node.yaml @@ -9,23 +9,22 @@ groups: text: "DISA Category Code I" checks: - id: V-242387 # CIS 3.2.4 - text: "The Kubernetes Kubelet must have the read-only port flag disabled (Manual)" + text: "The Kubernetes Kubelet must have the read-only port flag disabled" audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: "--read-only-port" path: '{.readOnlyPort}' set: false - - flag: "--read-only-port" - path: '{.readOnlyPort}' + - path: '{.readOnlyPort}' compare: op: eq value: 0 bin_op: or remediation: | If modifying the Kubelet config file, edit the kubelet-config.json file - /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to 0 + $kubeletconf and set the below parameter to 0 "readOnlyPort": 0 @@ -47,7 +46,7 @@ groups: - id: V-242391 # CIS 3.2.1 text: "Ensure that the Anonymous Auth is Not Enabled (Automated)" audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: "--anonymous-auth" @@ -68,7 +67,7 @@ groups: with the --config argument. The file can be viewed with a command such as more or less, like so: - sudo less /home/kubernetes/kubelet-config.yaml + sudo less $kubeletconf Disable Anonymous Authentication by setting the following parameter: @@ -100,7 +99,7 @@ groups: - id: V-242392 # CIS 3.2.2 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --authorization-mode @@ -158,8 +157,6 @@ groups: systemctl status kubelet -l scored: true - # TODO: Verify this, probably requires rbac permissions using kubectl - # This needs proper permissions set, TODO!! - id: V-242395 text: "Kubernetes dashboard must not be enabled." 
audit: "kubectl get pods --all-namespaces -l k8s-app=kubernetes-dashboard" @@ -172,11 +169,27 @@ groups: kubectl delete deployment kubernetes-dashboard --namespace=kube-system scored: true - # TODO This could be automated, but requires a little more effort or adding jq to the docker image - # maybe test path will work - id: V-242396 text: "Kubernetes Kubectl cp command must give expected access and results. (Manual)" type: "manual" + # audit: "kubectl version --client --output=yaml | grep 'gitVersion' | sed -E 's/.*v([0-9]+)\\.([0-9]+)\\.([0-9]+)/major=\\1\\nminor=\\2\\npatch=\\3/'" + # tests: + # bin_op: or + # test_items: + # - flag: "major=" + # compare: + # op: gte + # value: 1 + + # - flag: "minor=" + # compare: + # op: gte + # value: 12 + + # - flag: "patch=" + # compare: + # op: gte + # value: 9 remediation: | If any Worker nodes are not using kubectl version 1.12.9 or newer, this is a finding. Upgrade the Master and Worker nodes to the latest version of kubectl. @@ -244,7 +257,7 @@ groups: - id: V-242404 # CIS 3.2.8 text: "Ensure that the --rotate-certificates argument is not present or is set to true (Automated)" audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --rotate-certificates @@ -308,6 +321,8 @@ groups: For any of the pods that are using ports below 1024, reconfigure the pod to use a service to map a host non-privileged port to the pod port or reconfigure the image to use non-privileged ports. + kubectl get services -A -o json | jq '.items[].spec.ports' + Note this should excempt non-configurable services from the GKE managed service, such as anthos, gatewaykeeper, kubelet, etc. scored: false - id: V-242415 text: "Secrets in Kubernetes must not be stored as environment variables.(Manual)" @@ -335,68 +350,53 @@ groups: text: "DISA Category Code II - Node Security" checks: - # TODO Verify this, low confidence this will work - # These both don't work. Might need to be a manual check. + # TODO Verify this.. seems to be failing but also not sure if this can be disabled with GKE - id: V-242393 text: "Kubernetes Worker Nodes must not have sshd service running. (Automated)" - audit: '/bin/sh -c ''systemctl show -p ActiveState sshd'' ' + audit: 'ps aux | grep sshd' tests: test_items: - - flag: ActiveState - compare: - op: eq - value: inactive + - flag: bin/sshd + set: false remediation: | To stop the sshd service, run the command: systemctl stop sshd scored: true - - id: V-242393 - text: "Kubernetes Worker Nodes must not have sshd service running." - audit: "/bin/sh -c \"systemctl status sshd\"" - tests: - test_items: - - flag: "sshd" - compare: - op: eq - value: "inactive" - remediation: | - To stop the sshd service, run the command: - systemctl stop sshd - To disable the service: - systemctl disable sshd - scored: true - # TODO Verify this, low confidence this will work # Both of these are not working at the moment - - id: V-242394 - text: "Kubernetes Worker Nodes must not have the sshd service enabled. (Automated)" - audit: "/bin/sh -c 'systemctl is-enabled sshd.service'" - tests: - test_items: - - flag: "disabled" - remediation: | - To disable the sshd service, run the command: - chkconfig sshd off - scored: true - - id: V-242394 - text: "Kubernetes Worker Nodes must not have the sshd service enabled." 
- audit: "systemctl is-enabled sshd" - tests: - test_items: - - flag: "sshd" - compare: - op: eq - value: "disabled" - remediation: | - To disable the sshd service, run the command: - systemctl disable sshd - scored: true + # - id: V-242394 + # text: "Kubernetes Worker Nodes must not have the sshd service enabled. (Automated)" + # audit: "/bin/sh -c 'systemctl list-unit-files | grep sshd'" + # tests: + # bin_op: + # test_items: + # - flag: "disabled" + # - flag: "sshd" + # set: false + # remediation: | + # To disable the sshd service, run the command: + # chkconfig sshd off + # scored: true + + # - id: V-242394 + # text: "Kubernetes Worker Nodes must not have the sshd service enabled." + # audit: "systemctl is-enabled sshd" + # tests: + # test_items: + # - flag: "sshd" + # compare: + # op: eq + # value: "disabled" + # remediation: | + # To disable the sshd service, run the command: + # systemctl disable sshd + # scored: true - id: V-242434 # CIS 3.2.6 text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --make-iptables-util-chains @@ -450,4 +450,134 @@ groups: systemctl daemon-reload systemctl restart kubelet.service systemctl status kubelet -l + scored: true + + - id: V-242420 + text: "Kubernetes Kubelet must have the SSL Certificate Authority set." + audit: "ps -ef | grep kubelet" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--client-ca-file" + set: true + - path: "{.authentication.x509.clientCAFile}" + set: true + remediation: | + On the Control Plane, run the command: + ps -ef | grep kubelet + + If the "--client-ca-file" option exists, verify its value is correctly set. + Note the path to the config file (identified by --config). + + Edit the Kubernetes Kubelet config file: + Set the value of "clientCAFile" to a path containing an Approved Organizational Certificate. + + Restart the kubelet service using the following command: + systemctl daemon-reload && systemctl restart kubelet + scored: false + + - id: V-242452 + text: "The Kubernetes kubelet KubeConfig must have file permissions set to 644 or more restrictive." + audit: "stat -c %a $kubeletconf" + tests: + test_items: + - flag: "644" + compare: + op: lte + value: "644" + remediation: | + Change the permissions of the Kubelet KubeConfig file to 644 by executing the command: + chmod 644 $kubeletconf + scored: false + + - id: V-242453 + text: "The Kubernetes kubelet KubeConfig file must be owned by root." + audit: "stat -c %U:%G $kubeletconf" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the kubelet.conf file to root:root by executing the command: + chown root:root $kubeletconf + scored: false + + - id: V-242454 + text: "The Kubernetes kubeadm.conf must be owned by root." + audit: "stat -c %U:%G $kubeletsvc" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the kubeadm.conf to root:root by executing the command: + chown root:root $kubeletsvc + scored: false + + - id: V-242455 + text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive." 
+ audit: "stat -c %a $kubeletsvc" + tests: + test_items: + - flag: "644" + compare: + op: lte + value: "644" + remediation: | + Change the permissions of the kubeadm.conf to 644 by executing the command: + chmod 644 $kubeletsvc + scored: false + + - id: V-242456 + text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive." + audit: "stat -c %a $kubeletconf" + tests: + test_items: + - flag: "644" + compare: + op: lte + value: "644" + remediation: | + Change the permissions of the config.yaml to 644 by executing the command: + chmod 644 $kubeletconf + scored: false + + - id: V-242457 + text: "The Kubernetes kubelet config must be owned by root." + audit: "stat -c %U:%G $kubeletconf" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the kubelet config file to root:root by executing the command: + chown root:root $kubeletconf + scored: false + + - id: V-245541 + text: "Kubernetes Kubelet must not disable timeouts." + audit: "ps -ef | grep kubelet" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: and + test_items: + - flag: "--streaming-connection-idle-timeout" + set: false + - path: "{.streamingConnectionIdleTimeout}" + set: true + compare: + op: gte + value: "5m" + remediation: | + On the Control Plane, run the command: + ps -ef | grep kubelet + + If the "--streaming-connection-idle-timeout" option exists, verify its value. + + Edit the Kubernetes Kubelet config file: + Set the argument "streamingConnectionIdleTimeout" to a value of "5m" or greater. + + Restart the kubelet service using the following command: + systemctl daemon-reload && systemctl restart kubelet scored: true \ No newline at end of file diff --git a/cfg/gke-stig-kubernetes-v2r2/policies.yaml b/cfg/gke-stig-kubernetes-v2r2/policies.yaml index 34c59d3..4099a3d 100644 --- a/cfg/gke-stig-kubernetes-v2r2/policies.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/policies.yaml @@ -24,88 +24,11 @@ groups: remediation: | Move any user-managed resources from the default, kube-public and kube-node-lease namespaces, to user namespaces. scored: false - - - id: V-242437 - text: "Kubernetes must have a pod security policy set." - audit: "kubectl get podsecuritypolicy" - tests: - test_items: - - flag: "runAsUser" - compare: - op: eq - value: "MustRunAsNonRoot" - - flag: "supplementalGroups" - compare: - op: gt - value: "0" - - flag: "fsGroup" - compare: - op: gt - value: "0" - remediation: | - From the Control Plane, save the following policy to a file called restricted.yml: - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - - ALL - volumes: - - configMap - - emptyDir - - projected - - secret - - downwardAPI - - persistentVolumeClaim - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: MustRunAs - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: MustRunAs - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false - - Apply the policy with: - - kubectl create -f restricted.yml - scored: true - id: V-242417 text: "Kubernetes must separate user functionality. (Manual)" type: "manual" remediation: | Move any user pods that are present in the Kubernetes system namespaces to user specific namespaces. 
scored: false - - - id: 4.2 - text: "DISA Category Code I - PodSecurity Admission Controller" - checks: - - id: V-254801 - text: "Kubernetes must enable PodSecurity admission controller on static pods and Kubelets." - audit: "grep -i feature-gates /etc/kubernetes/manifests/*" - tests: - test_items: - - flag: "--feature-gates" - compare: - op: eq - value: "PodSecurity=true" - remediation: | - On the Control Plane, change to the manifests directory: - - grep -i feature-gates /etc/kubernetes/manifests/* - - Ensure the argument "--feature-gates=PodSecurity=true" i + \ No newline at end of file diff --git a/job-gke-stig.yaml b/job-gke-stig.yaml index 8d8fc70..416a094 100644 --- a/job-gke-stig.yaml +++ b/job-gke-stig.yaml @@ -1,3 +1,40 @@ +# Service account role required for 242395 + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-bench-sa + namespace: kube-bench + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-bench-list-pods +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["list"] + resourceNames: [] # Can't use labels here, enforced via RBAC + admission controls + # - apiGroups: [""] + # resources: ["pods"] + # verbs: ["get"] + # resourceNames: [] # For explicit pod access if needed + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-bench-sa-binding +subjects: + - kind: ServiceAccount + name: kube-bench-sa + namespace: kube-bench +roleRef: + kind: ClusterRole + name: kube-bench-list-pods + apiGroup: rbac.authorization.k8s.io + --- apiVersion: batch/v1 kind: Job @@ -6,6 +43,7 @@ metadata: spec: template: spec: + serviceAccountName: kube-bench-sa hostPID: true containers: - name: kube-bench @@ -33,6 +71,9 @@ spec: - name: etc-kubernetes mountPath: /etc/kubernetes readOnly: true + - name: home-kubernetes + mountPath: /home/kubernetes + readOnly: true restartPolicy: Never volumes: - name: var-lib-kubelet @@ -44,3 +85,6 @@ spec: - name: etc-kubernetes hostPath: path: "/etc/kubernetes" + - name: home-kubernetes + hostPath: + path: "/home/kubernetes" From 5cff2b0a4b9f0cff7390bfe896c7e944f414dd4f Mon Sep 17 00:00:00 2001 From: Carter Williamson Date: Tue, 18 Mar 2025 13:18:08 -0700 Subject: [PATCH 04/10] Removing some unwanted changes --- go.mod | 9 +-------- go.sum | 17 ----------------- hack/kind-stig.test.yaml | 4 ++-- hack/kind-stig.yaml | 2 +- makefile | 4 ++-- 5 files changed, 6 insertions(+), 30 deletions(-) diff --git a/go.mod b/go.mod index 6d94b29..d79c1eb 100644 --- a/go.mod +++ b/go.mod @@ -22,8 +22,6 @@ require ( ) require ( - al.essio.dev/pkg/shellescape v1.6.0 // indirect - github.com/BurntSushi/toml v1.4.0 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.62 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect @@ -37,7 +35,6 @@ require ( github.com/aws/smithy-go v1.22.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect @@ -49,7 +46,6 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/safetext v0.0.0-20240722112252-5a72de7e7962 // indirect github.com/google/uuid 
v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -68,8 +64,6 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pborman/uuid v1.2.1 // indirect - github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect @@ -88,7 +82,7 @@ require ( golang.org/x/net v0.33.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.31.0 // indirect + golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.7.0 // indirect @@ -102,7 +96,6 @@ require ( k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/kind v0.27.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index cf2f2c6..1631703 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,3 @@ -al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= -al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= -github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= -github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= github.com/aws/aws-sdk-go-v2/config v1.29.9 h1:Kg+fAYNaJeGXp1vmjtidss8O2uXIsXwaRqsQJKXVr+0= @@ -38,8 +34,6 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= -github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= @@ -90,9 +84,6 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/safetext v0.0.0-20240722112252-5a72de7e7962 h1:+9C/TgFfcCmZBV7Fjb3kQCGlkpFrhtvFDgbdQHB9RaA= -github.com/google/safetext v0.0.0-20240722112252-5a72de7e7962/go.mod h1:H3K1Iu/utuCfa10JO+GsmKUYSWi7ug57Rk6GaDRHaaQ= -github.com/google/uuid v1.0.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -156,10 +147,6 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= -github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= -github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= -github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -249,8 +236,6 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -316,8 +301,6 @@ k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6J k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA= -sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/hack/kind-stig.test.yaml b/hack/kind-stig.test.yaml index a4ee538..5051277 100644 --- a/hack/kind-stig.test.yaml +++ b/hack/kind-stig.test.yaml @@ -12,12 +12,12 @@ spec: hostPID: true containers: - name: kube-bench - image: docker.io/aquasec/kube-bench:152d0e7 + image: docker.io/aquasec/kube-bench:latest command: [ "kube-bench", "run", "--benchmark", - "gke-stig-kubernetes-v2r2", + "eks-stig-kubernetes-v1r6", ] volumeMounts: - name: var-lib-etcd diff --git a/hack/kind-stig.yaml b/hack/kind-stig.yaml index e38c290..3b1ab69 100644 --- a/hack/kind-stig.yaml +++ b/hack/kind-stig.yaml @@ -17,7 +17,7 @@ spec: "kube-bench", "run", "--benchmark", - "gke-stig-kubernetes-v2r2", + "eks-stig-kubernetes-v1r6", ] volumeMounts: - name: var-lib-etcd 
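The kind-run-stig make target updated below drives these kind manifests end to end. Run by hand, the equivalent steps are roughly the following sketch (assuming a kind cluster is already up and the kube-bench image referenced in hack/kind-stig.test.yaml is reachable from it):

    # Apply the STIG benchmark job, wait for it to finish, then compare its
    # output against the recorded expectation used by the integration tests.
    kubectl apply -f ./hack/kind-stig.test.yaml
    kubectl wait --for=condition=complete job.batch/kube-bench --timeout=60s
    kubectl logs job/kube-bench > ./test.data
    diff ./test.data integration/testdata/Expected_output_stig.data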
diff --git a/makefile b/makefile index bb44e26..62b6d8d 100644 --- a/makefile +++ b/makefile @@ -104,5 +104,5 @@ kind-run-stig: kind-push KUBECONFIG=$(KUBECONFIG) \ kubectl apply -f ./hack/kind-stig.test.yaml && \ kubectl wait --for=condition=complete job.batch/kube-bench --timeout=60s && \ - kubectl logs job/kube-bench > ./test.data - cat ./test.data + kubectl logs job/kube-bench > ./test.data && \ + diff ./test.data integration/testdata/Expected_output_stig.data From b6304d41ff45d05304284e99426bdade665be2e6 Mon Sep 17 00:00:00 2001 From: Carter Williamson Date: Tue, 18 Mar 2025 13:18:51 -0700 Subject: [PATCH 05/10] Removing some unwanted changes --- check/check.go | 2 +- docs/controls.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/check/check.go b/check/check.go index 490ec43..58ce6fb 100644 --- a/check/check.go +++ b/check/check.go @@ -23,7 +23,7 @@ import ( "github.com/golang/glog" ) -// NodeType indicates the type of node (master, node, federated, etcd, controlplane, policies, managedservices). +// NodeType indicates the type of node (master, node). type NodeType string // State is the state of a control check. diff --git a/docs/controls.md b/docs/controls.md index 6de299b..8655f31 100644 --- a/docs/controls.md +++ b/docs/controls.md @@ -57,7 +57,7 @@ the `controls` components have an id and a text description which are displayed in the `kube-bench` output. `type` specifies what kubernetes node type a `controls` is for. Possible values -for `type` are `[master, node, federated, etcd, controlplane, policies, managedservices]`. +for `type` are `master` and `node`. ## Groups From bc5a90782827b7c0a2869029cdf2daf5bcb7ce79 Mon Sep 17 00:00:00 2001 From: Carter Williamson Date: Tue, 18 Mar 2025 13:21:36 -0700 Subject: [PATCH 06/10] Cleanup k8s manifest --- job-gke-stig.yaml | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/job-gke-stig.yaml b/job-gke-stig.yaml index 416a094..38fc2a1 100644 --- a/job-gke-stig.yaml +++ b/job-gke-stig.yaml @@ -1,5 +1,4 @@ # Service account role required for 242395 - apiVersion: v1 kind: ServiceAccount metadata: @@ -15,11 +14,7 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: ["list"] - resourceNames: [] # Can't use labels here, enforced via RBAC + admission controls - # - apiGroups: [""] - # resources: ["pods"] - # verbs: ["get"] - # resourceNames: [] # For explicit pod access if needed + resourceNames: [] --- apiVersion: rbac.authorization.k8s.io/v1 @@ -48,18 +43,15 @@ spec: containers: - name: kube-bench imagePullPolicy: Always - # Push the image to your ECR and then refer to it here - # image: - image: us-docker.pkg.dev/dev-frm-core/dev-frm/kube-bench:gke-stig - # To send findings to AWS Security Hub, refer to `job-eks-asff.yaml` instead + # Push the image to your GCP Artifact Registry and then refer to it here + # image: -docker.pkg.dev///kube-bench:latest + image: docker.io/aquasec/kube-bench:latest command: [ "kube-bench", "run", "--benchmark", - "gke-stig-kubernetes-v2r2", - "-v", - "10", + "gke-stig-kubernetes-v2r2" ] volumeMounts: - name: var-lib-kubelet From f9b204bbc6660b281003fc054f817a451c4cdfa0 Mon Sep 17 00:00:00 2001 From: Carter Williamson Date: Tue, 18 Mar 2025 13:36:18 -0700 Subject: [PATCH 07/10] Moving 242390 to correct file --- .../controlplane.yaml | 24 ----------------- cfg/gke-stig-kubernetes-v2r2/node.yaml | 27 ++++++++++++++++++- 2 files changed, 26 insertions(+), 25 deletions(-) diff --git a/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml 
b/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml index be7f732..d6c7d11 100644 --- a/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml @@ -8,30 +8,6 @@ groups: - id: 2.1 text: "DISA Category Code I - API Server Security" checks: - - id: V-242390 # Similar to CIS 3.2.1 - text: "The Kubernetes API server must have anonymous authentication disabled (Automated)" - audit: "/bin/ps -fC kubelet" - # audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat /etc/kubernetes/kubelet-config.yaml" - # audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: "--anonymous-auth" - path: '{.authentication.anonymous.enabled}' - set: true - compare: - op: eq - value: false - remediation: | - If using a Kubelet config file, edit $kubeletconf to set authentication: anonymous: enabled to - false. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --anonymous-auth=false - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - id: V-242400 text: "The Kubernetes API server must have Alpha APIs disabled" diff --git a/cfg/gke-stig-kubernetes-v2r2/node.yaml b/cfg/gke-stig-kubernetes-v2r2/node.yaml index ddbb58b..fd42ae4 100644 --- a/cfg/gke-stig-kubernetes-v2r2/node.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/node.yaml @@ -580,4 +580,29 @@ groups: Restart the kubelet service using the following command: systemctl daemon-reload && systemctl restart kubelet - scored: true \ No newline at end of file + scored: true + + - id: V-242390 # Similar to CIS 3.2.1 + text: "The Kubernetes API server must have anonymous authentication disabled (Automated)" + # audit: "/bin/ps -fC kubelet" + audit: "/bin/ps -fC $kubeletbin" + # audit_config: "/bin/cat /etc/kubernetes/kubelet-config.yaml" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + set: true + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit $kubeletconf to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service \ No newline at end of file From cd80b85f0ad82412d65c924f40f88f0bd77b799c Mon Sep 17 00:00:00 2001 From: Carter Williamson Date: Tue, 18 Mar 2025 13:37:47 -0700 Subject: [PATCH 08/10] Found the definition for 242401 so added that back in --- cfg/gke-stig-kubernetes-v2r2/managedservices.yaml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml b/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml index ebd0d5f..8346cc9 100644 --- a/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml @@ -67,10 +67,9 @@ groups: text: "The Kubernetes API server must have the secure port set | Component of GKE Control Plane" type: "skip" - # TODO: Didn't actually see this one in the k8s stig file - # - id: V-242401 - # text: "The Kubernetes API Server must have an audit policy set | Component of GKE Control Plane" - # type: "skip" + - id: V-242401 + text: "The Kubernetes API Server must have an audit policy set | Component of GKE Control Plane" + type: "skip" - id: V-242402 text: "The Kubernetes API Server must have an audit log path set | Component of GKE Control Plane" From 7e411a87737158a47caaf604b0fee969a46dd4ff Mon Sep 17 00:00:00 2001 From: Carter Williamson Date: Wed, 19 Mar 2025 15:50:22 -0700 Subject: [PATCH 09/10] Initial code done, ready for testing --- check/controls.go | 70 +++++++++++++++++++++++++++ cmd/common.go | 17 ++++++- cmd/root.go | 2 + cmd/securityCommandCenter.go | 73 +++++++++++++++++++++++++++++ cmd/securityHub.go | 12 ++--- go.mod | 43 +++++++++++++---- go.sum | 67 ++++++++++++++++++++++++++ internal/findings/gscc_publisher.go | 66 ++++++++++++++++++++++++++ 8 files changed, 333 insertions(+), 17 deletions(-) create mode 100644 cmd/securityCommandCenter.go create mode 100644 internal/findings/gscc_publisher.go diff --git a/check/controls.go b/check/controls.go index 1718350..8d1ca27 100644 --- a/check/controls.go +++ b/check/controls.go @@ -19,13 +19,17 @@ import ( "encoding/json" "encoding/xml" "fmt" + "log" "time" + securitypb "cloud.google.com/go/securitycenter/apiv1/securitycenterpb" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/securityhub/types" "github.com/golang/glog" "github.com/onsi/ginkgo/reporters" "github.com/spf13/viper" + "google.golang.org/protobuf/types/known/structpb" + "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/yaml.v2" ) @@ -291,6 +295,72 @@ func (controls *Controls) ASFF() ([]types.AwsSecurityFinding, error) { return fs, nil } +func (controls *Controls) GSCC() ([]*securitypb.Finding, error) { + fs := []*securitypb.Finding{} + project, err := getConfig("GCP_PROJECT") + if err != nil { + return nil, err + } + region, err := getConfig("GCP_REGION") + if err != nil { + return nil, err + } + cluster, err := getConfig("CLUSTER_NAME") + if err != nil { + return nil, err + } + resourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", project, region, cluster) + + ti := timestamppb.Now() + for _, g := range controls.Groups { + for _, check := range g.Checks { + if check.State == FAIL || check.State == WARN { + actualValue := check.ActualValue + remediation := check.Remediation + reason := check.Reason + + if len(actualValue) > 1024 { + actualValue = actualValue[:1023] + } + if len(remediation) > 512 { + remediation = remediation[:511] + } + if len(reason) > 1024 { + reason = reason[:1023] + } + + 
id := fmt.Sprintf("%s/stig-kubernetes-benchmark/%s/%s", resourceName, controls.Version, check.ID) + + // Create SourceProperties map with structpb.NewValue() properly handled + sourceProperties, err := structpb.NewStruct(map[string]interface{}{ + "Reason": reason, + "Actual result": actualValue, + "Expected result": check.ExpectedResult, + "Section": fmt.Sprintf("%s %s", controls.ID, controls.Text), + "Subsection": fmt.Sprintf("%s %s", g.ID, g.Text), + }) + if err != nil { + log.Fatalf("Failed to create SourceProperties: %v", err) + } + + f := &securitypb.Finding{ + Name: id, + Category: "CIS_KUBERNETES_BENCHMARK", + ResourceName: resourceName, + Severity: securitypb.Finding_HIGH, + State: securitypb.Finding_ACTIVE, + EventTime: ti, + Description: check.Text, + SourceProperties: sourceProperties.GetFields(), + } + fs = append(fs, f) + } + } + } + return fs, nil +} + + func getConfig(name string) (string, error) { r := viper.GetString(name) if len(r) == 0 { diff --git a/cmd/common.go b/cmd/common.go index 6f83cee..63bba8a 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -416,6 +416,9 @@ func writeOutput(controlsCollection []*check.Controls) { writeASFFOutput(controlsCollection) return } + if GSCC { + writeGSCCOutput((controlsCollection)) + } writeStdoutOutput(controlsCollection) } @@ -468,12 +471,24 @@ func writeASFFOutput(controlsCollection []*check.Controls) { if err != nil { exitWithError(fmt.Errorf("failed to format findings as ASFF: %v", err)) } - if err := writeFinding(out); err != nil { + if err := writeASSFFinding(out); err != nil { exitWithError(fmt.Errorf("failed to output to ASFF: %v", err)) } } } +func writeGSCCOutput(controlsCollection []*check.Controls) { + for _, controls := range controlsCollection { + out, err := controls.GSCC() + if err != nil { + exitWithError(fmt.Errorf("failed to format findings as GSCC: %v", err)) + } + if err := writeGSCCFinding(out); err != nil { + exitWithError(fmt.Errorf("failed to output to GSCC: %v", err)) + } + } +} + func writeStdoutOutput(controlsCollection []*check.Controls) { for _, controls := range controlsCollection { summary := controls.Summary diff --git a/cmd/root.go b/cmd/root.go index c674aeb..a0870f7 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -44,6 +44,7 @@ var ( junitFmt bool pgSQL bool aSFF bool + GSCC bool masterFile = "master.yaml" nodeFile = "node.yaml" etcdFile = "etcd.yaml" @@ -168,6 +169,7 @@ func init() { RootCmd.PersistentFlags().BoolVar(&junitFmt, "junit", false, "Prints the results as JUnit") RootCmd.PersistentFlags().BoolVar(&pgSQL, "pgsql", false, "Save the results to PostgreSQL") RootCmd.PersistentFlags().BoolVar(&aSFF, "asff", false, "Send the results to AWS Security Hub") + RootCmd.PersistentFlags().BoolVar(&GSCC, "gscc", false, "Send the results to GCP Security Command Center") RootCmd.PersistentFlags().BoolVar(&filterOpts.Scored, "scored", true, "Run the scored CIS checks") RootCmd.PersistentFlags().BoolVar(&filterOpts.Unscored, "unscored", true, "Run the unscored CIS checks") RootCmd.PersistentFlags().StringVar(&skipIds, "skip", "", "List of comma separated values of checks to be skipped") diff --git a/cmd/securityCommandCenter.go b/cmd/securityCommandCenter.go new file mode 100644 index 0000000..9c602dd --- /dev/null +++ b/cmd/securityCommandCenter.go @@ -0,0 +1,73 @@ +package cmd + +import ( + "context" + "fmt" + "log" + + securitycenter "cloud.google.com/go/securitycenter/apiv1" + securitypb "cloud.google.com/go/securitycenter/apiv1/securitycenterpb" + "github.com/aquasecurity/kube-bench/internal/findings" 
+ "github.com/spf13/viper" +) + +// GCP_REGION and ORG_ID should be set in the config +const GCP_REGION = "GCP_REGION" +const ORG_ID = "GCP_ORG_ID" + +func writeGSCCFinding(in []*securitypb.Finding) error { + r := viper.GetString(GCP_REGION) + if len(r) == 0 { + return fmt.Errorf("%s not set", GCP_REGION) + } + orgId := viper.GetString(ORG_ID) + if len(orgId) == 0 { + return fmt.Errorf("%s not set", ORG_ID) + } + ctx := context.Background() + client, err := securitycenter.NewClient(ctx) + if err != nil { + return fmt.Errorf("failed to create SCC client: %w", err) + } + defer client.Close() + + // SCC Source ID - replace with your actual SCC source ID + sourceID := fmt.Sprintf("organizations/%s/sources/1234567890", orgId) + +// Iterate over findings and publish them + for _, f := range in { + req := &securitypb.CreateFindingRequest{ + Parent: sourceID, + FindingId: f.GetName(), // Ensure unique finding ID + Finding: f, + } + + resp, err := client.CreateFinding(ctx, req) + if err != nil { + return fmt.Errorf("failed to create finding %s: %w", f.GetName(), err) + } + fmt.Printf("Finding created: %s\n", resp.Name) + } + + return nil + + // svc := securityhub.NewFromConfig(cfg) + // p := findings.New(*svc) + // out, perr := p.GSCCPublishFinding(in) + // printGSCC(out) + // return perr +} + +func printGSCC(out *findings.PublisherOutput) { + if out.SuccessCount > 0 { + log.Printf("Number of findings that were successfully imported:%v\n", out.SuccessCount) + } + if out.FailedCount > 0 { + log.Printf("Number of findings that failed to import:%v\n", out.FailedCount) + for _, f := range out.FailedFindings { + log.Printf("ID:%s", *f.Id) + log.Printf("Message:%s", *f.ErrorMessage) + log.Printf("Error Code:%s", *f.ErrorCode) + } + } +} diff --git a/cmd/securityHub.go b/cmd/securityHub.go index 2c9d0a9..4d680a8 100644 --- a/cmd/securityHub.go +++ b/cmd/securityHub.go @@ -13,12 +13,12 @@ import ( ) // REGION ... 
-const REGION = "AWS_REGION" +const AWS_REGION = "AWS_REGION" -func writeFinding(in []types.AwsSecurityFinding) error { - r := viper.GetString(REGION) +func writeASSFFinding(in []types.AwsSecurityFinding) error { + r := viper.GetString(AWS_REGION) if len(r) == 0 { - return fmt.Errorf("%s not set", REGION) + return fmt.Errorf("%s not set", AWS_REGION) } cfg, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(r)) if err != nil { @@ -28,11 +28,11 @@ func writeFinding(in []types.AwsSecurityFinding) error { svc := securityhub.NewFromConfig(cfg) p := findings.New(*svc) out, perr := p.PublishFinding(in) - print(out) + printASSF(out) return perr } -func print(out *findings.PublisherOutput) { +func printASSF(out *findings.PublisherOutput) { if out.SuccessCount > 0 { log.Printf("Number of findings that were successfully imported:%v\n", out.SuccessCount) } diff --git a/go.mod b/go.mod index a3eba42..1920794 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,13 @@ require ( ) require ( + cloud.google.com/go v0.118.3 // indirect + cloud.google.com/go/auth v0.15.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/iam v1.4.1 // indirect + cloud.google.com/go/longrunning v0.6.5 // indirect + cloud.google.com/go/securitycenter v1.36.1 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.62 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect @@ -35,18 +42,23 @@ require ( github.com/aws/smithy-go v1.22.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/s2a-go v0.1.9 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.5 // indirect + github.com/googleapis/gax-go/v2 v2.14.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect @@ -75,18 +87,29 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/x448/float16 v0.8.4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.9.0 // indirect - golang.org/x/crypto v0.35.0 // indirect + golang.org/x/crypto v0.36.0 // indirect golang.org/x/exp 
v0.0.0-20230905200255-921286631fa9 // indirect - golang.org/x/net v0.36.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.11.0 // indirect - golang.org/x/sys v0.30.0 // indirect - golang.org/x/term v0.29.0 // indirect - golang.org/x/text v0.22.0 // indirect - golang.org/x/time v0.7.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + golang.org/x/net v0.37.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.10.0 // indirect + google.golang.org/api v0.224.0 // indirect + google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/grpc v1.71.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 79d2d3d..45c2185 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,17 @@ +cloud.google.com/go v0.118.3 h1:jsypSnrE/w4mJysioGdMBg4MiW/hHx/sArFpaBWHdME= +cloud.google.com/go v0.118.3/go.mod h1:Lhs3YLnBlwJ4KA6nuObNMZ/fCbOQBPuWKPoE0Wa/9Vc= +cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps= +cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8= +cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M= +cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/iam v1.4.1 h1:cFC25Nv+u5BkTR/BT1tXdoF2daiVbZ1RLx2eqfQ9RMM= +cloud.google.com/go/iam v1.4.1/go.mod h1:2vUEJpUG3Q9p2UdsyksaKpDzlwOrnMzS30isdReIcLM= +cloud.google.com/go/longrunning v0.6.5 h1:sD+t8DO8j4HKW4QfouCklg7ZC1qC4uzVZt8iz3uTW+Q= +cloud.google.com/go/longrunning v0.6.5/go.mod h1:Et04XK+0TTLKa5IPYryKf5DkpwImy6TluQ1QTLwlKmY= +cloud.google.com/go/securitycenter v1.36.1 h1:QOXZRilyXK80/61Szse35K1w3SU5mzBlEM8/XVJOkzI= +cloud.google.com/go/securitycenter v1.36.1/go.mod h1:SxE1r7Y5V9AVPa+DU0d+4QAOIJzcKglO3Vc4zvcQtPo= github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= github.com/aws/aws-sdk-go-v2/config v1.29.9 h1:Kg+fAYNaJeGXp1vmjtidss8O2uXIsXwaRqsQJKXVr+0= @@ -36,6 +50,8 @@ github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxER github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify 
v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -44,8 +60,11 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= @@ -79,13 +98,21 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.5 h1:VgzTY2jogw3xt39CusEnFJWm7rlsq5yL5q9XdLOuP5g= +github.com/googleapis/enterprise-certificate-proxy v0.3.5/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= +github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -194,6 +221,18 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/auto/sdk v1.1.0 
h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= @@ -203,6 +242,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -215,14 +256,20 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= +golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= +golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.11.0 
h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -236,14 +283,22 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= +golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -255,6 +310,16 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.224.0 h1:Ir4UPtDsNiwIOHdExr3fAj4xZ42QjK7uQte3lORLJwU= +google.golang.org/api v0.224.0/go.mod h1:3V39my2xAGkodXy0vEqcEtkqgw2GtrFL5WuBZlCTCOQ= +google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= +google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= +google.golang.org/genproto/googleapis/api 
v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= +google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -263,6 +328,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/internal/findings/gscc_publisher.go b/internal/findings/gscc_publisher.go new file mode 100644 index 0000000..23de31d --- /dev/null +++ b/internal/findings/gscc_publisher.go @@ -0,0 +1,66 @@ +package findings + +import ( + "context" + "fmt" + + securitycenter "cloud.google.com/go/securitycenter/apiv1" + securitypb "cloud.google.com/go/securitycenter/apiv1/securitycenterpb" + "github.com/pkg/errors" +) + +// Publisher represents an object that publishes findings to GCP Security Command Center (SCC). +type GSCCPublisher struct { + client *securitycenter.Client // GCP SCC Client + sourceID string // SCC Source ID +} + +type GSCCPublisherOutput struct { + // The number of findings that failed to import. + // + // FailedCount is a required field + FailedCount int32 + + // The list of findings that failed to import. + FailedFindings []string + + // The number of findings that were successfully imported. + // + // SuccessCount is a required field + SuccessCount int32 +} + +// New creates a new Publisher. +func NewGSCC(client *securitycenter.Client, sourceID string) *GSCCPublisher { + return &GSCCPublisher{ + client: client, + sourceID: sourceID, + } +} + +// PublishFinding publishes findings to GCP SCC. 
+func (p *GSCCPublisher) PublishFinding(findings []*securitypb.Finding) (*GSCCPublisherOutput, error) { + o := GSCCPublisherOutput{} + var errs error + ctx := context.Background() + + for _, finding := range findings { + req := &securitypb.CreateFindingRequest{ + Parent: p.sourceID, + FindingId: finding.GetName(), // Ensure unique finding ID + Finding: finding, + } + + resp, err := p.client.CreateFinding(ctx, req) + if err != nil { + errs = errors.Wrap(err, "finding publish failed") + o.FailedCount++ + o.FailedFindings = append(o.FailedFindings, finding.GetName()) + continue + } + fmt.Printf("Finding created: %s\n", resp.Name) + o.SuccessCount++ + } + + return &o, errs +} From 8e151b75c0180860682a8022830e9e3ad728d20b Mon Sep 17 00:00:00 2001 From: Carter Williamson Date: Fri, 21 Mar 2025 14:14:14 -0700 Subject: [PATCH 10/10] Completed testing for GCP SCC publisher, documentation, test updates and deployment scripts. Added helper script for creating the SCC Source --- .gitignore | 3 +- cfg/gke-stig-kubernetes-v2r2/config.yaml | 11 ++ cfg/gke-stig-kubernetes-v2r2/node.yaml | 12 ++ check/check.go | 1 + check/check_test.go | 27 +++++ check/controls.go | 29 +++-- check/controls_test.go | 2 + cmd/securityCommandCenter.go | 51 +++------ docs/controls.md | 7 +- docs/gscc.md | 57 ++++++++++ helper_scripts/create_gcp_source/__main__.py | 40 +++++++ .../create_gcp_source/requirements.txt | 1 + internal/findings/gscc_publisher.go | 19 +++- job-gke-stig-gscc.yaml | 105 ++++++++++++++++++ job-gke-stig.yaml | 21 ++-- 15 files changed, 330 insertions(+), 56 deletions(-) create mode 100644 docs/gscc.md create mode 100644 helper_scripts/create_gcp_source/__main__.py create mode 100644 helper_scripts/create_gcp_source/requirements.txt create mode 100644 job-gke-stig-gscc.yaml diff --git a/.gitignore b/.gitignore index 36a1253..da35a75 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ dist .vscode/ hack/kind.test.yaml coverage.txt +venv/ .idea/ @@ -13,4 +14,4 @@ coverage.txt thumbs.db /kubeconfig.kube-bench /test.data -*.iml \ No newline at end of file +*.iml diff --git a/cfg/gke-stig-kubernetes-v2r2/config.yaml b/cfg/gke-stig-kubernetes-v2r2/config.yaml index d8a7090..3446b24 100644 --- a/cfg/gke-stig-kubernetes-v2r2/config.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/config.yaml @@ -1,5 +1,16 @@ --- ## Version-specific settings that override the values in cfg/config.yaml +## These settings are required if you are using the --gscc option to report findings to GCP Security Command Center +## GCP Organization ID is required. +GCP_SCC_SOURCE_ID: "" +## GCP project ID is required. +GCP_PROJECT_ID: "" +## GCP region is required. +GCP_REGION: "" +## GKE Cluster Name is required. +CLUSTER_NAME: "" + + node: kubelet: confs: diff --git a/cfg/gke-stig-kubernetes-v2r2/node.yaml b/cfg/gke-stig-kubernetes-v2r2/node.yaml index fd42ae4..cc71e20 100644 --- a/cfg/gke-stig-kubernetes-v2r2/node.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/node.yaml @@ -12,6 +12,7 @@ groups: text: "The Kubernetes Kubelet must have the read-only port flag disabled" audit: "/bin/ps -fC $kubeletbin" audit_config: "/bin/cat $kubeletconf" + severity: high tests: test_items: - flag: "--read-only-port" @@ -354,6 +355,7 @@ groups: - id: V-242393 text: "Kubernetes Worker Nodes must not have sshd service running. 
(Automated)" audit: 'ps aux | grep sshd' + severity: medium tests: test_items: - flag: bin/sshd @@ -395,6 +397,7 @@ groups: - id: V-242434 # CIS 3.2.6 text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + severity: high audit: "/bin/ps -fC $kubeletbin" audit_config: "/bin/cat $kubeletconf" tests: @@ -453,6 +456,7 @@ groups: scored: true - id: V-242420 + severity: medium text: "Kubernetes Kubelet must have the SSL Certificate Authority set." audit: "ps -ef | grep kubelet" audit_config: "/bin/cat $kubeletconf" @@ -478,6 +482,7 @@ groups: scored: false - id: V-242452 + severity: medium text: "The Kubernetes kubelet KubeConfig must have file permissions set to 644 or more restrictive." audit: "stat -c %a $kubeletconf" tests: @@ -492,6 +497,7 @@ groups: scored: false - id: V-242453 + severity: medium text: "The Kubernetes kubelet KubeConfig file must be owned by root." audit: "stat -c %U:%G $kubeletconf" tests: @@ -504,6 +510,7 @@ groups: scored: false - id: V-242454 + severity: medium text: "The Kubernetes kubeadm.conf must be owned by root." audit: "stat -c %U:%G $kubeletsvc" tests: @@ -516,6 +523,7 @@ groups: scored: false - id: V-242455 + severity: medium text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive." audit: "stat -c %a $kubeletsvc" tests: @@ -530,6 +538,7 @@ groups: scored: false - id: V-242456 + severity: medium text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive." audit: "stat -c %a $kubeletconf" tests: @@ -544,6 +553,7 @@ groups: scored: false - id: V-242457 + severity: medium text: "The Kubernetes kubelet config must be owned by root." audit: "stat -c %U:%G $kubeletconf" tests: @@ -556,6 +566,7 @@ groups: scored: false - id: V-245541 + severity: medium text: "Kubernetes Kubelet must not disable timeouts." audit: "ps -ef | grep kubelet" audit_config: "/bin/cat $kubeletconf" @@ -583,6 +594,7 @@ groups: scored: true - id: V-242390 # Similar to CIS 3.2.1 + severity: high text: "The Kubernetes API server must have anonymous authentication disabled (Automated)" # audit: "/bin/ps -fC kubelet" audit: "/bin/ps -fC $kubeletbin" diff --git a/check/check.go b/check/check.go index 58ce6fb..cbe8b21 100644 --- a/check/check.go +++ b/check/check.go @@ -85,6 +85,7 @@ type Check struct { AuditEnvOutput string `json:"-"` AuditConfigOutput string `json:"-"` DisableEnvTesting bool `json:"-"` + Severity string `json:"severity,omitempty"` } // Runner wraps the basic Run method. 
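The `Severity` field added to `check.Check` above is populated from an optional `severity` key in the benchmark YAML (the same key the node.yaml hunks add to individual STIG checks). Below is a minimal sketch of that round-trip; `miniCheck` is a trimmed-down, hypothetical stand-in rather than the real `check.Check` type, and `gopkg.in/yaml.v2` is used here purely for illustration:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// miniCheck is a hypothetical, trimmed-down stand-in for check.Check,
// showing only that an optional `severity` key round-trips from YAML.
type miniCheck struct {
	ID       string `yaml:"id"`
	Text     string `yaml:"text"`
	Severity string `yaml:"severity"` // empty string when the key is omitted
}

const sample = `
id: V-242434
text: "Ensure that the --make-iptables-util-chains argument is set to true"
severity: high
`

func main() {
	var c miniCheck
	if err := yaml.Unmarshal([]byte(sample), &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s severity=%q\n", c.ID, c.Severity) // V-242434 severity="high"
}
```

Checks that omit the key keep an empty `Severity`, which the SCC conversion in `controls.go` later treats as `high`.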
diff --git a/check/check_test.go b/check/check_test.go index 124e6f9..6b23d3d 100644 --- a/check/check_test.go +++ b/check/check_test.go @@ -94,6 +94,33 @@ func TestCheck_Run(t *testing.T) { }, Expected: FAIL, }, + { + name: "Scored checks that pass should FAIL when config file is not present", + check: Check{ + Scored: true, + AuditConfig: "/test/config.yaml", + Tests: &tests{TestItems: []*testItem{{ + Flag: "hello", + Set: true, + }}}, + Severity: "medium", + }, + Expected: FAIL, + }, + { + name: "Scored checks that pass should PASS when config file is not present", + check: Check{ + Scored: true, + Audit: "echo hello", + AuditConfig: "/test/config.yaml", + Tests: &tests{TestItems: []*testItem{{ + Flag: "hello", + Set: true, + }}}, + Severity: "high", + }, + Expected: PASS, + }, } for _, testCase := range testCases { diff --git a/check/controls.go b/check/controls.go index 8d1ca27..35ef25e 100644 --- a/check/controls.go +++ b/check/controls.go @@ -20,12 +20,14 @@ import ( "encoding/xml" "fmt" "log" + "strings" "time" securitypb "cloud.google.com/go/securitycenter/apiv1/securitycenterpb" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/securityhub/types" "github.com/golang/glog" + "github.com/google/uuid" "github.com/onsi/ginkgo/reporters" "github.com/spf13/viper" "google.golang.org/protobuf/types/known/structpb" @@ -297,7 +299,7 @@ func (controls *Controls) ASFF() ([]types.AwsSecurityFinding, error) { func (controls *Controls) GSCC() ([]*securitypb.Finding, error) { fs := []*securitypb.Finding{} - project, err := getConfig("GCP_PROJECT") + project, err := getConfig("GCP_PROJECT_ID") if err != nil { return nil, err } @@ -318,6 +320,7 @@ func (controls *Controls) GSCC() ([]*securitypb.Finding, error) { actualValue := check.ActualValue remediation := check.Remediation reason := check.Reason + severity := securitypb.Finding_HIGH if len(actualValue) > 1024 { actualValue = actualValue[:1023] @@ -329,15 +332,26 @@ func (controls *Controls) GSCC() ([]*securitypb.Finding, error) { reason = reason[:1023] } - id := fmt.Sprintf("%s/stig-kubernetes-benchmark/%s/%s", resourceName, controls.Version, check.ID) + if strings.ToLower(check.Severity) == "medium" { + severity = securitypb.Finding_MEDIUM + } + + if strings.ToLower(check.Severity) == "low" { + severity = securitypb.Finding_LOW + } + + + // id := fmt.Sprintf("%s/stig/%s/%s", cluster, controls.Version, check.ID) + id := strings.Replace(uuid.New().String(), "-", "", -1) // Create SourceProperties map with structpb.NewValue() properly handled sourceProperties, err := structpb.NewStruct(map[string]interface{}{ "Reason": reason, - "Actual result": actualValue, - "Expected result": check.ExpectedResult, + "ActualResult": actualValue, + "ExpectedResult": check.ExpectedResult, "Section": fmt.Sprintf("%s %s", controls.ID, controls.Text), "Subsection": fmt.Sprintf("%s %s", g.ID, g.Text), + "Remediation": remediation, }) if err != nil { log.Fatalf("Failed to create SourceProperties: %v", err) @@ -345,12 +359,13 @@ func (controls *Controls) GSCC() ([]*securitypb.Finding, error) { f := &securitypb.Finding{ Name: id, - Category: "CIS_KUBERNETES_BENCHMARK", + Category: "KUBERNETES_BENCHMARK", ResourceName: resourceName, - Severity: securitypb.Finding_HIGH, + FindingClass: securitypb.Finding_MISCONFIGURATION, + Severity: severity, State: securitypb.Finding_ACTIVE, EventTime: ti, - Description: check.Text, + Description: fmt.Sprintf("%s - %s", check.ID, check.Text), SourceProperties: sourceProperties.GetFields(), } fs = append(fs, f) 
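The hunk above changes two things in `Controls.GSCC()`: the optional per-check severity string is mapped onto the Security Command Center enum (defaulting to `HIGH`), and the finding ID becomes a dash-free UUID instead of the earlier path-style string. The following standalone sketch restates that logic; the helper names `sccSeverity` and `newFindingID` are made up for illustration and do not exist in the patch:

```go
package main

import (
	"fmt"
	"strings"

	securitypb "cloud.google.com/go/securitycenter/apiv1/securitycenterpb"
	"github.com/google/uuid"
)

// sccSeverity mirrors the mapping applied in Controls.GSCC():
// unknown or empty severities fall back to HIGH.
func sccSeverity(s string) securitypb.Finding_Severity {
	switch strings.ToLower(s) {
	case "medium":
		return securitypb.Finding_MEDIUM
	case "low":
		return securitypb.Finding_LOW
	default:
		return securitypb.Finding_HIGH
	}
}

// newFindingID mirrors the dash-free UUID used as the CreateFinding ID.
func newFindingID() string {
	return strings.ReplaceAll(uuid.New().String(), "-", "")
}

func main() {
	fmt.Println(sccSeverity("medium")) // MEDIUM
	fmt.Println(sccSeverity(""))       // HIGH (default)
	fmt.Println(len(newFindingID()))   // 32
}
```

The dash-free UUID keeps the ID short and purely alphanumeric, which is friendlier to the `CreateFinding` finding ID field than the earlier slash-separated `resourceName/.../check.ID` string.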
diff --git a/check/controls_test.go b/check/controls_test.go index c2f6ab3..3b5c279 100644 --- a/check/controls_test.go +++ b/check/controls_test.go @@ -193,6 +193,7 @@ groups: remediation: | Edit the config file /this/is/a/file/path and set SomeSampleFlag to true. scored: true + severity: medium `) // and controls, err := NewControls(MASTER, in, "") @@ -224,6 +225,7 @@ groups: assert.Equal(t, "SomeSampleFlag=true", G2.Checks[0].Tests.TestItems[0].Flag) assert.Equal(t, "Edit the config file /this/is/a/file/path and set SomeSampleFlag to true.\n", G2.Checks[0].Remediation) assert.Equal(t, true, G2.Checks[0].Scored) + assert.Equal(t, "medium", G2.Checks[0].Severity) assertEqualGroupSummary(t, 0, 1, 0, 0, G2) // and assert.Equal(t, 1, controls.Summary.Pass) diff --git a/cmd/securityCommandCenter.go b/cmd/securityCommandCenter.go index 9c602dd..c4e65f0 100644 --- a/cmd/securityCommandCenter.go +++ b/cmd/securityCommandCenter.go @@ -11,63 +11,46 @@ import ( "github.com/spf13/viper" ) -// GCP_REGION and ORG_ID should be set in the config const GCP_REGION = "GCP_REGION" -const ORG_ID = "GCP_ORG_ID" +const GCP_PROJECT_ID = "GCP_PROJECT_ID" +const GCP_SCC_SOURCE_ID = "GCP_SCC_SOURCE_ID" func writeGSCCFinding(in []*securitypb.Finding) error { r := viper.GetString(GCP_REGION) if len(r) == 0 { return fmt.Errorf("%s not set", GCP_REGION) } - orgId := viper.GetString(ORG_ID) - if len(orgId) == 0 { - return fmt.Errorf("%s not set", ORG_ID) + projectId := viper.GetString(GCP_PROJECT_ID) + if len(projectId) == 0 { + return fmt.Errorf("%s not set", GCP_PROJECT_ID) } + sccSourceId := viper.GetString(GCP_SCC_SOURCE_ID) + if len(sccSourceId) == 0 { + return fmt.Errorf("%s not set", GCP_SCC_SOURCE_ID) + } + ctx := context.Background() client, err := securitycenter.NewClient(ctx) if err != nil { return fmt.Errorf("failed to create SCC client: %w", err) } defer client.Close() - - // SCC Source ID - replace with your actual SCC source ID - sourceID := fmt.Sprintf("organizations/%s/sources/1234567890", orgId) -// Iterate over findings and publish them - for _, f := range in { - req := &securitypb.CreateFindingRequest{ - Parent: sourceID, - FindingId: f.GetName(), // Ensure unique finding ID - Finding: f, - } - - resp, err := client.CreateFinding(ctx, req) - if err != nil { - return fmt.Errorf("failed to create finding %s: %w", f.GetName(), err) - } - fmt.Printf("Finding created: %s\n", resp.Name) - } - - return nil - - // svc := securityhub.NewFromConfig(cfg) - // p := findings.New(*svc) - // out, perr := p.GSCCPublishFinding(in) - // printGSCC(out) - // return perr + p := findings.NewGSCC(client, sccSourceId) + out, perr := p.PublishFinding(in) + printGSCC(out) + return perr } -func printGSCC(out *findings.PublisherOutput) { +func printGSCC(out *findings.GSCCPublisherOutput) { if out.SuccessCount > 0 { log.Printf("Number of findings that were successfully imported:%v\n", out.SuccessCount) } if out.FailedCount > 0 { log.Printf("Number of findings that failed to import:%v\n", out.FailedCount) for _, f := range out.FailedFindings { - log.Printf("ID:%s", *f.Id) - log.Printf("Message:%s", *f.ErrorMessage) - log.Printf("Error Code:%s", *f.ErrorCode) + log.Printf("ID:%s", f.Finding.GetName()) + log.Printf("Message:%s", f.Error) } } } diff --git a/docs/controls.md b/docs/controls.md index 8655f31..03fb0fe 100644 --- a/docs/controls.md +++ b/docs/controls.md @@ -104,7 +104,7 @@ command line, with the flag `--group` or `-g`. ## Check -The CIS Kubernetes Benchmark recommends configurations to harden Kubernetes components. 
These recommendations are usually configuration options and can be +The STIG/CIS Kubernetes Benchmarks recommend configurations to harden Kubernetes components. These recommendations are usually configuration options and can be specified by flags to Kubernetes binaries, or in configuration files. The Benchmark also provides commands to audit a Kubernetes installation, identify @@ -130,11 +130,16 @@ remediation: | on the master node and set the below parameter. --anonymous-auth=false scored: false +severity: high ``` A `check` object has an `id`, a `text`, an `audit`, a `tests`, `remediation` and `scored` fields. +Optionally, `severity` can be provided. The severity defaults to `high` if not set. +This field is used when sending results to GCP SCC. AWS Security Hub does not currently support setting severity. +Valid options are `high`, `medium`, or `low`. + `kube-bench` supports running individual checks by specifying the check's `id` as a comma-delimited list on the command line with the `--check` flag. diff --git a/docs/gscc.md b/docs/gscc.md new file mode 100644 index 0000000..eeda889 --- /dev/null +++ b/docs/gscc.md @@ -0,0 +1,57 @@ +# Integrating kube-bench with GCP Security Command Center + +You can configure kube-bench with the `--gscc` option to send findings to GCP Security Command Center (SCC). There are some additional steps required so that kube-bench has the information and permissions it needs to send these findings. + +A few notes before getting started: + +- There are multiple ways to assign pod identity in GCP. For this walkthrough we are using [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity). +- The SCC `source` for kube-bench is created using a Python script. This needs to be run prior to executing kube-bench. + - Creating sources is not currently supported in the gcloud CLI. + - Creating a source requires an organization-level permission, which is excessive for the kube-bench pod. This is why it is not part of the kube-bench application. + +## Create the GCP SCC Source for kube-bench + +This only needs to be done once per GCP organization. +The script requires the user to have the following permission at the organization scope: `securitycenter.sources.update`. The role currently associated with this permission is `roles/securitycenter.sourcesEditor`. + +```bash +python3 -m venv venv +source venv/bin/activate +pip install -r ./helper_scripts/create_gcp_source/requirements.txt +python ./helper_scripts/create_gcp_source/__main__.py +``` + +The output of this script is the name/ID of the source, in the format `organizations//sources/`. + +## Enable API Access to GCP Security Command Center + +_You will need GCP Security Command Center to be enabled in your project._ + +The details for assigning roles to the Workload Identity service account created by the job deployment are [documented here.](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#authenticating_to) +This step can be taken before you create the service account.
+ +```bash +PROJECT_NUMBER="1234567890" +PROJECT_ID="my_gcp_project_id" +NAMESPACE="kube-bench" +KSA_NAME="kube-bench-sa" +ROLE="roles/securitycenter.findingsEditor" +gcloud projects add-iam-policy-binding projects/$PROJECT_ID --role=$ROLE \ + --member=principal://iam.googleapis.com/projects/$PROJECT_NUMBER/locations/global/workloadIdentityPools/$PROJECT_ID.svc.id.goog/subject/ns/$NAMESPACE/sa/$KSA_NAME +``` + +### Modify the job configuration + +- Modify the kube-bench Configmap in `job-gke-stig-gscc.yaml` to specify the project ID, region, cluster name and source ID. +- In the same file, modify the image specifed in the Job to use the kube-bench image pushed to your GCP Artifact Registry. +- You may also need to modify the volume mount location for `kube-bench-gke-config` to match the version of the GKE STIG benchmark you are using. + +You can now run kube-bench as a pod in your cluster: `kubectl apply -f job-gke-stig-gscc.yaml` + +Findings will be generated for any kube-bench test that generates a `[FAIL]` or `[WARN]` output. If all tests pass, no findings will be generated. However, it's recommended that you consult the pod log output to check whether any findings were generated but could not be written to Security Command Center. + +Query findings in SCC with the following: + +``` +state="ACTIVE" AND NOT mute="MUTED" AND parent_display_name="KubeBench" AND category="KUBERNETES_BENCHMARK" +``` diff --git a/helper_scripts/create_gcp_source/__main__.py b/helper_scripts/create_gcp_source/__main__.py new file mode 100644 index 0000000..85b9065 --- /dev/null +++ b/helper_scripts/create_gcp_source/__main__.py @@ -0,0 +1,40 @@ +import sys +from google.cloud import securitycenter_v2 + + +def create_source(organization_id) -> dict: + """ + Create a new findings source + Args: + organization_id: organization_id is the numeric ID of the organization. e.g.:organization_id = "111122222444" + """ + client = securitycenter_v2.SecurityCenterClient() + org_name = f"organizations/{organization_id}" + + response = client.list_sources(parent=org_name) + + source_exists = False + for source in response: + if source.display_name == "KubeBench": + print(f"Found exisitng source: {source.name}") + source_exists = True + break + + if not source_exists: + response = client.create_source( + request={ + "parent": org_name, + "source": { + "display_name": "KubeBench", + "description": "KubeBench is an open-source CIS and STIG scanning tool for Kubernetes", + }, + } + ) + print(f"Created Source: {response.name}") + + +if __name__ == "__main__": + if len(sys.argv) == 2: + create_source(sys.argv[1]) + else: + print("Syntax: python __main__.py ") diff --git a/helper_scripts/create_gcp_source/requirements.txt b/helper_scripts/create_gcp_source/requirements.txt new file mode 100644 index 0000000..19f0139 --- /dev/null +++ b/helper_scripts/create_gcp_source/requirements.txt @@ -0,0 +1 @@ +google-cloud-securitycenter \ No newline at end of file diff --git a/internal/findings/gscc_publisher.go b/internal/findings/gscc_publisher.go index 23de31d..211228e 100644 --- a/internal/findings/gscc_publisher.go +++ b/internal/findings/gscc_publisher.go @@ -11,8 +11,14 @@ import ( // Publisher represents an object that publishes findings to GCP Security Command Center (SCC). 
type GSCCPublisher struct { - client *securitycenter.Client // GCP SCC Client - sourceID string // SCC Source ID + client *securitycenter.Client // GCP SCC Client + sourceID string // SCC Source ID +} + +// Capture the error and the finding which threw the error +type FailedFinding struct { + Error string `json:"error"` + Finding *securitypb.Finding `json:"finding"` } type GSCCPublisherOutput struct { @@ -22,7 +28,7 @@ type GSCCPublisherOutput struct { FailedCount int32 // The list of findings that failed to import. - FailedFindings []string + FailedFindings []FailedFinding // The number of findings that were successfully imported. // @@ -47,7 +53,7 @@ func (p *GSCCPublisher) PublishFinding(findings []*securitypb.Finding) (*GSCCPub for _, finding := range findings { req := &securitypb.CreateFindingRequest{ Parent: p.sourceID, - FindingId: finding.GetName(), // Ensure unique finding ID + FindingId: finding.GetName(), Finding: finding, } @@ -55,7 +61,10 @@ func (p *GSCCPublisher) PublishFinding(findings []*securitypb.Finding) (*GSCCPub if err != nil { errs = errors.Wrap(err, "finding publish failed") o.FailedCount++ - o.FailedFindings = append(o.FailedFindings, finding.GetName()) + o.FailedFindings = append(o.FailedFindings, FailedFinding{ + Error: err.Error(), + Finding: finding, + }) continue } fmt.Printf("Finding created: %s\n", resp.Name) diff --git a/job-gke-stig-gscc.yaml b/job-gke-stig-gscc.yaml new file mode 100644 index 0000000..bce5876 --- /dev/null +++ b/job-gke-stig-gscc.yaml @@ -0,0 +1,105 @@ +# Service account role required for V-242395 +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-bench-sa + namespace: kube-bench + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-bench-list-pods +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["list"] + resourceNames: [] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-bench-sa-binding +subjects: + - kind: ServiceAccount + name: kube-bench-sa + namespace: kube-bench +roleRef: + kind: ClusterRole + name: kube-bench-list-pods + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-bench-gke-config +data: + config.yaml: | + GCP_PROJECT_ID: "" + GCP_REGION: "" + CLUSTER_NAME: "" + GCP_SCC_SOURCE_ID: "projects//sources/" + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + spec: + serviceAccountName: kube-bench-sa + hostPID: true + containers: + - name: kube-bench + imagePullPolicy: Always + # Push the image to your GCP Artifact Registry and then refer to it here + # image: -docker.pkg.dev///kube-bench:latest + image: docker.io/aquasec/kube-bench:latest + command: + [ + "kube-bench", + "run", + "--benchmark", + "gke-stig-kubernetes-v2r2", + "--gscc", + ] + volumeMounts: + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + readOnly: true + - name: etc-systemd + mountPath: /etc/systemd + readOnly: true + - name: etc-kubernetes + mountPath: /etc/kubernetes + readOnly: true + - name: home-kubernetes + mountPath: /home/kubernetes + readOnly: true + - name: kube-bench-gke-config + mountPath: "/opt/kube-bench/cfg/gke-stig-kubernetes-v2r2/config.yaml" + subPath: config.yaml + readOnly: true + restartPolicy: Never + volumes: + - name: var-lib-kubelet + hostPath: + path: "/var/lib/kubelet" + - name: etc-systemd + hostPath: + path: "/etc/systemd" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" + - name: home-kubernetes + hostPath: + path: 
"/home/kubernetes" + - name: kube-bench-gke-config + configMap: + name: kube-bench-gke-config + items: + - key: config.yaml + path: config.yaml diff --git a/job-gke-stig.yaml b/job-gke-stig.yaml index 38fc2a1..b23947c 100644 --- a/job-gke-stig.yaml +++ b/job-gke-stig.yaml @@ -1,4 +1,4 @@ -# Service account role required for 242395 +# Service account role required for V-242395 apiVersion: v1 kind: ServiceAccount metadata: @@ -38,7 +38,7 @@ metadata: spec: template: spec: - serviceAccountName: kube-bench-sa + serviceAccountName: kube-bench-sa hostPID: true containers: - name: kube-bench @@ -47,12 +47,7 @@ spec: # image: -docker.pkg.dev///kube-bench:latest image: docker.io/aquasec/kube-bench:latest command: - [ - "kube-bench", - "run", - "--benchmark", - "gke-stig-kubernetes-v2r2" - ] + ["kube-bench", "run", "--benchmark", "gke-stig-kubernetes-v2r2"] volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet @@ -66,6 +61,10 @@ spec: - name: home-kubernetes mountPath: /home/kubernetes readOnly: true + - name: kube-bench-gke-config + mountPath: "/opt/kube-bench/cfg/gke-stig-kubernetes-v2r2/config.yaml" + subPath: config.yaml + readOnly: true restartPolicy: Never volumes: - name: var-lib-kubelet @@ -80,3 +79,9 @@ spec: - name: home-kubernetes hostPath: path: "/home/kubernetes" + - name: kube-bench-gke-config + configMap: + name: kube-bench-gke-config + items: + - key: config.yaml + path: config.yaml