From d249137e7d6fb111a157c6adedd4840289bf1a3c Mon Sep 17 00:00:00 2001 From: Carter Williamson Date: Tue, 18 Mar 2025 12:47:34 -0700 Subject: [PATCH] Done with first passes over k8s stig, still a few issues --- cfg/gke-stig-kubernetes-v2r2/config.yaml | 15 +- .../controlplane.yaml | 156 +--- .../managedservices.yaml | 800 ++---------------- cfg/gke-stig-kubernetes-v2r2/node.yaml | 250 ++++-- cfg/gke-stig-kubernetes-v2r2/policies.yaml | 79 +- job-gke-stig.yaml | 44 + 6 files changed, 321 insertions(+), 1023 deletions(-) diff --git a/cfg/gke-stig-kubernetes-v2r2/config.yaml b/cfg/gke-stig-kubernetes-v2r2/config.yaml index b39b29d..d8a7090 100644 --- a/cfg/gke-stig-kubernetes-v2r2/config.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/config.yaml @@ -1,16 +1,7 @@ --- ## Version-specific settings that override the values in cfg/config.yaml -## These settings are required if you are using the --asff option to report findings to AWS Security Hub -## AWS account number is required. -# AWS_ACCOUNT: "" -## AWS region is required. -# AWS_REGION: "" -## EKS Cluster ARN is required. -# CLUSTER_ARN: "" - node: - proxy: - defaultkubeconfig: "/var/lib/kubelet/kubeconfig" - kubelet: - defaultconf: "/etc/kubernetes/kubelet/kubelet-config.yaml" + confs: + - "/home/kubernetes/kubelet-config.yaml" + - "/etc/kubernetes/kubelet-config.yaml" \ No newline at end of file diff --git a/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml b/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml index 37b3b70..be7f732 100644 --- a/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/controlplane.yaml @@ -33,167 +33,43 @@ groups: systemctl daemon-reload systemctl restart kubelet.service - # TODO: This is pretty different from what the stig is asking for, double check - id: V-242400 - text: "The Kubernetes API server must have Alpha APIs disabled (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - bin_op: or - test_items: - - flag: "--feature-gates" - compare: - op: nothave - value: "AllAlpha=true" - set: true - - flag: "--feature-gates" - set: false + text: "The Kubernetes API server must have Alpha APIs disabled" + type: "manual" remediation: | - Edit any manifest files or $kubeletconf that contain the feature-gates - setting with AllAlpha set to "true". - Set the flag to "false" or remove the "AllAlpha" setting - completely. Restart the kubelet service if the kubelet config file - if the kubelet config file is changed. - scored: true - - - # - id: V-242400 - # text: "The Kubernetes API server must have Alpha APIs disabled." - # audit: "grep -i feature-gates /etc/kubernetes/manifests/kube-apiserver.yaml" - # tests: - # test_items: - # - flag: "--feature-gates" - # compare: - # op: nothave - # value: "AllAlpha=true" - # remediation: | - # Edit any manifest file that contains the "--feature-gates" setting with "AllAlpha" set to "true". - # Set the value of "AllAlpha" to "false" or remove the setting completely. - # scored: true + Check the release channel using the GCP gcloud CLI. + gcloud container clusters describe --region --format json | jq -r '.releaseChannel.channel' + This should be set to "STABLE". Any "Alpha" clusters will need to be rebuilt on the STABLE release channel. - id: 2.2 text: "DISA Category Code II - Controller Manager Security" - checks: - # - id: V-242376 - # text: "The Kubernetes Controller Manager must use TLS 1.2, at a minimum, to protect the confidentiality of sensitive data during electronic dissemination." 
- # audit: "grep -i tls-min-version /etc/kubernetes/manifests/kube-controller-manager.yaml" - # tests: - # test_items: - # - flag: "--tls-min-version" - # compare: - # op: nothave - # value: "VersionTLS10" - # - flag: "--tls-min-version" - # compare: - # op: nothave - # value: "VersionTLS11" - # remediation: | - # Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - # Set the value of "--tls-min-version" to "VersionTLS12" or higher. - + checks: - id: V-242443 text: " Kubernetes must contain the latest updates as authorized by IAVMs, CTOs, DTMs, and STIGs. (Manual)" type: "manual" remediation: | Upgrade Kubernetes to a supported version. - # TODO: Update this ref - id: V-242461 text: "Kubernetes API Server audit logs must be enabled. (Manual)" type: "manual" remediation: | Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. - Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + Ref: https://cloud.google.com/kubernetes-engine/docs/how-to/view-logs#control-plane-access-logs - - # TODO: Validate this one - id: V-242462 - text: "The Kubernetes PKI directory must be owned by root." - audit: "stat -c %U:%G /etc/kubernetes/pki" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the PKI directory to root:root by executing the command: - chown -R root:root /etc/kubernetes/pki - scored: true + text: "The Kubernetes API Server must be set to audit log max size | Component of GKE Control Plane" + type: "skip" - # TODO: Validate this one - id: V-242463 - text: "The Kubernetes PKI directory must have file permissions set to 644 or more restrictive." - audit: "find /etc/kubernetes/pki -type f -name '*.crt' -exec stat -c %a {} \\;" - tests: - test_items: - - flag: "644" - compare: - op: lte - value: "644" - remediation: | - Change the permissions of the PKI certificate files to 644 by executing the command: - find /etc/kubernetes/pki -type f -name '*.crt' -exec chmod 644 {} \; - scored: true + text: "The Kubernetes API Server must be set to audit log maximum backup | Component of GKE Control Plane" + type: "skip" - # TODO: Validate this one - id: V-242464 - text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive." - audit: "find /etc/kubernetes/pki -type f -name '*.key' -exec stat -c %a {} \\;" - tests: - test_items: - - flag: "600" - compare: - op: lte - value: "600" - remediation: | - Change the permissions of the PKI key files to 600 by executing the command: - find /etc/kubernetes/pki -type f -name '*.key' -exec chmod 600 {} \; - scored: true + text: "The Kubernetes API Server audit log retention must be set | Component of GKE Control Plane" + type: "skip" - # TODO: Validate this one - - id: V-242465 - text: "The Kubernetes Controller Manager must have secure binding." - audit: "grep -i bind-address /etc/kubernetes/manifests/kube-controller-manager.yaml" - tests: - test_items: - - flag: "--bind-address" - compare: - op: eq - value: "127.0.0.1" - remediation: | - Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Ensure the "--bind-address" flag is set to "127.0.0.1". 
- scored: true + - id: V-242394 + text: "The Kubernetes API Server audit log path must be set | Component of GKE Control Plane" + type: "skip" - - id: 2.3 - text: "DISA Category Code III - Scheduler Security" - checks: - - id: V-242377 - text: "The Kubernetes Scheduler must use TLS 1.2, at a minimum, to protect the confidentiality of sensitive data during electronic dissemination." - audit: "grep -i tls-min-version /etc/kubernetes/manifests/kube-scheduler.yaml" - tests: - test_items: - - flag: "--tls-min-version" - compare: - op: nothave - value: "VersionTLS10" - - flag: "--tls-min-version" - compare: - op: nothave - value: "VersionTLS11" - remediation: | - Edit the Kubernetes Scheduler manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--tls-min-version" to "VersionTLS12" or higher. - scored: true - - id: V-242411 - text: "The Kubernetes Scheduler must enforce ports, protocols, and services (PPS) that adhere to the PPSM CAL." - audit: "grep -i scheduler /etc/kubernetes/manifests/kube-scheduler.yaml" - tests: - test_items: - - flag: "--secure-port" - compare: - op: gt - value: "0" - remediation: | - Amend any system documentation requiring revision to comply with the PPSM CAL. - Update Kubernetes Scheduler manifest and namespace PPS configuration to comply with the PPSM CAL. - scored: true diff --git a/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml b/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml index 1b27c74..ebd0d5f 100644 --- a/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/managedservices.yaml @@ -16,33 +16,16 @@ groups: text: "The Kubernetes API server must have the insecure bind address not set | Component of GKE Control Plane" type: "skip" - # TODO Verify this one (can't find it like on the aws side https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html) - id: V-242436 - text: "The Kubernetes API server must have the ValidatingAdmissionWebhook enabled (manual)" - type: "manual" - remediation: GKE automatically enable ValidatingAdmissionWebhook - scored: false + text: "The Kubernetes API server must have the ValidatingAdmissionWebhook enabled | Component of GKE Control Plane" + type: "skip" - id: V-242437 text: "[Deprecated] Kubernetes must have a pod security policy set. policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+" type: "skip" - # TODO pretty sure this doesn't work - # - id: V-245542 - # text: "Kubernetes API Server must disable basic authentication to protect information in transit." - # audit: "grep -i basic-auth-file /etc/kubernetes/manifests/kube-apiserver.yaml" - # tests: - # test_items: - # - flag: "--basic-auth-file" - # set: false - # remediation: | - # Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - # Remove the setting "--basic-auth-file". 
- # scored: false - - - id: V-245542 - text: "Kubernetes API Server must disable basic authentication to protect information in transit | Component of EKS Control Plane" + text: "Kubernetes API Server must disable basic authentication to protect information in transit | Component of GKE Control Plane" type: "skip" - id: 5.2 @@ -81,7 +64,7 @@ groups: type: "skip" - id: V-242389 - text: "The Kubernetes API server must have the secure port set | Component of EKS Control Plane" + text: "The Kubernetes API server must have the secure port set | Component of GKE Control Plane" type: "skip" # TODO: Didn't actually see this one in the k8s stig file @@ -97,68 +80,13 @@ groups: text: "Kubernetes API Server must generate audit records | Component of GKE Control Plane" type: "skip" - # TODO This will need tweaks to work I think but might be automatable - # - id: V-242405 - # text: "The Kubernetes manifests must be owned by root." - # audit: "ls -l /etc/kubernetes/manifests/*" - # tests: - # test_items: - # - flag: "owner" - # compare: - # op: eq - # value: "root:root" - # remediation: | - # On the Control Plane, change to the /etc/kubernetes/manifests directory. - # Run the command: - # chown root:root * - - # To verify the change took place, run the command: - # ls -l * - - # All the manifest files should be owned by root:root. - # scored: false - - id: V-242405 text: "The Kubernetes manifests must be owned by root | Component of GKE Control Plane" type: "skip" - # TODO verify this one, I think the permissions flag just needs to be added to the ls cmd - id: V-242408 - text: "The Kubernetes manifest files must have least privileges." - audit: "ls -l /etc/kubernetes/manifests/*" - tests: - test_items: - - flag: "permissions" - compare: - op: lte - value: "644" - remediation: | - On both Control Plane and Worker Nodes, change to the /etc/kubernetes/manifests directory. - Run the command: - chmod 644 * - To verify the change took place, run the command: - ls -l * - All the manifest files should now have privileges of "644". - scored: false - - # - id: V-242408 - # text: "The Kubernetes manifests must have least privileges | Component of GKE Control Plane" - # type: "skip" - - # TODO Pretty sure this is actually a GKE setting - # - id: V-242409 - # text: "Kubernetes Controller Manager must disable profiling." - # audit: "grep -i profiling /etc/kubernetes/manifests/kube-controller-manager.yaml" - # tests: - # test_items: - # - flag: "--profiling" - # compare: - # op: eq - # value: "false" - # remediation: | - # Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - # Set the argument "--profiling" to "false". - # scored: false + text: "The Kubernetes manifests must have least privileges | Component of GKE Control Plane" + type: "skip" - id: V-242409 text: "Kubernetes Controller Manager must disable profiling | Component of GKE Control Plane" @@ -184,729 +112,135 @@ groups: text: "The Kubernetes API server must use approved cipher suites | Component of GKE Control Plane" type: "skip" - # TODO Validate this one - id: V-242419 - text: "Kubernetes API Server must have the SSL Certificate Authority set." - audit: "grep -i client-ca-file /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--client-ca-file" - set: true - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. 
- Set the value of "--client-ca-file" to the path containing an Approved Organizational Certificate. - scored: false + text: "Kubernetes API Server must have the SSL Certificate Authority set | Component of GKE Control Plane" + type: "skip" - # - id: V-242419 - # text: "Kubernetes API Server must have the SSL Certificate Authority set | Component of GKE Control Plane" - # type: "skip" - # TODO Validate this one - - id: V-242420 - text: "Kubernetes Kubelet must have the SSL Certificate Authority set." - audit: "ps -ef | grep kubelet | grep -- --client-ca-file" - tests: - test_items: - - flag: "--client-ca-file" - set: true - remediation: | - On the Control Plane, run the command: - ps -ef | grep kubelet - - If the "--client-ca-file" option exists, verify its value is correctly set. - Note the path to the config file (identified by --config). - - Edit the Kubernetes Kubelet config file: - Set the value of "clientCAFile" to a path containing an Approved Organizational Certificate. - - Restart the kubelet service using the following command: - systemctl daemon-reload && systemctl restart kubelet - scored: false - - # - id: V-242420 - # text: "Kubernetes Kubelet must have the SSL Certificate Authority set | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242421 - text: "Kubernetes Controller Manager must have the SSL Certificate Authority set." - audit: "grep -i root-ca-file /etc/kubernetes/manifests/kube-controller-manager.yaml" - tests: - test_items: - - flag: "--root-ca-file" - set: true - remediation: | - Edit the Kubernetes Controller Manager manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--root-ca-file" to a path containing an Approved Organizational Certificate. - scored: false + text: "Kubernetes Controller Manager must have the SSL Certificate Authority set | Component of GKE Control Plane" + type: "skip" - # - id: V-242421 - # text: "Kubernetes Controller Manager must have the SSL Certificate Authority set | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242422 - text: "Kubernetes API Server must have a certificate for communication." - audit: "grep -i tls-cert-file /etc/kubernetes/manifests/kube-apiserver.yaml && grep -i tls-private-key-file /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - bin_op: and - test_items: - - flag: "--tls-cert-file" - set: true - - flag: "--tls-private-key-file" - set: true - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Ensure the "--tls-cert-file" and "--tls-private-key-file" flags are set to paths containing an Approved Organizational Certificate and corresponding private key. - scored: false + text: "Kubernetes API Server must have a certificate for communication | Component of GKE Control Plane" + type: "skip" - # - id: V-242422 - # text: "Kubernetes API Server must have a certificate for communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242423 - text: "Kubernetes etcd must enable client authentication to secure service." - audit: "grep -i client-cert-auth /etc/kubernetes/manifests/etcd.yaml" - tests: - test_items: - - flag: "--client-cert-auth" - compare: - op: eq - value: "true" - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. 
- Set the value of "--client-cert-auth" to "true" for etcd. - scored: false + text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" + type: "skip" - # - id: V-242423 - # text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242424 - text: "Kubernetes etcd must have a certificate for communication." - audit: "grep -i cert-file /etc/kubernetes/manifests/etcd.yaml && grep -i key-file /etc/kubernetes/manifests/etcd.yaml" - tests: - bin_op: and - test_items: - - flag: "--cert-file" - set: true - - flag: "--key-file" - set: true - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Ensure the "--cert-file" and "--key-file" flags are set to paths containing an Approved Organizational Certificate and corresponding private key. - scored: false + text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" + type: "skip" - # - id: V-242424 - # text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242425 - text: "Kubernetes Kubelet must have a certificate for communication." - audit: "ps -ef | grep kubelet | grep -- --tls-cert-file" - tests: - test_items: - - flag: "--tls-cert-file" - set: true - remediation: | - On the Control Plane, run the command: - ps -ef | grep kubelet + text: "Kubernetes Kubelet must enable tls-cert-file for client authentication to secure service | Component of GKE Control Plane" + type: "skip" - If the "--tls-cert-file" option exists, verify its value is correctly set. - Note the path to the config file (identified by --config). - - Edit the Kubernetes Kubelet config file: - Set the value of "tlsCertFile" to a path containing an Approved Organizational Certificate. - - Restart the kubelet service using the following command: - systemctl daemon-reload && systemctl restart kubelet - scored: false - - - # - id: V-242425 - # text: "Kubernetes Kubelet must enable tls-cert-file for client authentication to secure service | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242426 - text: "Kubernetes etcd must enable peer client authentication." - audit: "grep -i peer-client-cert-auth /etc/kubernetes/manifests/etcd.yaml" - tests: - test_items: - - flag: "--peer-client-cert-auth" - compare: - op: eq - value: "true" - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--peer-client-cert-auth" to "true" for etcd. - scored: false + text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" + type: "skip" - # - id: V-242426 - # text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242427 - text: "Kubernetes etcd must have a key file for secure communication." - audit: "grep -i key-file /etc/kubernetes/manifests/etcd.yaml" - tests: - test_items: - - flag: "--key-file" - set: true - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--key-file" to the Approved Organizational Certificate. 
- scored: false + text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane" + type: "skip" - # - id: V-242427 - # text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242428 - text: "Kubernetes etcd must have a certificate for communication." - audit: "grep -i cert-file /etc/kubernetes/manifests/etcd.yaml" - tests: - test_items: - - flag: "--cert-file" - set: true - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--cert-file" to the Approved Organizational Certificate. - scored: false + text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane" + type: "skip" - # - id: V-242428 - # text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242429 - text: "Kubernetes etcd must have the SSL Certificate Authority set." - audit: "grep -i etcd-cafile /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--etcd-cafile" - set: true - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--etcd-cafile" to the Certificate Authority for etcd. - scored: false + text: "Kubernetes etcd must have the SSL Certificate Authority set | Component of GKE Control Plane" + type: "skip" - # - id: V-242429 - # text: "Kubernetes etcd must have the SSL Certificate Authority set | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242430 - text: "Kubernetes etcd must have a certificate for communication." - audit: "grep -i etcd-certfile /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--etcd-certfile" - set: true - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--etcd-certfile" to the certificate to be used for communication with etcd. - scored: false + text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane" + type: "skip" - # - id: V-242430 - # text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242431 - text: "Kubernetes etcd must have a key file for secure communication." - audit: "grep -i etcd-keyfile /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--etcd-keyfile" - set: true - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--etcd-keyfile" to the key file used for secure communication with etcd. - scored: false + text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane" + type: "skip" - - # - id: V-242431 - # text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242432 - text: "Kubernetes etcd must have peer-cert-file set for secure communication." 
- audit: "grep -i peer-cert-file /etc/kubernetes/manifests/etcd.yaml" - tests: - test_items: - - flag: "--peer-cert-file" - set: true - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--peer-cert-file" to the certificate to be used for communication with etcd. - scored: false + text: "Kubernetes etcd must have peer-cert-file set for secure communication | Component of GKE Control Plane" + type: "skip" - # - id: V-242432 - # text: "Kubernetes etcd must have peer-cert-file set for secure communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242433 - text: "Kubernetes etcd must have a peer-key-file set for secure communication." - audit: "grep -i peer-key-file /etc/kubernetes/manifests/etcd.yaml" - tests: - test_items: - - flag: "--peer-key-file" - set: true - remediation: | - Edit the Kubernetes etcd manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--peer-key-file" to the certificate to be used for communication with etcd. - scored: false + text: "Kubernetes etcd must have a peer-key-file set for secure communication | Component of GKE Control Plane" + type: "skip" - - # - id: V-242433 - # text: "Kubernetes etcd must have a peer-key-file set for secure communication | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242438 - text: "Kubernetes API Server must configure timeouts to limit attack surface." - audit: "grep -i request-timeout /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - test_items: - - flag: "--request-timeout" - compare: - op: gt - value: "0" - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Ensure the "--request-timeout" flag is set to a value greater than "0". - scored: false + text: "Kubernetes API Server must configure timeouts to limit attack surface | Component of GKE Control Plane" + type: "skip" - # - id: V-242438 - # text: "Kubernetes API Server must configure timeouts to limit attack surface | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242444 - text: "The Kubernetes component manifests must be owned by root." - audit: "stat -c %U:%G /etc/kubernetes/manifests/*" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the manifest files to root:root by executing the command: - chown root:root /etc/kubernetes/manifests/* - scored: false - - # - id: V-242444 - # text: "The Kubernetes component manifests must be owned by root | Component of GKE Control Plane" - # type: "skip" + text: "The Kubernetes component manifests must be owned by root | Component of GKE Control Plane" + type: "skip" - id: V-242445 text: "The Kubernetes component etcd must be owned by etcd | Component of GKE Control Plane" type: "skip" - # TODO Validate this one - id: V-242446 - text: "The Kubernetes conf files must be owned by root." 
- audit: "stat -c %U:%G /etc/kubernetes/admin.conf /etc/kubernetes/scheduler.conf /etc/kubernetes/controller-manager.conf" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the Kubernetes conf files to root:root by executing the commands: - chown root:root /etc/kubernetes/admin.conf - chown root:root /etc/kubernetes/scheduler.conf - chown root:root /etc/kubernetes/controller-manager.conf - scored: false + text: "The Kubernetes conf files must be owned by root | Component of GKE Control Plane" + type: "skip" - - # - id: V-242446 - # text: "The Kubernetes conf files must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242447 - text: "The Kubernetes Kube Proxy kubeconfig must have file permissions set to 644 or more restrictive." - audit: "stat -c %a $proxykubeconfig" - tests: - test_items: - - flag: "644" - compare: - op: lte - value: "644" - remediation: | - Change the permissions of the Kube Proxy kubeconfig to 644 by executing the command: - chmod 644 $proxykubeconfig - scored: false + text: "The Kubernetes Kube Proxy must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + type: "skip" - - # - id: V-242447 - # text: "The Kubernetes Kube Proxy must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242448 - text: "The Kubernetes Kube Proxy kubeconfig must be owned by root." - audit: "stat -c %U:%G $proxykubeconfig" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the Kube Proxy kubeconfig to root:root by executing the command: - chown root:root $proxykubeconfig - scored: false + text: "The Kubernetes Kube Proxy must be owned by root | Component of GKE Control Plane" + type: "skip" - # - id: V-242448 - # text: "The Kubernetes Kube Proxy must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242449 - text: "The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive." - audit: "stat -c %a $kubeletcafile" - tests: - test_items: - - flag: "644" - compare: - op: lte - value: "644" - remediation: | - Change the permissions of the Kubernetes Kubelet certificate authority file to 644 by executing the command: - chmod 644 $kubeletcafile - scored: false + text: "The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + type: "skip" - # - id: V-242449 - # text: "The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242450 - text: "The Kubernetes Kubelet certificate authority must be owned by root." 
- audit: "stat -c %U:%G $kubeletcafile" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the Kubernetes Kubelet certificate authority file to root:root by executing the command: - chown root:root $kubeletcafile - scored: false + text: "The Kubernetes Kubelet certificate authority must be owned by root | Component of GKE Control Plane" + type: "skip" - - # - id: V-242450 - # text: "The Kubernetes Kubelet certificate authority must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242451 - text: "The Kubernetes component PKI must be owned by root." - audit: "stat -c %U:%G /etc/kubernetes/pki/*" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the PKI directory and its contents to root:root by executing the command: - chown -R root:root /etc/kubernetes/pki/ - scored: false - - - # - id: V-242451 - # text: "The Kubernetes component PKI must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - - id: V-242452 - text: "The Kubernetes kubelet KubeConfig must have file permissions set to 644 or more restrictive." - audit: "stat -c %a /etc/kubernetes/kubelet.conf" - tests: - test_items: - - flag: "644" - compare: - op: lte - value: "644" - remediation: | - Change the permissions of the Kubelet KubeConfig file to 644 by executing the command: - chmod 644 /etc/kubernetes/kubelet.conf - scored: false - - # - id: V-242452 - # text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - - id: V-242453 - text: "The Kubernetes kubelet KubeConfig file must be owned by root." - audit: "stat -c %U:%G /etc/kubernetes/kubelet.conf" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the kubelet.conf file to root:root by executing the command: - chown root:root /etc/kubernetes/kubelet.conf - scored: false - - # - id: V-242453 - # text: "The Kubernetes kubelet config must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - - id: V-242454 - text: "The Kubernetes kubeadm.conf must be owned by root." - audit: "stat -c %U:%G $kubeletdefaultsvc" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the kubeadm.conf to root:root by executing the command: - chown root:root $kubeletdefaultsvc - scored: false - - # - id: V-242454 - # text: "The Kubernetes kubeadm.conf must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - - id: V-242455 - text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive." - audit: "stat -c %a $kubeletdefaultsvc" - tests: - test_items: - - flag: "644" - compare: - op: lte - value: "644" - remediation: | - Change the permissions of the kubeadm.conf to 644 by executing the command: - chmod 644 $kubeletdefaultsvc - scored: false - - # - id: V-242455 - # text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - - # TODO Validate this one - - id: V-242456 - text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive." 
- audit: "stat -c %a /var/lib/kubelet/config.yaml" - tests: - test_items: - - flag: "644" - compare: - op: lte - value: "644" - remediation: | - Change the permissions of the config.yaml to 644 by executing the command: - chmod 644 /var/lib/kubelet/config.yaml - scored: false - - - # - id: V-242456 - # text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - - id: V-242457 - text: "The Kubernetes kubelet config must be owned by root." - audit: "stat -c %U:%G /var/lib/kubelet/config.yaml" - tests: - test_items: - - flag: "root:root" - set: true - remediation: | - Change the ownership of the kubelet config file to root:root by executing the command: - chown root:root /var/lib/kubelet/config.yaml - scored: false - - - # - id: V-242457 - # text: "The Kubernetes kubelet config must be owned by root | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one don't think it works - # - id: V-242458 - # text: "The Kubernetes etcd must have file permissions set to 644 or more restrictive." - # audit: "stat -c %a /var/lib/etcd/*" - # tests: - # test_items: - # - flag: "644" - # compare: - # op: lte - # value: "644" - # remediation: | - # Change the permissions of the etcd data directory to 644 by executing the command: - # chmod -R 644 /var/lib/etcd/* - # scored: false - - - id: V-242458 - text: "The Kubernetes API Server must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + text: "The Kubernetes component PKI must be owned by root | Component of GKE Control Plane" type: "skip" - id: V-242459 text: "The Kubernetes etcd must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" type: "skip" - # TODO Validate this one - id: V-242460 - text: "The Kubernetes admin kubeconfig must have file permissions set to 644 or more restrictive." - audit: "stat -c %a /etc/kubernetes/admin.conf" - tests: - test_items: - - flag: "644" - compare: - op: lte - value: "644" - remediation: | - Change the permissions of the admin kubeconfig file to 644 by executing the command: - chmod 644 /etc/kubernetes/admin.conf - scored: false + text: "The Kubernetes admin.conf must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + type: "skip" - # - id: V-242460 - # text: "The Kubernetes admin.conf must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242466 - text: "The Kubernetes Scheduler must have secure binding." - audit: "grep -i bind-address /etc/kubernetes/manifests/kube-scheduler.yaml" - tests: - test_items: - - flag: "--bind-address" - compare: - op: eq - value: "127.0.0.1" - remediation: | - Edit the Kubernetes Scheduler manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Ensure the "--bind-address" flag is set to "127.0.0.1". - scored: false + text: "The Kubernetes PKI CRT must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" + type: "skip" - # - id: V-242466 - # text: "The Kubernetes PKI CRT must have file permissions set to 644 or more restrictive | Component of GKE Control Plane" - # type: "skip" - - # TODO Validate this one - id: V-242467 - text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive." 
- audit: "find /etc/kubernetes/pki -type f -name '*.key' -exec stat -c %a {} \\;" - tests: - test_items: - - flag: "600" - compare: - op: lte - value: "600" - remediation: | - Change the permissions of the PKI key files to 600 by executing the command: - find /etc/kubernetes/pki -type f -name '*.key' -exec chmod 600 {} \; - scored: false - - # - id: V-242467 - # text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive | Component of GKE Control Plane" - # type: "skip" + text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive | Component of GKE Control Plane" + type: "skip" - id: V-242468 - text: "The Kubernetes API Server must prohibit communication using TLS version 1.0 and 1.1, and SSL 2.0 and 3.0 | Component of EKS Control Plane" + text: "The Kubernetes API Server must prohibit communication using TLS version 1.0 and 1.1, and SSL 2.0 and 3.0 | Component of GKE Control Plane" type: "skip" -#TODO Test this, pretty sure it doesn't work - # - id: V-245541 - # text: "Kubernetes Kubelet must not disable timeouts." - # audit: "ps -ef | grep kubelet | grep -- --streaming-connection-idle-timeout" - # tests: - # test_items: - # - flag: "--streaming-connection-idle-timeout" - # compare: - # op: gte - # value: "5m" - # remediation: | - # On the Control Plane, run the command: - # ps -ef | grep kubelet - - # If the "--streaming-connection-idle-timeout" option exists, verify its value. - - # Edit the Kubernetes Kubelet config file: - # Set the argument "streamingConnectionIdleTimeout" to a value of "5m" or greater. - - # Restart the kubelet service using the following command: - # systemctl daemon-reload && systemctl restart kubelet - # scored: false - - - id: V-245541 - text: "Kubernetes Kubelet must not disable timeouts | Component of GKE Control Plane" - type: "skip" - - # TODO Check this, probably doesn't work - # - id: V-245543 - # text: "Kubernetes API Server must disable token authentication to protect information in transit." - # audit: "grep -i token-auth-file /etc/kubernetes/manifests/kube-apiserver.yaml" - # tests: - # test_items: - # - flag: "--token-auth-file" - # set: false - # remediation: | - # Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - # Remove the setting "--token-auth-file". - # scored: false - - id: V-245543 text: "Kubernetes API Server must disable token authentication to protect information in transit | Component of GKE Control Plane" type: "skip" - # TODO Verify this one - id: V-245544 - text: "Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit." - audit: "grep -i kubelet-client-certificate /etc/kubernetes/manifests/kube-apiserver.yaml && grep -i kubelet-client-key /etc/kubernetes/manifests/kube-apiserver.yaml" - tests: - bin_op: and - test_items: - - flag: "--kubelet-client-certificate" - set: true - - flag: "--kubelet-client-key" - set: true - remediation: | - Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - Set the value of "--kubelet-client-certificate" and "--kubelet-client-key" to an Approved Organizational Certificate and key pair. 
- Restart the kubelet service using the following command: - service kubelet restart + text: "Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit | Component of GKE Control Plane" + type: "skip" - # - id: V-245544 - # text: "Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit | Component of GKE Control Plane" - # type: "skip" + - id: V-254800 + text: "Kubernetes must have a Pod Security Admission control file configured. | Component of GKE Control Plane" + type: "skip" - # TODO This one is "new" doesn't appear to work though - # - id: V-254800 - # text: "Kubernetes must have a Pod Security Admission control file configured." - # audit: "grep -i admission-control-config-file /etc/kubernetes/manifests/kube-apiserver.yaml" - # tests: - # test_items: - # - flag: "--admission-control-config-file" - # set: true - # remediation: | - # Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - # Set the value of "--admission-control-config-file" to a valid path for the file. - # Create an admission controller config file with the necessary PodSecurity configuration. - # scored: false + - id: V-254801 + text: "Kubernetes must enable PodSecurity admission controller on static pods and Kubelets. | Component of GKE Control Plane" + type: "skip" - # TODO This one is "new" doesn't appear to work though - # - id: V-254801 - # text: "Kubernetes must enable PodSecurity admission controller on static pods and Kubelets." - # audit: "grep -i feature-gates /etc/kubernetes/manifests/kube-apiserver.yaml" - # tests: - # test_items: - # - flag: "--feature-gates" - # compare: - # op: has - # value: "PodSecurity=true" - # remediation: | - # Edit the Kubernetes API Server manifest file in the /etc/kubernetes/manifests directory on the Kubernetes Control Plane. - # Ensure the argument "--feature-gates=PodSecurity=true" is present. 
- # scored: false \ No newline at end of file + - id: V-242394 + text: "Kubernetes Worker Nodes must not have the sshd service enabled | Component of GKE Control Plane" + type: "skip" \ No newline at end of file diff --git a/cfg/gke-stig-kubernetes-v2r2/node.yaml b/cfg/gke-stig-kubernetes-v2r2/node.yaml index 5802411..ddbb58b 100644 --- a/cfg/gke-stig-kubernetes-v2r2/node.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/node.yaml @@ -9,23 +9,22 @@ groups: text: "DISA Category Code I" checks: - id: V-242387 # CIS 3.2.4 - text: "The Kubernetes Kubelet must have the read-only port flag disabled (Manual)" + text: "The Kubernetes Kubelet must have the read-only port flag disabled" audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: "--read-only-port" path: '{.readOnlyPort}' set: false - - flag: "--read-only-port" - path: '{.readOnlyPort}' + - path: '{.readOnlyPort}' compare: op: eq value: 0 bin_op: or remediation: | If modifying the Kubelet config file, edit the kubelet-config.json file - /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to 0 + $kubeletconf and set the below parameter to 0 "readOnlyPort": 0 @@ -47,7 +46,7 @@ groups: - id: V-242391 # CIS 3.2.1 text: "Ensure that the Anonymous Auth is Not Enabled (Automated)" audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: "--anonymous-auth" @@ -68,7 +67,7 @@ groups: with the --config argument. The file can be viewed with a command such as more or less, like so: - sudo less /home/kubernetes/kubelet-config.yaml + sudo less $kubeletconf Disable Anonymous Authentication by setting the following parameter: @@ -100,7 +99,7 @@ groups: - id: V-242392 # CIS 3.2.2 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --authorization-mode @@ -158,8 +157,6 @@ groups: systemctl status kubelet -l scored: true - # TODO: Verify this, probably requires rbac permissions using kubectl - # This needs proper permissions set, TODO!! - id: V-242395 text: "Kubernetes dashboard must not be enabled." audit: "kubectl get pods --all-namespaces -l k8s-app=kubernetes-dashboard" @@ -172,11 +169,27 @@ groups: kubectl delete deployment kubernetes-dashboard --namespace=kube-system scored: true - # TODO This could be automated, but requires a little more effort or adding jq to the docker image - # maybe test path will work - id: V-242396 text: "Kubernetes Kubectl cp command must give expected access and results. (Manual)" type: "manual" + # audit: "kubectl version --client --output=yaml | grep 'gitVersion' | sed -E 's/.*v([0-9]+)\\.([0-9]+)\\.([0-9]+)/major=\\1\\nminor=\\2\\npatch=\\3/'" + # tests: + # bin_op: or + # test_items: + # - flag: "major=" + # compare: + # op: gte + # value: 1 + + # - flag: "minor=" + # compare: + # op: gte + # value: 12 + + # - flag: "patch=" + # compare: + # op: gte + # value: 9 remediation: | If any Worker nodes are not using kubectl version 1.12.9 or newer, this is a finding. Upgrade the Master and Worker nodes to the latest version of kubectl. 
@@ -244,7 +257,7 @@ groups: - id: V-242404 # CIS 3.2.8 text: "Ensure that the --rotate-certificates argument is not present or is set to true (Automated)" audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --rotate-certificates @@ -308,6 +321,8 @@ groups: For any of the pods that are using ports below 1024, reconfigure the pod to use a service to map a host non-privileged port to the pod port or reconfigure the image to use non-privileged ports. + kubectl get services -A -o json | jq '.items[].spec.ports' + Note this should excempt non-configurable services from the GKE managed service, such as anthos, gatewaykeeper, kubelet, etc. scored: false - id: V-242415 text: "Secrets in Kubernetes must not be stored as environment variables.(Manual)" @@ -335,68 +350,53 @@ groups: text: "DISA Category Code II - Node Security" checks: - # TODO Verify this, low confidence this will work - # These both don't work. Might need to be a manual check. + # TODO Verify this.. seems to be failing but also not sure if this can be disabled with GKE - id: V-242393 text: "Kubernetes Worker Nodes must not have sshd service running. (Automated)" - audit: '/bin/sh -c ''systemctl show -p ActiveState sshd'' ' + audit: 'ps aux | grep sshd' tests: test_items: - - flag: ActiveState - compare: - op: eq - value: inactive + - flag: bin/sshd + set: false remediation: | To stop the sshd service, run the command: systemctl stop sshd scored: true - - id: V-242393 - text: "Kubernetes Worker Nodes must not have sshd service running." - audit: "/bin/sh -c \"systemctl status sshd\"" - tests: - test_items: - - flag: "sshd" - compare: - op: eq - value: "inactive" - remediation: | - To stop the sshd service, run the command: - systemctl stop sshd - To disable the service: - systemctl disable sshd - scored: true - # TODO Verify this, low confidence this will work # Both of these are not working at the moment - - id: V-242394 - text: "Kubernetes Worker Nodes must not have the sshd service enabled. (Automated)" - audit: "/bin/sh -c 'systemctl is-enabled sshd.service'" - tests: - test_items: - - flag: "disabled" - remediation: | - To disable the sshd service, run the command: - chkconfig sshd off - scored: true - - id: V-242394 - text: "Kubernetes Worker Nodes must not have the sshd service enabled." - audit: "systemctl is-enabled sshd" - tests: - test_items: - - flag: "sshd" - compare: - op: eq - value: "disabled" - remediation: | - To disable the sshd service, run the command: - systemctl disable sshd - scored: true + # - id: V-242394 + # text: "Kubernetes Worker Nodes must not have the sshd service enabled. (Automated)" + # audit: "/bin/sh -c 'systemctl list-unit-files | grep sshd'" + # tests: + # bin_op: + # test_items: + # - flag: "disabled" + # - flag: "sshd" + # set: false + # remediation: | + # To disable the sshd service, run the command: + # chkconfig sshd off + # scored: true + + # - id: V-242394 + # text: "Kubernetes Worker Nodes must not have the sshd service enabled." 
+ # audit: "systemctl is-enabled sshd" + # tests: + # test_items: + # - flag: "sshd" + # compare: + # op: eq + # value: "disabled" + # remediation: | + # To disable the sshd service, run the command: + # systemctl disable sshd + # scored: true - id: V-242434 # CIS 3.2.6 text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --make-iptables-util-chains @@ -450,4 +450,134 @@ groups: systemctl daemon-reload systemctl restart kubelet.service systemctl status kubelet -l + scored: true + + - id: V-242420 + text: "Kubernetes Kubelet must have the SSL Certificate Authority set." + audit: "ps -ef | grep kubelet" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--client-ca-file" + set: true + - path: "{.authentication.x509.clientCAFile}" + set: true + remediation: | + On the Control Plane, run the command: + ps -ef | grep kubelet + + If the "--client-ca-file" option exists, verify its value is correctly set. + Note the path to the config file (identified by --config). + + Edit the Kubernetes Kubelet config file: + Set the value of "clientCAFile" to a path containing an Approved Organizational Certificate. + + Restart the kubelet service using the following command: + systemctl daemon-reload && systemctl restart kubelet + scored: false + + - id: V-242452 + text: "The Kubernetes kubelet KubeConfig must have file permissions set to 644 or more restrictive." + audit: "stat -c %a $kubeletconf" + tests: + test_items: + - flag: "644" + compare: + op: lte + value: "644" + remediation: | + Change the permissions of the Kubelet KubeConfig file to 644 by executing the command: + chmod 644 $kubeletconf + scored: false + + - id: V-242453 + text: "The Kubernetes kubelet KubeConfig file must be owned by root." + audit: "stat -c %U:%G $kubeletconf" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the kubelet.conf file to root:root by executing the command: + chown root:root $kubeletconf + scored: false + + - id: V-242454 + text: "The Kubernetes kubeadm.conf must be owned by root." + audit: "stat -c %U:%G $kubeletsvc" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the kubeadm.conf to root:root by executing the command: + chown root:root $kubeletsvc + scored: false + + - id: V-242455 + text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive." + audit: "stat -c %a $kubeletsvc" + tests: + test_items: + - flag: "644" + compare: + op: lte + value: "644" + remediation: | + Change the permissions of the kubeadm.conf to 644 by executing the command: + chmod 644 $kubeletsvc + scored: false + + - id: V-242456 + text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive." + audit: "stat -c %a $kubeletconf" + tests: + test_items: + - flag: "644" + compare: + op: lte + value: "644" + remediation: | + Change the permissions of the config.yaml to 644 by executing the command: + chmod 644 $kubeletconf + scored: false + + - id: V-242457 + text: "The Kubernetes kubelet config must be owned by root." 
+ audit: "stat -c %U:%G $kubeletconf" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Change the ownership of the kubelet config file to root:root by executing the command: + chown root:root $kubeletconf + scored: false + + - id: V-245541 + text: "Kubernetes Kubelet must not disable timeouts." + audit: "ps -ef | grep kubelet" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: and + test_items: + - flag: "--streaming-connection-idle-timeout" + set: false + - path: "{.streamingConnectionIdleTimeout}" + set: true + compare: + op: gte + value: "5m" + remediation: | + On the Control Plane, run the command: + ps -ef | grep kubelet + + If the "--streaming-connection-idle-timeout" option exists, verify its value. + + Edit the Kubernetes Kubelet config file: + Set the argument "streamingConnectionIdleTimeout" to a value of "5m" or greater. + + Restart the kubelet service using the following command: + systemctl daemon-reload && systemctl restart kubelet scored: true \ No newline at end of file diff --git a/cfg/gke-stig-kubernetes-v2r2/policies.yaml b/cfg/gke-stig-kubernetes-v2r2/policies.yaml index 34c59d3..4099a3d 100644 --- a/cfg/gke-stig-kubernetes-v2r2/policies.yaml +++ b/cfg/gke-stig-kubernetes-v2r2/policies.yaml @@ -24,88 +24,11 @@ groups: remediation: | Move any user-managed resources from the default, kube-public and kube-node-lease namespaces, to user namespaces. scored: false - - - id: V-242437 - text: "Kubernetes must have a pod security policy set." - audit: "kubectl get podsecuritypolicy" - tests: - test_items: - - flag: "runAsUser" - compare: - op: eq - value: "MustRunAsNonRoot" - - flag: "supplementalGroups" - compare: - op: gt - value: "0" - - flag: "fsGroup" - compare: - op: gt - value: "0" - remediation: | - From the Control Plane, save the following policy to a file called restricted.yml: - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - - ALL - volumes: - - configMap - - emptyDir - - projected - - secret - - downwardAPI - - persistentVolumeClaim - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: MustRunAs - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: MustRunAs - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false - - Apply the policy with: - - kubectl create -f restricted.yml - scored: true - id: V-242417 text: "Kubernetes must separate user functionality. (Manual)" type: "manual" remediation: | Move any user pods that are present in the Kubernetes system namespaces to user specific namespaces. scored: false - - - id: 4.2 - text: "DISA Category Code I - PodSecurity Admission Controller" - checks: - - id: V-254801 - text: "Kubernetes must enable PodSecurity admission controller on static pods and Kubelets." 
- audit: "grep -i feature-gates /etc/kubernetes/manifests/*" - tests: - test_items: - - flag: "--feature-gates" - compare: - op: eq - value: "PodSecurity=true" - remediation: | - On the Control Plane, change to the manifests directory: - - grep -i feature-gates /etc/kubernetes/manifests/* - - Ensure the argument "--feature-gates=PodSecurity=true" i + \ No newline at end of file diff --git a/job-gke-stig.yaml b/job-gke-stig.yaml index 8d8fc70..416a094 100644 --- a/job-gke-stig.yaml +++ b/job-gke-stig.yaml @@ -1,3 +1,40 @@ +# Service account role required for 242395 + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-bench-sa + namespace: kube-bench + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-bench-list-pods +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["list"] + resourceNames: [] # Can't use labels here, enforced via RBAC + admission controls + # - apiGroups: [""] + # resources: ["pods"] + # verbs: ["get"] + # resourceNames: [] # For explicit pod access if needed + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-bench-sa-binding +subjects: + - kind: ServiceAccount + name: kube-bench-sa + namespace: kube-bench +roleRef: + kind: ClusterRole + name: kube-bench-list-pods + apiGroup: rbac.authorization.k8s.io + --- apiVersion: batch/v1 kind: Job @@ -6,6 +43,7 @@ metadata: spec: template: spec: + serviceAccountName: kube-bench-sa hostPID: true containers: - name: kube-bench @@ -33,6 +71,9 @@ spec: - name: etc-kubernetes mountPath: /etc/kubernetes readOnly: true + - name: home-kubernetes + mountPath: /home/kubernetes + readOnly: true restartPolicy: Never volumes: - name: var-lib-kubelet @@ -44,3 +85,6 @@ spec: - name: etc-kubernetes hostPath: path: "/etc/kubernetes" + - name: home-kubernetes + hostPath: + path: "/home/kubernetes"