diff --git a/charts/turbinia/Chart.lock b/charts/turbinia/Chart.lock index 12308786..3a48e446 100644 --- a/charts/turbinia/Chart.lock +++ b/charts/turbinia/Chart.lock @@ -8,8 +8,5 @@ dependencies: - name: dfdewey repository: https://google.github.io/osdfir-infrastructure/ version: 1.0.0 -- name: kube-prometheus-stack - repository: https://prometheus-community.github.io/helm-charts - version: 60.3.0 -digest: sha256:4541a72c66cf8ea1bf728dabc3cdabecb0013e04ac4568babc0f82fb59470c58 -generated: "2024-07-15T13:19:48.514639-07:00" +digest: sha256:6c0c5f81d133cf28a6504d01571d17b1fed42b7908bfa3ef0b4d1a913b62fc03 +generated: "2024-09-23T10:08:20.718691-07:00" diff --git a/charts/turbinia/Chart.yaml b/charts/turbinia/Chart.yaml index fbe6cc03..53a1d0e9 100644 --- a/charts/turbinia/Chart.yaml +++ b/charts/turbinia/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: turbinia -version: 1.1.2 +version: 1.1.3 description: A Helm chart for Turbinia Kubernetes deployments. keywords: - turbinia @@ -23,11 +23,6 @@ dependencies: name: dfdewey repository: https://google.github.io/osdfir-infrastructure/ version: 1.0.0 -- condition: monitoring.deployKubePrometheus - name: kube-prometheus-stack - alias: monitoring - repository: https://prometheus-community.github.io/helm-charts - version: 60.3.0 maintainers: - name: Open Source DFIR email: osdfir-maintainers@googlegroups.com @@ -35,7 +30,7 @@ maintainers: sources: - https://github.com/google/osdfir-infrastructure icon: https://raw.githubusercontent.com/google/turbinia/master/web/src/assets/turbinia-logo-mark.png -appVersion: "latest" +appVersion: "20240820" annotations: category: Security licenses: Apache-2.0 diff --git a/charts/turbinia/README.md b/charts/turbinia/README.md index 67a0413c..6c6dc5d6 100644 --- a/charts/turbinia/README.md +++ b/charts/turbinia/README.md @@ -17,20 +17,19 @@ helm install my-release osdfir-charts/turbinia > **Note**: By default, Turbinia is not externally accessible and can be reached via `kubectl port-forward` 
within the cluster. -For a quick start with a local Kubernetes cluster on your desktop, check out the -[getting started with Minikube guide](https://github.com/google/osdfir-infrastructure/blob/main/docs/getting-started.md). - ## Introduction This chart bootstraps a [Turbinia](https://github.com/google/turbinia) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +For a quick start with a local Kubernetes cluster on your desktop, check out the +[getting started with Minikube guide](https://github.com/google/osdfir-infrastructure/blob/main/docs/getting-started.md). + ## Prerequisites -* Kubernetes 1.19+ -* Helm 3.2.0+ +* Kubernetes 1.23+ +* Helm 3.8.0+ * PV provisioner support in the underlying infrastructure - -> **Note**: See [GKE Installations](#gke-installations) for deploying to GKE. +* Shared storage for clusters larger then one machine. ## Installing the Chart @@ -41,44 +40,200 @@ helm repo add osdfir-charts https://google.github.io/osdfir-infrastructure/ helm repo update ``` -To install the chart, specify any release name of your choice. For example, using `my-release` as the release name, run: +To install the chart, specify any release name of your choice. +For example, using `my-release` as the release name, run: ```console helm install my-release osdfir-charts/turbinia ``` -The command deploys Turbinia on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured -during installation or see [Installating for Production](#installing-for-production) -for a recommended production installation. +The command deploys Turbinia on the Kubernetes cluster in the default configuration. +The [Parameters](#parameters) section lists the parameters that can be configured +during installation. 
+ +> **Tip**: See the [Managing and updating the Turbinia config](#managing-and-updating-the-turbinia-config) +section for more details on managing the Turbinia config. + +## Configuration and installation details + +### Use a different Turbinia version -> **Tip**: You can override the default Turbinia configuration by placing the -`turbinia.conf` config at the root of the Helm chart. When choosing this option, -pull and install the Helm chart locally. +The Turbinia Helm chart utilizes the latest container release tags by default. +OSDFIR Infrastructure actively monitors for new versions of the main containers +and releases updated charts accordingly. -## Installing for Production +To modify the application version used in Turbinia, specify a different version +of the image using the `image.tag` parameter and/or a different repository using +the `image.repository` parameter. -Pull the chart locally then cd into `/turbinia` and review the `values-production.yaml` file for a list of values that will be used for production. +For example, to use the most recent development +version instead, set the following variables: ```console -helm pull osdfir-charts/turbinia --untar +turbinia.server.image.repository="us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-server-dev" +turbinia.server.image.tag="latest" +turbinia.api.image.repository="us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-api-server-dev" +turbinia.api.image.tag="latest" +turbinia.worker.image.repository="us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-worker-dev" +turbinia.worker.image.tag="latest" ``` -### GKE Installations +### Upgrading the Helm chart + +Helm chart updates can be retrieved by running `helm repo update`. + +To explore available charts and versions, use `helm search repo osdfir-charts/`. +Install a specific chart version with `helm install my-release osdfir-charts/turbinia --version `. 
+ +A major Helm chart version change (like v1.0.0 -> v2.0.0) indicates that there +is an incompatible breaking change needing manual actions. + +### Managing and updating the Turbinia config + +This section outlines how to deploy and manage the Turbinia configuration file +within OSDFIR infrastructure. + +There are two primary methods: + +#### Using Default Configurations -Create a Turbinia GCP account using the helper script in `tools/create-gcp-sa.sh` prior to installing the chart. +If you don't provide your own Turbinia config file during deployment, +the Turbinia deployment will automatically retrieve the latest default configs +from the Turbinia Github repository. This method requires no further action from you. + +> **NOTE:** When using the default method, you cannot update the Turbinia config +file directly. See the next section below for instructions on using a custom Turbinia +config instead. + +#### Managing Turbinia configs externally + +For more advanced configuration management, you can manage the Turbinia config +file independently of the Helm chart: + +1. Prepare your Config File: + + Organize the Turbinia config file with your desired customizations. + +2. Create a ConfigMap: + + ```console + kubectl create configmap turbinia-configs --from-file=turbinia.conf + ``` + + Replace `turbinia.conf` with the actual name of your config file. + +3. Install or Upgrade the Helm Chart: + + ```console + helm install my-release osdfir-charts/turbinia --set config.existingConfigMap="turbinia-configs" + ``` + + This command instructs the Helm chart to use the `turbinia-configs` ConfigMap for + Turbinia's config file. + +To update the config changes using this method: + +1. 
Update the ConfigMap: + + ```console + kubectl create configmap turbinia-configs --from-file=turbinia.conf --dry-run -o yaml | kubectl replace -f - + ``` -Install the chart with the base values in `values.yaml`, the production values in `values-production.yaml`, and set appropriate values to enable GCP for Turbinia. Using a release name such as `my-release`, run: +2. Restart the Turbinia deployment to apply the new configs + + ```console + kubectl rollout restart deployment -l app.kubernetes.io/name=turbinia + ``` + +### Metrics and monitoring + +The chart starts a metrics exporter for prometheus. The metrics endpoint (port 9200) +is exposed in the service. Metrics can be scraped from within the cluster by either +a Prometheus server running in your cluster or a cloud-based Prometheus service. +Currently, Turbinia application metrics and system metrics are available. + +One recommended option for an integrated monitoring solution would be the +[kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack). + +To setup, first add the repository containing the kube-prometheus-stack +Helm chart: + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +``` + +Create a file to disable the default selector: ```console -helm install my-release ../turbinia \ - -f values.yaml -f values-production.yaml \ +cat >> values-monitoring.yaml << EOF +prometheus: + prometheusSpec: + serviceMonitorSelectorNilUsesHelmValues: false +EOF +``` + +Then to install the kube prometheus chart in a namespace called `monitoring`: + +```console +helm install kube-prometheus prometheus-community/kube-prometheus-stack -f values-monitoring.yaml --namespace monitoring +``` + +> **NOTE**: To confirm Turbinia is recording metrics, check Prometheus or Grafana for entries starting with `turbinia_`. 
If nothing shows up, you might need to update your Turbinia installation (helm upgrade) to apply the necessary CustomResourceDefinition (CRD). + +### Resource requests and limits + +OSDFIR Infrastructure charts allow setting resource requests and limits for all +containers inside the chart deployment. These are inside the `resources` value +(check parameter table). Setting requests is essential for production workloads +and these should be adapted to your specific use case. + +To maximize deployment success across different environments, resources are +minimally defined by default. + +### Persistence + +By default, the chart mounts a Persistent Volume at the `/mnt/turbiniavolume` path. +The volume is created using dynamic volume provisioning. + +Configuration files can be found at the `/etc/turbinia` path of the container +while logs can be found at `/mnt/turbiniavolume/logs/`. + +For clusters running more than one node or machine, the Persistent Volume will +need to have the ability to be mounted by multiple machines, such as NFS, GCP +Filestore, AWS EFS, and other shared file storage equivalents. + +## Installing Turbinia for Google Kubernetes Engine (GKE) + +In order to process Google Cloud Platform (GCP) disks with Turbinia, some additional +setup steps are required. + +The first is to create a Turbinia GCP account using the helper script in +`tools/create-gcp-sa.sh` prior to installing the chart. + +Once done, install the chart with the appropriate values to enable GCP disk +processing for Turbinia. Using a release name such as `my-release`, run: + +```console +helm install my-release osdfir-charts/turbinia \ --set gcp.enabled=true \ --set gcp.projectID= \ --set gcp.projectRegion= \ --set gcp.projectZone= ``` -### Enabling GKE Ingress and OIDC Authentication +Turbinia offers worker autoscaling based on CPU utilization. This feature can +significantly increase the speed of task processing by automatically adjusting +the number of active worker pods. 
To enable autoscaling on your existing +deployment, run the following command: + +```console +helm upgrade my-release osdfir-charts/turbinia \ +--reuse-values \ +--set autoscaling.enabled.true +``` + +### Enabling External Access and OIDC Authentication Follow these steps to externally expose Turbinia and enable Google Cloud OIDC using the Oauth2 Proxy to control user access to Turbinia. @@ -92,8 +247,9 @@ using the Oauth2 Proxy to control user access to Turbinia. 2. Register a new domain or use an existing one, ensuring a DNS entry points to the IP created earlier. -3. Create OAuth web client credentials following the [Google Support guide](https://support.google.com/cloud/answer/6158849). If using the CLI client, also create a Desktop/Native -OAuth client. +3. Create OAuth web client credentials following the +[Google Support guide](https://support.google.com/cloud/answer/6158849). If using +the CLI client, also create a Desktop/Native OAuth client. * Fill in Authorized JavaScript origins with your domain as `https://.com` * Fill in Authorized redirect URIs with `https://.com/oauth2/callback/` @@ -126,14 +282,14 @@ OAuth client. kubectl create secret generic authenticated-emails --from-file=authenticated-emails-list=authenticated-emails.txt ``` -8. Then to upgrade an existing release with production values, externally expose -Turbinia through a load balancer with GCP managed certificates, and deploy the +8. 
Then to upgrade an existing release, externally expose Turbinia through a load balancer with GCP managed certificates, and deploy the Oauth2 Proxy for authentication, run: ```console - helm upgrade my-release \ - -f values.yaml -f values-production.yaml \ + helm upgrade my-release osdfir-charts/turbinia \ + --reuse-values \ --set ingress.enabled=true \ + --set ingress.className="gce" \ --set ingress.host= \ --set ingress.gcp.managedCertificates=true \ --set ingress.gcp.staticIPName= \ @@ -144,54 +300,17 @@ Oauth2 Proxy for authentication, run: > **Warning**: Turbinia relies on the Oauth2 Proxy for authentication. If you plan to expose Turbinia with a public facing IP, it is highly recommended that -the Oauth2 Proxy is deployed alongside with the command provided above. +the Oauth2 Proxy is deployed alongside with the command provided above. Otherwise, +Turbinia will be accessible from anyone on the internet without authentication. -### Deploying Monitoring - -Application and system monitoring is available through the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack). -Kube Prometheus is a collection of Grafana dashboards and Prometheus rules combined -with documentation to provide easy to operate end-to-end K8s cluster monitoring. - -To setup monitoring, first add the repository containing the kube-prometheus-stack -Helm chart: - -```console -helm repo add prometheus-community https://prometheus-community.github.io/helm-charts -helm repo update -``` +## Installing Turbinia for Other Cloud Platforms -If using GKE, EKS, or similar K8s managed services some options will need to be -disabled due to the control plane nodes not being visible to Prometheus. 
To -address this create a values file containing the following updates: - -```console -cat >> values-monitoring.yaml << EOF -kubeScheduler: - enabled: false -kubeControllerManager: - enabled: false -coreDns: - enabled: false -kubeProxy: - enabled: false -kubeDns: - enabled: true -prometheus: - prometheusSpec: - serviceMonitorSelectorNilUsesHelmValues: false -EOF -``` - -Then to install the kube prometheus chart in a namespace called `monitoring`: - -```console -helm install kube-prometheus prometheus-community/kube-prometheus-stack -f values-monitoring.yaml --namespace monitoring -``` - -That's it! To verify Turbinia metrics are being collected, connect to either -Prometheus or Grafana and search for `turbinia_*` in metrics explorer. If no -metrics appear, you may need to run a helm upgrade on your existing Turbinia -deployment so that the CustomResourceDefinitions (CRDs) can be applied. +Turbinia currently offers native support only for Google Cloud Disks. This means +you can seamlessly process evidence from Google Cloud Disks. For other cloud +providers, you'll need to manually mount the disk to your Turbinia instance or +copy the evidence into Turbinia for processing. We are actively working to expand +native disk processing support for other cloud environments in the future. +Installing the Turbinia Helm Chart remains the same regardless of your cloud provider. 
## Uninstalling the Chart @@ -239,7 +358,7 @@ kubectl delete pvc -l release=my-release | ------------------------------- | ------------------------------------------------------------------------- | -------------------------------------------------------------------- | | `server.image.repository` | Turbinia image repository | `us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-server` | | `server.image.pullPolicy` | Turbinia image pull policy | `IfNotPresent` | -| `server.image.tag` | Overrides the image tag whose default is the chart appVersion | `latest` | +| `server.image.tag` | Overrides the image tag whose default is the chart appVersion | `20240820` | | `server.image.imagePullSecrets` | Specify secrets if pulling from a private repository | `[]` | | `server.podSecurityContext` | Holds pod-level security attributes and common server container settings | `{}` | | `server.securityContext` | Holds security configuration that will be applied to the server container | `{}` | @@ -255,7 +374,7 @@ kubectl delete pvc -l release=my-release | --------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------- | | `worker.image.repository` | Turbinia image repository | `us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-worker` | | `worker.image.pullPolicy` | Turbinia image pull policy | `IfNotPresent` | -| `worker.image.tag` | Overrides the image tag whose default is the chart appVersion | `latest` | +| `worker.image.tag` | Overrides the image tag whose default is the chart appVersion | `20240820` | | `worker.image.imagePullSecrets` | Specify secrets if pulling from a private repository | `[]` | | `worker.replicaCount` | Number of worker pods to run at once | `1` | | `worker.autoscaling.enabled` | Enables Turbinia Worker autoscaling | `false` | @@ -277,7 
+396,7 @@ kubectl delete pvc -l release=my-release | ---------------------------- | ----------------------------------------------------------------------------------- | ------------------------------------------------------------------------ | | `api.image.repository` | Turbinia image repository for API / Web server | `us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-api-server` | | `api.image.pullPolicy` | Turbinia image pull policy | `IfNotPresent` | -| `api.image.tag` | Overrides the image tag whose default is the chart appVersion | `latest` | +| `api.image.tag` | Overrides the image tag whose default is the chart appVersion | `20240820` | | `api.image.imagePullSecrets` | Specify secrets if pulling from a private repository | `[]` | | `api.podSecurityContext` | Holds pod-level security attributes that will be applied to the API / Web container | `{}` | | `api.securityContext` | Holds security configuration that will be applied to the API / Web container | `{}` | @@ -287,28 +406,10 @@ kubectl delete pvc -l release=my-release | `api.tolerations` | Tolerations for Turbinia api pods assignment | `[]` | | `api.affinity` | Affinity for Turbinia api pods assignment | `{}` | -### Turbinia controller configuration - -| Name | Description | Value | -| ----------------------------------- | ---------------------------------------------------------------------------- | ------------------------------------------------------------------------ | -| `controller.enabled` | If enabled, deploys the Turbinia controller | `false` | -| `controller.image.repository` | Turbinia image repository for the Turbinia controller | `us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-controller` | -| `controller.image.pullPolicy` | Turbinia image pull policy | `IfNotPresent` | -| `controller.image.tag` | Overrides the image tag whose default is the chart appVersion | `latest` | -| `controller.image.imagePullSecrets` | Specify secrets if pulling from a private repository | 
`[]` | -| `controller.podSecurityContext` | Holds pod-level security attributes and common API / Web container settings | `{}` | -| `controller.securityContext` | Holds security configuration that will be applied to the API / Web container | `{}` | -| `controller.resources.limits` | Resource limits for the controller container | `{}` | -| `controller.resources.requests` | Requested resources for the controller container | `{}` | -| `controller.nodeSelector` | Node labels for Turbinia controller pods assignment | `{}` | -| `controller.tolerations` | Tolerations for Turbinia controller pods assignment | `[]` | -| `controller.affinity` | Affinity for Turbinia controller pods assignment | `{}` | - ### Common Parameters | Name | Description | Value | | --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -| `config.override` | Overrides the default Turbinia config to instead use a user specified config. Please ensure | `turbinia.conf` | | `config.existingConfigMap` | Use an existing ConfigMap as the default Turbinia config. | `""` | | `config.disabledJobs` | List of Turbinia Jobs to disable. Overrides DISABLED_JOBS in the Turbinia config. | `['BinaryExtractorJob', 'BulkExtractorJob', 'HindsightJob', 'PhotorecJob', 'VolatilityJob']` | | `config.existingVertexSecret` | Name of existing secret containing Vertex API Key in order to enable the Turbinia LLM Artifacts Analyzer. 
The secret must contain the key `turbinia-vertexapi` | `""` | @@ -335,10 +436,10 @@ kubectl delete pvc -l release=my-release | `ingress.host` | The domain name Turbinia will be hosted under | `""` | | `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | | `ingress.certManager` | Add the corresponding annotations for cert-manager integration | `false` | -| `ingress.className` | IngressClass that will be be used to implement the Ingress | `gce` | +| `ingress.className` | IngressClass that will be be used to implement the Ingress | `""` | | `ingress.gcp.managedCertificates` | Enabled GCP managed certificates for your domain | `false` | | `ingress.gcp.staticIPName` | Name of the static IP address you reserved in GCP | `""` | -| `ingress.gcp.staticIPV6Name` | Name of the static IPV6 address you reserved in GCP. This can be optionally provided to deploy a loadbalancer with an IPV6 address | `""` | +| `ingress.gcp.staticIPV6Name` | Name of the static IPV6 address you reserved. This can be optionally provided to deploy a loadbalancer with an IPV6 address in GCP. | `""` | ### dfDewey PostgreSQL Configuration Parameters @@ -378,18 +479,6 @@ kubectl delete pvc -l release=my-release ### Third Party Configuration -### Monitoring configuration parameters - -| Name | Description | Value | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- | -| `monitoring.deployKubePrometheus` | Deploy kube-prometheus-stack as a subchart. For production environments, it is best practice to deploy this chart separately. | `false` | -| `monitoring.kubeScheduler.enabled` | Component scraping kube scheduler. Disabled by default due to lack of Prometheus endpoint access for managed K8s clusters (e.g. GKE, EKS). 
| `false` | -| `monitoring.kubeControllerManager.enabled` | Component scraping kube controller. Disabled by default due to lack of Prometheus endpoint access for managed K8s clusters (e.g. GKE, EKS). | `false` | -| `monitoring.coreDns.enabled` | Component scraping core dns. Disabled by default in favor of kube dns. | `false` | -| `monitoring.kubeProxy.enabled` | Component scraping kube proxy. Disabled by default due to lack of Prometheus endpoint access for managed K8s clusters (e.g. GKE, EKS). | `false` | -| `monitoring.kubeDns.enabled` | Component scraping kube dns. | `true` | -| `monitoring.prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` | Disable so that custom servicemonitors can be created and monitored | `false` | - ### Redis configuration parameters | Name | Description | Value | @@ -436,13 +525,16 @@ kubectl delete pvc -l release=my-release Specify each parameter using the --set key=value[,key=value] argument to helm install. For example, ```console -helm install my-release osdfir-charts/turbinia --set controller.enabled=true +helm install my-release osdfir-charts/turbinia \ +--set ingress.enabled=true \ +--set ingress.host="mydomain.com" \ +--set ingress.selfSigned=true ``` -The above command installs Turbinia with the Turbinia Controller deployed. +The above command installs Turbinia with an attached Ingress. -Alternatively, the `values.yaml` and `values-production.yaml` file can be -directly updated if the Helm chart was pulled locally. For example, +Alternatively, the `values.yaml` file can be directly updated if the Helm chart +was pulled locally. For example, ```console helm pull osdfir-charts/turbinia --untar @@ -455,128 +547,9 @@ chart with the updated values. helm install my-release ../turbinia ``` -### Managing and updating the Turbinia config - -This section outlines how to deploy and manage the Turbinia configuration file -within OSDFIR infrastructure. There are three primary methods: - -1. 
**Using Default Configurations** - - If you don't provide your own Turbinia config file during deployment, - the Turbinia deployment will automatically retrieve the latest default configs - from the Turbinia Github repository. This method requires no further action from you. - - > **NOTE:** When using the default method, you cannot update the Turbinia config file directly. - -2. **Embedding Turbinia config in the Helm Chart** - - To customize Turbinia with your own config file and include it directly in - the Helm chart deployment, follow these steps: - - 1. Download and extract the Helm chart: - - ```console - helm pull osdfir-charts/turbinia --untar - cd turbinia/ - ``` - - 2. Download the default Turbinia config: - - ```console - wget https://raw.githubusercontent.com/google/turbinia/master/turbinia/config/turbinia_config_tmpl.py > turbinia.conf - ``` - - 3. Modify the config file then deploy the Helm chart: - - ```console - helm install my-release ../turbinia - ``` - - > **NOTE**: The Helm chart uses the `config.override` value in the `values.yaml` file to determine the location of your config file. By default, `config.override` is set to the root directory of the Helm chart. - - To update config changes using this method: - - 1. Modify your Config File - - Make the necessary changes to your Turbinia config file. - - 2. Upgrade the Helm Release: - - ```console - helm upgrade my-release ../turbinia - ``` - - This will automatically apply the updated config changes and restart the Turbinia deployment so the changes can be picked up. - - -3. **Managing Turbinia configs externally** - - For more advanced configuration management, you can manage the Turbinia config - file independently of the Helm chart: - - 1. Prepare your Config File: - - Organize the Turbinia config file with your desired customizations. - - 2. 
Create a ConfigMap: - - ```console - kubectl create configmap turbinia-configs --from-file=turbinia.conf - ``` - - Replace `turbinia.conf` with the actual name of your config file. - - 3. Install or Upgrade the Helm Chart: - - ```console - helm install my-release osdfir-charts/turbinia --set config.existingConfigMap="turbinia-configs" - ``` - - This command instructs the Helm chart to use the `turbinia-configs` ConfigMap for - Turbinia's config file. - - To update the config changes using this method: - - 1. Update the ConfigMap: - - ```console - kubectl create configmap turbinia-configs --from-file=turbinia.conf --dry-run -o yaml | kubectl replace -f - - ``` - - 2. Restart the Turbinia deployment to apply the new configs - - ```console - kubectl rollout restart deployment -l app.kubernetes.io/name=turbinia - ``` - -## Persistence - -The Turbinia deployment stores data at the `/mnt/turbiniavolume` path of the container and stores configuration files at the `/etc/turbinia` path of the container. - -Persistent Volume Claims are used to keep the data across deployments. This is -known to work in GCP and Minikube. See the Parameters section to configure the -PVC or to disable persistence. - -## Upgrading - -If you need to upgrade an existing release to update a value, such as -persistent volume size or upgrading to a new release, you can run -[helm upgrade](https://helm.sh/docs/helm/helm_upgrade/). -For example, to set a new release and upgrade storage capacity, run: - -```console -helm upgrade my-release ../turbinia \ - --set image.tag=latest \ - --set persistence.size=10T -``` - -The above command upgrades an existing release named `my-release` updating the -image tag to `latest` and increasing persistent volume size of an existing volume to 10 Terabytes. Note that existing data will not be deleted and instead triggers an expansion -of the volume that backs the underlying PersistentVolume. See [here](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). 
- ## License -Copyright © 2023 OSDFIR Infrastructure +Copyright © 2024 OSDFIR Infrastructure Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/charts/turbinia/templates/_helpers.tpl b/charts/turbinia/templates/_helpers.tpl index 6c715110..f361db08 100644 --- a/charts/turbinia/templates/_helpers.tpl +++ b/charts/turbinia/templates/_helpers.tpl @@ -60,11 +60,10 @@ Return the proper persistence volume claim name */}} {{- define "turbinia.pvc.name" -}} {{- $pvcName := .Values.persistence.name -}} -{{- if .Values.global -}} - {{- if .Values.global.existingPVC -}} - {{- $pvcName = .Values.global.existingPVC -}} - {{- end -}} -{{- printf "%s-%s" $pvcName "claim" }} +{{- if and .Values.global .Values.global.existingPVC -}} +{{- .Values.global.existingPVC -}} +{{- else -}} +{{- printf "%s-%s-claim" .Release.Name $pvcName }} {{- end -}} {{- end -}} diff --git a/charts/turbinia/templates/api-deployment.yaml b/charts/turbinia/templates/api-deployment.yaml index a5c3c20f..3729644a 100644 --- a/charts/turbinia/templates/api-deployment.yaml +++ b/charts/turbinia/templates/api-deployment.yaml @@ -15,8 +15,8 @@ spec: template: metadata: annotations: - # Have Deployment restart after each upgrade - roll: {{ randAlphaNum 5 | quote }} + # Restart pod if values.yaml parameters that affect the config were changed + checksum/config: {{ include (print $.Template.BasePath "/init-configmap.yaml") . 
| sha256sum }} {{- if .Values.metrics.enabled }} prometheus.io/port: {{ .Values.metrics.port | quote }} prometheus.io/scrape: "true" diff --git a/charts/turbinia/templates/certs/tls-secrets.yaml b/charts/turbinia/templates/certs/tls-secrets.yaml index 7b3b9f98..4caba77e 100644 --- a/charts/turbinia/templates/certs/tls-secrets.yaml +++ b/charts/turbinia/templates/certs/tls-secrets.yaml @@ -1,5 +1,4 @@ {{- if and .Values.ingress.enabled (or .Values.ingress.selfSigned .Values.ingress.certManager) }} -{{- $secretName := printf "%s-tls" (include "turbinia.fullname" .) }} {{- $ca := genCA "turbinia-ca" 365 }} {{- $cert := genSignedCert "turbinia-apps" nil (list .Values.ingress.host) 365 $ca }} apiVersion: v1 diff --git a/charts/turbinia/templates/configmap.yaml b/charts/turbinia/templates/configmap.yaml deleted file mode 100644 index 17f76aa3..00000000 --- a/charts/turbinia/templates/configmap.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- $userconfigs := .Files.Glob .Values.config.override }} -{{- if $userconfigs }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "turbinia.fullname" . }}-configmap - namespace: {{ .Release.Namespace | quote }} - labels: - {{- include "turbinia.labels" . | nindent 4 }} -data: -{{ ($userconfigs).AsConfig | indent 2 }} -{{- end }} \ No newline at end of file diff --git a/charts/turbinia/templates/controller-deployment.yaml b/charts/turbinia/templates/controller-deployment.yaml deleted file mode 100644 index 554147a2..00000000 --- a/charts/turbinia/templates/controller-deployment.yaml +++ /dev/null @@ -1,108 +0,0 @@ -{{- if .Values.controller.enabled }} -{{- $userconfigs := .Files.Get .Values.config.override }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "turbinia.fullname" . }}-controller - namespace: {{ .Release.Namespace | quote }} - labels: - app.kubernetes.io/component: controller - {{- include "turbinia.labels" . 
| nindent 4 }} -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/component: controller - {{- include "turbinia.selectorLabels" . | nindent 6 }} - template: - metadata: - {{- if .Values.metrics.enabled }} - annotations: - prometheus.io/port: {{ .Values.metrics.port | quote }} - prometheus.io/scrape: "true" - {{- end }} - labels: - app.kubernetes.io/component: controller - {{- include "turbinia.selectorLabels" . | nindent 8 }} - spec: - serviceAccountName: {{ include "turbinia.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} - initContainers: - {{- include "turbinia.initContainer" . | nindent 8 }} - containers: - - name: controller - securityContext: - {{- toYaml .Values.controller.securityContext | nindent 12 }} - image: "{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.controller.image.pullPolicy }} - lifecycle: - preStop: - exec: - command: - - "/bin/sh" - - "-c" - - "touch /tmp/turbinia-to-scaledown.lock && sleep 5 && /usr/bin/python3 /home/turbinia/check-lockfile.py" - env: - - name: TURBINIA_EXTRA_ARGS - value: "-d" - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - {{- if .Values.gcp.enabled }} - - mountPath: "/dev" - name: dev - - mountPath: "/var/run/lock" - name: lockfolder - {{- end }} - - mountPath: /mnt/turbiniavolume - name: turbiniavolume - - mountPath: /etc/turbinia - name: turbinia-configs - ports: - {{- if .Values.metrics.enabled }} - - containerPort: {{ .Values.metrics.port }} - {{- end }} - resources: - {{- toYaml .Values.controller.resources | nindent 12 }} - volumes: - {{- if .Values.gcp.enabled }} - - name: dev - hostPath: - path: /dev - readOnly: true - - name: lockfolder - hostPath: - path: /var/run/lock - readOnly: false - {{- end }} - - name: turbiniavolume - persistentVolumeClaim: - claimName: {{ include "turbinia.pvc.name" . 
}} - readOnly: false - - name: init-turbinia - configMap: - name: {{ include "turbinia.fullname" . }}-init-configmap - defaultMode: 0744 - - name: turbinia-configs - emptyDir: {} - {{- if $userconfigs }} - - name: user-configs - configMap: - name: {{ include "turbinia.configmap" . }} - {{- end }} - {{- with .Values.controller.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.controller.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.controller.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} -{{- end }} diff --git a/charts/turbinia/templates/crds/servicemonitor.yaml b/charts/turbinia/templates/crds/servicemonitor.yaml index ba5eb290..5d239402 100644 --- a/charts/turbinia/templates/crds/servicemonitor.yaml +++ b/charts/turbinia/templates/crds/servicemonitor.yaml @@ -2,7 +2,7 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - name: turbinia-metrics-servicemonitor + name: {{ printf "%s-%s" .Release.Name "turbinia-metrics-servicemonitor" }} spec: selector: matchLabels: diff --git a/charts/turbinia/templates/gcp/managedcertificate.yaml b/charts/turbinia/templates/gcp/managedcertificate.yaml index e40dcca2..3b4052d1 100644 --- a/charts/turbinia/templates/gcp/managedcertificate.yaml +++ b/charts/turbinia/templates/gcp/managedcertificate.yaml @@ -1,4 +1,4 @@ -{{- if and (.Values.ingress.enabled) (eq .Values.ingress.className "gce") }} +{{- if and (.Values.ingress.enabled) (.Values.ingress.gcp.managedCertificates) }} apiVersion: networking.gke.io/v1 kind: ManagedCertificate metadata: diff --git a/charts/turbinia/templates/ingress.yaml b/charts/turbinia/templates/ingress.yaml index 9833a311..a212e411 100644 --- a/charts/turbinia/templates/ingress.yaml +++ b/charts/turbinia/templates/ingress.yaml @@ -19,8 +19,6 @@ metadata: {{- if .Values.ingress.gcp.staticIPName }} kubernetes.io/ingress.global-static-ip-name: {{ .Values.ingress.gcp.staticIPName }} 
networking.gke.io/v1beta1.FrontendConfig: {{ include "turbinia.fullname" . }}-frontend-config - {{- else }} - {{- fail "A valied .Values.ingress.gcp.staticIPName entry is required when using the GCE Ingress" }} {{- end }} {{- end }} spec: @@ -31,7 +29,7 @@ spec: secretName: {{ include "turbinia.fullname" . }}-tls {{- end }} rules: - - host: {{ required "A valid .Values.ingress.host entry is required!" .Values.ingress.host }} + - host: {{ .Values.ingress.host }} http: paths: - path: / @@ -72,14 +70,14 @@ metadata: kubernetes.io/ingressClassName: {{ .Values.ingress.className }} {{- if .Values.ingress.gcp.managedCertificates }} networking.gke.io/managed-certificates: {{ include "turbinia.fullname" . }}-managed-ssl + networking.gke.io/v1beta1.FrontendConfig: {{ include "turbinia.fullname" . }}-frontend-config {{- end }} {{- if .Values.ingress.certManager }} kubernetes.io/tls-acme: "true" cert-manager.io/issuer: {{ include "turbinia.fullname" . }}-letsencrypt-production {{- end }} - {{- if (eq .Values.ingress.className "gce") }} + {{- if .Values.ingress.gcp.staticIPV6Name }} kubernetes.io/ingress.global-static-ip-name: {{ .Values.ingress.gcp.staticIPV6Name }} - networking.gke.io/v1beta1.FrontendConfig: {{ include "turbinia.fullname" . }}-frontend-config {{- end }} spec: {{- if or .Values.ingress.selfSigned .Values.ingress.certManager }} @@ -89,7 +87,7 @@ spec: secretName: {{ include "turbinia.fullname" . }}-tls {{- end }} rules: - - host: {{ required "A valid .Values.ingress.host entry is required!" 
.Values.ingress.host }} + - host: {{ .Values.ingress.host }} http: paths: - path: / diff --git a/charts/turbinia/templates/server-deployment.yaml b/charts/turbinia/templates/server-deployment.yaml index 544b59b5..919d4341 100644 --- a/charts/turbinia/templates/server-deployment.yaml +++ b/charts/turbinia/templates/server-deployment.yaml @@ -15,8 +15,8 @@ spec: template: metadata: annotations: - # Have Deployment restart after each upgrade - roll: {{ randAlphaNum 5 | quote }} + # Restart pod if values.yaml parameters that affect the config were changed + checksum/config: {{ include (print $.Template.BasePath "/init-configmap.yaml") . | sha256sum }} {{- if .Values.metrics.enabled }} prometheus.io/port: {{ .Values.metrics.port | quote }} prometheus.io/scrape: "true" diff --git a/charts/turbinia/templates/serviceaccount.yaml b/charts/turbinia/templates/serviceaccount.yaml index a27ba614..b43f6aba 100644 --- a/charts/turbinia/templates/serviceaccount.yaml +++ b/charts/turbinia/templates/serviceaccount.yaml @@ -1,4 +1,5 @@ {{- if .Values.serviceAccount.create -}} +{{- if not (lookup "v1" "ServiceAccount" .Release.Namespace .Values.serviceAccount.name) -}} apiVersion: v1 kind: ServiceAccount metadata: @@ -11,3 +12,4 @@ metadata: iam.gke.io/gcp-service-account: {{ .Values.serviceAccount.gcpName }}@{{ .Values.gcp.projectID }}.iam.gserviceaccount.com {{- end }} {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/turbinia/templates/worker-deployment.yaml b/charts/turbinia/templates/worker-deployment.yaml index ee94fa98..a2f236b8 100644 --- a/charts/turbinia/templates/worker-deployment.yaml +++ b/charts/turbinia/templates/worker-deployment.yaml @@ -17,8 +17,8 @@ spec: template: metadata: annotations: - # Have Deployment restart after each upgrade - roll: {{ randAlphaNum 5 | quote }} + # Restart pod if values.yaml parameters that affect the config were changed + checksum/config: {{ include (print $.Template.BasePath "/init-configmap.yaml") . 
| sha256sum }} {{- if .Values.metrics.enabled }} prometheus.io/port: {{ .Values.metrics.port | quote }} prometheus.io/scrape: "true" @@ -33,8 +33,8 @@ spec: initContainers: {{- include "turbinia.initContainer" . | nindent 8 }} # The grace period needs to be set to the largest task timeout as - # set in the turbinia configuration file. - terminationGracePeriodSeconds: 86400 + # set in the turbinia configuration file plus five seconds. + terminationGracePeriodSeconds: 86405 containers: - name: worker securityContext: diff --git a/charts/turbinia/values-production.yaml b/charts/turbinia/values-production.yaml deleted file mode 100644 index 3487feb0..00000000 --- a/charts/turbinia/values-production.yaml +++ /dev/null @@ -1,495 +0,0 @@ -## Turbinia Helm Production Values -## Please use these values to override the default Turbinia values with recommended resources and replica counts for production installations -## -## @section Turbinia configuration -## The following section covers configuration parameters for Turbinia -## -## @section Turbinia server configuration -## -server: - image: - ## @param server.image.repository Turbinia image repository - ## - repository: us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-server - ## @param server.image.pullPolicy Turbinia image pull policy - ## ref https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy - ## - pullPolicy: IfNotPresent - ## @param server.image.tag Overrides the image tag whose default is the chart appVersion - ## - tag: latest - ## @param server.image.imagePullSecrets Specify secrets if pulling from a private repository - ## ref https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - ## @param server.podSecurityContext Holds pod-level security attributes and common server container settings - ## Some fields are also present in container.securityContext. 
Field values of container.securityContext take precedence over field values of PodSecurityContext - ## ref https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podsecuritycontext-v1-core - ## e.g. - ## fsgroup: 2000 - ## - podSecurityContext: {} - ## @param server.securityContext Holds security configuration that will be applied to the server container - ## Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence - ## ref https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#securitycontext-v1-core - ## e.g. - ## capabilities - ## drop: - ## - ALL - ## readOnlyRootFilesystem: true - ## runAsNonRoot: true - ## runAsUser: 1000 - ## - securityContext: {} - ## Turbinia Server resource requests and limits - ## @param server.resources.requests.cpu The requested cpu for the server container - ## @param server.resources.requests.memory The requested memory for the server container - ## @param server.resources.limits.cpu The resources cpu limits for the server container - ## @param server.resources.limits.memory The resources memory limits for the server container - ## - resources: - requests: - cpu: 1000m - memory: 2000Mi - limits: - cpu: 8000m - memory: 16000Mi - ## @param server.nodeSelector Node labels for Turbinia server pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param server.tolerations Tolerations for Turbinia server pods assignment - ## ref https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ - ## - tolerations: [] - ## @param server.affinity Affinity for Turbinia server pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} -## @section Turbinia worker configuration -## -worker: - image: - ## @param worker.image.repository Turbinia image repository - ## - repository: 
us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-worker - ## @param worker.image.pullPolicy Turbinia image pull policy - ## ref https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy - ## - pullPolicy: IfNotPresent - ## @param worker.image.tag Overrides the image tag whose default is the chart appVersion - ## - tag: latest - ## @param worker.image.imagePullSecrets Specify secrets if pulling from a private repository - ## ref https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - ## @param worker.replicaCount Number of worker pods to run at once - ## Disabled if the value worker.autoscaling.enabled is enabled. - replicaCount: 5 - ## Worker autoscaler configuration - ## ref https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - ## - autoscaling: - ## @param worker.autoscaling.enabled Enables Turbinia Worker autoscaling - ## - enabled: true - ## @param worker.autoscaling.minReplicas Minimum amount of worker pods to run at once - ## - minReplicas: 5 - ## @param worker.autoscaling.maxReplicas Maximum amount of worker pods to run at once - ## - maxReplicas: 500 - ## @param worker.autoscaling.targetCPUUtilizationPercentage CPU scaling metric workers will scale based on - ## - targetCPUUtilizationPercentage: 80 - ## @param worker.podSecurityContext Holds pod-level security attributes and common worker container settings - ## Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext - ## ref https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podsecuritycontext-v1-core - ## e.g. - ## fsgroup: 2000 - ## - podSecurityContext: {} - ## Worker Security Context Configuration - ## Some fields are present in both SecurityContext and PodSecurityContext. 
When both are set, the values in SecurityContext take precedence - ## ref https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#securitycontext-v1-core - ## - securityContext: - ## @param worker.securityContext.privileged Runs the container as priveleged. Due to Turbinia attaching and detaching disks, a priveleged container is required for the worker container. - ## - privileged: true - ## Turbinia Worker resource requests and limits - ## @param worker.resources.requests.cpu The requested cpu for the worker container - ## @param worker.resources.requests.memory The requested memory for the worker container - ## @param worker.resources.limits.cpu The resources cpu limits for the worker container - ## @param worker.resources.limits.memory The resources memory limits for the worker container - ## - resources: - requests: - cpu: 1500m - memory: 2048Mi - limits: - cpu: 31000m - memory: 65536Mi - ## @param worker.nodeSelector Node labels for Turbinia worker pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param worker.tolerations Tolerations for Turbinia worker pods assignment - ## ref https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ - ## - tolerations: [] - ## @param worker.affinity Affinity for Turbinia worker pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} -## @section Turbinia API / Web configuration -## -api: - image: - ## @param api.image.repository Turbinia image repository for API / Web server - ## - repository: us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-api-server - ## @param api.image.pullPolicy Turbinia image pull policy - ## ref https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy - ## - pullPolicy: IfNotPresent - ## @param api.image.tag Overrides the image tag whose default is the chart appVersion - ## - tag: latest - ## @param 
api.image.imagePullSecrets Specify secrets if pulling from a private repository - ## ref https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - ## API podSecurity Context configuration - ## Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext - ## ref https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podsecuritycontext-v1-core - ## e.g. - ## fsgroup: 2000 - ## - podSecurityContext: - seccompProfile: - ## @param api.podSecurityContext.seccompProfile.type Deploys the default seccomp profile to the container - ## - type: RuntimeDefault - ## @param api.securityContext Holds security configuration that will be applied to the API / Web container - ## Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence - ## ref https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#securitycontext-v1-core - ## e.g. 
- ## capabilities - ## drop: - ## - ALL - ## readOnlyRootFilesystem: true - ## runAsNonRoot: true - ## runAsUser: 1000 - ## - securityContext: {} - ## Turbinia API / Web resource requests and limits - ## @param api.resources.requests.cpu The requested cpu for the frontend container - ## @param api.resources.requests.memory The requested memory for the frontend container - ## @param api.resources.limits.cpu The resources cpu limits for the frontend container - ## @param api.resources.limits.memory The resources memory limits for the frontend container - ## - resources: - requests: - cpu: 2000m - memory: 4000Mi - limits: - cpu: 8000m - memory: 16000Mi - ## @param api.nodeSelector Node labels for Turbinia api pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param api.tolerations Tolerations for Turbinia api pods assignment - ## ref https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ - ## - tolerations: [] - ## @param api.affinity Affinity for Turbinia api pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} -## @section Turbinia controller configuration -## The controller is not required to use Turbinia and serves as an extra -## container that can be used for troubleshooting -## -controller: - ## @param controller.enabled If enabled, deploys the Turbinia controller - ## - enabled: false - image: - ## @param controller.image.repository Turbinia image repository for the Turbinia controller - ## - repository: us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-controller - ## @param controller.image.pullPolicy Turbinia image pull policy - ## ref https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy - ## - pullPolicy: IfNotPresent - ## @param controller.image.tag Overrides the image tag whose default is the chart appVersion - ## - tag: latest - ## @param 
controller.image.imagePullSecrets Specify secrets if pulling from a private repository - ## ref https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - ## @param controller.podSecurityContext Holds pod-level security attributes and common API / Web container settings - ## Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext - ## ref https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podsecuritycontext-v1-core - ## e.g. - ## fsgroup: 2000 - ## - podSecurityContext: {} - ## @param controller.securityContext Holds security configuration that will be applied to the API / Web container - ## Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence - ## ref https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#securitycontext-v1-core - ## e.g. 
- ## capabilities - ## drop: - ## - ALL - ## readOnlyRootFilesystem: true - ## runAsNonRoot: true - ## runAsUser: 1000 - ## - securityContext: {} - ## Turbinia controller resource requests and limits - ## @param controller.resources.requests.cpu The requested cpu for the controller container - ## @param controller.resources.requests.memory The requested memory for the controller container - ## @param controller.resources.limits.cpu The resources cpu limits for the controller container - ## @param controller.resources.limits.memory The resources memory limits for the controller container - ## - resources: - requests: - cpu: 1000m - memory: 2000Mi - limits: - cpu: 4000m - memory: 4000Mi - ## @param controller.nodeSelector Node labels for Turbinia controller pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param controller.tolerations Tolerations for Turbinia controller pods assignment - ## ref https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ - ## - tolerations: [] - ## @param controller.affinity Affinity for Turbinia controller pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} -## Turbinia persistence storage parameters -## -persistence: - ## @param persistence.name Turbinia persistent volume name - ## - name: turbiniavolume - ## @param persistence.size Turbinia persistent volume size - ## - size: 1T - ## @param persistence.storageClass PVC Storage Class for Turbinia volume - ## If default, storageClassName: , which enables GCP Filestore - ## when using the Filestore CSI Driver - ## ref https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/filestore-csi-driver#access - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined or set to null, no storageClassName spec is - ## set, choosing the default provisioner. 
(gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## ref https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/#using-dynamic-provisioning - ## - storageClass: standard-rwx - ## @param persistence.accessModes PVC Access Mode for Turbinia volume - ## Access mode may need to be updated based on the StorageClass - ## ref https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes - ## - accessModes: - - ReadWriteMany -## Turbinia dfDewey parameters -## -dfdewey: - ## @section dfDewey PostgreSQL Configuration Parameters - ## - postgresql: - ## @param dfdewey.postgresql.enabled Enables the Postgresql deployment - ## - enabled: true - ## @param dfdewey.postgresql.nameOverride String to partially override common.names.fullname template - ## - nameOverride: dfdewey-postgresql - ## PostgreSQL Authentication parameters - ## - auth: - ## @param dfdewey.postgresql.auth.username Name for a custom user to create - ## - username: "dfdewey" - ## @param dfdewey.postgresql.auth.password Password for the custom user to create. 
Ignored if auth.existingSecret is provided - ## - password: "password" - ## @param dfdewey.postgresql.auth.database Name for a custom database to create - ## - database: "dfdewey" - ## PostgreSQL Primary configuration parameters - ## - primary: - ## PostgreSQL Primary persistence configuration - ## - persistence: - ## @param dfdewey.postgresql.primary.persistence.size PostgreSQL Persistent Volume size - ## - size: 500Gi - ## PostgreSQL Primary resource requests and limits - ## @param dfdewey.postgresql.primary.resources.requests.cpu Requested cpu for the PostgreSQL Primary containers - ## @param dfdewey.postgresql.primary.resources.requests.memory Requested memory for the PostgreSQL Primary containers - ## @param dfdewey.postgresql.primary.resources.limits Resource limits for the PostgreSQL Primary containers - ## - resources: - requests: - cpu: 250m - memory: 256Mi - limits: {} - ## @section dfDewey Opensearch Configuration Parameters - ## IMPORTANT: The Opensearch Security Plugin / TLS has not yet been configured by default - ## ref on steps required https://opensearch.org/docs/1.1/security-plugin/configuration/index/ - ## - opensearch: - ## @param dfdewey.opensearch.enabled Enables the Opensearch deployment - ## - enabled: true - ## @param dfdewey.opensearch.nameOverride Overrides the clusterName when used in the naming of resources - ## - nameOverride: dfdewey-opensearch - ## @param dfdewey.opensearch.masterService The service name used to connect to the masters - ## - masterService: dfdewey-opensearch - ## @param dfdewey.opensearch.singleNode Replicas will be forced to 1 - ## - singleNode: true - ## @param dfdewey.opensearch.sysctlInit.enabled Sets optimal sysctl's through privileged initContainer - ## - sysctlInit: - enabled: true - ## @param dfdewey.opensearch.opensearchJavaOpts Sets the size of the Opensearch Java heap - ## It is recommended to use at least half the system's available ram - ## - opensearchJavaOpts: "-Xms32g -Xmx32g" - ## @param 
dfdewey.opensearch.config.opensearch.yml Opensearch configuration file. Can be appended for additional configuration options - ## Values must be YAML literal style scalar / YAML multiline string - ## : | - ## - ## - config: - opensearch.yml: | - discovery: - type: single-node - plugins: - security: - disabled: true - extraEnvs: - ## @param dfdewey.opensearch.extraEnvs[0].name Environment variable to set the initial admin password - ## - - name: OPENSEARCH_INITIAL_ADMIN_PASSWORD - ## @param dfdewey.opensearch.extraEnvs[0].value The initial admin password - ## - value: KyfwJExU2!2MvU6j - ## @param dfdewey.opensearch.extraEnvs[1].name Environment variable to disable Opensearch Demo config - ## - - name: DISABLE_INSTALL_DEMO_CONFIG - ## @param dfdewey.opensearch.extraEnvs[1].value Disables Opensearch Demo config - ## - value: "true" - ## @param dfdewey.opensearch.extraEnvs[2].name Environment variable to disable Opensearch Security plugin given that - ## certificates were not setup as part of this deployment - ## - - name: DISABLE_SECURITY_PLUGIN - ## @param dfdewey.opensearch.extraEnvs[2].value Disables Opensearch Security plugin - ## - value: "true" - ## Opensearch persistence configuration - ## - persistence: - ## @param dfdewey.opensearch.persistence.size Opensearch Persistent Volume size. 
A persistent volume would be created for each Opensearch replica running - ## - size: 6Ti - ## Opensearch resource requests - ## @param dfdewey.opensearch.resources.requests.cpu Requested cpu for the Opensearch containers - ## @param dfdewey.opensearch.resources.requests.memory Requested memory for the Opensearch containers - ## - resources: - requests: - cpu: 8000m - memory: 32Gi -## @section Third Party Configuration -## -## @section Redis configuration parameters -## IMPORTANT: Redis is deployed with Auth enabled by default -## To see a full list of available values, run helm show values charts/redis* -## -redis: - ## @param redis.enabled enabled Enables the Redis deployment - ## - enabled: true - ## @param redis.sentinel.enabled Enables Redis Sentinel on Redis pods - ## IMPORTANT: This has not been tested for Turbinia so would leave this disabled - ## - sentinel: - enabled: false - ## Master Redis Service configuration - ## - master: - ## @param redis.master.count Number of Redis master instances to deploy (experimental, requires additional configuration) - ## - count: 1 - ## Redis master persistence configuration - ## - persistence: - ## @param redis.master.persistence.size Persistent Volume size - ## - size: 500Gi - ## Redis master resource requests and limits - ## @param redis.master.resources.requests.cpu The requested cpu for the Redis master containers - ## @param redis.master.resources.requests.memory The requested memory for the Redis master containers - ## @param redis.master.resources.limits.cpu The resources cpu limits for the Redis master containers - ## @param redis.master.resources.limits.memory The resources memory limits for the Redis master containers - ## - resources: - requests: - cpu: 4000m - memory: 8Gi - limits: - cpu: 8000m - memory: 16Gi - ## Redis replicas configuration parameters - ## - replica: - ## @param redis.replica.replicaCount Number of Redis replicas to deploy - ## - replicaCount: 3 - ## Redis replicas persistence configuration 
- ## - persistence: - ## @param redis.replica.persistence.size Persistent Volume size - ## - size: 500Gi - ## Redis Replica resource requests and limits - ## @param redis.replica.resources.requests.cpu The requested cpu for the Redis replica containers - ## @param redis.replica.resources.requests.memory The requested memory for the Redis replica containers - ## @param redis.replica.resources.limits.cpu The resources cpu limits for the Redis replica containers - ## @param redis.replica.resources.limits.memory The resources memory limits for the Redis replica containers - ## - resources: - requests: - cpu: 4000m - memory: 8Gi - limits: - cpu: 8000m - memory: 16Gi diff --git a/charts/turbinia/values.yaml b/charts/turbinia/values.yaml index a544b532..1893eb44 100644 --- a/charts/turbinia/values.yaml +++ b/charts/turbinia/values.yaml @@ -60,7 +60,7 @@ server: pullPolicy: IfNotPresent ## @param server.image.tag Overrides the image tag whose default is the chart appVersion ## - tag: latest + tag: "20240820" ## @param server.image.imagePullSecrets Specify secrets if pulling from a private repository ## ref https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## e.g. @@ -131,7 +131,7 @@ worker: pullPolicy: IfNotPresent ## @param worker.image.tag Overrides the image tag whose default is the chart appVersion ## - tag: latest + tag: "20240820" ## @param worker.image.imagePullSecrets Specify secrets if pulling from a private repository ## ref https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## e.g. @@ -212,7 +212,7 @@ api: pullPolicy: IfNotPresent ## @param api.image.tag Overrides the image tag whose default is the chart appVersion ## - tag: latest + tag: "20240820" ## @param api.image.imagePullSecrets Specify secrets if pulling from a private repository ## ref https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## e.g. 
@@ -265,88 +265,12 @@ api: ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} -## @section Turbinia controller configuration -## The controller is not required to use Turbinia and serves as an extra -## container that can be used for troubleshooting -## -controller: - ## @param controller.enabled If enabled, deploys the Turbinia controller - ## - enabled: false - image: - ## @param controller.image.repository Turbinia image repository for the Turbinia controller - ## - repository: us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-controller - ## @param controller.image.pullPolicy Turbinia image pull policy - ## ref https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy - ## - pullPolicy: IfNotPresent - ## @param controller.image.tag Overrides the image tag whose default is the chart appVersion - ## - tag: latest - ## @param controller.image.imagePullSecrets Specify secrets if pulling from a private repository - ## ref https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - ## @param controller.podSecurityContext Holds pod-level security attributes and common API / Web container settings - ## Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext - ## ref https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podsecuritycontext-v1-core - ## e.g. - ## fsgroup: 2000 - ## - podSecurityContext: {} - ## @param controller.securityContext Holds security configuration that will be applied to the API / Web container - ## Some fields are present in both SecurityContext and PodSecurityContext. 
When both are set, the values in SecurityContext take precedence - ## ref https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#securitycontext-v1-core - ## e.g. - ## capabilities - ## drop: - ## - ALL - ## readOnlyRootFilesystem: true - ## runAsNonRoot: true - ## runAsUser: 1000 - ## - securityContext: {} - ## Turbinia controller resource requests and limits - ## @param controller.resources.limits Resource limits for the controller container - ## @param controller.resources.requests Requested resources for the controller container - resources: - ## Example: - ## limits: - ## cpu: 500m - ## memory: 1Gi - limits: {} - ## Example: - ## requests: - ## cpu: 500m - ## memory: 1Gi - requests: {} - ## @param controller.nodeSelector Node labels for Turbinia controller pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param controller.tolerations Tolerations for Turbinia controller pods assignment - ## ref https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ - ## - tolerations: [] - ## @param controller.affinity Affinity for Turbinia controller pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} ## @section Common Parameters ## ## Turbinia configuration parameters ## ref: https://github.com/google/turbinia/blob/master/turbinia/config/turbinia_config_tmpl.py ## config: - ## @param config.override Overrides the default Turbinia config to instead use a user specified config. Please ensure - ## that the config file is either placed in the root of this directory or point the override flag to a path containing - ## your config file - ## - override: turbinia.conf ## @param config.existingConfigMap Use an existing ConfigMap as the default Turbinia config. ## Please ensure that the ConfigMap has been created prior to deployment ## (e.g. 
kubectl create configmap turbinia-config --from-file=turbinia.conf) @@ -468,7 +392,7 @@ ingress: ## @param ingress.className IngressClass that will be be used to implement the Ingress ## ref https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/ ## - className: "gce" + className: "" ## GCP ingress configuration ## gcp: @@ -477,11 +401,10 @@ ingress: ## managedCertificates: false ## @param ingress.gcp.staticIPName Name of the static IP address you reserved in GCP - ## This is required when using "gce" in ingress.className ## ref https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address ## staticIPName: "" - ## @param ingress.gcp.staticIPV6Name Name of the static IPV6 address you reserved in GCP. This can be optionally provided to deploy a loadbalancer with an IPV6 address + ## @param ingress.gcp.staticIPV6Name Name of the static IPV6 address you reserved. This can be optionally provided to deploy a loadbalancer with an IPV6 address in GCP. ## ref https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address ## staticIPV6Name: "" @@ -601,38 +524,6 @@ dfdewey: memory: 512Mi ## @section Third Party Configuration ## -## @section Monitoring configuration parameters -## IMPORTANT: Turbinia utilizes the kube-prometheus-stack for monitoring. This includes Prometheus, Grafana, and Alertmanager. -## -monitoring: - ## @param monitoring.deployKubePrometheus Deploy kube-prometheus-stack as a subchart. For production environments, it is best practice to deploy this chart separately. - ## - deployKubePrometheus: false - ## @param monitoring.kubeScheduler.enabled Component scraping kube scheduler. Disabled by default due to lack of Prometheus endpoint access for managed K8s clusters (e.g. GKE, EKS). - ## - kubeScheduler: - enabled: false - ## @param monitoring.kubeControllerManager.enabled Component scraping kube controller. 
Disabled by default due to lack of Prometheus endpoint access for managed K8s clusters (e.g. GKE, EKS). - ## - kubeControllerManager: - enabled: false - ## @param monitoring.coreDns.enabled Component scraping core dns. Disabled by default in favor of kube dns. - ## - coreDns: - enabled: false - ## @param monitoring.kubeProxy.enabled Component scraping kube proxy. Disabled by default due to lack of Prometheus endpoint access for managed K8s clusters (e.g. GKE, EKS). - ## - kubeProxy: - enabled: false - ## @param monitoring.kubeDns.enabled Component scraping kube dns. - ## - kubeDns: - enabled: true - ## @param monitoring.prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues Disable so that custom servicemonitors can be created and monitored - ## - prometheus: - prometheusSpec: - serviceMonitorSelectorNilUsesHelmValues: false ## @section Redis configuration parameters ## IMPORTANT: Redis is deployed with Auth enabled by default ## To see a full list of available values, run helm show values charts/redis*