Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add converged agent deployment #61

Merged
merged 21 commits into from
Aug 6, 2024
27 changes: 27 additions & 0 deletions .gitlab-ci.yml
Original file line number Diff line number Diff line change
@@ -297,6 +297,21 @@ staging-deploy-dd-ingest-demo-eks:
ORDERPRODUCER_DEPLOYMENT: deployment-staging.yaml
REGISTRY: $BUILD_SANDBOX_REGISTRY

# Demo env:converged-agent-staging
# Demo-workload deploy into the converged-agent-staging namespace; extends
# the shared `staging-deploy` job template via the YAML merge key.
staging-deploy-converged-agent-demo-eks:
  !!merge <<: *staging-deploy
  variables:
    NAMESPACE: converged-agent-staging
    # NOTE(review): bare key parses as null, not empty string — confirm the
    # deploy script tolerates an unset VALUES, or give it an explicit value.
    VALUES:
    NODE_GROUP: ng-8
    SCRIPT: ./ci/scripts/ci-deploy-demo.sh
    CLUSTER_NAME: dd-otel
    CLUSTER_ARN: "arn:aws:eks:us-east-1:601427279990:cluster/dd-otel"
    REGION: us-east-1
    ZOOKEEPER_DEPLOYMENT: deployment-staging.yaml
    ORDERPRODUCER_DEPLOYMENT: deployment-staging.yaml
    REGISTRY: $BUILD_SANDBOX_REGISTRY

# Agent env:otel-ingest-staging
staging-deploy-otel-ingest-agent-eks:
!!merge <<: *staging-deploy
@@ -320,3 +335,15 @@ staging-deploy-dd-ingest-agent-eks:
REGION: us-east-1
RELEASE_NAME: datadog-agent-dd
NODE_GROUP: ng-7

# Agent env:converged-agent-staging
# Deploys the converged agent Helm release (see ci-deploy-converged-agent.sh,
# which consumes NAMESPACE/RELEASE_NAME/NODE_GROUP and the CLUSTER_* values
# below); extends the shared `staging-deploy` job template via the merge key.
staging-deploy-converged-agent-staging-eks:
  !!merge <<: *staging-deploy
  variables:
    NAMESPACE: converged-agent-staging
    SCRIPT: ./ci/scripts/ci-deploy-converged-agent.sh
    CLUSTER_NAME: dd-otel
    CLUSTER_ARN: "arn:aws:eks:us-east-1:601427279990:cluster/dd-otel"
    REGION: us-east-1
    RELEASE_NAME: converged-agent
    NODE_GROUP: ng-8
155 changes: 155 additions & 0 deletions ci/converged-agent-values/values.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,155 @@
# Helm values for the converged Datadog agent (datadog/datadog chart) with an
# embedded OpenTelemetry Collector. Consumed by ci-deploy-converged-agent.sh.
registry: docker.io/datadog
agents:
  image:
    repository: datadog/agent-dev
    tag: nightly-ot-beta-main-jmx
datadog:
  apiKeyExistingSecret: datadog-secrets
  otelCollector:
    # Expose OTLP gRPC/HTTP on the host so workloads can send to the node IP.
    # NOTE(review): port numbers are quoted strings here — confirm the chart
    # coerces them; the Kubernetes API requires integer containerPort/hostPort.
    ports: [{"containerPort": "4317", "hostPort": "4317", "name": "otel-grpc"}, {"containerPort": "4318", "hostPort": "4318", "name": "otel-http"}]
    enabled: true
    # Embedded collector configuration (passed through as an opaque string and
    # parsed by the collector at startup).
    config: |
      receivers:
        otlp:
          protocols:
            grpc:
              endpoint: 0.0.0.0:4317
            http:
              endpoint: 0.0.0.0:4318
      exporters:
        debug:
          verbosity: detailed
        datadog:
          host_metadata:
            tags: ['env:${env:OTEL_K8S_NAMESPACE}']
          metrics:
            resource_attributes_as_tags: true
            histograms:
              mode: counters
              send_count_sum_metrics: true
          traces:
            span_name_as_resource_name: true
            compute_stats_by_span_kind: true
            peer_service_aggregation: true
            trace_buffer: 1000
          api:
            key: "$DD_API_KEY"
      processors:
        resourcedetection:
          # ensures host.name and other important resource tags
          # get picked up
          detectors: [env, gcp, ecs, ec2, azure, system]
          timeout: 5s
          override: false
          system:
            # Enable optional system attributes
            resource_attributes:
              os.type:
                enabled: true
              os.description:
                enabled: true
              host.ip:
                enabled: true
              host.mac:
                enabled: true
              host.arch:
                enabled: true
              host.cpu.vendor.id:
                enabled: true
              host.cpu.model.name:
                enabled: true
              host.cpu.family:
                enabled: true
              host.cpu.model.id:
                enabled: true
              host.cpu.stepping:
                enabled: true
              host.cpu.cache.l2.size:
                enabled: true
              host.id:
                enabled: false
        # adds various tags related to k8s
        k8sattributes:
          passthrough: false
          auth_type: "serviceAccount"
          pod_association:
            - sources:
                - from: resource_attribute
                  name: k8s.pod.ip
          extract:
            # Duplicate entries (container.image.name/tag, container.id) removed.
            metadata:
              - k8s.pod.name
              - k8s.pod.uid
              - k8s.deployment.name
              - k8s.node.name
              - k8s.namespace.name
              - k8s.pod.start_time
              - k8s.replicaset.name
              - k8s.replicaset.uid
              - k8s.daemonset.name
              - k8s.daemonset.uid
              - k8s.job.name
              - k8s.job.uid
              - k8s.cronjob.name
              - k8s.statefulset.name
              - k8s.statefulset.uid
              - k8s.container.name
              - container.image.name
              - container.image.tag
              - container.id
            # Map the recommended app.kubernetes.io/* pod labels to Datadog
            # kube_app_* tags.
            labels:
              - tag_name: kube_app_name
                key: app.kubernetes.io/name
                from: pod
              - tag_name: kube_app_instance
                key: app.kubernetes.io/instance
                from: pod
              - tag_name: kube_app_version
                key: app.kubernetes.io/version
                from: pod
              - tag_name: kube_app_component
                key: app.kubernetes.io/component
                from: pod
              - tag_name: kube_app_part_of
                key: app.kubernetes.io/part-of
                from: pod
              - tag_name: kube_app_managed_by
                key: app.kubernetes.io/managed-by
                from: pod
        batch:
          send_batch_max_size: 1000
          send_batch_size: 100
          timeout: 10s
        probabilistic_sampler:
          hash_seed: 22
          sampling_percentage: 15.3
      connectors:
        # Computes APM stats from traces and re-emits them as metrics.
        datadog/connector:
          traces:
            span_name_as_resource_name: true
      service:
        telemetry:
          logs:
            encoding: "json"
            initial_fields:
              - service: "converged-agent"
        pipelines:
          metrics:
            receivers: [otlp, datadog/connector]
            processors: [resourcedetection, k8sattributes, batch]
            exporters: [datadog]
          # All traces feed the connector (for stats) plus debug output ...
          traces:
            receivers: [otlp]
            processors: [resourcedetection, k8sattributes, batch]
            exporters: [datadog/connector, debug]
          # ... while only a sampled subset is exported to Datadog.
          traces/sampled:
            receivers: [datadog/connector]
            processors: [probabilistic_sampler, batch]
            exporters: [datadog]
          # NOTE(review): this pipeline declares no receivers — collector
          # config validation normally rejects a receiver-less pipeline.
          # Confirm a receiver (e.g. otlp) was not dropped here.
          logs:
            processors: [resourcedetection, k8sattributes, batch]
            exporters: [datadog]
37 changes: 37 additions & 0 deletions ci/scripts/ci-deploy-converged-agent.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
#!/usr/bin/env bash

# Copyright The OpenTelemetry Authors
# SPDX-License-Identifier: Apache-2.0

# Deploys the converged Datadog agent (datadog/datadog Helm chart with
# converged-agent values) onto the target EKS cluster from CI.

# Fail fast: abort on command errors, unset variables, and pipeline failures.
set -euo pipefail
IFS=$'\n\t'

# Required configuration, supplied as CI job variables (see .gitlab-ci.yml).
# With `set -u` above, the script exits immediately if any is missing.
clusterName=$CLUSTER_NAME
clusterArn=$CLUSTER_ARN
region=$REGION
namespace=$NAMESPACE
releaseName=$RELEASE_NAME
nodegroup=$NODE_GROUP

# Installs or upgrades the converged-agent Helm release.
# Reads globals: releaseName, namespace, nodegroup (set from CI variables).
install_agent() {
    # Idempotent: helm 3+ skips the add if the repo already exists.
    helm repo add datadog https://helm.datadoghq.com

    # Invoke helm directly instead of building a command string and eval-ing
    # it: eval re-parses the text, so any variable containing whitespace or
    # shell metacharacters would be split or re-interpreted. The arguments
    # helm receives are identical to the previous eval'd form.
    # --install makes `upgrade` perform an install when the release is absent.
    # In the nodeSelector key the dots inside the label name are escaped with
    # a backslash so helm does not treat them as path separators.
    helm --debug upgrade "${releaseName}" -n "${namespace}" datadog/datadog --install \
        -f ./ci/converged-agent-values/values.yaml \
        --set datadog.tags="env:${namespace}" \
        --set "agents.nodeSelector.alpha\.eksctl\.io/nodegroup-name=${nodegroup}" \
        --set agents.image.doNotCheckTag=true
}

###########################################################################################################

# Fetch/refresh kubeconfig credentials for the cluster, pin kubectl (and
# helm, which reads the same kubeconfig) to its context, then deploy.
aws eks --region "${region}" update-kubeconfig --name "${clusterName}"
kubectl config use-context "${clusterArn}"

install_agent
Loading