From 450098c7f2ee609ba1b95c8ea2e0c4c3df82e374 Mon Sep 17 00:00:00 2001 From: Alexander Wert Date: Mon, 24 Mar 2025 10:36:11 +0100 Subject: [PATCH] EDOT Collector config diagrams Signed-off-by: Alexander Wert --- docs/_config.yml | 4 + .../config/customize-data-ingestion.md | 11 + .../config/customize-logs-collection.md | 89 +++++++ .../config/customize-metrics-collection.md | 10 + .../config/default-config-k8s.md | 226 ++++++++++++++++++ .../config/default-config-standalone.md | 159 ++++++++++++ docs/_edot-collector/config/index.md | 12 + docs/_edot-collector/edot-collector-config.md | 63 ----- .../edot-collector-limitations.md | 6 +- docs/_includes/head_custom.html | 3 + docs/use-cases/kubernetes/index.md | 2 +- 11 files changed, 520 insertions(+), 65 deletions(-) create mode 100644 docs/_edot-collector/config/customize-data-ingestion.md create mode 100644 docs/_edot-collector/config/customize-logs-collection.md create mode 100644 docs/_edot-collector/config/customize-metrics-collection.md create mode 100644 docs/_edot-collector/config/default-config-k8s.md create mode 100644 docs/_edot-collector/config/default-config-standalone.md create mode 100644 docs/_edot-collector/config/index.md delete mode 100644 docs/_edot-collector/edot-collector-config.md create mode 100644 docs/_includes/head_custom.html diff --git a/docs/_config.yml b/docs/_config.yml index f77584ae..71ae1ff6 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -75,3 +75,7 @@ edot_versions: exclude: - "gen_edot_col_components/" + +mermaid: + # Version of mermaid library + version: "11.5.0" diff --git a/docs/_edot-collector/config/customize-data-ingestion.md b/docs/_edot-collector/config/customize-data-ingestion.md new file mode 100644 index 00000000..ed37c29d --- /dev/null +++ b/docs/_edot-collector/config/customize-data-ingestion.md @@ -0,0 +1,11 @@ +--- +title: Customize Data Ingestion +parent: Configuration +layout: default +nav_order: 5 +--- + +๐Ÿšง Coming soon + +- data routing (example split 
data by K8s namespace into different data streams) +- ECS mode (though not officially supported) \ No newline at end of file diff --git a/docs/_edot-collector/config/customize-logs-collection.md b/docs/_edot-collector/config/customize-logs-collection.md new file mode 100644 index 00000000..f1d1455c --- /dev/null +++ b/docs/_edot-collector/config/customize-logs-collection.md @@ -0,0 +1,89 @@ +--- +title: Customize Logs +parent: Configuration +layout: default +nav_order: 3 +--- + +# Customize Logs Collection + +๐Ÿšง Coming soon + +## Pre-processing Logs + +- limitation: (as of 9.0) Elasticsearch Ingest Pipelinges are not (yet) applicable to OTel-native data +- pre-processing of logs needs to happen in OTel collectors + +### Parsing JSON logs + +```yaml +# ... +receivers: + filelog: + # ... + operators: + # Parse body as JSON https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/stanza/docs/operators/json_parser.md + - type: json_parser + on_error: send_quiet + parse_from: body + parse_to: body + + # ... +``` + +### Setting custom fields + +## Customizing logs parsing on Kubernetes + +TODO: use K8s pods annotation to configure logs parsing, link Blog post + +Daemonset collector config: + +```yaml +receivers: + receiver_creator/logs: + watch_observers: [k8s_observer] + discovery: + enabled: true + receivers: + +# ... + +extensions: + k8s_observer: + +# ... + +service: + extensions: [k8s_observer] + pipelines: +``` + +- Make sure to remove / comment out the static file log receiver (or restrict the log file pattern) to avoid log duplication + +Annotation of the pod + +```yaml +# ... 
+metadata: + annotations: + io.opentelemetry.discovery.logs/enabled: "true" + io.opentelemetry.discovery.logs/config: | + operators: + - id: container-parser + type: container + - id: json-parser + type: json_parser + on_error: send_quiet + parse_from: body + parse_to: body + - id: custom-value + type: add + field: attributes.tag + value: custom-value +spec: + containers: + # ... +``` + +## Using Processors / OTTL for Logs processing \ No newline at end of file diff --git a/docs/_edot-collector/config/customize-metrics-collection.md b/docs/_edot-collector/config/customize-metrics-collection.md new file mode 100644 index 00000000..fa34d4b2 --- /dev/null +++ b/docs/_edot-collector/config/customize-metrics-collection.md @@ -0,0 +1,10 @@ +--- +title: Customize Metrics +parent: Configuration +layout: default +nav_order: 4 +--- + +# Customize Metrics Collection + +๐Ÿšง Coming soon \ No newline at end of file diff --git a/docs/_edot-collector/config/default-config-k8s.md b/docs/_edot-collector/config/default-config-k8s.md new file mode 100644 index 00000000..34608a3b --- /dev/null +++ b/docs/_edot-collector/config/default-config-k8s.md @@ -0,0 +1,226 @@ +--- +title: Default Configโ€”Kubernetes +parent: Configuration +layout: default +nav_order: 2 +--- + +# Default Configuration - EDOT Collectors on Kubernetes +{: .no_toc } + +The [Kubernetes setup](../../quickstart/index) utilizes the OpenTelemetry Operator to automate orchestration of EDOT Collectors: + +* [EDOT Collector Cluster](#cluster-collector): Collection of cluster metrics. +* [EDOT Collector Daemon](#daemonset-collector): Collection of node metrics, logs and application telemetry. +* [EDOT Collector Gateway](#gateway-collectors): Pre-processing, aggregation and ingestion of data into Elastic. + + + + + + + + + + + + + + +
Direct ingestion into ElasticsearchManaged OTLP Endpoint
+
+                    flowchart LR
+                        cluster@{ shape: proc, label: "Cluster
+                        Collector
+                        fa:fa-microchip"} -->|otlp| gateway@{ shape: procs, label: "Gateway
+                        Collectors
+                        fa:fa-microchip"}
+
+                        daemon@{ shape: procs, label: "Daemonset
+                        Collectors
+                        fa:fa-microchip"} -->|otlp| gateway
+
+                        gateway ==>|_bulk| es@{ shape: db, label: "Elasticsearch" }
+
+                        style es stroke:#33f,stroke-width:2px,color:#000;
+                
+
+
+                    flowchart LR
+                        cluster@{ shape: proc, label: "Cluster
+                        Collector
+                        fa:fa-microchip"} -->|otlp| gateway@{ shape: procs, label: "Gateway
+                        Collectors
+                        fa:fa-microchip"}
+
+                        daemon@{ shape: procs, label: "Daemonset
+                        Collectors
+                        fa:fa-microchip"} -->|otlp| gateway
+
+                        gateway ==>|otlp| otlp@{ shape: display, label: "Managed
+                        OTLP endpoint" }
+
+                        style otlp stroke:#33f,stroke-width:2px,color:#000;
+                
+
+ +The following sections describe the default pipelines for the different roles of EDOT collectors in a Kubernetes setup. + +- TOC +{:toc} + +## Pipeline - Cluster Collector + +```mermaid +flowchart LR + k8s_cluster@{ shape: proc, label: "k8s_cluster + fa:fa-right-to-bracket"} -->|M| k8sattributes@{ shape: proc, label: "k8sattributes + fa:fa-gears"} + + k8sattributes -->|M| resourcedetection@{ shape: procs, label: "resourcedetection + fa:fa-gears"} + + k8sobjects@{ shape: proc, label: "k8sobjects + fa:fa-right-to-bracket"} -->|L| resourcedetection + + resourcedetection -->|L/M| resource@{ shape: procs, label: "resource + fa:fa-gears"} + + resource -->|L/M| otlp_exporter@{ shape: proc, label: "OTLP + fa:fa-right-from-bracket"} +``` + +The main purpose of the `Cluster Collector` is to collect Kubernetes cluster-level metrics (using the [`k8s_cluster`] receiver) and cluster events ([`k8sobjects`] receiver) and forward them to the Gateway Collector through `OTLP`. The [`resource`] and [`resourcedetection`] processors enrich the cluster-level data with corresponding meta information. 
+ +## Pipeline - Daemonset Collectors + +```mermaid +flowchart LR + otlp@{ shape: proc, label: "OTLP + fa:fa-right-to-bracket"} -->|T/L/M| batch@{ shape: proc, label: "batch + fa:fa-gears"} + + batch -->|T/L/M| resource@{ shape: proc, label: "resource + fa:fa-right-from-bracket"} + + resource -->|T/L/M| otlp_exporter@{ shape: proc, label: "OTLP + fa:fa-right-from-bracket"} + +%% logs pipeline + filelog@{ shape: proc, label: "filelog + fa:fa-right-to-bracket"} -->|L| batch_lm@{ shape: proc, label: "batch + fa:fa-gears"} + + batch_lm -->|L/M| k8sattributes@{ shape: proc, label: "k8sattributes + fa:fa-gears"} + + k8sattributes -->|L/M| resourcedetection@{ shape: procs, label: "resourcedetection + fa:fa-gears"} + + resourcedetection -->|L/M| resource@{ shape: procs, label: "resource + fa:fa-gears"} + + resource -->|L/M| otlp_exporter + +%% system metrics pipeline + kubletstats@{ shape: proc, label: "kubletstats + fa:fa-right-to-bracket"} -->|M| batch_lm + hostmetrics@{ shape: proc, label: "hostmetrics + fa:fa-right-to-bracket"} -->|M| batch_lm +``` + +The `Daemonset Collectors` gather telemetry associated with corresponding, individual Kubernetes nodes: + +1. *Host metrics and container logs* + + [`filelog`] and [`hostmetrics`] receivers are used to gather container logs and host metrics, respectively. + The [`kubletstats`] receiver collects additional Kubernetes Node, Pod and Container metrics. + The logs and metrics are batched for better performance ([`batch`] processor) and then enriched with meta information using the + [`k8sattributes`], [`resourcedetection`] and [`resource`] processors. + +2. *Application telemetry through OTLP from OTel SDKs* + + The `Daemonset Collectors` also receive the application telemetry from OTel SDKs that instrument services / pods running on + corresponding Kubernetes nodes. 
The Daemonset Collectors receive that data through [`OTLP`], batch the data ([`batch`] processor) + and pass it on to the Gateway Collector through the OTLP exporter. + +## Pipeline - Gateway Collectors + +The `Gateway Collectors` pipelines differ fundamentally between the two different deployment use cases *'Direct ingestion into Elasticsearch'* +and using Elastic's *'Managed OTLP Endpoint'*. + +### Direct ingestion into Elasticsearch + +In *self-managed* and *Elastic Cloud Hosted* Stack deployment use cases the main purpose of the `Gateway Collector` is the central enrichment of data +before the OpenTelemetry data is being ingested directly into Elasticsearch using the [`elasticsearch`] exporter. + +```mermaid +flowchart LR + otlp@{ shape: proc, label: "OTLP + fa:fa-right-to-bracket"} -->|T/L| batch@{ shape: proc, label: "batch + fa:fa-gears"} + batch -->|T| elastictrace@{ shape: proc, label: "elastictrace + fa:fa-gears"} + elastictrace -->|T| es_exporter@{ shape: proc, label: "elasticsearch + fa:fa-right-from-bracket"} + es_exporter -->|otel| otel@{ shape: framed-circle, label: "otel" } + elastictrace -->|T| elasticapm@{ shape: hex, label: "elasticapm + fa:fa-link"} + elasticapm -->|M| es_exporter + + batch -->|L| elasticapm + + otlp -->|M| routing@{ shape: hex, label: "routing + fa:fa-link"} + routing -->|M| batch + batch -->|L/M| es_exporter + + routing -->|"M (infra)"| elasticinframetrics@{ shape: proc, label: "elasticinframetrics + fa:fa-gears"} + elasticinframetrics -->|M| attributes@{ shape: proc, label: "attributes + fa:fa-gears"} + attributes -->|M| resource@{ shape: proc, label: "resource + fa:fa-gears"} + resource -->|M| batch_ecs@{ shape: proc, label: "batch + fa:fa-gears"} + batch_ecs -->|M| es_exporter_ecs@{ shape: proc, label: "elasticsearch + fa:fa-right-from-bracket"} + es_exporter_ecs -->|ecs| ecs@{ shape: framed-circle, label: "ecs" } +``` + +Hence, this Gateway Collector configuration comprises the pipelines for data enrichment of [application 
telemetry](./default-config-standalone#application--traces-collection-pipeline) and [host metrics](./default-config-standalone#host-metrics-collection-pipeline) (for details, refer to the linked descriptions of the corresponding standalone use cases). + +The [`routing`] connector separates the infrastructure metrics from other metrics and routes them into the ECS-based pipeline, with ECS-compatibility exporter mode. +Other metrics are exported in OTel-native format to Elasticsearch. + +### Managed OTLP Endpoint + +With the managed OTLP Endpoint, the Gateway Collector configuration simply pipes all the data from the [`OTLP`] receiver through a [`batch`] processor before the data is being exported through `OTLP` to the managed endpoint. + +```mermaid +flowchart LR + otlp@{ shape: proc, label: "OTLP + fa:fa-right-to-bracket"} -->|T/L/M| batch@{ shape: proc, label: "batch + fa:fa-gears"} + batch -->|T/L/M| otlp_exporter@{ shape: proc, label: "OTLP + fa:fa-right-from-bracket"} +``` + +With this scenario there's no need to do any Elastic-specific enrichment in your Kubernetes cluster, as all of that happens behind the managed OTLP endpoint. 
+ +[`hostmetrics`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/hostmetricsreceiver +[`elasticsearch`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter +[`elasticinframetrics`]: https://github.com/elastic/opentelemetry-collector-components/tree/main/processor/elasticinframetricsprocessor +[`elasticsearch`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter +[`k8s_cluster`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8sclusterreceiver +[`k8sobjects`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8sobjectsreceiver +[`resource`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourceprocessor +[`k8sattributes`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/k8sattributesprocessor +[`resourcedetection`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor +[`filelog`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver +[`hostmetrics`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/hostmetricsreceiver +[`kubletstats`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/kubeletstatsreceiver +[`batch`]: https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor/batchprocessor +[`OTLP`]: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver/otlpreceiver +[`routing`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/connector/routingconnector \ No newline at end of file diff --git a/docs/_edot-collector/config/default-config-standalone.md b/docs/_edot-collector/config/default-config-standalone.md new 
file mode 100644 index 00000000..d312a3f4 --- /dev/null +++ b/docs/_edot-collector/config/default-config-standalone.md @@ -0,0 +1,159 @@ +--- +title: Default Configโ€”Standalone +parent: Configuration +layout: default +nav_order: 1 +--- + +# Default Configuration - Standalone EDOT Collector +{: .no_toc } + +The standalone EDOT Collector comes with a default configuration that covers pipelines for the collection of logs, host metrics and data from OTel SDKs. +The following sampling files are available: + +| Use Cases | Direct ingestion into Elasticsearch | Managed OTLP Endpoint | +|:---|:---:|:---:| +| Platform logs | [๐Ÿ“„ Logs - ES] | [๐Ÿ“„ Logs - OTLP] | +| Platform logs & host metrics | [๐Ÿ“„ Logs | Metrics - ES] | [๐Ÿ“„ Logs | Metrics - OTLP] | +| Platform logs, host metrics,
application telemetry | [๐Ÿ“„ Logs | Metrics | App - ES]
(*default*) | [๐Ÿ“„ Logs | Metrics | App - OTLP]
(*default*) | + +Use the above example configurations as a reference when configuring your upstream collector or customizing your EDOT Collector configuration. + +The following sections describe the pipelines of the above configurations, broken down by individual use cases. + +- TOC +{:toc} + +## Direct Ingestion into Elasticsearch + +For *self-managed* and *Elastic Cloud Hosted* Stack deployment use cases (as of Elastic Stack version {{ site.edot_versions.collector }}), we recommend ingesting OpenTelemetry data +from the EDOT Collector directly into Elasticsearch using the [`elasticsearch`] exporter. + +Learn more about the configuration options for the `elasticsearch` exporter in the [corresponding documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/elasticsearchexporter/README.md#configuration-options) or learn about [common configuration use cases](./customize-data-ingestion) for the `elasticsearch` exporter. + +The `elasticsearch` exporter comes with two relevant data ingestion modes: + +- **`ecs`**: Writes data in backwards compatible ECS format. Original attribute names and semantics may get lost in translation. +- **`otel`**: OTel attribute names and semantics are preserved. + +Overall, the goal of EDOT is to preserve OTel data formats and semantics as much as possible, hence, `otel` is the default mode for the EDOT Collector. +However, some use cases might require data to be exported in ECS format for backwards compatibility reasons. 
+ +### Logs Collection Pipeline + +```mermaid +flowchart LR + filelog@{ shape: proc, label: "filelog + fa:fa-right-to-bracket"} -->|M| resourcedetection@{ shape: proc, label: "resourcedetection + fa:fa-gears"} + resourcedetection -->|M| es_exporter@{ shape: proc, label: "elasticsearch + fa:fa-right-from-bracket"} + es_exporter -->|otel| otel@{ shape: framed-circle, label: "otel" } +``` + +For log collection, the default configuration utilizes the [`filelog`] receiver to read log entries from files. + +In addition, the [`resourcedetection`] processor enriches the log entries with meta information about the corresponding host and operating system. + +Data is written out directly into Elasticsearch with the [`elasticsearch`] exporter using the `OTel-native` data mode. + +### Application / Traces Collection Pipeline + +```mermaid +flowchart LR + otlp@{ shape: proc, label: "OTLP + fa:fa-right-to-bracket"} -->|T| elastictrace@{ shape: proc, label: "elastictrace + fa:fa-gears"} + elastictrace -->|T| es_exporter@{ shape: proc, label: "elasticsearch + fa:fa-right-from-bracket"} + elastictrace -->|T| elasticapm@{ shape: hex, label: "elasticapm + fa:fa-link"} + elasticapm -->|M| es_exporter + otlp -->|L/M| es_exporter + otlp -->|L| elasticapm + es_exporter -->|otel| otel@{ shape: framed-circle, label: "otel" } +``` + +The application pipeline in the EDOT Collector receives data from OTel SDKs through the [`OTLP`] receiver. While logs and metrics are exported as is into Elasticsearch, traces involve two additional processors / connectors. + +The [`elastictrace`] processor enriches trace data with additional attributes that improve the user experience in the Elastic Observability UIs. In addition, the [`elasticapm`] connector generates pre-aggregated APM metrics from tracing data. + +{: .note} +> Both components, `elastictrace` and `elasticapm` are required for Elastic APM UIs to work properly. 
However, both components are not (yet) included in the OpenTelemetry [Collector Contrib repository](https://github.com/open-telemetry/opentelemetry-collector-contrib). Thus, to use OpenTelemetry with Elastic for APM use cases one of the following options is available: +> +> * use the EDOT Collector with that configuration to ingest data into Elasticsearch +> * **or** [build a custom, EDOT-like Collector](../custom-collector) for ingesting data into Elasticsearch +> * **or** use Elastic's [managed OTLP endpoint](../../quickstart/serverless/index) that would do the enrichment for you + +Application-related OTel data is ingested into Elasticsearch in OTel-native format using the [`elasticsearch`] exporter. + +### Host Metrics Collection Pipeline + +```mermaid +flowchart LR + hostmetrics@{ shape: proc, label: "hostmetrics + fa:fa-right-to-bracket"} -->|M| elasticinframetrics@{ shape: proc, label: "elasticinframetrics + fa:fa-gears"} + elasticinframetrics --> |M| resourcedetection@{ shape: proc, label: "resourcedetection + fa:fa-gears"} + resourcedetection --> |M| attributes@{ shape: proc, label: "attributes + fa:fa-gears"} + attributes --> |M| resource@{ shape: proc, label: "resource + fa:fa-gears"} + resource --> |M| es_exporter_ecs@{ shape: proc, label: "elasticsearch + fa:fa-right-from-bracket"} + es_exporter_ecs -->|ecs| ecs@{ shape: framed-circle, label: "ecs" } +``` + +This pipeline utilizes the [`hostmetrics`] receiver to collect `disk`, `filesystem`, `cpu`, `memory`, `process` and `network` metrics for the corresponding host. + +For backwards compatibility host metrics are translated into ECS-compatible system metrics using the [`elasticinframetrics`] processor and, finally, are ingested in `ecs` format through the [`elasticsearch`] exporter. + +The [`resourcedetection`] processor enriches the metrics with meta information about the corresponding host and operating system. 
+The [`attributes`] and [`resource`] processors are used to set some fields for proper routing of the ECS-based system metrics data into corresponding Elasticsearch data streams. + +## Ingestion through the Managed OTLP Endpoint + +When ingesting OTel data through Elastic's Managed OTLP endpoint, all the enrichment that is required for an optimal experience in the Elastic solutions is happening behind the +managed OTLP endpoint and, thus, is transparent to the users. + +Accordingly, the collector configuration for all the use cases is relatively simple and is only about local data collection and context enrichment: + +```mermaid +flowchart LR + hostmetrics@{ shape: proc, label: "hostmetrics + fa:fa-right-to-bracket"} -->|M| resourcedetection@{ shape: proc, label: "resourcedetection + fa:fa-gears"} + + filelog@{ shape: proc, label: "filelog + fa:fa-right-to-bracket"} -->|L| resourcedetection + + otlp@{ shape: proc, label: "OTLP + fa:fa-right-to-bracket"} -->|L/M/T| otlp_exporter@{ shape: proc, label: "OTLP + fa:fa-right-from-bracket"} + + resourcedetection --> |L/M| otlp_exporter +``` + +Platform logs are scraped with the [`filelog`] receiver, host metrics are collected through the [`hostmetrics`] receiver and both signals are enriched with meta information through the [`resourcedetection`] processor. + +Data from OTel SDKs is piped through the [`OTLP`] receiver directly to the OTLP exporter that sends data for all the signals to the managed OTLP endpoint. + +As you can see, with the managed OTLP Endpoint there is no need for configuring any Elastic-specific components, such as [`elasticinframetrics`], [`elastictrace`] processors, [`elasticapm`] connector or the [`elasticsearch`] exporter. Hence, with this deployment model the edge setup and configuration can be 100% vendor agnostic. 
+ +[`attributes`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/attributesprocessor +[`filelog`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver +[`hostmetrics`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/hostmetricsreceiver +[`elasticsearch`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/elasticsearchexporter +[`elasticinframetrics`]: https://github.com/elastic/opentelemetry-collector-components/tree/main/processor/elasticinframetricsprocessor +[`elastictrace`]: https://github.com/elastic/opentelemetry-collector-components/tree/main/processor/elastictraceprocessor +[`elasticapm`]: https://github.com/elastic/opentelemetry-collector-components/tree/main/connector/elasticapmconnector +[`resource`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourceprocessor +[`resourcedetection`]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor +[`OTLP`]: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver/otlpreceiver +[๐Ÿ“„ Logs - ES]: https://raw.githubusercontent.com/elastic/elastic-agent/refs/tags/v{{ site.edot_versions.collector }}/internal/pkg/otel/samples/linux/platformlogs.yml +[๐Ÿ“„ Logs - OTLP]: https://raw.githubusercontent.com/elastic/elastic-agent/refs/tags/v{{ site.edot_versions.collector }}/internal/pkg/otel/samples/linux/managed_otlp/platformlogs.yml +[๐Ÿ“„ Logs | Metrics - ES]: https://raw.githubusercontent.com/elastic/elastic-agent/refs/tags/v{{ site.edot_versions.collector }}/internal/pkg/otel/samples/linux/platformlogs_hostmetrics.yml +[๐Ÿ“„ Logs | Metrics - OTLP]: https://raw.githubusercontent.com/elastic/elastic-agent/refs/tags/v{{ site.edot_versions.collector }}/internal/pkg/otel/samples/linux/managed_otlp/platformlogs_hostmetrics.yml +[๐Ÿ“„ Logs | 
Metrics | App - ES]: https://raw.githubusercontent.com/elastic/elastic-agent/refs/tags/v{{ site.edot_versions.collector }}/internal/pkg/otel/samples/linux/logs_metrics_traces.yml +[๐Ÿ“„ Logs | Metrics | App - OTLP]: https://raw.githubusercontent.com/elastic/elastic-agent/refs/tags/v{{ site.edot_versions.collector }}/internal/pkg/otel/samples/linux/managed_otlp/logs_metrics_traces.yml \ No newline at end of file diff --git a/docs/_edot-collector/config/index.md b/docs/_edot-collector/config/index.md new file mode 100644 index 00000000..0d0b50bc --- /dev/null +++ b/docs/_edot-collector/config/index.md @@ -0,0 +1,12 @@ +--- +title: Configuration +layout: default +nav_order: 3 +--- + +# EDOT Collector - Configuration + +The following sub-pages provide some insights into + +- the default configurations of the EDOT Collector in different environments and scenarios +- ways and examples of customizing the EDOT Collector configuration for different custom use cases diff --git a/docs/_edot-collector/edot-collector-config.md b/docs/_edot-collector/edot-collector-config.md deleted file mode 100644 index 80bed3eb..00000000 --- a/docs/_edot-collector/edot-collector-config.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Configuration -layout: default -nav_order: 3 ---- - -## Upstream collector configuration examples - -Use the Elastic [example configurations](https://github.com/elastic/elastic-agent/tree/main/internal/pkg/otel/samples) as a reference when configuring your upstream collector. - -## Collector Configuration - -The EDOT Collector uses a YAML-based configuration file. 
Below is a sample configuration: - -``` - hostmetrics/system: - collection_interval: 30s - scrapers: - cpu: - memory: - disk: - - # Receiver for platform logs - filelog/platformlogs: - include: [ /var/log/*.log ] - start_at: end - -processors: - resourcedetection: - detectors: ["system"] - attributes/dataset: - actions: - - key: event.dataset - from_attribute: data_stream.dataset - action: upsert - -exporters: - elasticsearch: - endpoints: ["${env:ELASTIC_ENDPOINT}"] - api_key: ${env:ELASTIC_API_KEY} - mapping: - mode: ecs - -service: - extensions: [file_storage] - pipelines: - metrics: - receivers: [hostmetrics/system] - processors: [resourcedetection, attributes/dataset] - exporters: [elasticsearch] - logs: - receivers: [filelog/platformlogs] - processors: [resourcedetection] - exporters: [elasticsearch] - -``` - -**Note**: Replace `"https://your-elastic-instance:9200"` with your Elastic instance URL and `"YOUR_API_KEY"` with your API key. - -For comprehensive configuration options, consult the [OpenTelemetry Collector documentation](https://github.com/open-telemetry/opentelemetry-collector). - - - diff --git a/docs/_edot-collector/edot-collector-limitations.md b/docs/_edot-collector/edot-collector-limitations.md index 2dd81a9f..2ed00ca2 100644 --- a/docs/_edot-collector/edot-collector-limitations.md +++ b/docs/_edot-collector/edot-collector-limitations.md @@ -27,4 +27,8 @@ The Elastic Distribution of the OpenTelemetry (EDOT) Collector has the following The [`hostmetrics receiver`](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/hostmetricsreceiver) logs errors if it cannot access certain process information due to insufficient permissions. - **Mapping errors appear temporarily in the console** - Initial mapping errors occur until the system completes the mapping process. \ No newline at end of file + Initial mapping errors occur until the system completes the mapping process. 
+ +- **Ingest Pipelines with OTel-native Data** + +- **Histograms only supported in delta temporality** \ No newline at end of file diff --git a/docs/_includes/head_custom.html b/docs/_includes/head_custom.html new file mode 100644 index 00000000..7699c4da --- /dev/null +++ b/docs/_includes/head_custom.html @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/docs/use-cases/kubernetes/index.md b/docs/use-cases/kubernetes/index.md index cd656099..a6192357 100644 --- a/docs/use-cases/kubernetes/index.md +++ b/docs/use-cases/kubernetes/index.md @@ -12,7 +12,7 @@ The [quickstart guides](../../quickstart/index) for Kubernetes install a set of The Kubernetes setup utilizes the OpenTelemetry Operator preconfigured to automate orchestration of EDOT as below: * **EDOT Collector Cluster:** Collection of cluster metrics. -* **EDOT Collector Daemon:** Collection of node metrics and logs. +* **EDOT Collector Daemon:** Collection of node metrics, logs and application telemetry. * **EDOT Collector Gateway:** Pre-processing, aggregation and ingestion of data into Elastic. * **EDOT SDKs**: Annotated applications will be auto-instrumented with [EDOT SDKs](../../edot-sdks/index).