
Commit eb5ae12

Merge branch 'main' into dependabot/go_modules/otel-dependencies-034d251fb6
2 parents: d003ca6 + d48ac97

23 files changed: +2860 −330 lines

.buildkite/pipeline.elastic-agent-package.yml

+2
@@ -134,6 +134,8 @@ steps:
    depends_on: package
    agents:
      provider: "gcp"
+     machineType: "n2-standard-8"
+     diskSizeGb: 250
    env:
      DRA_PROJECT_ID: "elastic-agent-package"
      DRA_PROJECT_ARTIFACT_ID: "agent-package"

.buildkite/scripts/steps/integration_tests.sh

+1 −1
@@ -11,7 +11,7 @@ MAGE_SUBTARGET="${3:-""}"
# Override the agent package version using a string with format <major>.<minor>.<patch>
# NOTE: use only after version bump when the new version is not yet available, for example:
# OVERRIDE_AGENT_PACKAGE_VERSION="8.10.3" otherwise OVERRIDE_AGENT_PACKAGE_VERSION="".
-OVERRIDE_AGENT_PACKAGE_VERSION=""
+OVERRIDE_AGENT_PACKAGE_VERSION="8.14.0"

if [[ -n "$OVERRIDE_AGENT_PACKAGE_VERSION" ]]; then
  OVERRIDE_TEST_AGENT_VERSION=${OVERRIDE_AGENT_PACKAGE_VERSION}"-SNAPSHOT"

.mergify.yml

+13
@@ -306,3 +306,16 @@ pull_request_rules:
        labels:
          - "backport"
        title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}"
+  - name: backport patches to 8.14 branch
+    conditions:
+      - merged
+      - label=backport-v8.14.0
+    actions:
+      backport:
+        assignees:
+          - "{{ author }}"
+        branches:
+          - "8.14"
+        labels:
+          - "backport"
+        title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}"
New changelog fragment

+34
@@ -0,0 +1,34 @@
+# Kind can be one of:
+# - breaking-change: a change to previously-documented behavior
+# - deprecation: functionality that is being removed in a later release
+# - bug-fix: fixes a problem in a previous version
+# - enhancement: extends functionality but does not break or fix existing behavior
+# - feature: new functionality
+# - known-issue: problems that we are aware of in a given version
+# - security: impacts on the security of a product or a user’s deployment.
+# - upgrade: important information for someone upgrading from a prior version
+# - other: does not fit into any of the other categories
+kind: feature
+
+# Change summary; a 80ish characters long description of the change.
+summary: Introduce isolate units for input spec.
+
+# Long description; in case the summary is not enough to describe the change
+# this field accommodate a description without length limits.
+# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment.
+description: |
+  Introduce a new flag in the spec that will allow to disable grouping of same input type inputs into a single component.
+  If that flag is being activated the inputs won't be grouped and we will create a separate component for each input that will run units for that particular input.
+
+# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc.
+component: elastic-agent
+
+# PR URL; optional; the PR number that added the changeset.
+# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added.
+# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
+# Please provide it if you are adding a fragment for a different PR.
+pr: https://github.com/elastic/elastic-agent/pull/4476
+
+# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of).
+# If not present is automatically filled by the tooling with the issue linked to the PR number.
+issue: https://github.com/elastic/security-team/issues/8669
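For context, the flag this fragment describes surfaces as the IsolateUnits field on component.InputSpec, which the new coordinator test later in this commit exercises through fakeIsolatedUnitsInputSpec. A minimal sketch of the intent, inferred from that test rather than from the spec loader; the import path and platform value here are assumptions, not part of the commit:

package main

import (
    "fmt"

    "github.com/elastic/elastic-agent/pkg/component" // assumed import path
)

func main() {
    // Sketch only: with IsolateUnits set, each input of this type is expected to
    // get its own component (the test below asserts components named
    // "fake-isolated-units-default-fake-isolated-units-0" and "...-1"),
    // instead of one shared component running multiple units.
    spec := component.InputSpec{
        Name:         "fake-isolated-units",
        Platforms:    []string{"linux/amd64"}, // illustrative; the test derives this from goruntime
        IsolateUnits: true,                    // new flag introduced by this commit
    }
    fmt.Printf("%+v\n", spec)
}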
New changelog fragment

+32
@@ -0,0 +1,32 @@
+# Kind can be one of:
+# - breaking-change: a change to previously-documented behavior
+# - deprecation: functionality that is being removed in a later release
+# - bug-fix: fixes a problem in a previous version
+# - enhancement: extends functionality but does not break or fix existing behavior
+# - feature: new functionality
+# - known-issue: problems that we are aware of in a given version
+# - security: impacts on the security of a product or a user’s deployment.
+# - upgrade: important information for someone upgrading from a prior version
+# - other: does not fit into any of the other categories
+kind: bug-fix
+
+# Change summary; a 80ish characters long description of the change.
+summary: Fix issue where kubernetes_leaderelection provider would not try to reacquire the lease once lost
+
+# Long description; in case the summary is not enough to describe the change
+# this field accommodate a description without length limits.
+# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment.
+#description:
+
+# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc.
+component: elastic-agent
+
+# PR URL; optional; the PR number that added the changeset.
+# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added.
+# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
+# Please provide it if you are adding a fragment for a different PR.
+pr: https://github.com/elastic/elastic-agent/pull/4542
+
+# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of).
+# If not present is automatically filled by the tooling with the issue linked to the PR number.
+issue: https://github.com/elastic/elastic-agent/issues/4543

deploy/kubernetes/elastic-agent-managed-kubernetes.yaml

+1 −1
@@ -30,7 +30,7 @@ spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: elastic-agent
-         image: docker.elastic.co/beats/elastic-agent:8.14.0
+         image: docker.elastic.co/beats/elastic-agent:8.15.0
          env:
            # Set to 1 for enrollment into Fleet server. If not set, Elastic Agent is run in standalone mode
            - name: FLEET_ENROLL

deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml

+2 −2
@@ -698,13 +698,13 @@ spec:
      #   - -c
      #   - >-
      #     mkdir -p /etc/elastic-agent/inputs.d &&
-     #     wget -O - https://github.com/elastic/elastic-agent/archive/8.14.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-8.14/deploy/kubernetes/elastic-agent-standalone/templates.d"
+     #     wget -O - https://github.com/elastic/elastic-agent/archive/8.15.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-8.15/deploy/kubernetes/elastic-agent-standalone/templates.d"
      #   volumeMounts:
      #     - name: external-inputs
      #       mountPath: /etc/elastic-agent/inputs.d
      containers:
        - name: elastic-agent-standalone
-         image: docker.elastic.co/beats/elastic-agent:8.14.0
+         image: docker.elastic.co/beats/elastic-agent:8.15.0
          args: ["-c", "/etc/elastic-agent/agent.yml", "-e"]
          env:
            # The basic authentication username used to connect to Elasticsearch

internal/pkg/agent/application/coordinator/coordinator_test.go

+112 −2
@@ -61,6 +61,19 @@ var (
            },
        },
    }
+   fakeIsolatedUnitsInputSpec = component.InputSpec{
+       Name:      "fake-isolated-units",
+       Platforms: []string{fmt.Sprintf("%s/%s", goruntime.GOOS, goruntime.GOARCH)},
+       Shippers:  []string{"fake-shipper"},
+       Command: &component.CommandSpec{
+           Timeouts: component.CommandTimeoutSpec{
+               Checkin: 30 * time.Second,
+               Restart: 10 * time.Millisecond, // quick restart during tests
+               Stop:    30 * time.Second,
+           },
+       },
+       IsolateUnits: true,
+   }
    fakeShipperSpec = component.ShipperSpec{
        Name:      "fake-shipper",
        Platforms: []string{fmt.Sprintf("%s/%s", goruntime.GOOS, goruntime.GOARCH)},
@@ -547,6 +560,94 @@ func TestCoordinator_StateSubscribe(t *testing.T) {
    require.NoError(t, err)
}

+func TestCoordinator_StateSubscribeIsolatedUnits(t *testing.T) {
+   coordCh := make(chan error)
+   ctx, cancel := context.WithCancel(context.Background())
+   defer cancel()
+
+   coord, cfgMgr, varsMgr := createCoordinator(t, ctx, WithComponentInputSpec(fakeIsolatedUnitsInputSpec))
+   go func() {
+       err := coord.Run(ctx)
+       if errors.Is(err, context.Canceled) {
+           // allowed error
+           err = nil
+       }
+       coordCh <- err
+   }()
+
+   resultChan := make(chan error)
+   go func() {
+       ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
+       defer cancel()
+
+       subChan := coord.StateSubscribe(ctx, 32)
+       for {
+           select {
+           case <-ctx.Done():
+               resultChan <- ctx.Err()
+               return
+           case state := <-subChan:
+               t.Logf("%+v", state)
+               if len(state.Components) == 3 {
+                   compState0 := getComponentState(state.Components, "fake-isolated-units-default-fake-isolated-units-0")
+                   compState1 := getComponentState(state.Components, "fake-isolated-units-default-fake-isolated-units-1")
+                   if compState0 != nil && compState1 != nil {
+                       unit0, ok0 := compState0.State.Units[runtime.ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-isolated-units-default-fake-isolated-units-0-unit"}]
+                       unit1, ok1 := compState1.State.Units[runtime.ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-isolated-units-default-fake-isolated-units-1-unit"}]
+                       if ok0 && ok1 {
+                           if (unit0.State == client.UnitStateHealthy && unit0.Message == "Healthy From Fake Isolated Units 0 Config") &&
+                               (unit1.State == client.UnitStateHealthy && unit1.Message == "Healthy From Fake Isolated Units 1 Config") {
+                               resultChan <- nil
+                               return
+                           }
+                       }
+                   }
+               }
+           }
+       }
+   }()
+
+   // no vars used by the config
+   varsMgr.Vars(ctx, []*transpiler.Vars{{}})
+
+   // set the configuration to run a fake input
+   cfg, err := config.NewConfigFrom(map[string]interface{}{
+       "outputs": map[string]interface{}{
+           "default": map[string]interface{}{
+               "type": "fake-action-output",
+               "shipper": map[string]interface{}{
+                   "enabled": true,
+               },
+           },
+       },
+       "inputs": []interface{}{
+           map[string]interface{}{
+               "id":         "fake-isolated-units-0",
+               "type":       "fake-isolated-units",
+               "use_output": "default",
+               "state":      client.UnitStateHealthy,
+               "message":    "Healthy From Fake Isolated Units 0 Config",
+           },
+           map[string]interface{}{
+               "id":         "fake-isolated-units-1",
+               "type":       "fake-isolated-units",
+               "use_output": "default",
+               "state":      client.UnitStateHealthy,
+               "message":    "Healthy From Fake Isolated Units 1 Config",
+           },
+       },
+   })
+   require.NoError(t, err)
+   cfgMgr.Config(ctx, cfg)
+
+   err = <-resultChan
+   require.NoError(t, err)
+   cancel()
+
+   err = <-coordCh
+   require.NoError(t, err)
+}
+
func TestCollectManagerErrorsTimeout(t *testing.T) {
    handlerChan, _, _, _, _ := setupManagerShutdownChannels(time.Millisecond)
    // Don't send anything to the shutdown channels, causing a timeout
@@ -757,6 +858,7 @@ func TestCoordinator_UpgradeDetails(t *testing.T) {
type createCoordinatorOpts struct {
    managed        bool
    upgradeManager UpgradeManager
+   compInputSpec  component.InputSpec
}

type CoordinatorOpt func(o *createCoordinatorOpts)
@@ -773,13 +875,21 @@ func WithUpgradeManager(upgradeManager UpgradeManager) CoordinatorOpt {
    }
}

+func WithComponentInputSpec(spec component.InputSpec) CoordinatorOpt {
+   return func(o *createCoordinatorOpts) {
+       o.compInputSpec = spec
+   }
+}
+
// createCoordinator creates a coordinator that using a fake config manager and a fake vars manager.
//
// The runtime specifications is set up to use both the fake component and fake shipper.
func createCoordinator(t *testing.T, ctx context.Context, opts ...CoordinatorOpt) (*Coordinator, *fakeConfigManager, *fakeVarsManager) {
    t.Helper()

-   o := &createCoordinatorOpts{}
+   o := &createCoordinatorOpts{
+       compInputSpec: fakeInputSpec,
+   }
    for _, opt := range opts {
        opt(o)
    }
@@ -793,7 +903,7 @@ func createCoordinator(t *testing.T, ctx context.Context, opts ...CoordinatorOpt
        InputType:  "fake",
        BinaryName: "",
        BinaryPath: testBinary(t, "component"),
-       Spec:       fakeInputSpec,
+       Spec:       o.compInputSpec,
    }
    shipperSpec := component.ShipperRuntimeSpec{
        ShipperType: "fake-shipper",

internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go

+20 −7
@@ -10,10 +10,12 @@ import (
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+   k8sclient "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/leaderelection"
    "k8s.io/client-go/tools/leaderelection/resourcelock"

    "github.com/elastic/elastic-agent-autodiscover/kubernetes"
+
    "github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
    "github.com/elastic/elastic-agent/internal/pkg/agent/errors"
    "github.com/elastic/elastic-agent/internal/pkg/composable"
@@ -22,6 +24,8 @@ import (
    "github.com/elastic/elastic-agent/pkg/core/logger"
)

+const leaderElectorPrefix = "elastic-agent-leader-"
+
func init() {
    composable.Providers.MustAddContextProvider("kubernetes_leaderelection", ContextProviderBuilder)
}
@@ -45,11 +49,15 @@ func ContextProviderBuilder(logger *logger.Logger, c *config.Config, managed boo
    return &contextProvider{logger, &cfg, nil}, nil
}

+// This is needed to overwrite the Kubernetes client for the tests
+var getK8sClientFunc = func(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) {
+   return kubernetes.GetKubernetesClient(kubeconfig, opt)
+}
+
// Run runs the leaderelection provider.
func (p *contextProvider) Run(ctx context.Context, comm corecomp.ContextProviderComm) error {
-   client, err := kubernetes.GetKubernetesClient(p.config.KubeConfig, p.config.KubeClientOptions)
+   client, err := getK8sClientFunc(p.config.KubeConfig, p.config.KubeClientOptions)
    if err != nil {
-       // info only; return nil (do nothing)
        p.logger.Debugf("Kubernetes leaderelection provider skipped, unable to connect: %s", err)
        return nil
    }
@@ -61,9 +69,9 @@ func (p *contextProvider) Run(ctx context.Context, comm corecomp.ContextProvider
    var id string
    podName, found := os.LookupEnv("POD_NAME")
    if found {
-       id = "elastic-agent-leader-" + podName
+       id = leaderElectorPrefix + podName
    } else {
-       id = "elastic-agent-leader-" + agentInfo.AgentID()
+       id = leaderElectorPrefix + agentInfo.AgentID()
    }

    ns, err := kubernetes.InClusterNamespace()
@@ -104,9 +112,14 @@ func (p *contextProvider) Run(ctx context.Context, comm corecomp.ContextProvider
        p.logger.Errorf("error while creating Leader Elector: %v", err)
    }
    p.logger.Debugf("Starting Leader Elector")
-   le.Run(comm)
-   p.logger.Debugf("Stopped Leader Elector")
-   return comm.Err()
+
+   for {
+       le.Run(ctx)
+       if ctx.Err() != nil {
+           p.logger.Debugf("Stopped Leader Elector")
+           return comm.Err()
+       }
+   }
}

func (p *contextProvider) startLeading(comm corecomp.ContextProviderComm) {
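The getK8sClientFunc hook introduced above exists so tests can swap in a fake Kubernetes client instead of dialing a real cluster. A minimal sketch of how a test in this package might use it, assuming client-go's standard fake clientset; this helper is not part of the commit shown here:

package kubernetesleaderelection

import (
    "testing"

    "github.com/elastic/elastic-agent-autodiscover/kubernetes"
    k8sclient "k8s.io/client-go/kubernetes"
    k8sfake "k8s.io/client-go/kubernetes/fake"
)

// overrideK8sClientForTest is a hypothetical helper: it swaps the package-level
// hook so Run talks to an in-memory clientset, then restores the original in
// t.Cleanup so other tests are unaffected.
func overrideK8sClientForTest(t *testing.T) {
    t.Helper()
    original := getK8sClientFunc
    getK8sClientFunc = func(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) {
        return k8sfake.NewSimpleClientset(), nil
    }
    t.Cleanup(func() { getK8sClientFunc = original })
}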
