Skip to content

Commit eb572da

Browse files
authored
Revert "Relax leak test condition from Healthy to not Failed. (#5301)" (#5356)
This reverts commit b832c15.
1 parent 5dc0ddb commit eb572da

File tree

1 file changed

+2
-34
lines changed

1 file changed

+2
-34
lines changed

testing/integration/agent_long_running_leak_test.go

+2-34
Original file line number | Diff line number | Diff line change
@@ -9,7 +9,6 @@ package integration
 import (
 	"context"
 	"encoding/json"
-	"fmt"
 	"io"
 	"net"
 	"net/http"
@@ -30,9 +29,7 @@ import (
 	"github.com/elastic/elastic-agent-libs/api/npipe"
 	"github.com/elastic/elastic-agent-libs/kibana"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
-	"github.com/elastic/elastic-agent/pkg/control/v2/client"
 	"github.com/elastic/elastic-agent/pkg/control/v2/cproto"
-	"github.com/elastic/elastic-agent/pkg/core/process"
 	atesting "github.com/elastic/elastic-agent/pkg/testing"
 	"github.com/elastic/elastic-agent/pkg/testing/define"
 	"github.com/elastic/elastic-agent/pkg/testing/tools"
@@ -163,10 +160,7 @@ func (runner *ExtendedRunner) TestHandleLeak() {
 		case <-timer.C:
 			done = true
 		case <-ticker.C:
-			// https://github.com/elastic/elastic-agent/issues/5300
-			// Ideally we would require healthy but we currently report as DEGRADED due to unexpected permissions errors
-			// accessing some process metrics. Ensure the leak tests still run as long while this is the case.
-			err := runner.IsHealthyOrDegraded(ctx)
+			err := runner.agentFixture.IsHealthy(ctx)
 			require.NoError(runner.T(), err)
 			// iterate through our watchers, update them
 			for _, mon := range runner.resourceWatchers {
@@ -211,8 +205,6 @@ func (runner *ExtendedRunner) TestHandleLeak() {
 
 // CheckHealthAtStartup ensures all the beats and agent are healthy and working before we continue
 func (runner *ExtendedRunner) CheckHealthAtStartup(ctx context.Context) {
-	runner.T().Helper()
-
 	// because we need to separately fetch the PIDs, wait until everything is healthy before we look for running beats
 	compDebugName := ""
 	require.Eventually(runner.T(), func() bool {
@@ -241,11 +233,7 @@ func (runner *ExtendedRunner) CheckHealthAtStartup(ctx context.Context) {
 				}
 			}
 			runner.T().Logf("component state: %s", comp.Message)
-
-			// https://github.com/elastic/elastic-agent/issues/5300
-			// Ideally we would require healthy but we currently report as DEGRADED due to unexpected permissions errors
-			// accessing some process metrics. Ensure the leak tests still run as long while this is the case.
-			if !isHealthyOrDegraded(comp.State) {
+			if comp.State != int(cproto.State_HEALTHY) {
 				compDebugName = comp.Name
 				allHealthy = false
 			}
@@ -254,26 +242,6 @@ func (runner *ExtendedRunner) CheckHealthAtStartup(ctx context.Context) {
 	}, runner.healthCheckTime, runner.healthCheckRefreshTime, "install never became healthy: components did not return a healthy state: %s", compDebugName)
 }
 
-func (runner *ExtendedRunner) IsHealthyOrDegraded(ctx context.Context, opts ...process.CmdOption) error {
-	runner.T().Helper()
-
-	status, err := runner.agentFixture.ExecStatus(ctx, opts...)
-	if err != nil {
-		return fmt.Errorf("agent status returned an error: %w", err)
-	}
-
-	if !isHealthyOrDegraded(status.State) {
-		return fmt.Errorf("agent isn't healthy or degraded, current status: %s",
-			client.State(status.State))
-	}
-
-	return nil
-}
-
-func isHealthyOrDegraded(state int) bool {
-	return state == int(cproto.State_HEALTHY) || state == int(cproto.State_DEGRADED)
-}
-
 /*
 =============================================================================
 Watchers for checking resource usage

0 commit comments

Comments
 (0)