@@ -9,7 +9,6 @@ package integration
 import (
 	"context"
 	"encoding/json"
-	"fmt"
 	"io"
 	"net"
 	"net/http"
@@ -30,9 +29,7 @@ import (
 	"github.com/elastic/elastic-agent-libs/api/npipe"
 	"github.com/elastic/elastic-agent-libs/kibana"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
-	"github.com/elastic/elastic-agent/pkg/control/v2/client"
 	"github.com/elastic/elastic-agent/pkg/control/v2/cproto"
-	"github.com/elastic/elastic-agent/pkg/core/process"
 	atesting "github.com/elastic/elastic-agent/pkg/testing"
 	"github.com/elastic/elastic-agent/pkg/testing/define"
 	"github.com/elastic/elastic-agent/pkg/testing/tools"
@@ -163,10 +160,7 @@ func (runner *ExtendedRunner) TestHandleLeak() {
 		case <-timer.C:
 			done = true
 		case <-ticker.C:
-			// https://github.com/elastic/elastic-agent/issues/5300
-			// Ideally we would require healthy but we currently report as DEGRADED due to unexpected permissions errors
-			// accessing some process metrics. Ensure the leak tests still run as long while this is the case.
-			err := runner.IsHealthyOrDegraded(ctx)
+			err := runner.agentFixture.IsHealthy(ctx)
 			require.NoError(runner.T(), err)
 			// iterate through our watchers, update them
 			for _, mon := range runner.resourceWatchers {
@@ -211,8 +205,6 @@ func (runner *ExtendedRunner) TestHandleLeak() {
 
 // CheckHealthAtStartup ensures all the beats and agent are healthy and working before we continue
 func (runner *ExtendedRunner) CheckHealthAtStartup(ctx context.Context) {
-	runner.T().Helper()
-
 	// because we need to separately fetch the PIDs, wait until everything is healthy before we look for running beats
 	compDebugName := ""
 	require.Eventually(runner.T(), func() bool {
@@ -241,11 +233,7 @@ func (runner *ExtendedRunner) CheckHealthAtStartup(ctx context.Context) {
 				}
 			}
 			runner.T().Logf("component state: %s", comp.Message)
-
-			// https://github.com/elastic/elastic-agent/issues/5300
-			// Ideally we would require healthy but we currently report as DEGRADED due to unexpected permissions errors
-			// accessing some process metrics. Ensure the leak tests still run as long while this is the case.
-			if !isHealthyOrDegraded(comp.State) {
+			if comp.State != int(cproto.State_HEALTHY) {
 				compDebugName = comp.Name
 				allHealthy = false
 			}
@@ -254,26 +242,6 @@ func (runner *ExtendedRunner) CheckHealthAtStartup(ctx context.Context) {
 	}, runner.healthCheckTime, runner.healthCheckRefreshTime, "install never became healthy: components did not return a healthy state: %s", compDebugName)
 }
 
-func (runner *ExtendedRunner) IsHealthyOrDegraded(ctx context.Context, opts ...process.CmdOption) error {
-	runner.T().Helper()
-
-	status, err := runner.agentFixture.ExecStatus(ctx, opts...)
-	if err != nil {
-		return fmt.Errorf("agent status returned an error: %w", err)
-	}
-
-	if !isHealthyOrDegraded(status.State) {
-		return fmt.Errorf("agent isn't healthy or degraded, current status: %s",
-			client.State(status.State))
-	}
-
-	return nil
-}
-
-func isHealthyOrDegraded(state int) bool {
-	return state == int(cproto.State_HEALTHY) || state == int(cproto.State_DEGRADED)
-}
-
 /*
 =============================================================================
 Watchers for checking resource usage