@@ -5,8 +5,11 @@ import (
	"fmt"
	"log"
	"os"
+	"os/signal"
	"strconv"
	"strings"
+	"sync"
+	"syscall"
	"time"

	node_common "github.com/certusone/wormhole/node/pkg/common"
@@ -288,6 +291,7 @@ func main() {
	if err != nil {
		logger.Fatal("Failed to create bigtable db", zap.Error(err))
	}
+
	promErrC := make(chan error)
	// Start Prometheus scraper
	initPromScraper(promRemoteURL, logger, promErrC)
@@ -307,15 +311,35 @@ func main() {
	batchSize := 100
	observationBatch := make([]*types.Observation, 0, batchSize)

+	sigChan := make(chan os.Signal, 1)
+	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
+
+	// to make sure that we wait til observation cleanup is done
+	var wg sync.WaitGroup
+
+	// rootCtx might not cancel if shutdown abruptly
+	go func() {
+		<-sigChan
+		logger.Info("Received signal, initiating shutdown")
+		rootCtxCancel()
+	}()
+
+	wg.Add(1)
	go func() {
+		defer wg.Done()
		ticker := time.NewTicker(5 * time.Second)
+		defer ticker.Stop()
+
		for {
			select {
			case <-rootCtx.Done():
+				if len(observationBatch) > 0 {
+					historical_uptime.ProcessObservationBatch(*db, logger, observationBatch)
+				}
+				logger.Info("Observation cleanup completed.")
				return
-			case o := <-obsvC:
+			case o := <-obsvC: // TODO: Rip out this code once we cut over to batching.
				obs := historical_uptime.CreateNewObservation(o.Msg.MessageId, o.Msg.Addr, o.Timestamp, o.Msg.Addr)
-
				observationBatch = append(observationBatch, obs)

				// if it reaches batchSize then process this batch
@@ -324,13 +348,16 @@ func main() {
					observationBatch = observationBatch[:0] // Clear the batch
				}
			case batch := <-batchObsvC:
-				// process immediately since batches are in group
-				batchObservations := make([]*types.Observation, 0, len(batch.Msg.Observations))
				for _, signedObs := range batch.Msg.Observations {
					obs := historical_uptime.CreateNewObservation(signedObs.MessageId, signedObs.Signature, batch.Timestamp, signedObs.TxHash)
-					batchObservations = append(batchObservations, obs)
+					observationBatch = append(observationBatch, obs)
+
+					// if it reaches batchSize then process this batch
+					if len(observationBatch) >= batchSize {
+						historical_uptime.ProcessObservationBatch(*db, logger, observationBatch)
+						observationBatch = observationBatch[:0] // Clear the batch
+					}
				}
-				historical_uptime.ProcessObservationBatch(*db, logger, batchObservations)

			case <-ticker.C:
				// for every interval, process the batch
@@ -421,8 +448,12 @@ func main() {
		supervisor.WithPropagatePanic)

	<-rootCtx.Done()
-	logger.Info("root context cancelled, exiting...")
-	// TODO: wait for things to shut down gracefully
+	logger.Info("Root context cancelled, starting cleanup...")
+
+	// Wait for all goroutines to complete their cleanup
+	wg.Wait()
+
+	logger.Info("All cleanup completed. Exiting...")
}

func monitorChannelCapacity[T any](ctx context.Context, logger *zap.Logger, channelName string, ch <-chan T) {
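
For reference, below is a minimal, self-contained sketch of the shutdown-and-flush pattern this diff introduces: a signal handler cancels the root context, a ticker-driven goroutine accumulates items into a batch, and a WaitGroup ensures the final flush runs before the process exits. The `items` channel, `processBatch` helper, and the 100-item / 5-second thresholds are illustrative stand-ins, not the repository's actual types or helpers.

```go
package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"sync"
	"syscall"
	"time"
)

func main() {
	rootCtx, rootCtxCancel := context.WithCancel(context.Background())

	// Translate SIGINT/SIGTERM into a root-context cancellation.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-sigChan
		log.Println("received signal, initiating shutdown")
		rootCtxCancel()
	}()

	items := make(chan int, 64) // stands in for obsvC / batchObsvC
	const batchSize = 100
	batch := make([]int, 0, batchSize)

	// processBatch is a placeholder for ProcessObservationBatch.
	processBatch := func(b []int) {
		log.Printf("processing %d items", len(b))
	}

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-rootCtx.Done():
				// Flush whatever is still buffered so nothing is dropped on shutdown.
				if len(batch) > 0 {
					processBatch(batch)
				}
				return
			case it := <-items:
				batch = append(batch, it)
				if len(batch) >= batchSize {
					processBatch(batch)
					batch = batch[:0]
				}
			case <-ticker.C:
				if len(batch) > 0 {
					processBatch(batch)
					batch = batch[:0]
				}
			}
		}
	}()

	<-rootCtx.Done()
	wg.Wait() // block until the final flush has run
	log.Println("all cleanup completed, exiting")
}
```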