Skip to content

Commit d090941

Browse files
committed
hum: add message index bulk mutation that was hum
hum: update MAX_CHAIN_ID

Signed-off-by: bingyuyap <bingyu.yap.21@gmail.com>
1 parent 81bd43d commit d090941

File tree

2 files changed

+28
-5
lines changed

2 files changed

+28
-5
lines changed

fly/cmd/historical_uptime/main.go

+5-5
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,9 @@ var (
109109
)
110110
)
111111

112-
const PYTHNET_CHAIN_ID = int(vaa.ChainIDPythNet)
112+
// [0, MAX_CHAIN_ID] is the range of chain id that we will track for the uptime monitor
113+
// in this case it's snaxchain since it's the largest mainnet chain id
114+
const MAX_CHAIN_ID = vaa.ChainIDSnaxchain
113115

114116
// guardianChainHeights indexes current chain height by chain id and guardian name
115117
var guardianChainHeights = make(common.GuardianChainHeights)
@@ -187,10 +189,7 @@ func initPromScraper(promRemoteURL string, logger *zap.Logger, errC chan error)
187189
case <-t.C:
188190
recordGuardianHeightDifferences()
189191

190-
for i := 1; i < 36; i++ {
191-
if i == PYTHNET_CHAIN_ID {
192-
continue
193-
}
192+
for i := 1; i <= int(MAX_CHAIN_ID); i++ {
194193
chainName := vaa.ChainID(i).String()
195194
if strings.HasPrefix(chainName, "unknown chain ID:") {
196195
continue
@@ -230,6 +229,7 @@ func initObservationScraper(db *bigtable.BigtableDB, logger *zap.Logger, errC ch
230229
messageObservations := make(map[types.MessageID][]*types.Observation)
231230

232231
messages, err := db.GetUnprocessedMessagesBeforeCutOffTime(ctx, time.Now().Add(-common.ExpiryDuration))
232+
logger.Info("Number of unprocessed messages", zap.Int("count", len(messages)))
233233
if err != nil {
234234
logger.Error("QueryMessagesByIndex error", zap.Error(err))
235235
continue

fly/pkg/bigtable/operations.go

+23
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,10 @@ func (db *BigtableDB) FlushCache(ctx context.Context, logger *zap.Logger, cache
4444
observationMuts := make([]*bigtable.Mutation, 0)
4545
observationRows := make([]string, 0)
4646

47+
// Prepare bulk mutations for message indexes
48+
indexMuts := make([]*bigtable.Mutation, 0, len(cache.Messages))
49+
indexRows := make([]string, 0, len(cache.Messages))
50+
4751
for messageID, message := range cache.Messages {
4852
// Prepare message mutation
4953
messageMut, err := createMessageMutation(message)
@@ -64,20 +68,39 @@ func (db *BigtableDB) FlushCache(ctx context.Context, logger *zap.Logger, cache
6468
observationMuts = append(observationMuts, observationMut)
6569
observationRows = append(observationRows, observationRow)
6670
}
71+
72+
// Prepare message index mutation
73+
indexMut := bigtable.NewMutation()
74+
indexMut.Set("indexData", "placeholder", bigtable.Now(), nil)
75+
indexMuts = append(indexMuts, indexMut)
76+
indexRows = append(indexRows, string(messageID))
6777
}
6878

79+
// Apply bulk mutations for messages
6980
err := db.ApplyBulk(ctx, MessageTableName, messageRows, messageMuts)
7081
if err != nil {
7182
logger.Error("Failed to apply bulk mutations for messages", zap.Error(err))
7283
return err
7384
}
7485

86+
// Apply bulk mutations for observations
7587
err = db.ApplyBulk(ctx, ObservationTableName, observationRows, observationMuts)
7688
if err != nil {
7789
logger.Error("Failed to apply bulk mutations for observations", zap.Error(err))
7890
return err
7991
}
8092

93+
// Apply bulk mutations for message indexes
94+
err = db.ApplyBulk(ctx, MessageIndexTableName, indexRows, indexMuts)
95+
if err != nil {
96+
logger.Error("Failed to apply bulk mutations for message indexes", zap.Error(err))
97+
return err
98+
}
99+
100+
logger.Info("Successfully applied bulk mutations for messages", zap.Int("count", len(messageMuts)))
101+
logger.Info("Successfully applied bulk mutations for observations", zap.Int("count", len(observationMuts)))
102+
logger.Info("Successfully applied bulk mutations for message indexes", zap.Int("count", len(indexMuts)))
103+
81104
return nil
82105
}
83106

0 commit comments

Comments (0)