@@ -128,13 +128,22 @@ def __init__(self, optimizer, mode=PlateauModes.accuracy, **kwargs):
         self.lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
             optimizer, mode=mode.value, **kwargs)  # Max because accuracy :)
         self.mode = mode
+        self.steps = 0
 
     def step(self, score):
+        scheduler_steps = self.lr_scheduler.num_bad_epochs
         self.lr_scheduler.step(getattr(score, self.mode.name))
-
-    @property
-    def steps(self):
-        return self.lr_scheduler.num_bad_epochs
+        # No change in number of bad epochs =
+        # we are progressing
+        if scheduler_steps == self.lr_scheduler.num_bad_epochs:
+            self.steps = 0
+        # Otherwise, we are not
+        else:
+            self.steps += 1
+
+        if self.steps >= self.patience * 2:
+            # If we haven't progressed even by lowering twice
+            raise EarlyStopException("No progress for %s, stopping now..." % self.steps)
 
     @property
     def patience(self):
@@ -230,10 +239,6 @@ def run(
 
                 # Run a check on saving the current model
                 best_valid_loss = self._temp_save(fid, best_valid_loss, dev_score)
-
-                # Advance Learning Rate if needed
-                lr_scheduler.step(dev_score)
-
                 print(f'\tTrain Loss: {train_score.loss:.3f} | Perplexity: {train_score.perplexity:7.3f} | '
                       f' Acc.: {train_score.accuracy:.3f} | '
                       f' Lev.: {train_score.leven:.3f} | '
@@ -246,6 +251,9 @@ def run(
                 print(lr_scheduler)
                 print()
 
+                # Advance Learning Rate if needed
+                lr_scheduler.step(dev_score)
+
                 if lr_scheduler.steps >= lr_patience and lr_scheduler.lr < min_lr:
                     raise EarlyStopException()
 
@@ -260,6 +268,7 @@ def run(
                     break
             except EarlyStopException:
                 print("Reached plateau for too long, stopping.")
+                break
 
         best_valid_loss = self._temp_save(fid, best_valid_loss, dev_score)
 
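Taken together, these changes turn the scheduler wrapper into a plateau-based early-stopping mechanism: the number of "bad" epochs reported by `ReduceLROnPlateau` is checked before and after each `step()`, and training is aborted once no progress has been made for twice the patience. The standalone sketch below illustrates that behaviour; it is an approximation for illustration only, not the project's actual class, and the toy model, optimizer, `mode="max"` default and the `PlateauScheduler` name are assumptions made for the example.

```python
# Minimal sketch (assumed names, not the committed class): count steps without
# progress around ReduceLROnPlateau and stop after 2 * patience of them.
from torch import nn, optim


class EarlyStopException(Exception):
    pass


class PlateauScheduler:
    def __init__(self, optimizer, patience=3, **kwargs):
        # mode="max" because the monitored metric is an accuracy-like score
        self.lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, mode="max", patience=patience, **kwargs)
        self.steps = 0

    @property
    def patience(self):
        return self.lr_scheduler.patience

    def step(self, score):
        before = self.lr_scheduler.num_bad_epochs
        self.lr_scheduler.step(score)
        # Unchanged bad-epoch count = the score improved: reset the counter
        if before == self.lr_scheduler.num_bad_epochs:
            self.steps = 0
        else:
            self.steps += 1
        if self.steps >= self.patience * 2:
            raise EarlyStopException(
                "No progress for %s steps, stopping now..." % self.steps)


if __name__ == "__main__":
    model = nn.Linear(4, 2)
    optimizer = optim.SGD(model.parameters(), lr=0.1)
    scheduler = PlateauScheduler(optimizer, patience=2)
    try:
        for epoch in range(20):
            scheduler.step(0.5)  # constant dev accuracy: a plateau
    except EarlyStopException as exc:
        print(exc)  # raised once the plateau has lasted 2 * patience steps
```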