
Commit e3b3515

added deepVAR and Transformer notebook
1 parent: 7bfeb0f

17 files changed: +9766 −4620 lines

ConvTSMixer-hyperparameter_tuning.ipynb (+40 −29)
@@ -20,7 +20,7 @@
 "from gluonts.evaluation.backtest import make_evaluation_predictions\n",
 "from gluonts.evaluation import MultivariateEvaluator\n",
 "\n",
-"#from pts.modules import StudentTOutput\n",
+"# from pts.modules import StudentTOutput\n",
 "\n",
 "from ConvTSMixer import ConvTSMixerEstimator\n",
 "import random\n",
@@ -38,56 +38,63 @@
 },
 "outputs": [],
 "source": [
-"class ConvTSMixerObjective: \n",
-"    def __init__(self, dataset, train_grouper, test_grouper, metric_type=\"m_sum_mean_wQuantileLoss\"):\n",
+"class ConvTSMixerObjective:\n",
+"    def __init__(\n",
+"        self,\n",
+"        dataset,\n",
+"        train_grouper,\n",
+"        test_grouper,\n",
+"        metric_type=\"m_sum_mean_wQuantileLoss\",\n",
+"    ):\n",
 "        self.metric_type = metric_type\n",
 "        self.dataset = dataset\n",
 "        self.dataset_train = train_grouper(self.dataset.train)\n",
 "        self.dataset_test = test_grouper(self.dataset.test)\n",
-"        \n",
+"\n",
 "    def get_params(self, trial) -> dict:\n",
 "        return {\n",
-"            \"context_length\": trial.suggest_int(\"context_length\", dataset.metadata.prediction_length, dataset.metadata.prediction_length*10,4),\n",
-"            \"batch_size\": trial.suggest_int(\"batch_size\", 128, 256, 64),\n",
-"            \"depth\": trial.suggest_int(\"depth\", 2, 16,4),\n",
-"            \"dim\": trial.suggest_int(\"dim\", 16, 64, 16),\n",
-"            \"patch_size\": trial.suggest_int(\"dim\", 2, 16,4),\n",
-"            \"kernel_size\": trial.suggest_int(\"dim\", 9, 18, 3),\n",
+"            \"context_length\": trial.suggest_int(\n",
+"                \"context_length\",\n",
+"                self.dataset.metadata.prediction_length,\n",
+"                self.dataset.metadata.prediction_length * 10,\n",
+"                4,\n",
+"            ),\n",
+"            \"batch_size\": trial.suggest_int(\"batch_size\", 128, 256, 64),\n",
+"            \"depth\": trial.suggest_int(\"depth\", 2, 16, 4),\n",
+"            \"dim\": trial.suggest_int(\"dim\", 16, 64, 16),\n",
+"            \"patch_size\": trial.suggest_int(\"patch_size\", 2, 16, 4),\n",
+"            \"kernel_size\": trial.suggest_int(\"kernel_size\", 9, 18, 3),\n",
 "        }\n",
-"        \n",
+"\n",
 "    def __call__(self, trial):\n",
 "        params = self.get_params(trial)\n",
 "        estimator = ConvTSMixerEstimator(\n",
-"            #distr_output=StudentTOutput(dim=int(dataset.metadata.feat_static_cat[0].cardinality)),\n",
+"            # distr_output=StudentTOutput(dim=int(dataset.metadata.feat_static_cat[0].cardinality)),\n",
 "            input_size=int(self.dataset.metadata.feat_static_cat[0].cardinality),\n",
-"\n",
 "            prediction_length=self.dataset.metadata.prediction_length,\n",
-"            context_length=self.dataset.metadata.prediction_length*5,\n",
+"            context_length=self.dataset.metadata.prediction_length * 5,\n",
 "            freq=self.dataset.metadata.freq,\n",
 "            scaling=\"std\",\n",
-"\n",
 "            depth=params[\"depth\"],\n",
 "            patch_size=(params[\"patch_size\"], params[\"patch_size\"]),\n",
 "            kernel_size=params[\"kernel_size\"],\n",
 "            dim=params[\"dim\"],\n",
-"\n",
 "            batch_size=params[\"batch_size\"],\n",
 "            num_batches_per_epoch=100,\n",
-"            trainer_kwargs=dict(accelerator=\"cuda\", max_epochs=30)\n",
+"            trainer_kwargs=dict(accelerator=\"cuda\", max_epochs=30),\n",
 "        )\n",
 "        predictor = estimator.train(\n",
-"            training_data=self.dataset_train,\n",
-"            num_workers=8,\n",
-"            shuffle_buffer_length=1024\n",
+"            training_data=self.dataset_train, num_workers=8, shuffle_buffer_length=1024\n",
+"        )\n",
+"\n",
+"        forecast_it, ts_it = make_evaluation_predictions(\n",
+"            dataset=self.dataset_test, predictor=predictor, num_samples=100\n",
 "        )\n",
-"        \n",
-"        forecast_it, ts_it = make_evaluation_predictions(dataset=self.dataset_test,\n",
-"                                                         predictor=predictor,\n",
-"                                                         num_samples=100)\n",
 "        forecasts = list(forecast_it)\n",
 "        tss = list(ts_it)\n",
-"        evaluator = MultivariateEvaluator(quantiles=(np.arange(20)/20.0)[1:],\n",
-"                                          target_agg_funcs={'sum': np.sum})\n",
+"        evaluator = MultivariateEvaluator(\n",
+"            quantiles=(np.arange(20) / 20.0)[1:], target_agg_funcs={\"sum\": np.sum}\n",
+"        )\n",
 "        agg_metrics, _ = evaluator(iter(tss), iter(forecasts))\n",
 "        return agg_metrics[self.metric_type]"
 ]
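The objective above returns the aggregated weighted quantile loss, so a lower value is better. A minimal sketch of driving it with Optuna, assuming a study that minimizes the metric and the dataset and groupers defined in the next cell (the study driver itself is not part of this hunk):

import optuna

# Hypothetical driver: minimize m_sum_mean_wQuantileLoss over a small budget.
study = optuna.create_study(direction="minimize")
study.optimize(
    ConvTSMixerObjective(dataset, train_grouper, test_grouper),
    n_trials=20,  # illustrative trial budget
)
print(study.best_trial.params)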
@@ -102,10 +109,14 @@
 "outputs": [],
 "source": [
 "dataset = get_dataset(\"solar_nips\", regenerate=False)\n",
-"train_grouper = MultivariateGrouper(max_target_dim=int(dataset.metadata.feat_static_cat[0].cardinality))\n",
+"train_grouper = MultivariateGrouper(\n",
+"    max_target_dim=int(dataset.metadata.feat_static_cat[0].cardinality)\n",
+")\n",
 "\n",
-"test_grouper = MultivariateGrouper(num_test_dates=int(len(dataset.test)/len(dataset.train)), \n",
-"                                   max_target_dim=int(dataset.metadata.feat_static_cat[0].cardinality))\n",
+"test_grouper = MultivariateGrouper(\n",
+"    num_test_dates=int(len(dataset.test) / len(dataset.train)),\n",
+"    max_target_dim=int(dataset.metadata.feat_static_cat[0].cardinality),\n",
+")\n",
 "dataset_train = train_grouper(dataset.train)\n",
 "dataset_test = test_grouper(dataset.test)"
 ]
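As a quick sanity check, the groupers stack the individual solar series into single multivariate entries. A minimal sketch, assuming the cell above has run and that the grouped target is a 2-D array with one row per series:

# Inspect the grouped training set; the first dimension should match the
# cardinality passed as max_target_dim above.
entry = next(iter(dataset_train))
print(entry["target"].shape)  # expected: (num_series, num_timesteps)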
