Grid Search on hyperparameters of NeuralProphet

Asked by: Dino Pbn · Asked: 10/18/2023 · Last edited by: Dino Pbn · Updated: 10/18/2023 · Views: 42

Q:

I am running a grid search over NeuralProphet hyperparameters with the following code:

import itertools
import numpy as np
from neuralprophet import NeuralProphet

param_grid = {
    'growth': ['off', 'linear', 'discontinuous'],
    'n_forecasts': range(1, 4, 1),
    'learning_rate': [None, 0.01, 0.1],
    'n_lags': [4],
    'n_changepoints': range(10, 26, 5),
    'changepoints_range': [0.5, 0.6, 0.7, 0.8, 0.9]
}
all_params = [dict(zip(param_grid.keys(), v)) for v in itertools.product(*param_grid.values())]

mapes = []

for params in all_params:
    # Fitting the model with a combination of parameters,
    # holding out the last periodo_previsao points for validation:
    m = NeuralProphet(**params)
    m.fit(treino_uni.drop(treino_uni.index[-periodo_previsao:]), freq="MS")

    # Making predictions over the holdout (validation) window
    previsoes = m.predict(treino_uni[-periodo_previsao:].reset_index(drop=True))

    # MAPE on the holdout window, using only the 1-step-ahead forecast (yhat1)
    y_true = treino_uni[-periodo_previsao:].reset_index(drop=True).y.values
    y_pred = previsoes.yhat1.values
    mape = (np.mean(np.true_divide(np.abs(y_true - y_pred), y_true))) * 100
    mapes.append(mape)

Then, at around the 60th iteration of the loop, I get the following error:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
Cell In[45], line 6
      3 for params in all_params:
      4     # Fitting the model with a combination of parameters:
      5     m = NeuralProphet(**params)
----> 6     m.fit(treino_uni.drop(treino_uni.index[-periodo_previsao:]), freq="MS")
      8     # Making predictions with the validation data
      9     previsoes = m.predict(treino_uni[-periodo_previsao:].reset_index(drop=True))

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\neuralprophet\forecaster.py:946, in NeuralProphet.fit(self, df, freq, validation_df, epochs, batch_size, learning_rate, early_stopping, minimal, metrics, progress, checkpointing, continue_training, num_workers)
    944 # Training
    945 if validation_df is None:
--> 946     metrics_df = self._train(
    947         df,
    948         progress_bar_enabled=bool(progress),
    949         metrics_enabled=bool(self.metrics),
    950         checkpointing_enabled=checkpointing,
    951         continue_training=continue_training,
    952         num_workers=num_workers,
    953     )
    954 else:
    955     df_val, _, _, _ = df_utils.prep_or_copy_df(validation_df)

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\neuralprophet\forecaster.py:2632, in NeuralProphet._train(self, df, df_val, progress_bar_enabled, metrics_enabled, checkpointing_enabled, continue_training, num_workers)
   2630 self.config_train.set_lr_finder_args(dataset_size=dataset_size, num_batches=len(train_loader))
   2631 # Find suitable learning rate
-> 2632 lr_finder = self.trainer.tuner.lr_find(
   2633     self.model,
   2634     train_dataloaders=train_loader,
   2635     **self.config_train.lr_finder_args,
   2636 )
   2637 assert lr_finder is not None
   2638 # Estimate the optimat learning rate from the loss curve

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\tuner\tuning.py:267, in Tuner.lr_find(self, model, train_dataloaders, val_dataloaders, dataloaders, datamodule, method, min_lr, max_lr, num_training, mode, early_stop_threshold, update_attr)
    264 lr_finder_callback._early_exit = True
    265 self.trainer.callbacks = [lr_finder_callback] + self.trainer.callbacks
--> 267 self.trainer.fit(model, train_dataloaders, val_dataloaders, datamodule)
    269 self.trainer.callbacks = [cb for cb in self.trainer.callbacks if cb is not lr_finder_callback]
    271 self.trainer.auto_lr_find = False

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\trainer\trainer.py:608, in Trainer.fit(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
    606 model = self._maybe_unwrap_optimized(model)
    607 self.strategy._lightning_module = model
--> 608 call._call_and_handle_interrupt(
    609     self, self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
    610 )

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\trainer\call.py:38, in _call_and_handle_interrupt(trainer, trainer_fn, *args, **kwargs)
     36         return trainer.strategy.launcher.launch(trainer_fn, *args, trainer=trainer, **kwargs)
     37     else:
---> 38         return trainer_fn(*args, **kwargs)
     40 except _TunerExitException:
     41     trainer._call_teardown_hook()

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\trainer\trainer.py:650, in Trainer._fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
    643 ckpt_path = ckpt_path or self.resume_from_checkpoint
    644 self._ckpt_path = self._checkpoint_connector._set_ckpt_path(
    645     self.state.fn,
    646     ckpt_path,  # type: ignore[arg-type]
    647     model_provided=True,
    648     model_connected=self.lightning_module is not None,
    649 )
--> 650 self._run(model, ckpt_path=self.ckpt_path)
    652 assert self.state.stopped
    653 self.training = False

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\trainer\trainer.py:1097, in Trainer._run(self, model, ckpt_path)
   1095 # hook
   1096 if self.state.fn == TrainerFn.FITTING:
-> 1097     self._call_callback_hooks("on_fit_start")
   1098     self._call_lightning_module_hook("on_fit_start")
   1100 self._log_hyperparams()

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\trainer\trainer.py:1394, in Trainer._call_callback_hooks(self, hook_name, *args, **kwargs)
   1392     if callable(fn):
   1393         with self.profiler.profile(f"[Callback]{callback.state_key}.{hook_name}"):
-> 1394             fn(self, self.lightning_module, *args, **kwargs)
   1396 if pl_module:
   1397     # restore current_fx when nested context
   1398     pl_module._current_fx_name = prev_fx_name

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\callbacks\lr_finder.py:122, in LearningRateFinder.on_fit_start(self, trainer, pl_module)
    121 def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
--> 122     self.lr_find(trainer, pl_module)

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\callbacks\lr_finder.py:107, in LearningRateFinder.lr_find(self, trainer, pl_module)
    105 def lr_find(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
    106     with isolate_rng():
--> 107         self.optimal_lr = lr_find(
    108             trainer,
    109             pl_module,
    110             min_lr=self._min_lr,
    111             max_lr=self._max_lr,
    112             num_training=self._num_training_steps,
    113             mode=self._mode,
    114             early_stop_threshold=self._early_stop_threshold,
    115             update_attr=self._update_attr,
    116         )
    118     if self._early_exit:
    119         raise _TunerExitException()

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\tuner\lr_finder.py:247, in lr_find(trainer, model, min_lr, max_lr, num_training, mode, early_stop_threshold, update_attr)
    244 lr_finder._exchange_scheduler(trainer)
    246 # Fit, lr & loss logged in callback
--> 247 _try_loop_run(trainer, params)
    249 # Prompt if we stopped early
    250 if trainer.global_step != num_training + start_steps:

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\tuner\lr_finder.py:481, in _try_loop_run(trainer, params)
    479 loop.load_state_dict(deepcopy(params["loop_state_dict"]))
    480 loop.restarting = False
--> 481 loop.run()

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\loops\loop.py:199, in Loop.run(self, *args, **kwargs)
    197 try:
    198     self.on_advance_start(*args, **kwargs)
--> 199     self.advance(*args, **kwargs)
    200     self.on_advance_end()
    201     self._restarting = False

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\loops\fit_loop.py:267, in FitLoop.advance(self)
    265 self._data_fetcher.setup(dataloader, batch_to_device=batch_to_device)
    266 with self.trainer.profiler.profile("run_training_epoch"):
--> 267     self._outputs = self.epoch_loop.run(self._data_fetcher)

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\loops\loop.py:199, in Loop.run(self, *args, **kwargs)
    197 try:
    198     self.on_advance_start(*args, **kwargs)
--> 199     self.advance(*args, **kwargs)
    200     self.on_advance_end()
    201     self._restarting = False

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\loops\epoch\training_epoch_loop.py:213, in TrainingEpochLoop.advance(self, data_fetcher)
    210     self.batch_progress.increment_started()
    212     with self.trainer.profiler.profile("run_training_batch"):
--> 213         batch_output = self.batch_loop.run(kwargs)
    215 self.batch_progress.increment_processed()
    217 # update non-plateau LR schedulers
    218 # update epoch-interval ones only when we are at the end of training epoch

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\loops\loop.py:199, in Loop.run(self, *args, **kwargs)
    197 try:
    198     self.on_advance_start(*args, **kwargs)
--> 199     self.advance(*args, **kwargs)
    200     self.on_advance_end()
    201     self._restarting = False

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\loops\batch\training_batch_loop.py:90, in TrainingBatchLoop.advance(self, kwargs)
     88     outputs = self.optimizer_loop.run(optimizers, kwargs)
     89 else:
---> 90     outputs = self.manual_loop.run(kwargs)
     91 if outputs:
     92     # automatic: can be empty if all optimizers skip their batches
     93     # manual: #9052 added support for raising `StopIteration` in the `training_step`. If that happens,
     94     # then `advance` doesn't finish and an empty dict is returned
     95     self._outputs.append(outputs)

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\loops\loop.py:199, in Loop.run(self, *args, **kwargs)
    197 try:
    198     self.on_advance_start(*args, **kwargs)
--> 199     self.advance(*args, **kwargs)
    200     self.on_advance_end()
    201     self._restarting = False

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\loops\optimization\manual_loop.py:110, in ManualOptimization.advance(self, kwargs)
    107 kwargs = self._build_kwargs(kwargs, self._hiddens)
    109 # manually capture logged metrics
--> 110 training_step_output = self.trainer._call_strategy_hook("training_step", *kwargs.values())
    111 del kwargs  # release the batch from memory
    112 self.trainer.strategy.post_training_step()

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\trainer\trainer.py:1494, in Trainer._call_strategy_hook(self, hook_name, *args, **kwargs)
   1491     return
   1493 with self.profiler.profile(f"[Strategy]{self.strategy.__class__.__name__}.{hook_name}"):
-> 1494     output = fn(*args, **kwargs)
   1496 # restore current_fx when nested context
   1497 pl_module._current_fx_name = prev_fx_name

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\pytorch_lightning\strategies\strategy.py:378, in Strategy.training_step(self, *args, **kwargs)
    376 with self.precision_plugin.train_step_context():
    377     assert isinstance(self.model, TrainingStep)
--> 378     return self.model.training_step(*args, **kwargs)

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\neuralprophet\time_net.py:780, in TimeNet.training_step(self, batch, batch_idx)
    778     meta_name_tensor = None
    779 # Run forward calculation
--> 780 predicted = self.forward(inputs, meta_name_tensor)
    781 # Store predictions in self for later network visualization
    782 self.train_epoch_prediction = predicted

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\neuralprophet\time_net.py:640, in TimeNet.forward(self, inputs, meta, compute_components)
    637 if "regressors_lagged" in _inputs:
    638     _inputs["regressors"] = _inputs["regressors_lagged"]
--> 640 non_stationary_components = self._forward(_inputs, meta, non_stationary_only=True)
    641 corrected_inputs = inputs.copy()
    642 corrected_inputs["lags"] = (
    643     corrected_inputs["lags"] - non_stationary_components[:, :, 0]
    644 )  # only median quantile

File D:\Python_Venvs\mestrado_neuralprophet\vienv\lib\site-packages\neuralprophet\time_net.py:596, in TimeNet._forward(self, inputs, meta, non_stationary_only)
    590         multiplicative_components += self.future_regressors(
    591             inputs["regressors"]["multiplicative"], "multiplicative"
    592         )
    594 trend = self.trend(t=inputs["time"], meta=meta)
    595 out = (
--> 596     trend
    597     + additive_components
    598     + trend.detach() * multiplicative_components
    599     # 0 is the median quantile index
    600     # all multiplicative components are multiplied by the median quantile trend (uncomment line below to apply)
    601     # trend + additive_components + trend.detach()[:, :, 0].unsqueeze(dim=2) * multiplicative_components
    602 )  # dimensions - [batch, n_forecasts, no_quantiles]
    603 return out

RuntimeError: The size of tensor a (2) must match the size of tensor b (4) at non-singleton dimension 1

I've checked everything and can't find any error in the code. I also searched for similar errors, but only found ones related to image processing, none involving time series.
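Since the traceback does not show which parameter combination triggers the failure, one way to narrow it down is the minimal sketch below (an illustration, not part of the original loop; it reuses the same treino_uni, periodo_previsao, and all_params as above): wrap each iteration in try/except, record the offending parameters, and keep mapes aligned with all_params. Because itertools.product enumerates combinations in a fixed order, the failing ~60th combination can also be replayed in isolation via all_params[59].

# Sketch: same loop as above, but log failing combinations instead of crashing
failed = []   # (params, error message) for combinations that raise
mapes = []

for params in all_params:
    m = NeuralProphet(**params)
    try:
        m.fit(treino_uni.drop(treino_uni.index[-periodo_previsao:]), freq="MS")
        previsoes = m.predict(treino_uni[-periodo_previsao:].reset_index(drop=True))
        y_true = treino_uni[-periodo_previsao:].reset_index(drop=True).y.values
        y_pred = previsoes.yhat1.values
        mapes.append(np.mean(np.abs(y_true - y_pred) / y_true) * 100)
    except RuntimeError as e:            # the error type from the traceback
        failed.append((params, str(e)))
        mapes.append(np.nan)             # keep mapes aligned with all_params

# Inspect which combinations crashed, e.g. the one at the ~60th iteration:
for params, msg in failed:
    print(params, "->", msg)

Seeing the failing params next to the RuntimeError message would make it easier to tell whether a particular growth or n_forecasts value is involved in the tensor-size mismatch.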

python · performance · time-series · hyperparameters

A: No answers yet