# @package _global_
# specify the default configuration here
# the order of defaults determines the order in which configs override each other
defaults:
- _self_
- dataset: conll2003.yaml
- datamodule: default.yaml
- taskmodule: transformer_token_classification.yaml
- model: transformer_token_classification.yaml
- callbacks: default.yaml
- logger: null # set logger here or use command line (e.g. `python train.py logger=tensorboard`)
- trainer: default.yaml
- paths: default.yaml
- extras: default.yaml
- hydra: default.yaml
# experiment configs allow for version control of specific hyperparameters
# e.g. the best hyperparameters for a given model and taskmodule
- experiment: null
# config for hyperparameter optimization
- hparams_search: null
# optional local config for machine/user specific settings
# it's optional since it doesn't need to exist and is excluded from version control
- optional local: default.yaml
# debugging config (enable through command line, e.g. `python train.py debug=default`)
- debug: null
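# a typical invocation composes these defaults and overrides individual groups or
# values on the command line; a sketch (the experiment name is illustrative, and
# `trainer.max_epochs` assumes the trainer config exposes Lightning's `max_epochs`):
#   python train.py experiment=my_experiment trainer.max_epochs=10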
# pipeline type, determines the output directory path
pipeline_type: "training"
# default name for the experiment, determines output directory path
# (you can overwrite this in experiment configs)
name: "default"
# tags to help you identify your experiments
# you can overwrite this in experiment configs
# overwrite from command line with `python train.py tags="[first_tag, second_tag]"`
# appending lists from command line is currently not supported :(
# https://github.com/facebookresearch/hydra/issues/1547
tags: ["dev"]
# set False to skip model training
train: True
# evaluate on test set, using best model weights achieved during training
# lightning chooses best weights based on the metric specified in checkpoint callback
test: False
# the metric to monitor, and its mode (minimize or maximize), for the checkpointing and early stopping callbacks
monitor_metric: "val/f1"
monitor_mode: "max"
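# these two values are typically consumed via interpolation elsewhere in the config;
# a minimal sketch, assuming callbacks/default.yaml defines a ModelCheckpoint entry:
#   model_checkpoint:
#     monitor: ${monitor_metric}
#     mode: ${monitor_mode}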
# seed for random number generators in pytorch, numpy and python.random
seed: null
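# set it from the command line for a reproducible run, e.g. (the value is arbitrary):
#   python train.py seed=42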
# simply provide a checkpoint path to resume training
ckpt_path: null
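# e.g. (the path is illustrative, not a file shipped with this repo):
#   python train.py ckpt_path=/path/to/last.ckpt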
# push the model and taskmodule to the Hugging Face model hub when training has finished
push_to_hub: False
# where to save the trained model and taskmodule
model_save_dir: ${paths.save_dir}/models/${name}/${now:%Y-%m-%d_%H-%M-%S}
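# `${paths.save_dir}` is interpolated from the paths config and `${now:...}` is
# Hydra's built-in timestamp resolver, so at runtime this resolves to something
# like (illustrative): <save_dir>/models/default/2024-01-01_12-00-00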