# configs.py
import torch
# Train from scratch
TFS_CONF = {
    "opt_fn": torch.optim.Adam,
    "T": 100,
    "train_batch_size": 16,
    "test_batch_size": 4,
    "lr": 0.001,
}
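
# Illustrative usage (an assumption about the training code, which lives
# elsewhere in the repository): a config is unpacked to build the optimizer
# and data loaders, e.g.
#
#   conf = TFS_CONF
#   optimizer = conf["opt_fn"](model.parameters(), lr=conf["lr"])
#
# with batch sizes taken from conf["train_batch_size"] / conf["test_batch_size"]
# and conf["T"] as the number of update steps.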
# Fine Tuning
FT_CONF = {
    "opt_fn": torch.optim.Adam,
    "T": 100,
    "train_batch_size": 16,
    "test_batch_size": 4,
    "lr": 0.001,
}
# Centroid Fine Tuning
CFT_CONF = {
    "opt_fn": torch.optim.Adam,
    "T": 100,
    "train_batch_size": 16,
    "test_batch_size": 4,
    "lr": 0.001,
}
# LSTM meta-learner
LSTM_CONF = {
    "opt_fn": torch.optim.Adam,
    "T": 8,
    "lr": 0.001,
    "input_size": 4,
    "hidden_size": 20,
    "grad_clip": 0.25,
}
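
# "grad_clip" is presumably applied between backward() and the optimizer
# step; a minimal sketch using PyTorch's built-in utility (meta_learner is
# a hypothetical module name):
#
#   torch.nn.utils.clip_grad_norm_(meta_learner.parameters(),
#                                  LSTM_CONF["grad_clip"])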
# LSTM meta-learner (second variant; same hyperparameters as LSTM_CONF)
LSTM_CONF2 = {
    "opt_fn": torch.optim.Adam,
    "T": 8,
    "lr": 0.001,
    "input_size": 4,
    "hidden_size": 20,
    "grad_clip": 0.25,
}
# Model-agnostic meta-learning
MAML_CONF = {
    "opt_fn": torch.optim.Adam,
    "T": 1,
    "lr": 0.001,
    "base_lr": 0.01,
    "meta_batch_size": 1,
    "grad_clip": 10,
}
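
# Reading of the MAML fields under the usual MAML convention (an assumption,
# not stated in this file): "base_lr" is the inner-loop step size, "lr" the
# outer-loop (meta) learning rate, and "T" the number of inner updates, e.g.
# (task_loss and params are hypothetical names):
#
#   grads = torch.autograd.grad(task_loss, params, create_graph=True)
#   fast_weights = [p - MAML_CONF["base_lr"] * g for p, g in zip(params, grads)]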
# BOIL (Body Only update in Inner Loop)
BOIL_CONF = {
    "opt_fn": torch.optim.Adam,
    "T": 1,
    "lr": 0.001,
    "base_lr": 0.5,
    "meta_batch_size": 4,
    "grad_clip": 10,
}
# SPFT (runs without a meta-optimizer: opt_fn is None)
SPFT_CONF = {
    "opt_fn": None,
    "T": 1,
    "lr": 0.001,
    "base_lr": 0.01,
    "meta_batch_size": 1,
}
# Reptile
REPTILE_CONF = {
    "opt_fn": torch.optim.Adam,
    "T": 1,
    "lr": 0.001,
    "base_lr": 0.01,
    "meta_batch_size": 5,
    "meta_lr": 1,
    "annealing": True,
}
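
# Under the standard Reptile formulation (assumed here), the outer update
# interpolates the initialization toward the task-adapted weights, with
# "meta_lr" as the interpolation step and "annealing" suggesting it decays
# over training:
#
#   theta = theta + meta_lr * (theta_adapted - theta)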
# Mimicking One-Step Optimizer config
MOSO_CONF = {
    "opt_fn": torch.optim.Adam,
    "T": 100,
    "lr": 0.001,
    "act": torch.nn.ReLU(),
    "hcopt_fn": torch.optim.Adam,
    "hcopt_lr": 0.001,
    "meta_batch_size": 32,
}
# TURTLE
TURTLE_CONF = {
    "opt_fn": torch.optim.Adam,
    "T": 1,  # not applicable
    "lr": 0.001,
    "act": torch.nn.ReLU(),
    "beta": 0.9,
    "meta_batch_size": 1,
    "time_input": False,
    "param_lr": False,
    "decouple": None,
}
# Simple LSTM baseline
SIMPLELSTM_CONF = {
    "opt_fn": torch.optim.Adam,
    "T": 1,
    "lr": 0.001,
    "base_lr": 0.01,
    "meta_batch_size": 1,
    "grad_clip": 10,
    "hidden_size": 3,
    "num_layers": 1,
}
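

# Minimal, self-contained sketch of how a config might drive a training
# loop (illustrative only; the toy model and random data below are
# assumptions, not part of the original experiments):
if __name__ == "__main__":
    conf = TFS_CONF
    model = torch.nn.Linear(4, 1)  # hypothetical base model
    opt = conf["opt_fn"](model.parameters(), lr=conf["lr"])
    for _ in range(conf["T"]):  # conf["T"] update steps
        opt.zero_grad()
        x = torch.randn(conf["train_batch_size"], 4)  # dummy batch
        loss = model(x).pow(2).mean()
        loss.backward()
        opt.step()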