-
Notifications
You must be signed in to change notification settings - Fork 1
/
plm.py
69 lines (54 loc) · 1.92 KB
/
plm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from utils.Manager import Manager
from models.PLM import PLM
def main(rank, manager):
    """Train/dev/test/inspect/encode the model (optionally distributed).

    Args:
        rank: current process id — the GPU index under mp.spawn, or
            manager.device in the single-process path
        manager: Manager instance carrying configuration (encoderU, mode,
            world_size, ...) and the train/evaluate/test entry points

    Raises:
        ValueError: if manager.encoderU or manager.mode is not a recognized value
    """
    manager.setup(rank)
    loaders = manager.prepare()

    # Select the user encoder; imports stay branch-local so only the
    # chosen encoder module is ever loaded.
    if manager.encoderU in ['lstm', 'gru']:
        from models.Encoders.RNN import RNN_User_Encoder
        encoderU = RNN_User_Encoder(manager)
    elif manager.encoderU == 'avg':
        from models.Encoders.Pooling import Average_Pooling
        encoderU = Average_Pooling(manager)
    elif manager.encoderU == 'attn':
        from models.Encoders.Pooling import Attention_Pooling
        encoderU = Attention_Pooling(manager)
    elif manager.encoderU == 'mha':
        from models.Encoders.MHA import MHA_User_Encoder
        encoderU = MHA_User_Encoder(manager)
    elif manager.encoderU == 'lstur':
        from models.Encoders.RNN import LSTUR
        encoderU = LSTUR(manager)
    else:
        # Fail fast with a clear message instead of an opaque NameError
        # at the PLM(...) call below.
        raise ValueError(f"unrecognized encoderU: {manager.encoderU!r}")

    model = PLM(manager, encoderU).to(rank)

    if manager.world_size > 1:
        model = DDP(model, device_ids=[rank], output_device=rank, find_unused_parameters=False)

    if manager.mode == 'dev':
        manager.evaluate(model, loaders, load=True)
    elif manager.mode == 'train':
        manager.train(model, loaders)
    elif manager.mode == 'test':
        manager.test(model, loaders)
    elif manager.mode == 'inspect':
        manager.inspect(model, loaders)
    elif manager.mode == 'encode':
        manager.encode(model, loaders)
    else:
        # Previously an unknown mode fell through and the run silently did nothing.
        raise ValueError(f"unrecognized mode: {manager.mode!r}")
if __name__ == "__main__":
    # Build the run configuration and apply script-level defaults.
    mgr = Manager()
    mgr.reducer = 'none'
    mgr.hidden_dim = 768  # presumably the PLM hidden size (BERT-base) — confirm in models.PLM

    if mgr.world_size > 1:
        # One worker process per GPU; mp.spawn prepends the rank to args.
        mp.spawn(main, args=(mgr,), nprocs=mgr.world_size, join=True)
    else:
        main(mgr.device, mgr)