model.py
# -*- coding: utf-8 -*-
import numpy as np
import paddle
import paddle.nn as nn

from config import Config


class MLP(nn.Layer):
    """Single-hidden-layer MLP regressor with a log-RMSE evaluation helper."""

    def __init__(self, dim_in, config):
        super().__init__()
        self.dim_embeding = 2048
        self.model = nn.Sequential(
            nn.Linear(dim_in, self.dim_embeding),
            nn.BatchNorm1D(self.dim_embeding),
            nn.ReLU(),
            nn.Dropout(config.dropout),
            nn.Linear(self.dim_embeding, 1),
        )
        self.loss = nn.MSELoss()

    def forward(self, X):
        return self.model(X)

    def log_rmse(self, X, Y):
        # Clip predictions to [1, inf) so the logarithm is well defined,
        # then compute sqrt(MSE(log(pred), log(target))).
        clipped_preds = paddle.clip(self.model(X), 1, float('inf'))
        eps = np.finfo(np.float32).eps
        log_rmse = paddle.sqrt(
            self.loss(paddle.log(clipped_preds + eps), paddle.log(Y + eps)))
        return log_rmse.item()


if __name__ == '__main__':
    device = paddle.get_device()
    paddle.set_device(device)
    print(f'device {device}')

    config = Config()
    config.device = device

    # Dummy batch: 3 samples with 18 features each.
    X = paddle.randn([3, 18])
    Y = paddle.randn([3, 1])

    model = MLP(X.shape[1], config)
    print(model)

    # Build the optimizer named in the config, e.g. paddle.optimizer.Adam.
    optimizer = getattr(paddle.optimizer, config.optimizer)(
        parameters=model.parameters(),
        **config.optim_hparams)
    print(optimizer)

    # One training step followed by a log-RMSE evaluation.
    loss = model.loss(model(X), Y)
    loss.backward()
    optimizer.step()
    optimizer.clear_grad()
    log_rmse = model.log_rmse(X, Y)
    print(f'log_rmse {log_rmse:.4f}')
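
The script imports Config from a config.py module that is not shown on this page. A minimal sketch of what that module might contain, based only on the attributes the code above actually reads (dropout, optimizer, optim_hparams, and device), could look like the following; the concrete values are illustrative assumptions, not the repository's actual settings:

# config.py - hypothetical sketch; values below are illustrative assumptions.
class Config:
    def __init__(self):
        self.dropout = 0.5                 # dropout probability for the MLP
        self.optimizer = 'Adam'            # resolved via getattr(paddle.optimizer, ...)
        self.optim_hparams = {             # forwarded as **kwargs to the optimizer
            'learning_rate': 1e-3,
            'weight_decay': 1e-4,
        }
        self.device = None                 # set at runtime from paddle.get_device()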