-
Notifications
You must be signed in to change notification settings - Fork 4
/
runmodel_example.py
84 lines (63 loc) · 1.87 KB
/
runmodel_example.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
# time pypy-2.4 -u runmodel.py | tee output_0.txt
from math import log
import random

from FM_FTRL_machine import *

# Explicit import so `datetime.now()` below does not depend on whatever
# the star import above happens to re-export.
from datetime import datetime
#### RANDOM SEED ####
random.seed(5) # seed random variable for reproducibility
#####################
####################
#### PARAMETERS ####
####################
reportFrequency = 10                     # print progressive loss every this many rows
trainingFile = "./data_mini/train.mini"  # path to the training data
fm_dim = 4                               # number of latent factors per feature
fm_initDev = .01                         # std. dev. for random init of factor weights
hashSalt = "salty"                       # salt mixed into the feature hash
alpha = .1                               # FTRL learning rate (linear weights)
beta = 1.                                # FTRL beta (linear weights)
alpha_fm = .01                           # FTRL learning rate (factorization weights)
beta_fm = 1.                             # FTRL beta (factorization weights)
p_D = 22                                 # log2 of the hashed feature-space size
D = 2 ** p_D                             # number of hashed feature buckets
L1 = 1.0                                 # L1 regularization, linear weights
L2 = .1                                  # L2 regularization, linear weights
L1_fm = 2.0                              # L1 regularization, factorization weights
L2_fm = 3.0                              # L2 regularization, factorization weights
n_epochs = 5                             # number of passes over the training file
####
start = datetime.now()

# Initialize the FM learner: a factorization machine trained with
# per-coordinate FTRL-proximal updates, with separate alpha/beta and
# L1/L2 settings for the linear terms and the factorization terms.
learner = FM_FTRL_machine(fm_dim, fm_initDev, L1, L2, L1_fm, L2_fm, D,
                          alpha, beta, alpha_fm=alpha_fm, beta_fm=beta_fm)

print("Start Training:")
for e in range(n_epochs):
    # On the first epoch train the interaction weights without
    # regularization so they can move away from their tiny random
    # initialization; restore L1_fm/L2_fm on later epochs.
    if e == 0:
        learner.L1_fm = 0.
        learner.L2_fm = 0.
    else:
        learner.L1_fm = L1_fm
        learner.L2_fm = L2_fm

    cvLoss = 0.
    cvCount = 0.
    progressiveLoss = 0.
    progressiveCount = 0.

    for t, date, ID, x, y in data(trainingFile, D, hashSalt):
        if date == 30:
            # Day 30 is held out for validation: score it, never train on it.
            p = learner.predict(x)
            cvLoss += logLoss(p, y)
            cvCount += 1.
        else:
            # Progressive (online) validation: predict BEFORE updating,
            # so the loss reflects unseen data.
            p = learner.predict(x)
            loss = logLoss(p, y)
            learner.update(x, p, y)
            progressiveLoss += loss
            progressiveCount += 1.
            # Reporting inside the training branch guarantees
            # progressiveCount >= 1 here (no ZeroDivisionError).
            if t % reportFrequency == 0:
                print("Epoch %d\tcount: %d\tProgressive Loss: %f" % (
                    e, t, progressiveLoss / progressiveCount))

    # Guard against an epoch with no held-out (date == 30) rows, which
    # would otherwise raise ZeroDivisionError at the division below.
    validationLoss = cvLoss / cvCount if cvCount > 0 else float("nan")
    print("Epoch %d finished.\tvalidation loss: %f\telapsed time: %s" % (
        e, validationLoss, str(datetime.now() - start)))
# Persist the learned parameters: linear weights and factorization
# weights are written to separate text files.
w_outfile = "param.w.txt"
w_fm_outfile = "param.w_fm.txt"
for outPath, dump in ((w_outfile, learner.write_w),
                      (w_fm_outfile, learner.write_w_fm)):
    dump(outPath)