-
Notifications
You must be signed in to change notification settings - Fork 0
/
trial_hh.py
74 lines (57 loc) · 2.41 KB
/
trial_hh.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
# Usage:
# $ source activate mlenv
from keras.models import Sequential
from keras.layers import Dense, Activation, LocallyConnected1D, Flatten
import numpy as np
from keras import backend as K
import random
from keras import layers
# Filesystem location of the sinusoid CSV datasets.
data_folder = '/Users/nikhil/data/Sinusoid3/'
# Dataset files to evaluate; the first one is also used for training.
data_file_names = ['Sinusoid3_200.csv']
data_file_name = data_file_names[0]
def read_data(file):
    """Load one CSV dataset from data_folder.

    Each row's first column is the regression target; the remaining
    columns are features, each wrapped in its own single-element list so
    the feature array has shape (rows, n_features, 1) — the layout the
    LocallyConnected1D input layer expects.

    Returns a (data, label) pair of numpy arrays.
    """
    samples = []
    targets = []
    with open(data_folder + file) as fh:
        for raw in fh:
            fields = raw.strip('\n').split(',')
            targets.append([float(fields[0])])
            samples.append([[float(v)] for v in fields[1:]])
    return np.array(samples), np.array(targets)
def custom_activation(x):
    """Element-wise sigmoid activation.

    Earlier noisy-activation experiments are preserved as comments below.
    NOTE(review): the original body also built a `noise` tensor on every
    call but never used it, so it was pure dead graph work — that line is
    moved into the disabled experiments instead of executing.
    """
    # Disabled experiments:
    # return K.sigmoid(x + K.random_normal_variable((K.ndim(x), 1), 0, x))
    # noise = K.random_normal(shape=K.shape(x), mean=0.0, stddev=10, dtype=K.dtype(x))
    # return K.relu(x + noise)
    return K.sigmoid(x)
# Build the regression network: a locally-connected front end followed by
# two dense layers, trained with SGD on mean absolute error.
model = Sequential()
# One filter of width 200 with stride 200 over the (600, 1) input — each
# output position sees a disjoint 200-sample window of the signal.
model.add(LocallyConnected1D(1, 200, strides=200, input_shape=(600, 1)))
#model.add(Flatten())
#model.add(Dense(units=12, activation='sigmoid'))
# Hidden layer using the custom (currently plain-sigmoid) activation.
model.add(Dense(units=12, activation=custom_activation))
#model.add(Dense(units=12, activation=layers.advanced_activations.LeakyReLU(alpha=0.3)))
# Single-unit output with LeakyReLU for the regression target.
model.add(Dense(units=1, activation=layers.advanced_activations.LeakyReLU(alpha=0.3)))
model.compile(optimizer='sgd', loss='mean_absolute_error')
# Load the primary dataset and split it: first 10k rows for test, next 10k
# for tuning, remainder for training.
# NOTE(review): this assumes the CSV holds well over 20000 rows — if it is
# shorter, train_data ends up empty; verify against the data file.
data, label = read_data(data_file_name)
test_data, test_label = data[:10000], label[:10000]
tune_data, tune_label = data[10000:20000], label[10000:20000]
train_data, train_label = data[20000:], label[20000:]
model.fit(train_data, train_label, epochs=10000, batch_size=64)
# Report test-set loss (MAE, per the compile step above).
loss_and_metrics = model.evaluate(test_data, test_label, batch_size=128)
print ''
print loss_and_metrics
# predicts = model.predict(test_data, batch_size=128)
# for _ in range(1, 100):
# index = random.randint(0, len(test_label))
# print test_label[index], predicts[index]
for i in range(7):
data_file_name = data_file_names[i]
data, label = read_data(data_file_name)
test_data, test_label = data[:10000], label[:10000]
loss_and_metrics = model.evaluate(test_data, test_label, batch_size=128)
print ''
print 'dataset ' + str(i + 1) + ': ' + str(loss_and_metrics)