forked from gstark0/anns
-
Notifications
You must be signed in to change notification settings - Fork 1
/
test.py
78 lines (66 loc) · 2.21 KB
/
test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import tensorflow as tf
# --- Network configuration and data holders ---
n_hidden_layers = 0          # number of hidden layers model() will build
n_hidden_nodes = []          # units per hidden layer, indexed by layer number
learning_rate = 0.           # NOTE(review): 0.0 means gradient descent makes no updates — confirm intended
# Bug fix: was 'tahn' — not a key of model()'s activation_funcs dict
# ({'relu', 'tanh', 'sigmoid'}), so any hidden layer raised KeyError.
activation = 'tanh'
x_train, y_train = [], []    # training features / one-hot labels
x_test, y_test = [], []      # test features / one-hot labels
# Load dataset2.csv: each row is "x1,x2,label".
# Rows 0-99 go to the training split, the remainder to the test split.
with open('dataset2.csv', 'r') as f:
    lines = f.readlines()
i = 0
for line in lines:
    # Parse each row once instead of re-splitting it per field.
    parts = line.replace('\n', '').split(',')
    features = [float(parts[0]), float(parts[1])]
    label = float(parts[2])
    # One-hot encode the binary label.
    # Bug fix: the original wrapped the label in a list and tested
    # `[label] == 1.0`, which is always False — every example was
    # silently encoded as class [1, 0].
    one_hot = [0, 1] if label == 1.0 else [1, 0]
    if i < 100:
        x_train.append(features)
        y_train.append(one_hot)
    else:
        x_test.append(features)
        y_test.append(one_hot)
    i += 1
# Graph inputs: batches of 2-feature examples and 2-class one-hot labels.
# (TF1-style static graph; tf.placeholder no longer exists in TF2.)
x = tf.placeholder(tf.float32, [None, 2])
y = tf.placeholder(tf.float32, [None, 2])
# Removed a commented-out manual single-hidden-layer implementation
# (w1/b1/w2/b2 with truncated_normal init); model() builds the network
# configurably from n_hidden_layers / n_hidden_nodes instead.
def model():
    """Build the classification graph on the module-level placeholders x / y.

    Constructs ``n_hidden_layers`` dense hidden layers (``n_hidden_nodes[i]``
    units each, using the module-level ``activation``), a 2-unit logits layer,
    softmax cross-entropy loss, and a gradient-descent training op.

    Returns:
        Tuple ``(accuracy, loss, optimizer)`` — tensors/op to run in a session.
    """
    layers = []
    activation_funcs = {'relu': tf.nn.relu, 'tanh': tf.nn.tanh, 'sigmoid': tf.nn.sigmoid}
    for layer in range(n_hidden_layers):
        # First hidden layer reads the input placeholder; later layers chain
        # onto the previous layer's output.
        # Bug fix: the original hard-coded tf.nn.relu for every layer after
        # the first, ignoring the configured activation.
        layers.append(tf.layers.dense(
            inputs=x if layer == 0 else layers[-1],
            units=n_hidden_nodes[layer],
            activation=activation_funcs[activation]))
    # Bug fix: the original passed `inputs=x` here, feeding the raw input
    # straight into the logits layer and silently bypassing every hidden
    # layer built above.
    logits = tf.layers.dense(
        inputs=layers[-1] if layers else x,
        units=2)
    out = tf.nn.softmax(logits)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    # Accuracy: fraction of examples where predicted class == true class.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(out, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return accuracy, loss, optimizer
# Train with 10 full-batch gradient-descent steps, printing test accuracy
# and test loss after every step.
accuracy, loss, optimizer = model()
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    train_feed = {x: x_train, y: y_train}
    test_feed = {x: x_test, y: y_test}
    for _ in range(10):
        sess.run(optimizer, train_feed)
        acc_val = sess.run(accuracy, test_feed)
        loss_val = sess.run(loss, test_feed)
        print(acc_val, loss_val)