back_rand.py (forked from sunkevin1214/codes)
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import control_flow_ops
from datetime import datetime
import numpy as np
import os
import get_data as data
'''
Length:5, Num:795078, Mean:0.91, Std:0.03,
conv[2,2,24,0.72,0.38],
conv[4,4,11,0.26,0.13],
pool[4,4,0.70],
full[1445,-0.9305572400603059,0.7123493284279396],
full[10,-0.05645566288281878,0.3516372640499208]
'''
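# How the encoded architecture above maps onto the layers built in model()
# below (the field meanings are inferred from usage, not documented upstream):
#   conv[kh, kw, filters, init_mean, init_std] -> slim.conv2d with a
#       truncated-normal weight initializer using that mean/std
#   pool[kh, kw, x] -> slim.avg_pool2d with a kh-by-kw window (the last
#       field is not consumed by this script)
#   full[units, init_mean, init_std] -> slim.fully_connected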
batch_size = 100
total_epochs = 100
def model():
    # A single boolean placeholder switches the graph between the train and
    # test input pipelines via tf.cond.
    is_training = tf.placeholder(tf.bool, [])
    train_images, train_label = data.get_train_data(batch_size)
    test_images, test_label = data.get_test_data(batch_size)
    x = tf.cond(is_training, lambda: train_images, lambda: test_images)
    y_ = tf.cond(is_training, lambda: train_label, lambda: test_label)
    y_ = tf.cast(y_, tf.int64)

    # Shared settings for every conv/fc layer: CReLU activations, batch norm,
    # and L2 weight decay of 0.005.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.crelu,
                        normalizer_fn=slim.batch_norm,
                        weights_regularizer=slim.l2_regularizer(0.005),
                        normalizer_params={'is_training': is_training, 'decay': 0.95}):
        conv1 = slim.conv2d(x, 24, [2, 2],
                            weights_initializer=tf.truncated_normal_initializer(mean=0.72, stddev=0.38))
        conv2 = slim.conv2d(conv1, 11, [4, 4],
                            weights_initializer=tf.truncated_normal_initializer(mean=0.26, stddev=0.13))
        pool1 = slim.avg_pool2d(conv2, [4, 4], stride=4, padding='SAME')
        flatten = slim.flatten(pool1)
        full1 = slim.fully_connected(flatten, 1445,
                                     weights_initializer=tf.truncated_normal_initializer(mean=-0.93055, stddev=0.71234),
                                     biases_initializer=tf.constant_initializer(0.1, dtype=tf.float32))
        logits = slim.fully_connected(full1, 10, activation_fn=None,
                                      weights_initializer=tf.truncated_normal_initializer(mean=-0.05645566, stddev=0.3516),
                                      biases_initializer=tf.constant_initializer(0.1, dtype=tf.float32))

    correct_prediction = tf.equal(tf.argmax(logits, 1), y_)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Total loss = softmax cross-entropy + the L2 regularization terms that
    # slim collected from the layers above.
    regularization_loss = tf.add_n(slim.losses.get_regularization_losses())
    cross_entropy = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=logits)) + regularization_loss

    step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)
    # lr = tf.train.exponential_decay(0.1, step, 550*30, 0.9, staircase=True)
    # optimizer = tf.train.GradientDescentOptimizer(lr)
    # lr_summary = tf.summary.scalar('lr', lr)
    optimizer = tf.train.AdamOptimizer(0.001)
    train_step = slim.learning.create_train_op(cross_entropy, optimizer, global_step=step)

    # Run the batch-norm moving-average updates whenever the loss is evaluated.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    if update_ops:
        updates = tf.group(*update_ops)
        cross_entropy = control_flow_ops.with_dependencies([updates], cross_entropy)

    loss_summary = tf.summary.scalar('loss', cross_entropy)
    accuracy_summary = tf.summary.scalar('accuracy', accuracy)
    merge_summary = tf.summary.merge([loss_summary, accuracy_summary])
    return is_training, train_step, step, accuracy, cross_entropy, merge_summary
def train():
    gpu_options = tf.GPUOptions(allow_growth=True)
    is_training, train_step, _, accuracy, loss, merge_summary = model()
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(tf.global_variables_initializer())
        # Split sizes as given in the original script; get_data's actual
        # train/test split must match them for the epoch accounting below.
        train_data_length = 10000
        test_data_length = 50000
        steps_in_each_epoch = train_data_length // batch_size
        total_steps = int(total_epochs * steps_in_each_epoch)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess, coord)
        try:
            for i in range(total_steps):
                if coord.should_stop():
                    break
                _, accuracy_str, loss_str, _ = sess.run(
                    [train_step, accuracy, loss, merge_summary], {is_training: True})
                # Evaluate over the whole test split once per epoch.
                if i % steps_in_each_epoch == 0:
                    test_total_step = test_data_length // batch_size
                    test_accuracy_list = []
                    test_loss_list = []
                    for _ in range(test_total_step):
                        test_accuracy_str, test_loss_str = sess.run(
                            [accuracy, loss], {is_training: False})
                        test_accuracy_list.append(test_accuracy_str)
                        test_loss_list.append(test_loss_str)
                    print('{}, {}, Step:{}/{}, train_loss:{}, acc:{}, test_loss:{}, accu:{}'.format(
                        datetime.now(), i // steps_in_each_epoch, i, total_steps,
                        loss_str, accuracy_str, np.mean(test_loss_list), np.mean(test_accuracy_list)))
        except tf.errors.OutOfRangeError:
            print('done')
        finally:
            coord.request_stop()
            coord.join(threads)
if __name__ =='__main__':
#CUDA2
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
tf.reset_default_graph()
train() #96.41
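
# A minimal sketch of the interface this script expects from get_data.py (the
# module itself is not included here; names and shapes are inferred from how
# it is used above):
#
#   def get_train_data(batch_size):
#       """Return an (images, labels) pair of tensors that a queue runner
#       feeds with training batches of the given size."""
#
#   def get_test_data(batch_size):
#       """Same contract over the test split."""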