-
Notifications
You must be signed in to change notification settings - Fork 12
/
Copy path: tf_cnn_mnist_classification.py
122 lines (96 loc) · 3.47 KB
/
tf_cnn_mnist_classification.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
# Import dependencies
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the MNIST dataset via tensorflow.examples (downloads to MNIST_data/ on
# first run); one_hot=True encodes each label as a 10-dim one-hot vector.
mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Neural Network
# Hyper-parameters
n_image_width = 28   # MNIST images are 28x28 pixels
n_image_height = 28
n_input_pixels = n_image_height * n_image_width  # 784 pixels per flattened image
filter_width = 5     # convolution kernel is 5x5
filter_height = 5
n_classes = 10  # digits 0-9
n_channels = 1  # grayscale input, single channel
con_1_features = 16  # feature maps produced by the first conv layer
con_2_features = 32  # feature maps produced by the second conv layer
learning_rate = 0.001  # Adam optimizer step size
batch_size = 50        # examples per training step
# Input/Output Placeholders
# X: one flattened 784-pixel image per row; Y: matching one-hot labels.
X = tf.placeholder(dtype=tf.float32, shape=[None, n_input_pixels])
Y = tf.placeholder(dtype=tf.float32, shape=[None, n_classes])
# Layer Weights and biases


def _init_params(weight_shape, n_out):
    """Return a {'weight', 'bias'} dict of trainable variables.

    Both tensors are drawn from a normal distribution with stddev 0.1,
    matching the initialization used throughout this script.
    """
    return {
        'weight': tf.Variable(tf.random_normal(weight_shape, stddev=0.1)),
        'bias': tf.Variable(tf.random_normal([n_out], stddev=0.1)),
    }


# First conv layer: 5x5 kernels mapping 1 input channel -> 16 feature maps.
conv_lay_1 = _init_params(
    [filter_height, filter_width, n_channels, con_1_features],
    con_1_features)
# Second conv layer: 5x5 kernels mapping 16 -> 32 feature maps.
conv_lay_2 = _init_params(
    [filter_height, filter_width, con_1_features, con_2_features],
    con_2_features)
# Fully-connected output layer: flattened 7x7x32 activations -> 10 outputs.
fc_nn_lay_1 = _init_params([7 * 7 * con_2_features, n_classes], n_classes)
# Model
# Reshape flat 784-pixel rows into the NHWC layout conv2d expects.
x_img = tf.reshape(X, [-1, n_image_width, n_image_height,
                       n_channels])  # [batch, height, width, channels]
# Conv block 1: 5x5 conv (stride 1, SAME padding) -> ReLU -> 2x2 max-pool.
# Spatially: 28x28x1 -> 28x28x16 -> 14x14x16.
h_conv_1 = tf.nn.conv2d(
    x_img, conv_lay_1['weight'], strides=[1, 1, 1, 1], padding='SAME')
h_relu_1 = tf.nn.relu(h_conv_1 + conv_lay_1['bias'])
op_pool_1 = tf.nn.max_pool(
    h_relu_1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Conv block 2: 14x14x16 -> 14x14x32 -> 7x7x32 after pooling.
h_conv_2 = tf.nn.conv2d(
    op_pool_1, conv_lay_2['weight'], strides=[1, 1, 1, 1], padding='SAME')
h_relu_2 = tf.nn.relu(h_conv_2 + conv_lay_2['bias'])
op_pool_2 = tf.nn.max_pool(
    h_relu_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Flatten pooled activations, then the fully-connected output layer.
flat_lay_3 = tf.reshape(op_pool_2, [-1, 7 * 7 * con_2_features])
h_nn_1 = tf.matmul(flat_lay_3, fc_nn_lay_1['weight']) + fc_nn_lay_1['bias']
# Per-class sigmoid activations in [0, 1].
# NOTE(review): sigmoid + mean-squared error is unusual for one-hot
# classification; softmax + cross-entropy typically trains better. Confirm
# before changing — it alters training dynamics.
final_op = tf.nn.sigmoid(h_nn_1)
# Error and Optimizer
# mean-squared error
error = tf.reduce_mean(0.5 * tf.square(final_op - Y))
# adam-optimizer
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(error)
# Prediction for test
# Correct when the highest-activation class matches the one-hot label.
correct_pred = tf.equal(tf.argmax(final_op, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Start Session
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    print("*********** Train ***********")
    # One pass over the training set, in mini-batches of `batch_size`.
    train_examples = len(mnist_data.train.images)
    for i in range(train_examples // batch_size):
        train_batch = mnist_data.train.next_batch(batch_size)
        _, err = sess.run(
            [optimizer, error],
            feed_dict={X: train_batch[0],
                       Y: train_batch[1]})
        # Every 100 batches, report the error on the current TRAINING batch
        # and the accuracy on a fresh validation batch.
        if i % 100 == 0:
            validation_batch = mnist_data.validation.next_batch(batch_size)
            acc = accuracy.eval({
                X: validation_batch[0],
                Y: validation_batch[1]
            })
            # Fix: `err` comes from the training batch, but the original
            # label called it "validation-error" — label values correctly.
            print("Batch: %d train-error = %f validation-accuracy = %f" %
                  (i, err, acc * 100))
    print("*********** Test ***********")
    # Final evaluation on the full held-out test set.
    acc = accuracy.eval({X: mnist_data.test.images, Y: mnist_data.test.labels})
    print("Final Accuracy = %f" % (acc * 100))