# p256_cifar10.py
# CIFAR-10 image classification with a small convolutional network (TensorFlow 1.x).
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pickle
import time
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow INFO/WARNING logs


def unpickle_(filename):
    """Alternative loader that decodes keys as latin1 strings (unused below)."""
    with open(filename, 'rb') as f:
        d = pickle.load(f, encoding='latin1')
    return d


def unpickle(file):
    """Load one pickled CIFAR-10 batch file as a dict with byte-string keys."""
    with open(file, 'rb') as fo:
        d = pickle.load(fo, encoding='bytes')
    return d
def onehot(labels):
'''one-hot 编码'''
n_sample = len(labels)
n_class = max(labels) + 1
onehot_labels = np.zeros((n_sample, n_class))
onehot_labels[np.arange(n_sample), labels] = 1
return onehot_labels
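
# Example: onehot([0, 2, 1]) returns
# [[1., 0., 0.],
#  [0., 0., 1.],
#  [0., 1., 0.]]

# Load the five CIFAR-10 training batches and stack them into a single
# (50000, 3072) array of flattened 32x32x3 images, with one-hot labels.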
data1 = unpickle('data/cifar10-dataset/data_batch_1')
data2 = unpickle('data/cifar10-dataset/data_batch_2')
data3 = unpickle('data/cifar10-dataset/data_batch_3')
data4 = unpickle('data/cifar10-dataset/data_batch_4')
data5 = unpickle('data/cifar10-dataset/data_batch_5')
X_train = np.concatenate((data1[b'data'], data2[b'data'], data3[b'data'], data4[b'data'], data5[b'data']), axis=0)
y_train = np.concatenate((data1[b'labels'], data2[b'labels'], data3[b'labels'], data4[b'labels'], data5[b'labels']), axis=0)
y_train = onehot(y_train)
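# Evaluate on only the first 5,000 of the 10,000 test images to keep the
# single test-time feed small.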
test = unpickle('data/cifar10-dataset/test_batch')
X_test = test[b'data'][:5000, :]
y_test = onehot(test[b'labels'])[:5000, :]
print("Training dataset shape:", X_train.shape)
print("Training labels shape:", y_train.shape)
print("Testing dataset shape:", X_test.shape)
print("Testing labels shape:", y_test.shape)
with tf.device('/cpu:0'):
    learning_rate = 1e-3
    training_iters = 200
    batch_size = 50
    display_step = 5
    n_features = 3072  # 32*32*3
    n_classes = 10
    n_fc1 = 384
    n_fc2 = 192
    x = tf.placeholder(tf.float32, [None, n_features])
    y = tf.placeholder(tf.float32, [None, n_classes])
    W_conv = {
        'conv1': tf.Variable(tf.truncated_normal([5, 5, 3, 32], stddev=0.0001)),
        'conv2': tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.01)),
        'fc1': tf.Variable(tf.truncated_normal([8*8*64, n_fc1], stddev=0.1)),
        'fc2': tf.Variable(tf.truncated_normal([n_fc1, n_fc2], stddev=0.1)),
        'fc3': tf.Variable(tf.truncated_normal([n_fc2, n_classes], stddev=0.1))
    }
    b_conv = {
        'conv1': tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[32])),
        'conv2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[64])),
        'fc1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[n_fc1])),
        'fc2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[n_fc2])),
        'fc3': tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[n_classes]))
    }
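    # Note: 'fc1' takes 8*8*64 inputs because the two stride-2 poolings below
    # reduce the 32x32 spatial size to 8x8, and conv2 outputs 64 channels.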
    x_image = tf.reshape(x, [-1, 32, 32, 3])
    # Convolution layer 1
    conv1 = tf.nn.conv2d(x_image, W_conv['conv1'], strides=[1, 1, 1, 1], padding='SAME')
    conv1 = tf.nn.bias_add(conv1, b_conv['conv1'])
    conv1 = tf.nn.relu(conv1)
    # Pooling layer 1
    pool1 = tf.nn.avg_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
    # LRN layer 1 (Local Response Normalization)
    norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001/9.0, beta=0.75)
    # Convolution layer 2
    conv2 = tf.nn.conv2d(norm1, W_conv['conv2'], strides=[1, 1, 1, 1], padding='SAME')
    conv2 = tf.nn.bias_add(conv2, b_conv['conv2'])
    conv2 = tf.nn.relu(conv2)
    # LRN layer 2 (Local Response Normalization)
    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001/9.0, beta=0.75)
    # Pooling layer 2
    pool2 = tf.nn.avg_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Fully connected layer 1
    reshape = tf.reshape(pool2, [-1, 8*8*64])
    fc1 = tf.add(tf.matmul(reshape, W_conv['fc1']), b_conv['fc1'])
    fc1 = tf.nn.relu(fc1)
    # Fully connected layer 2
    fc2 = tf.add(tf.matmul(fc1, W_conv['fc2']), b_conv['fc2'])
    fc2 = tf.nn.relu(fc2)
    # Fully connected layer 3 (classification layer): keep the raw logits here,
    # since softmax_cross_entropy_with_logits applies softmax internally and
    # feeding it already-softmaxed values would compute the wrong loss.
    fc3 = tf.add(tf.matmul(fc2, W_conv['fc3']), b_conv['fc3'])
    # Define the loss
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=fc3, labels=y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)
    # argmax over logits gives the same prediction as argmax over softmax outputs.
    correct_pred = tf.equal(tf.argmax(fc3, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    init = tf.global_variables_initializer()
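
# Train: each epoch sweeps all mini-batches in order; accuracy is recorded on
# the last mini-batch of each epoch, a cheap but noisy progress signal.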
with tf.Session() as sess:
    sess.run(init)
    c = []
    total_batch = int(X_train.shape[0] / batch_size)
    start_time = time.time()
    for i in range(training_iters):
        for batch in range(total_batch):
            batch_x = X_train[batch*batch_size: (batch+1)*batch_size, :]
            batch_y = y_train[batch*batch_size: (batch+1)*batch_size, :]
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
        print(acc)
        c.append(acc)
        end_time = time.time()
        print("------ epoch %d is finished ---------" % i)
    print("Optimization Finished!")
    # Test
    test_acc = sess.run(accuracy, feed_dict={x: X_test, y: y_test})
    print("Testing Accuracy:", test_acc)
    plt.plot(c)
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.title("lr=%f, ti=%d, bs=%d, acc=%f" % (learning_rate, training_iters, batch_size, test_acc))
    plt.tight_layout()
    plt.savefig('cnn-tf-cifar10-%s.png' % test_acc, dpi=200)
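
# A minimal setup sketch (assumes the standard "CIFAR-10 python version"
# archive; the extracted directory is renamed to match the paths used above):
#   curl -O https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
#   tar -xzf cifar-10-python.tar.gz
#   mkdir -p data && mv cifar-10-batches-py data/cifar10-dataset
#   python p256_cifar10.py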