-
Notifications
You must be signed in to change notification settings - Fork 10
/
Copy pathsketchANet.py
117 lines (93 loc) · 4.01 KB
/
sketchANet.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.datasets import mnist, cifar10
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.optimizers import SGD, RMSprop
from keras.utils import np_utils
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.callbacks import EarlyStopping
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.callbacks import ModelCheckpoint
from PIL import Image
## Model checkpoint: after each epoch, save weights whenever val_acc improves.
## NOTE(review): absolute path — assumes /sketchANetModel exists and is writable; confirm.
filepath="/sketchANetModel/weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
# filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
## Directories consumed by flow_from_directory below (one subfolder per class).
mypath ='/python/png'  # training images
mypath2 ='/data'  # validation images
# mypath ='png'
## Training hyperparameters
batch_size = 32
nb_classes = 250  # 250 sketch categories — presumably the TU-Berlin sketch dataset; confirm
nb_epoch = 200
data_augmentation = True  # NOTE(review): set but never checked below — augmentation always runs
# Sketch-A-Net style architecture. The first convolution is 15x15 on a
# 1x225x225 grayscale input (channels-first), not 3x3 on 256x256 as the
# original comment claimed.
## Sketch-A-Net style CNN for 250-way sketch classification.
## Expects a single-channel 225x225 image in channels-first (1, H, W) layout.
model = Sequential([
    # Stage 1: large 15x15 receptive field to pick up sparse sketch strokes.
    Convolution2D(64, 15, 15, border_mode='same', input_shape=(1, 225, 225)),
    Activation('relu'),
    MaxPooling2D(pool_size=(3, 3)),
    # Stage 2.
    Convolution2D(128, 5, 5, border_mode='same'),
    Activation('relu'),
    MaxPooling2D(pool_size=(3, 3)),
    # Stage 3: three stacked 3x3 convolutions before the final pooling.
    Convolution2D(256, 3, 3, border_mode='same'),
    Activation('relu'),
    Convolution2D(256, 3, 3, border_mode='same'),
    Activation('relu'),
    Convolution2D(256, 3, 3, border_mode='same'),
    Activation('relu'),
    MaxPooling2D(pool_size=(3, 3)),
    # "Fully-connected" layers expressed as convolutions, with dropout.
    Convolution2D(512, 7, 7, border_mode='same'),
    Activation('relu'),
    Dropout(0.5),
    Convolution2D(512, 1, 1, border_mode='same'),
    Activation('relu'),
    Dropout(0.5),
    Convolution2D(250, 1, 1, border_mode='same'),
    # Classifier head: flatten and project onto the 250 classes.
    Flatten(),
    Dense(nb_classes),
    Activation('softmax'),
])
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
print('Using real-time data augmentation.')
# Real-time augmentation for the training stream only.
# FIX: the original config also enabled featurewise_center,
# featurewise_std_normalization and zca_whitening, but those options require
# datagen.fit(X) on an in-memory array, which is never called — and cannot be
# when streaming with flow_from_directory. Keras merely warns and skips them,
# so they are removed here; the per-sample normalization below still applies.
datagen = ImageDataGenerator(
    samplewise_center=True,              # zero-center each sample
    samplewise_std_normalization=True,   # scale each sample to unit std
    rotation_range=10,                   # random rotation within +/-10 degrees
    width_shift_range=0.2,               # horizontal shift, fraction of total width
    height_shift_range=0.2,              # vertical shift, fraction of total height
    horizontal_flip=True,                # random horizontal flips
    vertical_flip=True)                  # NOTE(review): upside-down sketches may hurt accuracy — confirm this is intended
# Validation images are streamed unmodified (no augmentation, no normalization).
validation_datagen = ImageDataGenerator()
train_generator = datagen.flow_from_directory(
    mypath,                   # training directory: one subdirectory per class
    target_size=(225, 225),   # resize to match the model's 225x225 input
    batch_size=batch_size,
    shuffle=True,
    color_mode='grayscale',
    class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory(
    mypath2,                  # validation directory: one subdirectory per class
    target_size=(225, 225),   # resize to match the model's 225x225 input
    batch_size=batch_size,
    shuffle=True,
    color_mode='grayscale',
    class_mode='categorical')
# Train from the generator stream, checkpointing on val_acc improvements.
model.fit_generator(train_generator,
                    samples_per_epoch=20000,  # augmented images drawn per epoch
                    nb_epoch=nb_epoch,
                    callbacks=callbacks_list,
                    validation_data=validation_generator,
                    nb_val_samples=2)  # NOTE(review): only 2 validation samples — val_acc (which drives the checkpoint) will be extremely noisy; confirm