Add files via upload
13o-bbr-bbq authored May 1, 2017
1 parent 62722c9 commit a58b1cc
Showing 3 changed files with 299 additions and 0 deletions.
95 changes: 95 additions & 0 deletions CNN_test/finetuning.py
@@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
import os
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras import optimizers


# CIFAR10 class labels
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck']

batch_size = 32
nb_classes = len(classes)

# CIFAR10 image dimensions (height: 32px, width: 32px, channels: 3)
img_rows, img_cols = 32, 32
channels = 3

# Paths to the training and test images
train_data_dir = '.\\cifar10\\train_image'
validation_data_dir = '.\\cifar10\\test_image'

# Training images (5,000 per class) and test images (1,000 per class)
# Around 30 epochs might already be enough
nb_train_samples = 50000
nb_val_samples = 10000
nb_epoch = 50

# Path for saving the trained weights
result_dir = '.\\cifar10\\results'
if not os.path.exists(result_dir):
    os.mkdir(result_dir)


if __name__ == '__main__':
    # Load the VGG16 model with pre-trained ImageNet weights
    input_tensor = Input(shape=(img_rows, img_cols, 3))
    vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)

    # Fully connected (FC) classification head
    top_model = Sequential()
    top_model.add(Flatten(input_shape=vgg16.output_shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(nb_classes, activation='softmax'))

    # Connect the FC head to the VGG16 base
    model = Model(input=vgg16.input, output=top_model(vgg16.output))

    # Freeze everything before the last convolution block (only block5 and the FC head are trained)
    for layer in model.layers[:15]:
        layer.trainable = False

    # 10-class classification, so use categorical_crossentropy as the loss
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                  metrics=['accuracy'])

    train_datagen = ImageDataGenerator(
        rescale=1.0 / 255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1.0 / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_rows, img_cols),
        color_mode='rgb',
        classes=classes,
        class_mode='categorical',
        batch_size=batch_size,
        shuffle=True)

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_rows, img_cols),
        color_mode='rgb',
        classes=classes,
        class_mode='categorical',
        batch_size=batch_size,
        shuffle=True)

    # Fine-tuning
    history = model.fit_generator(
        train_generator,
        samples_per_epoch=nb_train_samples,
        nb_epoch=nb_epoch,
        validation_data=validation_generator,
        nb_val_samples=nb_val_samples)

    model.save_weights(os.path.join(result_dir, 'finetuning.h5'))
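finetuning.py reads its data with flow_from_directory, so the CIFAR-10 images have to be exported beforehand as one sub-directory per class under cifar10\train_image and cifar10\test_image. A minimal export sketch (not part of this commit), assuming the default TensorFlow channels-last image format and that OpenCV is available; the PNG file names are arbitrary:

# -*- coding: utf-8 -*-
import os
import cv2
from keras.datasets import cifar10

classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck']

# Assumes channels-last arrays: (N, 32, 32, 3) uint8 in RGB order
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

for subset, images, labels in [('train_image', x_train, y_train),
                               ('test_image', x_test, y_test)]:
    for idx, (img, label) in enumerate(zip(images, labels)):
        class_dir = os.path.join('cifar10', subset, classes[int(label[0])])
        if not os.path.exists(class_dir):
            os.makedirs(class_dir)
        # File names are arbitrary; cv2 writes BGR, so convert from RGB first
        cv2.imwrite(os.path.join(class_dir, str(idx) + '.png'),
                    cv2.cvtColor(img, cv2.COLOR_RGB2BGR))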
58 changes: 58 additions & 0 deletions CNN_test/predict.py
@@ -0,0 +1,58 @@
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
from keras.applications.vgg16 import VGG16
from keras.models import Sequential, Model
from keras.layers import Input, Dropout, Flatten, Dense
from keras.preprocessing import image

# CIFAR10 class labels
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck']
nb_classes = len(classes)

# CIFAR10 image dimensions (height: 32px, width: 32px, channels: 3)
img_height, img_width = 32, 32
channels = 3

# VGG16
input_tensor = Input(shape=(img_height, img_width, channels))
vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)

# Fully connected (FC) classification head
fc = Sequential()
fc.add(Flatten(input_shape=vgg16.output_shape[1:]))
fc.add(Dense(256, activation='relu'))
fc.add(Dropout(0.5))
fc.add(Dense(nb_classes, activation='softmax'))

# Connect the FC head to the VGG16 base
model = Model(input=vgg16.input, output=fc(vgg16.output))

# Load the trained weights (fine-tuned with finetuning.py)
model.load_weights(os.path.join('cifar10\\results', 'finetuning.h5'))
# model.load_weights(os.path.join('cifar10\\results', 'finetuning_noise.h5'))

# 10-class classification, so use categorical_crossentropy as the loss
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

if len(sys.argv) < 2:
    print('usage: python predict.py <test image file>')
    sys.exit(1)

# Convert the image into a 4D tensor
img = image.load_img(sys.argv[1], target_size=(img_height, img_width))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = x / 255.0

# Predict the class (output the top 3)
pred = model.predict(x)[0]
top = 3
top_indices = pred.argsort()[-top:][::-1]
result = [(classes[i], pred[i]) for i in top_indices]
for x in result:
    print(x)
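A usage sketch for predict.py (the image path is hypothetical): running

python predict.py cifar10\test_image\cat\0.png

prints the top-3 predictions as (label, probability) tuples in descending order of probability, e.g. ('cat', 0.91) with the number purely illustrative.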
146 changes: 146 additions & 0 deletions CNN_test/search_adv.py
@@ -0,0 +1,146 @@
# -*- coding: utf-8 -*-
import os
import random
import datetime
import numpy as np
import cv2
from progressbar import ProgressBar
from keras.applications.vgg16 import VGG16
from keras.models import Sequential, Model
from keras.layers import Input, Dropout, Flatten, Dense
from keras.preprocessing import image

# Target class under test
CLASS = 'cat'
TEST_DIR = 'cifar10\\test_image\\' + CLASS + '\\'
ADV_DIR = 'results\\' + CLASS + '\\'
TEST_IMAGES = 1000

# Make sure the output directory for perturbed images exists
if not os.path.exists(ADV_DIR):
    os.makedirs(ADV_DIR)


# Perturb randomly selected pixels
def random_adv(perturbation_filename, p):
    # Load the image
    img = cv2.imread(TEST_DIR + perturbation_filename, cv2.IMREAD_UNCHANGED)

    # Get the image size
    if len(img.shape) == 3:
        height, width, channel = img.shape[:3]
    else:
        height, width = img.shape[:2]

    for i in range(p):
        # Randomly select a pixel to perturb
        x = random.randint(0, width - 1)
        y = random.randint(0, height - 1)

        # Perturb the pixel value: push it to black or white
        pixel = img[y, x]
        average = sum(pixel) / len(pixel)

        if average < 128:
            img[y, x] = [0, 0, 0]
        else:
            img[y, x] = [255, 255, 255]

    # Save the perturbed image
    adv_filename = 'adv_' + perturbation_filename
    cv2.imwrite(ADV_DIR + adv_filename, img)

    return adv_filename


# Predict the class of a single image
def predict(target_filename, height, width):
    # Convert the image into a 4D tensor
    img = image.load_img(target_filename, target_size=(height, width))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = x / 255.0

    # Predict the class (top 1)
    pred = model.predict(x)[0]
    top = 1
    top_indices = pred.argsort()[-top:][::-1]
    result = [(classes[i], pred[i]) for i in top_indices]

    '''
    result[0][0] : predicted label
    result[0][1] : prediction probability
    '''
    return result

if __name__ == "__main__":
# CIFAR10のクラス一覧
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
nb_classes = len(classes)

# CIFAR10の画像情報(x:32pixel, y:32pixel, channel:3)
img_height, img_width = 32, 32
channels = 3

# VGG16
input_tensor = Input(shape=(img_height, img_width, channels))
vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)

# FC
fc = Sequential()
fc.add(Flatten(input_shape=vgg16.output_shape[1:]))
fc.add(Dense(256, activation='relu'))
fc.add(Dropout(0.5))
fc.add(Dense(nb_classes, activation='softmax'))

# VGG16とFCを接続
model = Model(input=vgg16.input, output=fc(vgg16.output))

# 学習済みの重み(finetuning.pyでFine-tuningした重み)をロード
model.load_weights(os.path.join('cifar10\\results', 'finetuning.h5'))
# model.load_weights(os.path.join('cifar10\\results', 'finetuning_noise.h5'))

# 多クラス(=10)分類なのでloss=categorical_crossentropyとする
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])

# 細工するピクセル数(p)、1画像たりの細工試行最大回数(p_max_num)
p = 1
p_max_num = 100

# 検証対象の画像一覧
test_files = os.listdir(TEST_DIR)

# 検証結果保存用のファイル
today_detail = datetime.datetime.today()
test_result = str(today_detail.strftime("%Y%m%d_%H%M%S")) + '.txt'
f = open(test_result, 'w')
f.write('Idx\tnormal image\tadversarial image\tmisclassify\tpredict\n')

# 細工処理
count = 0
progress_count = 1
progress = ProgressBar(min_value=1, max_value=TEST_IMAGES)
for filename in test_files:
# 進捗表示
progress.update(progress_count)
progress_count += 1

# 細工前画像のクラス分類予測
result = predict(TEST_DIR + filename, img_height, img_width)

# 正しく分類できない画像は検証しない
if CLASS != result[0][0]:
continue

for i in range(p_max_num):
p_filename = random_adv(filename, p)

# 細工画像のクラス分類予測
result = predict(ADV_DIR + p_filename, img_height, img_width)

# 誤分類した場合はログに書き込んで次の画像の検証を行う
if CLASS != result[0][0]:
count += 1
f.write(str(count)+'\t'+filename+'\t'+p_filename+'\t'+result[0][0]+'\t'+str(result[0][1])+'\n')
break
print('\n')
f.close()
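search_adv.py writes a tab-separated log with one row per successfully misclassified image (Idx, normal image, adversarial image, misclassify, predict). A minimal summary sketch (not part of this commit), assuming a hypothetical log file name; the script actually names the file after the current timestamp:

# -*- coding: utf-8 -*-
from collections import Counter

result_log = '20170501_120000.txt'  # hypothetical name; replace with the generated file

with open(result_log) as f:
    rows = [line.rstrip('\n').split('\t') for line in f][1:]  # skip the header row

# Each row corresponds to one test image that was misclassified after perturbation
print('misclassified images: %d' % len(rows))

# Distribution of the wrong labels the perturbed images were pushed into
for label, n in Counter(row[3] for row in rows).most_common():
    print('%s\t%d' % (label, n))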
