I downloaded a DL model from Kaggle (credited to JeofuHuang) that was built with Keras for an 8-class classification task, so obviously its top layer is Dense(8). Since I want to use it as a 2-class classifier, I changed the top layer from Dense(8) to Dense(2); I thought that would be enough. However, the terminal reports an error when I run the script. Thanks for your patience and help. Here is the error:
Error when checking model target: expected dense_3 to have shape (None, 2) but got array with shape (1333L, 8L)
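If I read the error correctly, it is about the target (label) array rather than the input images: a Dense(2) softmax head trained with categorical_crossentropy expects one-hot targets of shape (None, 2), but my target array has 8 columns. A minimal standalone check of that shape contract (the labels below are made up purely for illustration):

import numpy as np
from keras.utils import np_utils

labels = np.array([0, 1, 1, 0])                  # hypothetical 2-class labels
print(np_utils.to_categorical(labels, 2).shape)  # (4, 2) -> what Dense(2) expects
print(np_utils.to_categorical(labels, 8).shape)  # (4, 8) -> the shape the error reports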
And here is the code (it is a bit long):
# %load kaggle_dog_cat_classifier.py
__author__ = 'JeofuHuang: https://www.kaggle.com/jeofuhuang'
import numpy as np
np.random.seed(2016)
import os
import glob
import cv2
import datetime
import pandas as pd
import time
import warnings
warnings.filterwarnings("ignore")
from sklearn.cross_validation import KFold
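# NOTE: sklearn.cross_validation is the old (pre-0.18) module; recent scikit-learn
# versions moved KFold to sklearn.model_selection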
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from sklearn.metrics import log_loss
from keras import __version__ as keras_version
def get_im_cv2(path):
    img = cv2.imread(path)
    # interpolation must be passed as a keyword; positionally it would fill the dst argument
    resized = cv2.resize(img, (32, 32), interpolation=cv2.INTER_LINEAR)
    return resized
def load_train():
    X_train = []
    X_train_id = []
    y_train = []
    start_time = time.time()
    print('Read train images')
    folders = ['dog', 'cat']
    for fld in folders:
        index = folders.index(fld)
        print('Load folder {} (Index: {})'.format(fld, index))
        path = os.path.join('.', 'input', 'train', fld, '*.jpg')
        files = glob.glob(path)
        for fl in files:
            flbase = os.path.basename(fl)
            img = get_im_cv2(fl)
            X_train.append(img)
            X_train_id.append(flbase)
            y_train.append(index)
    print('Read train data time: {} seconds'.format(round(time.time() - start_time, 2)))
    return X_train, y_train, X_train_id
def load_test():
    path = os.path.join('.', 'input', 'test', '*.jpg')
    files = sorted(glob.glob(path))
    X_test = []
    X_test_id = []
    for fl in files:
        flbase = os.path.basename(fl)
        img = get_im_cv2(fl)
        X_test.append(img)
        X_test_id.append(flbase)
    return X_test, X_test_id
def create_submission(predictions, test_id, info):
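    # Column order matches the label indices assigned in load_train() (dog=0, cat=1)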
    result1 = pd.DataFrame(predictions, columns=['dog', 'cat'])
    result1.loc[:, 'image'] = pd.Series(test_id, index=result1.index)
    now = datetime.datetime.now()
    sub_file = 'submission_' + info + '_' + str(now.strftime("%Y-%m-%d-%H-%M")) + '.csv'
    result1.to_csv(sub_file, index=False)
def read_and_normalize_train_data():
    train_data, train_target, train_id = load_train()
    print('Convert to numpy...')
    train_data = np.array(train_data, dtype=np.uint8)
    train_target = np.array(train_target, dtype=np.uint8)
    print('Reshape...')
    train_data = train_data.transpose((0, 3, 1, 2))
    print('Convert to float...')
    train_data = train_data.astype('float32')
    train_data = train_data/255
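    # NOTE: this one-hot encodes the targets with 8 classes, while the model's head
    # is Dense(2) -- this produces the (1333, 8) target array mentioned in the error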
    train_target = np_utils.to_categorical(train_target, 8)
    print('Train shape:', train_data.shape)
    print(train_data.shape[0], 'train samples')
    return train_data, train_target, train_id
def read_and_normalize_test_data():
    start_time = time.time()
    test_data, test_id = load_test()
    test_data = np.array(test_data, dtype=np.uint8)
    test_data = test_data.transpose((0, 3, 1, 2))
    test_data = test_data.astype('float32')
    test_data = test_data/255
    print('Test shape:', test_data.shape)
    print(test_data.shape[0], 'test samples')
    print('Read and process test data time: {} seconds'.format(round(time.time() - start_time, 2)))
    return test_data, test_id
def dict_to_list(d):
    ret = []
    for i in d.items():
        ret.append(i[1])
    return ret
def merge_several_folds_mean(data, nfolds):
    a = np.array(data[0])
    for i in range(1, nfolds):
        a += np.array(data[i])
    a /= nfolds
    return a.tolist()
def create_model():
    model = Sequential()
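    # Theano-style channel-first input: (channels, height, width) = (3, 32, 32)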
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 32, 32), dim_ordering='th'))
    model.add(Convolution2D(4, 3, 3, activation='relu', dim_ordering='th'))
    model.add(ZeroPadding2D((1, 1), dim_ordering='th'))
    model.add(Convolution2D(4, 3, 3, activation='relu', dim_ordering='th'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering='th'))
    model.add(ZeroPadding2D((1, 1), dim_ordering='th'))
    model.add(Convolution2D(8, 3, 3, activation='relu', dim_ordering='th'))
    model.add(ZeroPadding2D((1, 1), dim_ordering='th'))
    model.add(Convolution2D(8, 3, 3, activation='relu', dim_ordering='th'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering='th'))
    model.add(Flatten())
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.5))
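    # Top layer changed from the original Dense(8) to Dense(2) for the 2-class task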
    model.add(Dense(2, activation='softmax'))
    sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    return model
def get_validation_predictions(train_data, predictions_valid):
    pv = []
    for i in range(len(train_data)):
        pv.append(predictions_valid[i])
    return pv
def run_cross_validation_create_models(nfolds=10):
    # input image dimensions
    batch_size = 16
    nb_epoch = 30
    random_state = 51
    train_data, train_target, train_id = read_and_normalize_train_data()
    yfull_train = dict()
    kf = KFold(len(train_id), n_folds=nfolds, shuffle=True,
               random_state=random_state)
    num_fold = 0
    sum_score = 0
    models = []
    for train_index, test_index in kf:
        model = create_model()
        X_train = train_data[train_index]
        Y_train = train_target[train_index]
        X_valid = train_data[test_index]
        Y_valid = train_target[test_index]
        num_fold += 1
        print('Start KFold number {} from {}'.format(num_fold, nfolds))
        print('Split train: ', len(X_train), len(Y_train))
        print('Split valid: ', len(X_valid), len(Y_valid))
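        # Stop training once validation loss has not improved for 3 epochs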
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=3, verbose=0),
        ]
        model.fit(X_train, Y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, shuffle=True, verbose=2,
                  validation_data=(X_valid, Y_valid), callbacks=callbacks)
        predictions_valid = model.predict(X_valid.astype('float32'),
                                          batch_size=batch_size, verbose=2)
        score = log_loss(Y_valid, predictions_valid)
        print('Score log_loss: ', score)
        sum_score += score*len(test_index)
        # Store valid predictions
        for i in range(len(test_index)):
            yfull_train[test_index[i]] = predictions_valid[i]
        models.append(model)
    score = sum_score/len(train_data)
    print("Log_loss train independent avg: ", score)
    info_string = 'loss_' + str(score) + '_folds_' + str(nfolds) + '_ep_' + str(nb_epoch)
    return info_string, models
def run_cross_validation_process_test(info_string, models):
    batch_size = 16
    num_fold = 0
    yfull_test = []
    test_id = []
    nfolds = len(models)
    for i in range(nfolds):
        model = models[i]
        num_fold += 1
        print('Start KFold number {} from {}'.format(num_fold, nfolds))
        test_data, test_id = read_and_normalize_test_data()
        test_prediction = model.predict(test_data, batch_size=batch_size,
                                        verbose=2)
        yfull_test.append(test_prediction)
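    # Average the per-fold predictions before writing the submission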
    test_res = merge_several_folds_mean(yfull_test, nfolds)
    info_string = 'loss_' + info_string + '_folds_' + str(nfolds)
    create_submission(test_res, test_id, info_string)
if __name__ == '__main__':
    print('Keras version: {}'.format(keras_version))
    num_folds = 3
    info_string, models = run_cross_validation_create_models(num_folds)
    run_cross_validation_process_test(info_string, models)