The following code is meant to identify 5 image classes using Keras and Python with the TensorFlow backend. I have used ImageDataGenerator, but when I run it, training starts and after a while the following error occurs.
How can I solve this?
Training Step: 127 | total loss: 0.01171 | time: 32.772s
| Adam | epoch: 005 | loss: 0.01171 - acc: 0.9971 -- iter: 1536/1550
Training Step: 128 | total loss: 0.01055 | time: 36.283s
| Adam | epoch: 005 | loss: 0.01055 - acc: 0.9974 | val_loss: 3.05709 - val_acc: 0.6500 -- iter: 1550/1550
--
Found 0 images belonging to 0 classes.
Found 0 images belonging to 0 classes.
Traceback (most recent call last):

  File "", line 1, in <module>
    runfile('D:/My Projects/FinalProject_Vr_01.2/CNN_IMGDG_stackoverflow.py', wdir='D:/My Projects/FinalProject_Vr_01.2')

  File "C:\Users\Asus\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 704, in runfile
    execfile(filename, namespace)

  File "C:\Users\Asus\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 108, in execfile
    exec(compile(f.read(), filename, 'exec'), namespace)

  File "D:/My Projects/FinalProject_Vr_01.2/CNN_IMGDG_stackoverflow.py", line 191, in <module>
    model.fit_generator(train_generator,

AttributeError: 'DNN' object has no attribute 'fit_generator'
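Here is my full script: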
import cv2
import numpy as np
import os
from random import shuffle
from tqdm import tqdm
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
TRAIN_DIR = 'train'
VALID_DIR = 'validate'
TEST_DIR = 'test'
IMG_SIZE = 128
LR = 1e-3
train_samples = 1500
valdate_samples = 250
epochs = 5
batch_size = 10
MODEL_NAME = 'snakes-{}-{}.model'.format(LR, '2conv-basic')
def label_img(img):
    # The class letter is the first character of the part of the filename just
    # before the extension (split on '.').
    print("\nImage = ", img)
    print("\n", img.split('.')[-2])
    temp_name = img.split('.')[-2]
    print("\n", temp_name[:1])
    temp_name = temp_name[:1]
    word_label = temp_name
    # One-hot encode the five classes A-E
    if word_label == 'A': return [0,0,0,0,1]
    elif word_label == 'B': return [0,0,0,1,0]
    elif word_label == 'C': return [0,0,1,0,0]
    elif word_label == 'D': return [0,1,0,0,0]
    elif word_label == 'E': return [1,0,0,0,0]
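# For example, a file named 'A1.jpg' (a hypothetical name, but one matching the
# parsing above) gives temp_name 'A1', word_label 'A', and the label [0,0,0,0,1].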
def create_train_data():
    # Read each training image as grayscale, resize it to IMG_SIZE x IMG_SIZE,
    # pair it with its one-hot label, then shuffle and cache to disk.
    training_data = []
    for img in tqdm(os.listdir(TRAIN_DIR)):
        label = label_img(img)
        path = os.path.join(TRAIN_DIR, img)
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        training_data.append([np.array(img), np.array(label)])
    shuffle(training_data)
    np.save('train_data.npy', training_data)
    return training_data
def create_validate_data():
    validating_data = []
    for img in tqdm(os.listdir(VALID_DIR)):
        label = label_img(img)
        path = os.path.join(VALID_DIR, img)
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        validating_data.append([np.array(img), np.array(label)])
    shuffle(validating_data)
    np.save('validate_data.npy', validating_data)
    return validating_data
def process_test_data():
    testing_data = []
    for img in tqdm(os.listdir(TEST_DIR)):
        path = os.path.join(TEST_DIR, img)
        img_num = img.split('.')[0]
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        testing_data.append([np.array(img), img_num])
    shuffle(testing_data)
    np.save('test_data.npy', testing_data)
    return testing_data
train_data = create_train_data()
validate_data = create_validate_data()
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tensorflow as tf
tf.reset_default_graph()
convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')
convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = conv_2d(convnet, 128, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 5, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(convnet, tensorboard_dir='log')
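# Note: tflearn.DNN exposes fit / predict / save / load; it is not a Keras model,
# so Keras-only methods such as fit_generator do not exist on it.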
if os.path.exists('{}.meta'.format(MODEL_NAME)):
    model.load(MODEL_NAME)
    print('model loaded!')
train = train_data[:]
validate = validate_data[:]
X = np.array([i[0] for i in train]).reshape(-1,IMG_SIZE,IMG_SIZE,1)
Y = [i[1] for i in train]
validate_x = np.array([i[0] for i in validate]).reshape(-1,IMG_SIZE,IMG_SIZE,1)
validate_y = [i[1] for i in validate]
model.fit({'input': X}, {'targets': Y}, n_epoch=epochs,
          validation_set=({'input': validate_x}, {'targets': validate_y}),
          snapshot_step=500, show_metric=True, run_id=MODEL_NAME)
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
validation_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory('train',
                                                    target_size=(IMG_SIZE, IMG_SIZE),
                                                    batch_size=batch_size,
                                                    class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory('validate',
                                                              target_size=(IMG_SIZE, IMG_SIZE),
                                                              batch_size=batch_size,
                                                              class_mode='categorical')
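# Note: flow_from_directory expects one sub-directory per class, e.g. (assumed layout)
#   train/A/..., train/B/..., ..., validate/A/..., ..., validate/E/...
# The "Found 0 images belonging to 0 classes." lines in the output suggest that
# 'train' and 'validate' do not currently have that structure.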
model.fit_generator(train_generator,
                    steps_per_epoch=25,
                    epochs=epochs,
                    validation_data=validation_generator,
                    validation_steps=25)
model.save(MODEL_NAME)
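For context, here is a minimal sketch of what I understand fit_generator expects: a Keras model rather than a tflearn DNN. The layer sizes below are assumptions chosen only to roughly mirror the tflearn network above, not my actual code.

# Sketch only (assumptions: a Keras Sequential model and the default RGB images
# produced by flow_from_directory). fit_generator is defined on Keras models;
# tflearn.DNN has no such method.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout

keras_model = Sequential([
    Conv2D(32, (5, 5), activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 3)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(64, (5, 5), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(1024, activation='relu'),
    # Keras Dropout takes a drop probability (tflearn's 0.8 is a keep probability)
    Dropout(0.2),
    Dense(5, activation='softmax'),
])
keras_model.compile(optimizer='adam',
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])

keras_model.fit_generator(train_generator,
                          steps_per_epoch=train_samples // batch_size,
                          epochs=epochs,
                          validation_data=validation_generator,
                          validation_steps=valdate_samples // batch_size)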