I am trying to classify a set of bee images into two classes, bumble bee and honey bee, with the result written to a CSV file like this:
id,bumble_bee,honey_bee
20000,0.75,0.25
I have a running model, but the accuracy is very low. I've tried a bunch of different things, such as adding a pre-trained base model like VGG16 or InceptionV3, adjusting the number of epochs, and changing the optimizer type, and I haven't noticed much difference: all of my changes still result in an accuracy of around 70-79%.
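For reference, the VGG16 attempt swapped the convolutional stack below for a frozen pre-trained base, roughly like this (a sketch from memory, not the exact script I ran):

from keras.applications import VGG16
from keras.models import Model
from keras.layers import Dense, Dropout, Flatten

# Frozen ImageNet-pretrained base; only the new classifier head trains.
base_model = VGG16(weights='imagenet', include_top=False,
                   input_shape=(200, 200, 3))
for layer in base_model.layers:
    layer.trainable = False

x = Flatten()(base_model.output)
x = Dense(64, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(2, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)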
How can I increase the accuracy of my model?
Here is my code:
import numpy as np

from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras import optimizers
# dimensions of our images.
img_width, img_height = 200, 200
train_data_dir = 'data/train/'
validation_data_dir = 'data/validation/'
nb_train_samples = 2978
nb_validation_samples = 991
epochs = 50
batch_size = 25
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
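# Simple CNN: three conv/pool blocks feeding a small dense classifier head.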
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)

model.save_weights('thirdtry.h5')

# pred_images holds the preprocessed test images; it is built elsewhere in the script
pred = model.predict(pred_images)
np.savetxt('resultsfrom3no.csv', pred)
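The np.savetxt call only dumps the raw probabilities, so to get the id,bumble_bee,honey_bee format I described above I write the file roughly like this (test_ids is illustrative; in my script it comes from the test filenames):

# test_ids is assumed to hold the image ids in the same order as pred_images.
# flow_from_directory assigns class indices alphabetically, so column 0 of
# pred is bumble_bee and column 1 is honey_bee.
with open('resultsfrom3no.csv', 'w') as f:
    f.write('id,bumble_bee,honey_bee\n')
    for image_id, (p_bumble, p_honey) in zip(test_ids, pred):
        f.write('%s,%.6f,%.6f\n' % (image_id, p_bumble, p_honey))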
Here is an example of its output:
Found 2978 images belonging to 2 classes.
Found 991 images belonging to 2 classes.
Epoch 1/50
119/119 [==============================] - 238s 2s/step - loss: 0.5570 - acc: 0.7697 - val_loss: 0.5275 - val_acc: 0.7908
Epoch 2/50
119/119 [==============================] - 237s 2s/step - loss: 0.5337 - acc: 0.7894 - val_loss: 0.5270 - val_acc: 0.7908
Epoch 3/50
119/119 [==============================] - 282s 2s/step - loss: 0.5299 - acc: 0.7939 - val_loss: 0.5215 - val_acc: 0.7908