
I started learning about neural networks and decided to follow this Google code lab on convolutional neural networks, but I used the CIFAR-10 dataset for image classification instead. The problem is that I get very low accuracy and high cross-entropy.

After training, the accuracy is around 0.1 (never more than 0.2) and the cross-entropy doesn't go below 230. I didn't use batch normalization or dropout, but I would still expect better accuracy than that.

My code:

import tensorflow as tf
import numpy as np
import matplotlib as mpt
import math
# Just disables the warning, doesn't enable AVX/FMA
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

def unpickle(file):
    # Standard CIFAR-10 loader: returns a dict with the raw image rows under
    # b'data' and the class indices under b'labels'.
    import pickle
    with open(file, 'rb') as fo:
        dict = pickle.load(fo, encoding='bytes')
    return dict

def makeMiniBatch(dictionary, start, number):
    # Copy `number` consecutive rows and labels from the batch, starting at `start`.
    matrix = np.zeros([number, 3072], dtype=np.int)
    labels = np.zeros([number], dtype=np.int)
    for i in range(0, number):
        matrix[i] = dictionary[b'data'][i + start]
        labels[i] = dictionary[b'labels'][i + start]
    return matrix, labels

def formatLabels(labele):
    # One-hot encode a mini-batch of 100 labels into a [100, 10] matrix.
    lab = np.zeros([100, 10])
    for i in range(0, 100):
        lab[i][labele[i]] = 1
    return lab

def formatData(values):
    # Each CIFAR row stores the red, green and blue planes (1024 values each)
    # back to back; stack them into a 32x32x3 image.
    temp = np.zeros([100, 32, 32, 3])
    for i in range(0, 100):
        im_r = values[i][0:1024].reshape(32, 32)
        im_g = values[i][1024:2048].reshape(32, 32)
        im_b = values[i][2048:].reshape(32, 32)
        temp[i] = np.dstack((im_r, im_g, im_b))
    return temp

batch='D:/cifar-10-python/cifar-10-batches-py/data_batch_1'
data=unpickle(batch)
tf.set_random_seed(0)

K = 8
L = 16
M = 32
N = 200

X_=tf.placeholder(tf.float32,[None,32,32,3])

Y_=tf.placeholder(tf.float32,[None,10])

lr = tf.placeholder(tf.float32)



W1 = tf.Variable(tf.truncated_normal([5, 5, 3, K], stddev=0.1))
B1 = tf.Variable(tf.ones([K])/10)
W2 = tf.Variable(tf.truncated_normal([5, 5, K, L], stddev=0.1))
B2 = tf.Variable(tf.ones([L])/10)
W3 = tf.Variable(tf.truncated_normal([4, 4, L, M], stddev=0.1))
B3 = tf.Variable(tf.ones([M])/10)

W4 = tf.Variable(tf.truncated_normal([8 * 8 * M, N], stddev=0.1))
B4 = tf.Variable(tf.ones([N])/10)
W5 = tf.Variable(tf.truncated_normal([N, 10], stddev=0.1))
B5 = tf.Variable(tf.ones([10])/10)


stride = 1

Y1_ = tf.nn.conv2d(X_, W1, strides=[1, stride, stride, 1], padding='SAME') + B1
Y1_max=tf.nn.max_pool(Y1_,ksize=[1,2,2,1],strides=[1,1,1,1],padding='SAME')
Y1 = tf.nn.relu(Y1_max)

Y2_ = tf.nn.conv2d(Y1, W2, strides=[1, stride, stride, 1], padding='SAME') + B2
Y2_max=tf.nn.max_pool(Y2_,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
Y2 = tf.nn.relu(Y2_max)

Y3_ = tf.nn.conv2d(Y2, W3, strides=[1, stride, stride, 1], padding='SAME') + B3
Y3_max=tf.nn.max_pool(Y3_,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
Y3 = tf.nn.relu(Y3_max)

YY = tf.reshape(Y3, shape=[-1, 8 * 8 * M])
Y4 = tf.nn.relu(tf.matmul(YY, W4) + B4)


Ylogits = tf.matmul(Y4, W5) + B5
Y = tf.nn.softmax(Ylogits)

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, 
                                                        labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100

correct_prediction=tf.equal(tf.argmax(Y,1),tf.argmax(Y_,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

init=tf.global_variables_initializer()
sess=tf.Session()
sess.run(init)

def training_step(i):
    global data
    val,lab=makeMiniBatch(data,i * 100,100)
    Y_labels=formatLabels(lab)
    X_data=formatData(val)
    max_learning_rate = 0.003
    min_learning_rate = 0.0001
    decay_speed = 2000.0
    learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i / decay_speed)
    _, a, c = sess.run([train_step, accuracy, cross_entropy],
                       feed_dict={X_: X_data, Y_: Y_labels, lr: learning_rate})
    print("Accuracy: ",a)
    print("Cross-Entropy",c)

for i in range(0, 100):
    training_step(i)
Things look reasonable to me here; nothing obvious jumps out at me. I would check your input and make sure something isn't terribly mangled by accident; use scipy.misc.imshow to visualize a few images to validate. Also, do a quick check that your learning rate is calculated correctly. It looks right, but double check; a bad number there would certainly cause this. – David Parks
See this answer. Your network seems fine. Sometimes the only necessary thing is input data normalization. – Maxim
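
For reference, a quick way to run both of those checks, as a minimal sketch: it reuses the unpickle, makeMiniBatch and formatData helpers defined in the question, and uses matplotlib.pyplot.imshow rather than the deprecated scipy.misc.imshow.

import matplotlib.pyplot as plt

check = unpickle('D:/cifar-10-python/cifar-10-batches-py/data_batch_1')
val, lab = makeMiniBatch(check, 0, 100)
images = formatData(val)

# Show a few images to confirm the channels/rows are not scrambled.
for k in range(3):
    plt.imshow(images[k].astype('uint8'))
    plt.title('label: {}'.format(lab[k]))
    plt.show()

# Inspect the value range: raw CIFAR pixels are 0..255, so the network
# input should be rescaled (e.g. divided by 255) before training.
print('min:', images.min(), 'max:', images.max())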

1 Answer


Thanks Maxim, the normalization worked. After 30 seconds of training, the network reached an accuracy of 40%.

The change I made to my code is the following:

def formatDatanew2(values):
    # Reshape the flat rows to (100, 3, 32, 32), move the channel axis last to get
    # (100, 32, 32, 3), and rescale the pixel values from 0..255 to 0..1.
    ret = values.reshape(100, 3, 32, 32).transpose(0, 2, 3, 1).astype("float32")
    ret /= 255
    return ret
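
For context, here is how that plugs into the training step from the question; everything stays as before except that X_data now comes from formatDatanew2 (normalized input) instead of formatData:

def training_step(i):
    global data
    val, lab = makeMiniBatch(data, i * 100, 100)
    Y_labels = formatLabels(lab)
    X_data = formatDatanew2(val)  # was: formatData(val)
    max_learning_rate = 0.003
    min_learning_rate = 0.0001
    decay_speed = 2000.0
    learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i / decay_speed)
    _, a, c = sess.run([train_step, accuracy, cross_entropy],
                       feed_dict={X_: X_data, Y_: Y_labels, lr: learning_rate})
    print("Accuracy:", a)
    print("Cross-Entropy:", c)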