I have defined a neural network with a single hidden layer and an output layer. My data is in CSV format, which I have converted to TFRecord format. Using the tf.data API, I batch it and feed it as follows (a rough sketch of the input pipeline is shown below the list):
- Features: 32 (batch size) x 24 (feature columns)
- Labels: 32 (batch size) x 4 (one-hot encoded)
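For context, the input pipeline looks roughly like this; the filename and the keys 'features'/'label' are placeholders for whatever the TFRecords were actually written with:

import tensorflow as tf

feature_spec = {
    'features': tf.FixedLenFeature([24], tf.float32),
    'label': tf.FixedLenFeature([4], tf.float32),
}

def _parse(serialized):
    # Decode one serialized tf.train.Example into (features, label) tensors.
    parsed = tf.parse_single_example(serialized, feature_spec)
    return parsed['features'], parsed['label']

dataset = tf.data.TFRecordDataset('train.tfrecord').map(_parse).batch(32)
iterator = dataset.make_one_shot_iterator()
Features, Label = iterator.get_next()  # shapes (32, 24) and (32, 4)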
While running the graph, it throws a ValueError. Here is the traceback:
File "dummy.py", line 60, in train_summary, _ = sess.run([trainStep],feed_dict = {ground_truth : Label, features :Features})
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 895, in run run_metadata_ptr)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1104, in _run % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (32, 4) for Tensor u'softmax_cross_entropy_with_logits/Reshape_2:0', which has shape '(?,)'
Here is minimal code that reproduces the error:
import tensorflow as tf
import numpy as np
num_columns=24
num_classes=4
train_steps = 2
def model():
    ground_truth_input = tf.placeholder(tf.float32, [None, num_classes])  # one-hot encoded labels with depth 4
    bottleneck_input = tf.placeholder(tf.float32, [None, num_columns])    # num_columns=24 keypoint features

    # fully connected 1: 24 (num input features) x 100
    initial_value = tf.truncated_normal([num_columns, 100], stddev=0.001)
    layer1_weights = tf.Variable(initial_value, name='hidden1_weights')
    layer1_biases = tf.Variable(tf.zeros([100]), name='hidden1_biases')
    logits_hidden1 = tf.matmul(bottleneck_input, layer1_weights) + layer1_biases
    inp_activated = tf.nn.relu(logits_hidden1, name='hidden1_activation')

    # fully connected 2: 100 x 4 (num_classes)
    initial_value = tf.truncated_normal([100, num_classes], stddev=0.001)
    layer_weights = tf.Variable(initial_value, name='final_weights')
    layer_biases = tf.Variable(tf.zeros([num_classes]), name='final_biases')
    logits = tf.matmul(inp_activated, layer_weights) + layer_biases

    # loss: per-example cross-entropy, shape (batch_size,)
    loss_mean = tf.nn.softmax_cross_entropy_with_logits_v2(labels=ground_truth_input, logits=logits)

    with tf.name_scope('train'):
        optimizer = tf.train.MomentumOptimizer(learning_rate=0.1, use_nesterov=True, momentum=0.9)
        train_op = optimizer.minimize(loss_mean, global_step=tf.train.get_global_step())

    with tf.name_scope('SoftMax_Layer'):
        final_tensor = tf.nn.softmax(logits, name='Softmax')

    return train_op, ground_truth_input, bottleneck_input, loss_mean
trainStep, cross_entropy, features, ground_truth = model()
with tf.Session() as sess:
    for i in range(train_steps):
        Label = np.eye(4)[np.random.choice(4, 32)]  # (32, 4) one-hot labels
        Features = np.random.rand(32, 24)           # (32, 24) feature batch
        train_summary, _ = sess.run([trainStep],feed_dict = {ground_truth : Label, features :Features})