
Here is my code:

# Model parameters: W and b
# tf.reset_default_graph()

W = tf.get_variable("weight",shape = [784, 10], dtype=tf.float32)
b = tf.get_variable("b", shape=(784,10), dtype= tf.float32)

input_X = tf.placeholder('float32', shape = (None,10)) 
input_y =  tf.placeholder('float32', [784, 10]) 

logits = W*input_X + b

probas = tf.nn.softmax(logits) 

classes = tf.argmax(probas) 

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = input_y, logits = logits))


step = tf.train.AdamOptimizer(loss)

s.run(tf.global_variables_initializer())

BATCH_SIZE = 512
EPOCHS = 40

# for logging the progress right here in Jupyter (for those who don't have TensorBoard)
simpleTrainingCurves = matplotlib_utils.SimpleTrainingCurves("cross-entropy", "accuracy")

for epoch in range(EPOCHS):  # we finish an epoch when we've looked at all training samples
    
    batch_losses = []
    for batch_start in range(0, X_train_flat.shape[0], BATCH_SIZE):  # data is already shuffled
        _, batch_loss = s.run([step, loss], {input_X: X_train_flat[batch_start:batch_start+BATCH_SIZE], 
                                             input_y: y_train_oh[batch_start:batch_start+BATCH_SIZE]})
        # collect batch losses, this is almost free as we need a forward pass for backprop anyway
        batch_losses.append(batch_loss)

    train_loss = np.mean(batch_losses)
    val_loss = s.run(loss, {input_X: X_val_flat, input_y: y_val_oh})  # this part is usually small
    train_accuracy = accuracy_score(y_train, s.run(classes, {input_X: X_train_flat}))  # this is slow and usually skipped
    valid_accuracy = accuracy_score(y_val, s.run(classes, {input_X: X_val_flat}))  
    simpleTrainingCurves.add(train_loss, val_loss, train_accuracy, valid_accuracy)

The error is:

/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    973               'Cannot feed value of shape %r for Tensor %r, '
    974               'which has shape %r'
--> 975               % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
    976           if not self.graph.is_feedable(subfeed_t):
    977             raise ValueError('Tensor %s may not be fed.' % subfeed_t)

ValueError: Cannot feed value of shape (512, 784) for Tensor 'Placeholder_2:0', which has shape '(?, 10)'
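That is, the batch being fed to input_X contains flattened 784-pixel images, while the placeholder was declared with shape (None, 10). A minimal sketch of the comparison (assuming the notebook's X_train_flat and the BATCH_SIZE defined above):

# Hypothetical shape check: compare what is declared with what is fed
print(input_X.get_shape())              # (?, 10)    -- declared above
print(X_train_flat[:BATCH_SIZE].shape)  # (512, 784) -- what feed_dict supplies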

I am new to TensorFlow and I am learning it through Coursera. Please help me.

Check the dimensions of X_train_flat and y_train_oh. The sizes of the resulting batches should match the placeholder dimensions. – Poe Dator
Because we cannot see line numbers like in an IDE, can you highlight the line of code that is actually throwing the error? – joe hoeller
I think I am stuck on setting the dimensions for weight, bias, input_X and input_y, and on how to make the loop work with the batch size. My idea is the code above, but when I run the loop, it goes wrong. – Phạm Tâm

1 Answer


The issue is with the shapes of the weights, the bias, and the input_X and input_y placeholders.
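For a batch of flattened 28x28 images, the shapes have to line up as (batch, 784) x (784, 10) -> (batch, 10), with one bias per class broadcast across the batch. A minimal NumPy sketch of that shape algebra (illustrative only, not part of the fix itself):

import numpy as np

x = np.zeros((512, 784))  # a batch of 512 flattened 28x28 images
w = np.zeros((784, 10))   # weights: inputs x classes
bias = np.zeros(10)       # one bias per class
out = x @ w + bias        # (512, 784) @ (784, 10) -> (512, 10)
print(out.shape)          # (512, 10)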

Below is the modified code that should resolve your issue.

W = tf.get_variable('Weight', shape=(784,10), dtype=tf.float32)
b = tf.get_variable('bias', shape=(10,), dtype=tf.float32)

input_X = tf.placeholder(shape=(None, 784), dtype=tf.float32)  # None is the batch size, 784 is the flattened input
input_y = tf.placeholder(shape=(None, 10), dtype=tf.float32)   # None is the batch size, 10 is the number of classes
logits = tf.matmul(input_X, W) + b
probas = tf.nn.softmax(logits)
classes = tf.argmax(probas, 1)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=input_y))
step = tf.train.RMSPropOptimizer(0.001).minimize(loss)
s.run(tf.global_variables_initializer())

BATCH_SIZE = 512
EPOCHS = 50

simpleTrainingCurves = matplotlib_utils.SimpleTrainingCurves("cross-entropy", "accuracy")

for epoch in range(EPOCHS):  
    batch_losses = []
    for batch_start in range(0, X_train_flat.shape[0], BATCH_SIZE):  # data is already shuffled
        _, batch_loss = s.run([step, loss], {input_X: X_train_flat[batch_start:batch_start+BATCH_SIZE], 
                                             input_y: y_train_oh[batch_start:batch_start+BATCH_SIZE]})
        batch_losses.append(batch_loss)

    train_loss = np.mean(batch_losses)
    val_loss = s.run(loss, {input_X: X_val_flat, input_y: y_val_oh}) 
    train_accuracy = accuracy_score(y_train, s.run(classes, {input_X: X_train_flat})) 
    valid_accuracy = accuracy_score(y_val, s.run(classes, {input_X: X_val_flat}))  
    simpleTrainingCurves.add(train_loss, val_loss, train_accuracy, valid_accuracy)
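
As a side note, the two accuracy_score calls pull every prediction back into Python each epoch. The same number can be computed inside the graph; a minimal sketch (the correct and accuracy tensors are names introduced here, not part of the course code):

# In-graph accuracy: compare argmax predictions with the one-hot labels
correct = tf.equal(classes, tf.argmax(input_y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

# e.g. valid_accuracy = s.run(accuracy, {input_X: X_val_flat, input_y: y_val_oh})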