I'm having trouble with TensorFlow. When I execute the following code
import tensorflow as tf
import input_data
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# TensorFlow graph input
X = tf.placeholder('float', [None, 784]) # MNIST image of shape 28 * 28 = 784
Y = tf.placeholder('float', [None, 10]) # 0-9 digit recognition => 10 classes
# set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Our hypothesis
activation = tf.add(tf.matmul(X, W), b) # linear model output (logits); softmax is applied inside the loss below
# Cost function: cross entropy
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=activation, logits=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam optimizer
I get the following error:
ValueError: No gradients provided for any variable, check your graph for ops that do not support gradients, between variables ['Tensor("Variable/read:0", shape=(784, 10), dtype=float32)', 'Tensor("Variable_1/read:0", shape=(10,), dtype=float32)'] and loss Tensor("Mean:0", shape=(), dtype=float32).
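For reference, here is a minimal sketch of how I understand tf.nn.softmax_cross_entropy_with_logits is meant to be wired up (my assumption: labels takes the one-hot target placeholder and logits takes the un-normalized model output), in case my argument order is part of the problem:

import tensorflow as tf

X = tf.placeholder('float', [None, 784])  # input images
Y = tf.placeholder('float', [None, 10])   # one-hot target labels
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
logits = tf.add(tf.matmul(X, W), b)       # un-normalized scores produced by the model

# cross-entropy loss: the op applies softmax to the logits internally
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits))
optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)

Is the error caused by how I am passing labels and logits, or is something else wrong with my graph?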