3
votes
# Linear-model parameters (initial guesses for slope and intercept).
m = tf.Variable(0.44)
b = tf.Variable(0.87)

# Sum-of-squared-errors loss over the training points.
# BUG FIX: the original accumulated (y - y_label)**2, subtracting the whole
# target array from each scalar y (and never using y_hat). That made `error`
# a NumPy value instead of a tf.Tensor, which is what caused the
# "'numpy.dtype' object has no attribute 'base_dtype'" AttributeError in
# optimizer.minimize. The residual must be y - y_hat.
error = 0
for x, y in zip(x_data, y_label):
    y_hat = m * x + b
    error += (y - y_hat) ** 2

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train = optimizer.minimize(error)

AttributeError Traceback (most recent call last) in () 1 optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.001) ----> 2 train = optimizer.minimize(error)

C:\Users\ASUS PC\Anaconda3\lib\site-packages\tensorflow\python\training\optimizer.py in minimize(self, loss, global_step, var_list, gate_gradients, aggregation_method, colocate_gradients_with_ops, name, grad_loss) 341 aggregation_method=aggregation_method, 342 colocate_gradients_with_ops=colocate_gradients_with_ops, --> 343 grad_loss=grad_loss) 344 345 vars_with_grad = [v for g, v in grads_and_vars if g is not None]

C:\Users\ASUS PC\Anaconda3\lib\site-packages\tensorflow\python\training\optimizer.py in compute_gradients(self, loss, var_list, gate_gradients, aggregation_method, colocate_gradients_with_ops, grad_loss) 392 "Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s" % 393 gate_gradients) --> 394 self._assert_valid_dtypes([loss]) 395 if grad_loss is not None: 396 self._assert_valid_dtypes([grad_loss])

C:\Users\ASUS PC\Anaconda3\lib\site-packages\tensorflow\python\training\optimizer.py in _assert_valid_dtypes(self, tensors) 541 valid_dtypes = self._valid_dtypes() 542 for t in tensors: --> 543 dtype = t.dtype.base_dtype 544 if dtype not in valid_dtypes: 545 raise ValueError(

AttributeError: 'numpy.dtype' object has no attribute 'base_dtype'

1

1 Answer

3
votes

Because optimizer.minimize accepts only a TensorFlow Tensor as the loss. In your loop, `error += (y - y_label)**2` subtracts the whole NumPy target array instead of the prediction `y_hat`, so `error` ends up as a NumPy value rather than a Tensor — hence the `'numpy.dtype' object has no attribute 'base_dtype'` error.

The rest of the code is not actually correct as well, it should look similar to:

# Placeholders for the training data fed in at each sess.run call.
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)

# Model parameters (initial guesses for slope and intercept).
m = tf.Variable(0.44)
b = tf.Variable(0.87)

# Linear model prediction.
y_hat = m * x + b

# Per-example squared error — a Tensor, which is what minimize requires.
error = tf.squared_difference(y, y_hat)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train = optimizer.minimize(error)

number_of_iterations = 100000
with tf.Session() as sess:
    # BUG FIX: variables must be initialized before the first run,
    # otherwise sess.run(train, ...) raises FailedPreconditionError.
    sess.run(tf.global_variables_initializer())
    for _ in range(number_of_iterations):  # range, not xrange: Python 3
        # BUG FIX: the feed dict was closed with '}}' (syntax error);
        # the call must end with ')'.
        sess.run(train, {x: x_data, y: y_label})
        # If you want to see the loss:
        # loss = sess.run(error, {x: x_data, y: y_label})
        # print("Current loss is:" + str(loss))