I have written the following code in PyCharm, which builds a fully connected layer (FCL) model in TensorFlow. The placeholder raises an InvalidArgumentError. I set the dtype, shape, and name explicitly in the placeholder, but I still get the error.
I want to produce a new Signal(1, 222) through the FCL model.
input Signal(1, 222) => output Signal(1, 222)
- maxPredict: find the index with the highest value in the output signal.
- calculateY: get the frequency array value corresponding to maxPredict.
- loss: use the difference between trueY and calculateY as the loss.
- loss = tf.abs(trueY - calculateY)
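To make the goal concrete, here is the computation I intend, sketched in plain NumPy (the basicFreq values here are placeholders of mine, not my real table):

import numpy as np

predictY = np.random.rand(1, 222)         # output signal from the model
basicFreq = np.linspace(0.6, 3.3, 222)    # frequency table, shape (222,)
maxPredict = np.argmax(predictY, axis=1)  # index of the strongest bin
calculateY = basicFreq[maxPredict] * 60   # frequency value at that index
trueY = np.array([72.0])
loss = np.abs(trueY - calculateY)         # shape (1,)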
Code (which raises the error)
x = tf.placeholder(dtype=tf.float32, shape=[1, 222], name='inputX')
ERROR
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'inputX' with dtype float and shape [1,222]
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'inputX' with dtype float and shape [1,222]
  [[{{node inputX}} = Placeholder[dtype=DT_FLOAT, shape=[1,222], _device="/job:localhost/replica:0/task:0/device:CPU:0"]]]
During handling of the above exception, another exception occurred:
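From what I can tell, this error can appear even when feed_dict is supplied to the training step, if some other run (for example, the variable initializer) depends on the placeholder. A minimal sketch of that situation, assuming TensorFlow 1.x (my own reconstruction, not my full code):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[1, 222], name='inputX')
v = tf.Variable(x * 2.0)  # this variable's initializer depends on inputX

with tf.Session() as sess:
    # global_variables_initializer() runs the initializer without a feed_dict,
    # so TensorFlow reports that 'inputX' was not fed
    sess.run(tf.global_variables_initializer())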
New Error Case
I changed my code:
x = tf.placeholder(tf.float32, [None, 222], name='inputX')
Error Case 1
tensorFreq = tf.convert_to_tensor(basicFreq, tf.float32)
newY = tf.gather(tensorFreq, maxPredict) * 60
loss = tf.abs(y - tf.Variable(newY))
ValueError: initial_value must have a shape specified: Tensor("mul:0", shape=(?,), dtype=float32)
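If I understand this message, tf.Variable requires a fully defined static shape for its initial value, but newY has shape (?,) because the batch dimension is unknown. A small sketch of the behavior (validate_shape here is only a way to silence the error, probably not the right fix):

import tensorflow as tf

freq = tf.constant([1.0, 2.0, 3.0])
probs = tf.placeholder(tf.float32, [None, 3])
newY = tf.gather(freq, tf.argmax(probs, axis=1)) * 60  # static shape (?,)

# bad = tf.Variable(newY)                      # raises the ValueError above
ok = tf.Variable(newY, validate_shape=False)   # accepted, but wrapping the
                                               # result in a Variable is likely
                                               # the real mistake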
Error Case 2
tensorFreq = tf.convert_to_tensor(basicFreq, tf.float32)
newY = tf.gather(tensorFreq, maxPredict) * 60
loss = tf.abs(y - newY)
Traceback (most recent call last):
  File "D:/PycharmProject/DetectionSignal/TEST_FCL_StackOverflow.py", line 127, in
    trainStep = opt.minimize(loss)
  File "C:\Users\Heewony\Anaconda3\envs\TSFW_pycharm\lib\site-packages\tensorflow\python\training\optimizer.py", line 407, in minimize
    ([str(v) for _, v in grads_and_vars], loss))
ValueError: No gradients provided for any variable, check your graph for ops that do not support gradients, between variables [tf.Variable 'Variable:0' shape=(222, 1024) dtype=float32_ref, tf.Variable 'Variable_1:0' shape=(1024,) dtype=float32_ref, ......... tf.Variable 'Variable_5:0' shape=(222,) dtype=float32_ref] and loss Tensor("Abs:0", dtype=float32).
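My understanding of this error is that tf.argmax has no gradient, so the path from the loss back to the FC weights is cut at maxPredict. One differentiable alternative I am considering (my own sketch, not code from an answer) replaces the hard argmax with a softmax-weighted expectation over the frequency table:

# predictY: softmax output, shape (?, 222); tensorFreq: shape (222,)
# every op here is differentiable, so gradients reach the FC weights
expectedFreq = tf.reduce_sum(predictY * tensorFreq, axis=1)  # shape (?,)
newY = expectedFreq * 60
loss = tf.reduce_mean(tf.abs(y - newY))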
Development environment
- OS Platform and Distribution: Windows 10 x64
- TensorFlow installed from: Anaconda
- TensorFlow version: 1.12.0
- Python version: 3.6.7
- Mobile device: N/A
- Exact command to reproduce: N/A
- GPU model and memory: NVIDIA GeForce GTX 1080 Ti
- CUDA/cuDNN: 9.0/7.4
Model and Function
import datetime

import numpy as np
import scipy.io as sio
import tensorflow as tf

def Model_FCL(inputX):
    data = inputX  # input signals

    # Fully connected layer 1
    flatConvh1 = tf.reshape(data, [-1, 222])
    fcW1 = tf.Variable(tf.truncated_normal(shape=[222, 1024], stddev=0.05))
    fcb1 = tf.Variable(tf.constant(0.1, shape=[1024]))
    fch1 = tf.nn.relu(tf.matmul(flatConvh1, fcW1) + fcb1)

    # Fully connected layer 2
    flatConvh2 = tf.reshape(fch1, [-1, 1024])
    fcW2 = tf.Variable(tf.truncated_normal(shape=[1024, 1024], stddev=0.05))
    fcb2 = tf.Variable(tf.constant(0.1, shape=[1024]))
    fch2 = tf.nn.relu(tf.matmul(flatConvh2, fcW2) + fcb2)

    # Output layer
    fcW3 = tf.Variable(tf.truncated_normal(shape=[1024, 222], stddev=0.05))
    fcb3 = tf.Variable(tf.constant(0.1, shape=[222]))
    logits = tf.add(tf.matmul(fch2, fcW3), fcb3)
    predictY = tf.nn.softmax(logits)
    return predictY, logits
def loadMatlabData(fileName):
    contentsMat = sio.loadmat(fileName)
    dataInput = contentsMat['dataInput']
    dataLabel = contentsMat['dataLabel']
    dataSize = dataInput.shape[0]
    return dataInput, dataLabel, dataSize
def getNextSignal(num, data, labels, WINDOW_SIZE, OUTPUT_SIZE):
    shuffleSignal = data[num]
    shuffleLabels = labels[num]
    # shuffleSignal = shuffleSignal.reshape(1, WINDOW_SIZE)
    # shuffleSignal = np.asarray(shuffleSignal, np.float32)
    return shuffleSignal, shuffleLabels
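Note that with the reshape commented out, batchSignal is fed as a 1-D array of shape (222,), while the placeholder expects [1, 222]. The normalization I could restore before feeding looks like this (names as in my session code below; this assumes data[num] is 1-D of length 222):

batchSignal, valueTrue = getNextSignal(i, dataPPG, dataLabel, 222, 222)
batchSignal = np.asarray(batchSignal, np.float32).reshape(1, 222)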
def getBasicFrequency():
    # basicFreq => shape (222,)
    basicFreq = np.array([0.598436736688, 0.610649731314, ... 3.297508549096])
    return basicFreq
Graph
basicFreq = getBasicFrequency()
myGraph = tf.Graph()

with myGraph.as_default():
    # placeholders for receiving the input and output data
    x = tf.placeholder(dtype=tf.float32, shape=[1, 222], name='inputX')  # signal, size [1, 222]
    y = tf.placeholder(tf.float32, name='trueY')  # float value, size [1]
    print('inputzz ', x, y)
    print('Graph ', myGraph.get_operations())
    print('TrainVariable ', tf.trainable_variables())

    predictY, logits = Model_FCL(x)  # predicted signal, size [1, 222]
    maxPredict = tf.argmax(predictY, 1, name='maxPredict')  # index of the max of the predicted signal
    tensorFreq = tf.convert_to_tensor(basicFreq, tf.float32)
    newY = tf.gather(tensorFreq, maxPredict) * 60  # value in the frequency array at that index
    loss = tf.abs(y - tf.Variable(newY))  # absolute difference (trueY - calculateY)

    opt = tf.train.AdamOptimizer(learning_rate=0.0001)
    trainStep = opt.minimize(loss)
    print('Graph ', myGraph.get_operations())
    print('TrainVariable ', tf.trainable_variables())
Session
with tf.Session(graph=myGraph) as sess:
    sess.run(tf.global_variables_initializer())
    dataFolder = './'
    writer = tf.summary.FileWriter('./logMyGraph', sess.graph)
    startTime = datetime.datetime.now()

    numberSummary = 0
    accuracyTotalTrain = []
    for trainEpoch in range(1, 25 + 1):
        arrayTrain = []
        dataPPG, dataLabel, dataSize = loadMatlabData(dataFolder + "TestValues.mat")

        for i in range(dataSize):
            batchSignal, valueTrue = getNextSignal(i, dataPPG, dataLabel, 222, 222)
            _, lossPrint, valuePredict = sess.run([trainStep, loss, newY],
                                                  feed_dict={x: batchSignal, y: valueTrue})
            print('Train ', i, ' ', valueTrue, ' - ', valuePredict, ' Loss ', lossPrint)

            arrayTrain.append(lossPrint)
            writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag='Loss',
                                                                  simple_value=float(lossPrint))]),
                               numberSummary)
            numberSummary += 1
        accuracyTotalTrain.append(np.mean(arrayTrain))
    print('Final Train : ', accuracyTotalTrain)
    sess.close()
I also tried loss = tf.losses.mean_squared_error(labels=y, predictions=newY), but there are still errors. – Heewony