3 votes

I have written the following code in PyCharm; it builds a fully connected layer (FCL) model in TensorFlow. The placeholder raises an InvalidArgumentError. I specified the dtype, shape, and name of the placeholder, but I still get the invalid argument error.

I want to produce a new signal of shape (1, 222) through the FCL model:
input signal (1, 222) => output signal (1, 222)

  • maxPredict: find the index with the highest value in the output signal.
  • calculateY: get the frequency array value corresponding to maxPredict.
  • loss: use the difference between the true Y and calculateY as the loss: loss = tf.abs(trueY - calculateY) (see the sketch after this list).
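
To make this concrete, the computation I want (sketched here with NumPy and dummy stand-in values, outside the TensorFlow graph) is:

import numpy as np

# Rough sketch of the intended computation with dummy stand-in values (not the TF graph itself)
basicFreq = np.linspace(0.6, 3.3, 222)    # stands in for my real frequency array, shape (222,)
predictY = np.random.rand(1, 222)         # stands in for the model's output signal, shape (1, 222)
trueY = 90.0                              # stands in for the true value

maxPredict = np.argmax(predictY, axis=1)  # index with the highest value in the output signal
calculateY = basicFreq[maxPredict] * 60   # frequency corresponding to maxPredict (scaled as in my graph)
loss = np.abs(trueY - calculateY)         # |true Y - calculated Y|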

Code (where the error occurs)
x = tf.placeholder(dtype=tf.float32, shape=[1, 222], name='inputX')

ERROR

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'inputX' with dtype float and shape [1,222] tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'inputX' with dtype float and shape [1,222] [[{{node inputX}} = Placeholderdtype=DT_FLOAT, shape=[1,222], _device="/job:localhost/replica:0/task:0/device:CPU:0"]] During handling of the above exception, another exception occurred:

New Error Case

I changed my code:
x = tf.placeholder(tf.float32, [None, 222], name='inputX')

Error Case 1
tensorFreq = tf.convert_to_tensor(basicFreq, tf.float32)
newY = tf.gather(tensorFreq, maxPredict) * 60
loss = tf.abs(y - tf.Variable(newY))

ValueError: initial_value must have a shape specified: Tensor("mul:0", shape=(?,), dtype=float32)

Error Case 2
tensorFreq = tf.convert_to_tensor(basicFreq, tf.float32)
newY = tf.gather(tensorFreq, maxPredict) * 60
loss = tf.abs(y - newY)

Traceback (most recent call last): File "D:/PycharmProject/DetectionSignal/TEST_FCL_StackOverflow.py", line 127, in trainStep = opt.minimize(loss) File "C:\Users\Heewony\Anaconda3\envs\TSFW_pycharm\lib\site-packages\tensorflow\python\training\optimizer.py", line 407, in minimize ([str(v) for _, v in grads_and_vars], loss)) ValueError: No gradients provided for any variable, check your graph for ops that do not support gradients, between variables [tf.Variable 'Variable:0' shape=(222, 1024) dtype=float32_ref, tf.Variable 'Variable_1:0' shape=(1024,) dtype=float32_re, ......... tf.Variable 'Variable_5:0' shape=(222,) dtype=float32_ref] and loss Tensor("Abs:0", dtype=float32).

Development environment

  • OS Platform and Distribution: Windows 10 x64
  • TensorFlow installed from: Anaconda
  • TensorFlow version: 1.12.0
  • Python version: 3.6.7
  • Mobile device: N/A
  • Exact command to reproduce: N/A
  • GPU model and memory: NVIDIA GeForce GTX 1080 Ti
  • CUDA/cuDNN: 9.0/7.4

Model and Function

import datetime

import numpy as np
import scipy.io as sio
import tensorflow as tf

def Model_FCL(inputX):
    data = inputX  # input Signals

    # Fully Connected Layer 1
    flatConvh1 = tf.reshape(data, [-1, 222])
    fcW1 = tf.Variable(tf.truncated_normal(shape=[222, 1024], stddev=0.05))
    fcb1 = tf.Variable(tf.constant(0.1, shape=[1024]))
    fch1 = tf.nn.relu(tf.matmul(flatConvh1, fcW1) + fcb1)

    # Fully Connected Layer 2
    flatConvh2 = tf.reshape(fch1, [-1, 1024])
    fcW2 = tf.Variable(tf.truncated_normal(shape=[1024, 1024], stddev=0.05))
    fcb2 = tf.Variable(tf.constant(0.1, shape=[1024]))
    fch2 = tf.nn.relu(tf.matmul(flatConvh2, fcW2) + fcb2)

    # Output Layer
    fcW3 = tf.Variable(tf.truncated_normal(shape=[1024, 222], stddev=0.05))
    fcb3 = tf.Variable(tf.constant(0.1, shape=[222]))

    logits = tf.add(tf.matmul(fch2, fcW3), fcb3)
    predictY = tf.nn.softmax(logits)
    return predictY, logits

def loadMatlabData(fileName):
    contentsMat = sio.loadmat(fileName)
    dataInput = contentsMat['dataInput']
    dataLabel = contentsMat['dataLabel']

    dataSize = dataInput.shape
    dataSize = dataSize[0]
    return dataInput, dataLabel, dataSize

def getNextSignal(num, data, labels, WINDOW_SIZE, OUTPUT_SIZE):
    shuffleSignal = data[num]
    shuffleLabels = labels[num]

    # shuffleSignal = shuffleSignal.reshape(1, WINDOW_SIZE)
    # shuffleSignal = np.asarray(shuffleSignal, np.float32)
    return shuffleSignal, shuffleLabels

def getBasicFrequency():
    # basicFreq => shape(222)
    basicFreq = np.array([0.598436736688, 0.610649731314, ... 3.297508549096])
    return basicFreq

Graph

basicFreq = getBasicFrequency()
myGraph = tf.Graph()
with myGraph.as_default():
    # placeholders for receiving the input and output data
    x = tf.placeholder(dtype=tf.float32, shape=[1, 222], name='inputX') # Signal size = [1, 222]
    y = tf.placeholder(tf.float32, name='trueY') # Float value size = [1]

    print('inputzz ', x, y)
    print('Graph  ', myGraph.get_operations())
    print('TrainVariable ', tf.trainable_variables())

    predictY, logits = Model_FCL(x) # Predict Signal, size = [1, 222]
    maxPredict = tf.argmax(predictY, 1, name='maxPredict') # Find max index of Predict Signal

    tensorFreq = tf.convert_to_tensor(basicFreq, tf.float32)
    newY = tf.gather(tensorFreq, maxPredict) * 60   # Find the value that corresponds to the Freq array index
    loss = tf.abs(y - tf.Variable(newY))  # Calculate absolute (true Y - predict Y)
    opt = tf.train.AdamOptimizer(learning_rate=0.0001)
    trainStep = opt.minimize(loss)

    print('Graph  ', myGraph.get_operations())
    print('TrainVariable ', tf.trainable_variables())  

Session

with tf.Session(graph=myGraph) as sess:
    sess.run(tf.global_variables_initializer())

    dataFolder = './'
    writer = tf.summary.FileWriter('./logMyGraph', sess.graph)
    startTime = datetime.datetime.now()

    numberSummary = 0
    accuracyTotalTrain = []
    for trainEpoch in range(1, 25 + 1):
        arrayTrain = []

        dataPPG, dataLabel, dataSize = loadMatlabData(dataFolder + "TestValues.mat")

        for i in range(dataSize):
            batchSignal, valueTrue = getNextSignal(i, dataPPG, dataLabel, 222, 222)
            _, lossPrint, valuePredict = sess.run([trainStep, loss, newY], feed_dict={x: batchSignal, y: valueTrue})
            print('Train ', i, ' ', valueTrue, ' - ', valuePredict, '   Loss ', lossPrint)

            arrayTrain.append(lossPrint)
            writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag='Loss', simple_value=float(lossPrint))]),
                               numberSummary)
            numberSummary += 1
        accuracyTotalTrain.append(np.mean(arrayTrain))
    print('Final Train : ', accuracyTotalTrain)

    sess.close()    
I changed the code to x = tf.placeholder(tf.float32, [None, 222], name='inputX'). – Heewony
I also changed the loss to loss = tf.losses.mean_squared_error(labels=y, predictions=newY). There are still errors. – Heewony

2 Answers

0 votes

It seems that the variable batchSignal is of the wrong type or shape. It must be a numpy array of shape exactly [1, 222]. If you want to use a batch of n × 222 examples, the placeholder x should have a shape of [None, 222] and the placeholder y a shape of [None].
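
For example, right before the sess.run call you could coerce the example explicitly (a sketch using the variable names from the question's session loop, with np being NumPy as imported there):

batchSignal = np.asarray(batchSignal, dtype=np.float32).reshape(1, 222)  # exactly [1, 222]
valueTrue = np.asarray(valueTrue, dtype=np.float32).reshape(1)           # one scalar label per example
_, lossPrint = sess.run([trainStep, loss], feed_dict={x: batchSignal, y: valueTrue})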

By the way, consider using tf.layers.dense instead of explicitly initializing variables and implementing the layers yourself.
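
For instance, the model from the question could be written roughly like this (a sketch assuming TensorFlow 1.x and the layer sizes from the question):

import tensorflow as tf

def Model_FCL(inputX):
    # inputX: a float placeholder of shape [None, 222]
    fch1 = tf.layers.dense(inputX, 1024, activation=tf.nn.relu, name='fc1')
    fch2 = tf.layers.dense(fch1, 1024, activation=tf.nn.relu, name='fc2')
    logits = tf.layers.dense(fch2, 222, name='fc3')  # output layer, no activation
    predictY = tf.nn.softmax(logits)
    return predictY, logits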

0 votes

There are a few things to change.

Error Case 0. You don't need to reshape your flow between layers. You can use None at the first dimension to pass a dynamic batch size.

Error Case 1. You can use newY directly as the output of the NN. You should only use tf.Variable to define weights or biases.

Error Case 2. It seems that TensorFlow cannot provide gradients through tf.gather() on the tf.argmax() index here, so the optimizer finds no gradients for your variables. For a regression problem, the mean squared error is often sufficient.

Here is how I would rewrite your code. I don't have your MATLAB data, so I can't debug the Python/MATLAB interface:

Model:

def Model_FCL(inputX):
    # Fully Connected Layer 1
    fcW1 = tf.get_variable('w1', shape=[222, 1024], initializer=tf.truncated_normal_initializer())
    fcb1 = tf.get_variable('b1', shape=[1024], initializer=tf.truncated_normal_initializer())
    # fcb1 = tf.get_variable('b1', shape=[1024], trainable=False, initializer=tf.constant_initializer(valueThatYouWant)) # if you want to keep your bias constant
    fch1 = tf.nn.relu(tf.matmul(inputX, fcW1) + fcb1, name='relu1')

    # Fully Connected Layer 2
    fcW2 = tf.get_variable('w2', shape=[1024, 1024], initializer=tf.truncated_normal_initializer())
    fcb2 = tf.get_variable('b2', shape=[1024], initializer=tf.truncated_normal_initializer())
    # fcb2 = tf.get_variable('b2', shape=[1024], trainable=False, initializer=tf.constant_initializer(valueThatYouWant)) # if you want to keep your bias constant
    fch2 = tf.nn.relu(tf.matmul(fch1, fcW2) + fcb2, name='relu2')

    # Output Layer
    fcW3 = tf.get_variable('w3', shape=[1024, 222], initializer=tf.truncated_normal_initializer())
    fcb3 = tf.get_variable('b3', shape=[222], initializer=tf.truncated_normal_initializer())
    # fcb3 = tf.get_variable('b3', shape=[222], trainable=False, initializer=tf.constant_initializer(valueThatYouWant)) # if you want to keep your bias constant
    logits = tf.add(tf.matmul(fch2, fcW3), fcb3)

    predictY = tf.nn.softmax(logits)  # I'm not sure it will learn well if you apply softmax and then abs/MSE
    return predictY, logits

Graph:

with myGraph.as_default():
    # placeholders for the input and output data
    # put None(dynamic batch size) not -1 at the first dimension so that you can change your batch size
    x = tf.placeholder(tf.float32, shape=[None, 222], name='inputX')  # Signal size = [1, 222]
    y = tf.placeholder(tf.float32, shape=[None], name='trueY')  # Float value size = [1]

    ...

    predictY, logits = Model_FCL(x)  # Predict Signal, size = [1, 222]
    maxPredict = tf.argmax(predictY, 1, name='maxPredict')  # Find max index of Predict Signal

    tensorFreq = tf.convert_to_tensor(basicFreq, tf.float32)
    newY = tf.gather(tensorFreq, maxPredict) * 60   # Find the value that corresponds to the Freq array index

    loss = tf.losses.mean_squared_error(labels=y, predictions=newY)  # maybe use MSE for regression problem
    # loss = tf.abs(y - newY)  # absolute difference (true Y - predicted Y); no gradients flow through the gather/argmax path above
    opt = tf.train.AdamOptimizer(learning_rate=0.0001)
    trainStep = opt.minimize(loss)
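
And the training loop from your Session block would then feed arrays that match these shapes. A minimal sketch, reusing your loadMatlabData/getNextSignal helpers (which I could not test without the .mat file):

with tf.Session(graph=myGraph) as sess:
    sess.run(tf.global_variables_initializer())
    dataPPG, dataLabel, dataSize = loadMatlabData("TestValues.mat")

    for i in range(dataSize):
        batchSignal, valueTrue = getNextSignal(i, dataPPG, dataLabel, 222, 222)
        batchSignal = np.asarray(batchSignal, np.float32).reshape(1, 222)  # matches x: [None, 222]
        valueTrue = np.asarray(valueTrue, np.float32).reshape(1)           # matches y: [None]
        _, lossPrint, valuePredict = sess.run([trainStep, loss, newY],
                                              feed_dict={x: batchSignal, y: valueTrue})
        print('Train', i, valueTrue, '-', valuePredict, '  Loss', lossPrint)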