
When I build my experiment model using the tf.keras functional API, I get a "Graph disconnected" error, as follows:

ValueError Traceback (most recent call last)

in ()
     35 outputs = x
     36
---> 37 model = tf.keras.Model(inputs=inputs, outputs=outputs)
     38 model.summary()

4 frames

/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/functional.py in _map_graph_network(inputs, outputs)
    988                          'The following previous layers '
    989                          'were accessed without issue: ' +
--> 990                          str(layers_with_complete_input))
    991     for x in nest.flatten(node.outputs):
    992       computable_tensors.add(id(x))

ValueError: Graph disconnected: cannot obtain value for tensor Tensor("input_63:0", shape=(?, 32, 32, 32), dtype=float32) at layer "tf_op_layer_Pow_105". The following previous layers were accessed without issue: []

To understand this error, I looked into related SO posts but was not able to remove it. I think it is caused by a shape mismatch at some layer, so I double-checked the shape of each layer, but the error still persists. I am not sure what causes the problem. Can anyone suggest a possible way to solve this error, or a quick fix?

Update: my full code attempt:

import tensorflow as tf
from tensorflow.keras.layers import (Conv2D, BatchNormalization, MaxPooling2D,
                                     Activation, Dropout, Flatten, Dense, concatenate)

def my_func(x):
    n = 2
    c = tf.constant([1, -1/6], dtype=tf.float32)
    p = tf.constant([1,3], dtype=tf.float32)
    W,H, C = x.shape[1:].as_list()
    inputs = tf.keras.Input(shape=(W,H,C))
    xx = inputs
    res = []
    for i in range(n):
        m = c[i] * tf.math.pow(xx, p[i])
        res.append(m)
    csum = tf.math.cumsum(res)
    csum_tr = tf.transpose(csum, perm=[1, 2, 3, 4, 0])
    new_x = tf.reshape(csum_tr, tf.constant([-1, W, H, C*n]))
    return new_x

inputs = tf.keras.Input(shape=(32,32,3))

conv_1 = Conv2D(64, kernel_size = (3, 3), padding='same')(inputs)
BN_1 = BatchNormalization(axis=-1)(conv_1)
pool_1 = MaxPooling2D(strides=(1,1), pool_size=(3,3), padding='same')(BN_1)
z0 = my_func(pool_1)

conv_2 = Conv2D(64, kernel_size = (3, 3), padding='same')(z0)
BN_2 = BatchNormalization(axis=-1)(conv_2)
pool_2 = MaxPooling2D(strides=(1,1), pool_size=(3,3), padding='same')(BN_2)
z1 = my_func(pool_2)
merged_2 = concatenate([z0, z1], axis=-1)
act_2 = Activation('tanh')(merged_2)

x = Conv2D(64, kernel_size = (3, 3), padding='same', activation='relu')(act_2)
x = BatchNormalization(axis=-1)(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(3,3))(x)
x = Dropout(0.1)(x)

x = Flatten()(x)
x = Dense(128)(x)
x = BatchNormalization()(x)
x = Activation('tanh')(x)
x = Dropout(0.1)(x)

x = Dense(10)(x)
x = Activation('softmax')(x)
outputs = x

model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.summary()

Can anyone point out what causes the problem? How should I fix the graph disconnected error above? Any quick thoughts? Thanks!


1 Answer


This is the correct way to write your custom function; there is no need to create an additional Input layer inside it. The tf.keras.Input you build inside my_func is a brand-new tensor that is not connected to the rest of your graph, which is exactly what the "Graph disconnected" error is reporting.

def my_func(x):
    # coefficients and exponents of the polynomial terms c[i] * x**p[i]
    n = 2
    c = tf.constant([1, -1/6], dtype=tf.float32)
    p = tf.constant([1, 3], dtype=tf.float32)
    W, H, C = x.shape[1:].as_list()

    # compute each term directly on the incoming tensor x
    res = []
    for i in range(n):
        m = c[i] * tf.math.pow(x, p[i])
        res.append(m)

    # cumulative sums of the terms, stacked along a new leading axis,
    # then folded into the channel dimension: (batch, W, H, C*n)
    csum = tf.math.cumsum(res)
    csum_tr = tf.transpose(csum, perm=[1, 2, 3, 4, 0])
    new_x = tf.reshape(csum_tr, tf.constant([-1, W, H, C*n]))

    return new_x
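
As a quick sanity check (a minimal sketch; the batch size and channel count below are just example values), you can call my_func eagerly on a random tensor and confirm that the channel dimension doubles from C to C*n:

dummy = tf.random.normal((4, 32, 32, 64))   # example batch: (batch, H, W, C)
out = my_func(dummy)
print(out.shape)                            # expected: (4, 32, 32, 128)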

You can apply it inside your network simply by using a Lambda layer:

import tensorflow as tf
from tensorflow.keras.layers import (Conv2D, BatchNormalization, MaxPooling2D,
                                     Activation, Dropout, Flatten, Dense,
                                     Lambda, concatenate)

inputs = tf.keras.Input(shape=(32,32,3))

conv_1 = Conv2D(64, kernel_size = (3, 3), padding='same')(inputs)
BN_1 = BatchNormalization(axis=-1)(conv_1)
pool_1 = MaxPooling2D(strides=(1,1), pool_size=(3,3), padding='same')(BN_1)
z0 = Lambda(my_func)(pool_1)  ## <=================

conv_2 = Conv2D(64, kernel_size = (3, 3), padding='same')(z0)
BN_2 = BatchNormalization(axis=-1)(conv_2)
pool_2 = MaxPooling2D(strides=(1,1), pool_size=(3,3), padding='same')(BN_2)
z1 = Lambda(my_func)(pool_2)  ## <=================
merged_2 = concatenate([z0, z1], axis=-1)
act_2 = Activation('tanh')(merged_2)

x = Conv2D(64, kernel_size = (3, 3), padding='same', activation='relu')(act_2)
x = BatchNormalization(axis=-1)(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(3,3))(x)
x = Dropout(0.1)(x)

x = Flatten()(x)
x = Dense(128)(x)
x = BatchNormalization()(x)
x = Activation('tanh')(x)
x = Dropout(0.1)(x)

x = Dense(10)(x)
x = Activation('softmax')(x)
outputs = x

model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.summary()
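
If you just want to confirm that the model now builds and trains end to end, a minimal compile/fit sketch could look like the following (the optimizer, loss, and the use of CIFAR-10 data are assumptions for illustration, not part of the original post):

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# CIFAR-10 matches the (32, 32, 3) input shape and the 10-class softmax output
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model.fit(x_train, y_train, epochs=5, batch_size=64,
          validation_data=(x_test, y_test))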