I have figured it out, after two days of being stuck!
Neither input layer should be reshaped at the point where it is defined. Instead, each input layer should be declared independently, and the reshaping carried out as a separate next step.
The fixed code follows:
# Two-input functional keras model:
#   * "main" branch   : (4201, 1024) input -> reshape to (1024, 1) ->
#                       Conv1D/MaxPool1D x2 -> flatten
#   * "semantic" branch: (4201, 2074) input -> reshape to (2074, 1) -> flatten
# The two flattened branches are concatenated and fed through a dense head
# down to a single scalar output.

# Declare the main input layer on its own; reshaping happens in a separate
# step so the InputLayer itself is left untouched.
main_input <- layer_input(
  shape = c(1024), batch_shape = c(4201, 1024),
  dtype = "float32", name = "main_input"
)
# Add a channel axis for Conv1D. (The original also passed input_shape /
# batch_input_shape / batch_size here; those are redundant on a layer applied
# to an existing tensor and have been dropped.)
main_reshaped <- main_input %>%
  layer_reshape(target_shape = list(1024, 1), dtype = "float32")

# Standalone convolutional layers for the main branch (composed below).
conv1 <- layer_conv_1d(filters = 64, kernel_size = 10, strides = 5,
                       dtype = "float32", activation = "relu")
max1 <- layer_max_pooling_1d(pool_size = 10)
conv2 <- layer_conv_1d(filters = 32, kernel_size = 5, strides = 3,
                       dtype = "float32", activation = "relu")
max2 <- layer_max_pooling_1d(pool_size = 5)

# BUG FIX: the original piped from an undefined variable `reshaped`
# ("object 'reshaped' not found"); the reshaped main input is `main_reshaped`.
conv <- main_reshaped %>%
  conv1 %>% max1 %>% conv2 %>% max2 %>%
  layer_flatten()

# Semantic branch: independently declared input, reshaped, then flattened.
semantic_input <- layer_input(
  shape = c(2074), batch_shape = c(4201, 2074),
  dtype = "float32", name = "semantic_input"
)
sem_reshaped <- semantic_input %>%
  layer_reshape(target_shape = list(2074, 1), dtype = "float32")
conc <- sem_reshaped %>% layer_flatten()

# Dense head over the concatenated features.
# NOTE(review): softmax over a single unit always outputs 1.0, so the model
# cannot learn anything useful at the output. For a scalar output you almost
# certainly want activation = "sigmoid" (binary classification) or "linear"
# (regression) — confirm the intended task before training.
output <- layer_concatenate(c(conv, conc)) %>%
  layer_dense(units = 100, activation = "relu", use_bias = TRUE) %>%
  layer_dense(units = 50, activation = "relu", use_bias = TRUE) %>%
  layer_dense(units = 25, activation = "relu", use_bias = TRUE) %>%
  layer_dense(units = 10, activation = "relu", use_bias = TRUE) %>%
  layer_dense(units = 1, activation = "softmax", name = "output")

cnn1_model <- keras_model(
  inputs = c(main_input, semantic_input),
  outputs = c(output)
)
The resulting model looks like this:
summary (cnn1_model)
_______________________________________________________________________________________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
===============================================================================================================================================================================
main_input (InputLayer) (4201, 1024) 0
_______________________________________________________________________________________________________________________________________________________________________________
reshape_25 (Reshape) (4201, 1024, 1) 0 main_input[0][0]
_______________________________________________________________________________________________________________________________________________________________________________
conv1d_65 (Conv1D) (4201, 203, 64) 704 reshape_25[0][0]
_______________________________________________________________________________________________________________________________________________________________________________
max_pooling1d_50 (MaxPooling1D) (4201, 20, 64) 0 conv1d_65[6][0]
_______________________________________________________________________________________________________________________________________________________________________________
conv1d_66 (Conv1D) (4201, 6, 32) 10272 max_pooling1d_50[6][0]
_______________________________________________________________________________________________________________________________________________________________________________
semantic_input (InputLayer) (4201, 2074) 0
_______________________________________________________________________________________________________________________________________________________________________________
max_pooling1d_51 (MaxPooling1D) (4201, 1, 32) 0 conv1d_66[5][0]
_______________________________________________________________________________________________________________________________________________________________________________
reshape_26 (Reshape) (4201, 2074, 1) 0 semantic_input[0][0]
_______________________________________________________________________________________________________________________________________________________________________________
flatten_35 (Flatten) (4201, 32) 0 max_pooling1d_51[5][0]
_______________________________________________________________________________________________________________________________________________________________________________
flatten_36 (Flatten) (4201, 2074) 0 reshape_26[0][0]
_______________________________________________________________________________________________________________________________________________________________________________
concatenate_38 (Concatenate) (4201, 2106) 0 flatten_35[0][0]
flatten_36[0][0]
_______________________________________________________________________________________________________________________________________________________________________________
dense_77 (Dense) (4201, 100) 210700 concatenate_38[0][0]
_______________________________________________________________________________________________________________________________________________________________________________
dense_78 (Dense) (4201, 50) 5050 dense_77[0][0]
_______________________________________________________________________________________________________________________________________________________________________________
dense_79 (Dense) (4201, 25) 1275 dense_78[0][0]
_______________________________________________________________________________________________________________________________________________________________________________
dense_80 (Dense) (4201, 10) 260 dense_79[0][0]
_______________________________________________________________________________________________________________________________________________________________________________
output (Dense) (4201, 1) 11 dense_80[0][0]
===============================================================================================================================================================================
Total params: 228,272
Trainable params: 228,272
Non-trainable params: 0
_______________________________________________________________________________________________________________________________________________________________________________