8 votes

I am confused by Conv2D and conv2d in Keras. What is the difference between them? I think the first one is a layer and the second one is a backend function, but what does that mean? With Conv2D we pass the number of filters, the filter size, and the stride (Conv2D(64, (3, 3), strides=(8, 8))(input)), but with conv2d we use conv2d(input, kernel, strides=(8, 8)). What is kernel? Is it (64, 3, 3), i.e. the number of filters and their size put together? Where should I enter the number of kernels? Could you please help me with this issue? Thank you.

The code in PyTorch:

import torch
import torch.nn.functional as F

def apply_conv(self, image, filter_type: str):
    if filter_type == 'dct':
        filters = self.dct_conv_weights
    elif filter_type == 'idct':
        filters = self.idct_conv_weights
    else:
        raise ValueError('Unknown filter_type value.')

    image_conv_channels = []
    for channel in range(image.shape[1]):
        # take one channel, keeping a singleton channel dimension
        image_yuv_ch = image[:, channel, :, :].unsqueeze_(1)
        image_conv = F.conv2d(image_yuv_ch, filters, stride=8)
        # rearrange the 64 filter outputs into 8x8 blocks
        image_conv = image_conv.permute(0, 2, 3, 1)
        image_conv = image_conv.view(image_conv.shape[0], image_conv.shape[1],
                                     image_conv.shape[2], 8, 8)
        image_conv = image_conv.permute(0, 1, 3, 2, 4)
        image_conv = image_conv.contiguous().view(image_conv.shape[0],
                                                  image_conv.shape[1] * image_conv.shape[2],
                                                  image_conv.shape[3] * image_conv.shape[4])

        image_conv.unsqueeze_(1)

        image_conv_channels.append(image_conv)

    image_conv_stacked = torch.cat(image_conv_channels, dim=1)

    return image_conv_stacked

The changed code in Keras:

import keras as Kr
from keras import backend as K

def apply_conv(self, image, filter_type: str):
    if filter_type == 'dct':
        filters = self.dct_conv_weights
    elif filter_type == 'idct':
        filters = self.idct_conv_weights
    else:
        raise ValueError('Unknown filter_type value.')
    print(image.shape)

    image_conv_channels = []
    for channel in range(image.shape[1]):
        print(image.shape)
        print(channel)
        image_yuv_ch = K.expand_dims(image[:, channel, :, :], 1)
        print(image_yuv_ch.shape)
        print(filters.shape)
        image_conv = Kr.backend.conv2d(image_yuv_ch, filters, strides=(8, 8),
                                       data_format='channels_first')
        image_conv = Kr.backend.permute_dimensions(image_conv, (0, 2, 3, 1))
        image_conv = Kr.backend.reshape(image_conv, (image_conv.shape[0],
                                                     image_conv.shape[1],
                                                     image_conv.shape[2], 8, 8))
        image_conv = Kr.backend.permute_dimensions(image_conv, (0, 1, 3, 2, 4))
        image_conv = Kr.backend.reshape(image_conv, (image_conv.shape[0],
                                                     image_conv.shape[1] * image_conv.shape[2],
                                                     image_conv.shape[3] * image_conv.shape[4]))

        # backend ops are not in-place, so the result must be assigned
        image_conv = Kr.backend.expand_dims(image_conv, 1)

        image_conv_channels.append(image_conv)

    image_conv_stacked = Kr.backend.concatenate(image_conv_channels, axis=1)

    return image_conv_stacked

But when I execute the code, it produces the following error:

Traceback (most recent call last):
  File "", line 383, in <module>
    decoded_noise=JpegCompression()(act11)#16
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\engine\base_layer.py", line 457, in __call__
    output = self.call(inputs, **kwargs)
  File "", line 169, in call
    image_dct = self.apply_conv(noised_image, 'dct')
  File "", line 132, in apply_conv
    image_conv = Kr.backend.conv2d(image_yuv_ch,filters,strides=(8,8),data_format='channels_first')
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\backend\tensorflow_backend.py", line 3650, in conv2d
    data_format=tf_data_format)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\ops\nn_ops.py", line 779, in convolution
    data_format=data_format)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\ops\nn_ops.py", line 839, in __init__
    filter_shape[num_spatial_dims]))

ValueError: number of input channels does not match corresponding dimension of filter, 1 != 8

The new code:

for channel in range(image.shape[1]):
    image_yuv_ch = K.expand_dims(image[:, channel, :, :], axis=1)
    image_yuv_ch = K.permute_dimensions(image_yuv_ch, (0, 2, 3, 1))
    image_conv = tf.keras.backend.conv2d(image_yuv_ch, kernel=filters, strides=(8, 8), padding='same')
    image_conv = tf.keras.backend.reshape(image_conv, (image_conv.shape[0], image_conv.shape[1], image_conv.shape[2], 8, 8))

The error:

Traceback (most recent call last):
  File "", line 263, in <module>
    decoded_noise=JpegCompression()(act11)#16
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\engine\base_layer.py", line 457, in __call__
    output = self.call(inputs, **kwargs)
  File "", line 166, in call
    image_dct = self.apply_conv(noised_image, 'dct')
  File "", line 128, in apply_conv
    image_conv = tf.keras.backend.reshape(image_conv,(image_conv.shape[0],image_conv.shape[1], image_conv.shape[2],8,8))
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\keras\backend.py", line 2281, in reshape
    return array_ops.reshape(x, shape)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 6482, in reshape
    "Reshape", tensor=tensor, shape=shape, name=name)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 513, in _apply_op_helper
    raise err
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 510, in _apply_op_helper
    preferred_dtype=default_dtype)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\ops.py", line 1146, in internal_convert_to_tensor
    ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\constant_op.py", line 229, in _constant_tensor_conversion_function
    return constant(v, dtype=dtype, name=name)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\constant_op.py", line 208, in constant
    value, dtype=dtype, shape=shape, verify_shape=verify_shape))
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\tensor_util.py", line 531, in make_tensor_proto
    "supported type." % (type(values), values))

TypeError: Failed to convert object of type <class 'tuple'> to Tensor. Contents: (Dimension(None), Dimension(4), Dimension(4), 8, 8). Consider casting elements to a supported type.

Comments:

  • See "Merge" versus "merge", what is the difference?. Names starting with lower case represent functions that can receive one or more tensors and parameters and produce another tensor. Names starting with upper case represent layers; those do not directly receive an input tensor, instead they produce a callable that can receive a tensor and produce a new one. – jdehesa
  • Thank you. Now I have a tensor with shape (:, 1, 32, 32) and filters with shape (64, 1, 8, 8). If I use conv2d(image, filters), is that possible, or do the filter and image shapes need to match in some way? I need Keras to apply 64 filters of 8x8, and I am not sure that conv2d(image, filters) does what I need. Could you please help me? – nadia
  • If you already have an image tensor and a filters tensor, then use tf.nn.conv2d. With Keras functions you just give the filter size, and Keras creates them for you internally. In any case, it seems your data is not in the default format (I suppose the image is (batch, channels, height, width) and the filters (out_channels, in_channels, height, width)?). See the data_format parameter in the functions and, if you need it, use tf.transpose. – jdehesa
  • Yes, the image shape is (batch, 3, 32, 32), and now I need to convolve the image with the special filters I made before; they are 64 filters of 8x8 and I have to convolve them with the image. What should I do for this? Is it possible to send filters to conv2d? – nadia
  • Sorry, I use Keras, so instead of tf.nn.conv2d should I use keras.backend.conv2d? I had code in PyTorch and I need to change it to Keras. In the PyTorch code, at first the filter size was (64, 8, 8) and then unsqueeze(1), so I think the size became (64, 1, 8, 8). Due to this I said the filter size is (64, 1, 8, 8). I added the code above that I changed to Keras. – nadia
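
To illustrate jdehesa's suggestion from these comments, here is a minimal sketch (shapes assumed from the question; note that tf.nn.conv2d by default expects NHWC input and (height, width, in_channels, out_channels) filters):

import tensorflow as tf

image = tf.random_normal((10, 3, 32, 32))    # (batch, channels, height, width)
image = tf.transpose(image, (0, 2, 3, 1))    # to (batch, height, width, channels)
filters = tf.random_normal((8, 8, 3, 64))    # (height, width, in_channels, out_channels)
out = tf.nn.conv2d(image, filters, strides=[1, 8, 8, 1], padding='SAME')
print(out.shape)  # (10, 4, 4, 64)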

1 Answer

7 votes

TensorFlow and Keras now use the channels_last convention by default. So first you should permute the channel dimension to the last position using K.permute_dimensions. You can try this code on colab.research.google.com to figure it out yourself.
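
For instance, a minimal sketch of that permutation, using the (batch, channels, height, width) layout from the question:

import tensorflow as tf
import tensorflow.keras.backend as K

x = tf.random_normal((10, 3, 32, 32))      # channels_first: (batch, c, h, w)
x = K.permute_dimensions(x, (0, 2, 3, 1))  # channels_last:  (batch, h, w, c)
print(x.shape)  # (10, 32, 32, 3)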

First question:

  • conv2d is a function that performs 2D convolution (docs)
  • keras.layers.Conv2D() returns an instance of the class Conv2D, which performs the convolution function (see more here); both forms are shown side by side in the full example under "Second question" below

Basically, they differ in the way they are defined and the way they are used. K.conv2d is used inside keras.layers.Conv2D when conv_layer applies convolution to some input x, as in conv_layer(x).

The example below may help you understand the difference between say_hello and SayHello more easily.

def say_hello(word, name):
    print(word, name)


class SayHello():

    def __init__(self, word='Hello'):
        self.word = word

    def __call__(self, name):
        say_hello(self.word, name)


say_hello('Hello', 'Nadia') #Hello Nadia

sayhello = SayHello(word='Hello') # you will get an instance `sayhello` from class SayHello

sayhello('Nadia') # Hello Nadia
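
In the same spirit, here is a hypothetical, stripped-down sketch (MiniConv2D is my name, not a Keras class, and a real keras.layers.Conv2D additionally creates trainable weights in its build step) of how a class like Conv2D can wrap the backend function:

import tensorflow as tf
import tensorflow.keras.backend as K

class MiniConv2D:

    def __init__(self, filters, kernel_size, strides=(1, 1), padding='same'):
        self.filters = filters
        self.kernel_size = kernel_size
        self.strides = strides
        self.padding = padding
        self.kernel = None  # created lazily on the first call

    def __call__(self, x):
        if self.kernel is None:
            in_channels = K.int_shape(x)[-1]  # assumes channels_last input
            self.kernel = tf.random_normal(
                (self.kernel_size, self.kernel_size, in_channels, self.filters))
        return K.conv2d(x, self.kernel, strides=self.strides, padding=self.padding)

x = tf.random_normal((10, 32, 32, 1))
y = MiniConv2D(filters=64, kernel_size=8, strides=(4, 4))(x)
print(y.shape)  # (10, 8, 8, 64)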

Second question:

  • kernel here is a tensor of shape (kernel_size, kernel_size, in_channels, out_channels)
  • If you want image_conv to have shape (8, 8, 64), then use strides=(4, 4)
import tensorflow as tf
import tensorflow.keras.backend as K

image = tf.random_normal((10, 3, 32, 32))
print(image.shape) # shape=(10, 3, 32, 32)

channel = 1
image_yuv_ch = K.expand_dims(image[:, channel,:,:], axis=1) # shape=(10, 1, 32, 32)
image_yuv_ch = K.permute_dimensions(image_yuv_ch, (0, 2, 3, 1)) # shape=(10, 32, 32, 1)

# The first K.conv2d
in_channels = 1
out_channels = 64 # same as filters
kernel = tf.random_normal((8, 8, in_channels, out_channels)) # shape=(8, 8, 1, 64)

image_conv = tf.keras.backend.conv2d(image_yuv_ch, kernel=kernel, strides=(4, 4), padding='same')
print(image_conv.shape) #shape=(10, 8, 8, 64)


# The second 
import keras
conv_layer = keras.layers.Conv2D(filters=64, kernel_size=8, strides=(4, 4), padding='same')
image_conv = conv_layer(image_yuv_ch)
print(image_conv.shape) #shape=(10, 8, 8, 64)
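
As a follow-up for the original stride of 8, here is a minimal sketch of my own (continuing from the image_yuv_ch and kernel tensors above, not part of the original answer): the output blocks become 4x4, and passing plain ints from K.int_shape with -1 for the unknown batch dimension avoids the Dimension(None) TypeError from the question.

image_conv = tf.keras.backend.conv2d(image_yuv_ch, kernel=kernel,
                                     strides=(8, 8), padding='same')
print(image_conv.shape)  # shape=(10, 4, 4, 64)

# int_shape gives Python ints (None for the batch dimension), and -1 lets
# TensorFlow infer the batch size instead of receiving Dimension(None)
_, h, w, _ = tf.keras.backend.int_shape(image_conv)
image_conv = tf.keras.backend.reshape(image_conv, (-1, h, w, 8, 8))
print(image_conv.shape)  # shape=(10, 4, 4, 8, 8)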