0
votes

I am making a program based on the MNIST example. However, instead of using the load_data() function, I am generating custom data.

The data has a size of 28x28.
An error occurs when I call model.predict().

What's wrong?

# Standard library
import os

# Third-party
import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split

# Keras: use tensorflow.keras consistently.  The original code imported
# ImageDataGenerator from the standalone `keras` package while everything
# else came from `tensorflow.keras`; mixing the two packages produces
# objects that are not interchangeable and is a common source of errors.
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import (Activation, Conv2D, Convolution2D,
                                     Dense, Dropout, Flatten, MaxPooling2D)
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical

# Ask TensorFlow to register XLA devices (harmless if XLA is unavailable).
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'

# Root directory containing the image folders (expects a `train/` subdir).
path = '/Users/animalman/Documents/test/cnn/'

# Training hyper-parameters.
batch_size = 20
epochs = 1
width = 28    # input image width in pixels
height = 28   # input image height in pixels

# Number of generator batches drawn per epoch (training / validation).
total_train = 10
total_val = 10

# Rescale raw pixel values from [0, 255] into [0, 1].
train_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)


# Small CNN for binary classification of 28x28 RGB images.
# The final Dense(1) layer emits a raw logit; the loss below is configured
# with from_logits=True to match.
model = Sequential()
model.add(Conv2D(16, 3, padding='same', activation='relu',
                 input_shape=(width, height, 3)))
model.add(MaxPooling2D())
model.add(Conv2D(32, 3, padding='same', activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(64, 3, padding='same', activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(1))

model.compile(
    optimizer='adam',
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    metrics=['accuracy'],
)

model.summary()

# Stream training images from `<path>/train`, resized to width x height,
# with integer 0/1 labels (class_mode='binary').
training_set = train_datagen.flow_from_directory(path + 'train',
                                                 target_size=(width, height),
                                                 batch_size=batch_size,
                                                 class_mode='binary')

# NOTE(review): this also reads from the *train* directory, so the model
# validates on its own training data — confirm whether a separate `test/`
# directory was intended.
test_set = test_datagen.flow_from_directory(path + 'train',
                                            target_size=(width, height),
                                            batch_size=batch_size,
                                            class_mode='binary')

# `Model.fit_generator` is deprecated (and removed in recent TensorFlow
# releases); `Model.fit` accepts generators directly with the same
# arguments and behavior.
history = model.fit(training_set,
                    steps_per_epoch=total_train,
                    epochs=epochs,
                    validation_data=test_set,
                    validation_steps=total_val)



# Build one dummy all-ones test image.  The model was constructed with
# input_shape=(width, height, 3), so predict() requires a 4-D batch of
# shape (N, 28, 28, 3).  The original code built a (28, 28) array and
# reshaped it to (-1, 28, 28), which raised:
#   ValueError: ... expected min_ndim=4, found ndim=3.
# (np.ones replaces the original element-by-element fill loop.)
arr = np.ones((height, width, 3))

print(arr)
# Add the leading batch dimension and the channel axis: (1, 28, 28, 3).
arr = arr.reshape(-1, height, width, 3)
pred = model.predict(arr)[0]

# Persist the trained model in HDF5 format.
hdf5_file = "./model_number.h5"
model.save(hdf5_file)


The error occurs at these lines:

arr = arr.reshape(-1, 28, 28)
pred = model.predict(arr)[0]

ValueError: Input 0 of layer sequential is incompatible with the layer: : expected min_ndim=4, found ndim=3. Full shape received: (None, 28, 28)

1

1 Answers

0
votes

Because the model was built with

input_shape=(width, height, 3)

predict() expects a 4-D batch with 3 channels. The array should be created as:
arr = np.ones((1, 28, 28, 3))
pred = model.predict(arr)[0]