I'm trying to make auto-encoder.
My code was working before I update to tensorflow2.0, but it is not working with updated the version of tensorflow.
# Define batch_size / epochs
epochs = 200
batch_size = 128
encoder = encoder_model(inputs)
decoder = decoder_model()
# instantiate VAE model
outputs = decoder(encoder(inputs))
ae = Model(inputs, outputs, name='ae_mlp')
reconstruction_loss = mse(inputs, outputs)
ae.add_loss(reconstruction_loss)
opt = tf.keras.optimizers.Adam(lr=0.001)
ae.compile(optimizer=opt, loss='mse')
history = ae.fit(x_trn, x_trn, epochs=epochs, batch_size=batch_size, validation_data=(x_val, x_val))
It keeps getting an error with most of batch sizes.
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-12-38e600fb7073> in <module>
135 opt = tf.keras.optimizers.Adam(lr=0.001)
136 ae.compile(optimizer=opt, loss='mse')
--> 137 history = ae.fit(x_trn, x_trn, epochs=epochs, batch_size=batch_size, validation_data=(x_val, x_val))
138
139 """ save model"""
~\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
726 max_queue_size=max_queue_size,
727 workers=workers,
--> 728 use_multiprocessing=use_multiprocessing)
729
730 def evaluate(self,
~\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, **kwargs)
322 mode=ModeKeys.TRAIN,
323 training_context=training_context,
--> 324 total_epochs=epochs)
325 cbks.make_logs(model, epoch_logs, training_result, ModeKeys.TRAIN)
326
~\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in run_one_epoch(model, iterator, execution_function, dataset_size, batch_size, strategy, steps_per_epoch, num_samples, mode, training_context, total_epochs)
170 batch_outs,
171 batch_start=step * batch_size,
--> 172 batch_end=step * batch_size + current_batch_size)
173 cbks.make_logs(model, batch_logs, batch_outs, mode)
174 step += 1
~\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_utils.py in aggregate(self, batch_outs, batch_start, batch_end)
132 self.results[0] += batch_outs[0]
133 else:
--> 134 self.results[0] += batch_outs[0] * (batch_end - batch_start)
135 # Metrics (always stateful, just grab current values.)
136 self.results[1:] = batch_outs[1:]
ValueError: operands could not be broadcast together with shapes (128,) (116,) (128,)
I tried with batch_size=1, and batch_size=2, it gave me no errors, takes much more time though.
When I try to do it with bigger batch_size, it always give me the error. How can I solve this problem?
mseon inputs and outputs? - Daniel Möller