# Importing the required Keras modules containing model and layers
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
from keras.utils.vis_utils import plot_model

# Creating a Sequential model and adding the layers
# input_shape is assumed to be defined earlier, e.g. (28, 28, 1) for MNIST
model = Sequential()
# 63 convolution kernels, each 3x3
model.add(Conv2D(63, kernel_size=(3, 3), input_shape=input_shape))
# Followed by 2x2 max pooling
model.add(MaxPooling2D(pool_size=(2, 2)))
# Flattening the 2D arrays for fully connected layers
model.add(Flatten()) 
model.add(Dense(1000, activation=tf.nn.relu))
model.add(Dropout(0.2))  # Combat overfitting by randomly dropping units
# The softmax layer needs one neuron per class (10 here, for digits 0-9)
model.add(Dense(10, activation=tf.nn.softmax))


#Visualize Network Architecture
plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)

model.compile(optimizer='adam', 
              loss='sparse_categorical_crossentropy', 
              metrics=['accuracy'])
# x_train, y_train, batch_size, verbosity and validation_split are assumed
# to be defined earlier
history = model.fit(x=x_train, y=y_train, batch_size=batch_size,
                    epochs=30,
                    verbose=verbosity,
                    validation_split=validation_split)

As you can see above, the loss function is 'sparse_categorical_crossentropy', and the model runs for 30 epochs, generating a training and a validation loss for each epoch. In this case there are 10 classes, as set in the last Dense layer.
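
For reference, Keras already records these per-epoch values: fit() returns a History object whose history attribute maps each tracked quantity to a list with one entry per epoch (a minimal sketch, assuming the compile/fit call above):

# history.history is a dict of per-epoch lists:
# history.history['loss']     -> [loss_epoch_1, ..., loss_epoch_30]
# history.history['val_loss'] -> [val_loss_epoch_1, ..., val_loss_epoch_30]
print(len(history.history['loss']))  # 30, one entry per epoch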

My question is: how do we compute the total loss over all 30 epochs based on sparse_categorical_crossentropy, rather than the individual loss for each epoch?

I added another example with the Sequential API, maybe that's easier for you. – Nicolas Gervais

1 Answer

The fit() method isn't really made for this kind of customization, so why not use a custom training loop?

import tensorflow as tf
from sklearn.datasets import load_iris

iris, target = load_iris(return_X_y=True)

# Regression setup: predict the 4th feature from the first three
X = iris[:, :3]
y = iris[:, 3].reshape(-1, 1)

ds = tf.data.Dataset.from_tensor_slices((X, y)).shuffle(25).batch(8)

model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(3,)),
    tf.keras.layers.Dense(1)])

loss_object = tf.keras.losses.MeanSquaredError()

optimizer = tf.keras.optimizers.Adam(learning_rate=5e-4)

# Per-epoch metrics (reset every epoch) and a running total (never reset)
loss = tf.keras.metrics.Mean(name='loss')
error = tf.keras.metrics.MeanSquaredError()
cumul_loss = tf.keras.metrics.Sum()

@tf.function
def train_step(inputs, targets):
    with tf.GradientTape() as tape:
        predictions = model(inputs)
        run_loss = loss_object(targets, predictions)
    gradients = tape.gradient(run_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    loss(run_loss)        # running mean of the batch losses this epoch
    cumul_loss(run_loss)  # running sum of all batch losses, across epochs
    error(targets, predictions)

for epoch in range(10):
    for data, labels in ds:
        train_step(data, labels)

    template = 'Epoch {:>2}, Loss: {:>7.4f}, MSE: {:>6.2f}, Cumul MSE: {:>7.2f}'
    print(template.format(epoch + 1,
                          loss.result(),
                          error.result() * 100,
                          cumul_loss.result()))

    # Reset the per-epoch metrics, but NOT cumul_loss
    loss.reset_states()
    error.reset_states()
Epoch  5, Loss:  0.0947, MSE:   9.37, Cumul MSE:   16.39
Epoch  6, Loss:  0.0807, MSE:   8.04, Cumul MSE:   17.92
Epoch  7, Loss:  0.0675, MSE:   6.76, Cumul MSE:   19.21
Epoch  8, Loss:  0.0623, MSE:   6.19, Cumul MSE:   20.39
Epoch  9, Loss:  0.0559, MSE:   5.52, Cumul MSE:   21.45
Epoch 10, Loss:  0.0521, MSE:   5.15, Cumul MSE:   22.44
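
Note that loss and error are reset at the end of every epoch, while cumul_loss never is, so the Cumul MSE column keeps growing: it is the sum of every batch loss seen since training started.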

Here's an example that stays with the Sequential API and fit(), accumulating the total by subclassing tf.keras.callbacks.Callback:

import tensorflow as tf
from tensorflow.keras.layers import Dense
from sklearn.datasets import load_iris
import numpy as np

iris, target = load_iris(return_X_y=True)

# Classification setup: first three features as inputs, and the three
# iris classes (not a continuous feature) as the sparse labels
X = iris[:, :3].astype(np.float32)
y = target

model = tf.keras.models.Sequential([
    Dense(8, input_shape=(3,), activation='relu'),
    Dense(3, activation='softmax')
])

# Running total of the per-epoch losses, never reset
cumul_loss = tf.metrics.Sum(name='cumul_loss')

model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')

class CustomCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        # logs['loss'] is the mean training loss of the epoch that just ended
        cumul_loss.update_state(logs['loss'])
        print(' {}: {:.4f}'.format(cumul_loss.name, cumul_loss.result()), end='')

history = model.fit(X, y, callbacks=[CustomCallback()], epochs=10)
Epoch 10/10
 32/150 [=====>........................] - ETA: 0s - loss: 1.7872 cumul_loss: 25.0464
150/150 [==============================] - 0s 113us/sample - loss: 1.8853
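
Since fit() was used here, the same total is also available after training without any callback, because the per-epoch losses are stored in the History object:

total_loss = sum(history.history['loss'])  # sum of the 10 per-epoch mean losses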