
I am training a model to classify two types of images. I decided to take a transfer-learning approach: freeze every part of a pretrained ResNet-50, replace the final layer with a new head, and fine-tune only that head. My dataset is not perfectly balanced, but I used class weights to compensate. Please take a look at the validation loss vs. training loss graph below; it seems extremely inconsistent. Could you please take a look at my code? I am new to PyTorch, so maybe there is something wrong with my method or code. Final accuracy on the test set is 86%. Thank you!

[Plot: training loss vs. validation loss per epoch]
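As background on the class weighting mentioned above: the `weights = [4, 1]` below are hand-picked. A common alternative is to derive weights from the label counts (inverse-frequency weighting); a minimal sketch, where the per-class counts are made-up numbers for illustration:

    import torch

    class_counts = torch.tensor([250.0, 1000.0])  # hypothetical per-class image counts
    class_weights = class_counts.sum() / (len(class_counts) * class_counts)
    print(class_weights)  # tensor([2.5000, 0.6250]) -- rarer class gets the larger weight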

    import numpy as np
    import torch
    import torch.nn as nn
    from torchvision import models

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    learning_rate = 1e-1
    num_epochs = 100
    patience = 10
    batch_size = 100
    weights = [4, 1]  # class weights for the imbalanced dataset

    model = models.resnet50(pretrained=True)

    # Replace the last layer with a new classifier head
    num_features = model.fc.in_features
    model.fc = nn.Sequential(
        nn.Linear(num_features, 512),
        nn.ReLU(inplace=True),
        nn.Linear(512, 64),
        nn.Dropout(0.5, inplace=True),
        nn.Linear(64, 2))
    model = model.to(device)

    class_weights = torch.FloatTensor(weights).to(device)
    criterion = nn.CrossEntropyLoss(weight=class_weights)
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

    # Lists used to track losses within and across epochs
    train_losses, valid_losses = [], []
    avg_train_losses, avg_valid_losses = [], []

    # Freeze the residual layers; only the new head stays trainable
    for param in model.parameters():
        param.requires_grad = False
    for param in model.fc.parameters():
        param.requires_grad = True

    # Find total parameters and trainable parameters
    total_params = sum(p.numel() for p in model.parameters())
    print(f'{total_params:,} total parameters.')
    total_trainable_params = sum(
        p.numel() for p in model.parameters() if p.requires_grad)
    print(f'{total_trainable_params:,} training parameters.')

24,590,082 total parameters.
1,082,050 training parameters.
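A side note on the optimizer: since most of the network is frozen, you can pass only the trainable parameters to SGD. This is optional (plain SGD simply skips parameters that never receive gradients), but it makes the intent explicit. A minimal sketch, reusing `model` and `learning_rate` from above, run after the freezing step:

    trainable_params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(trainable_params, lr=learning_rate)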

# initialize the early_stopping object
early_stopping = pytorchtools.EarlyStopping(patience=patience, verbose=True)
for epoch in range(num_epochs):
    ##########################    
    #######TRAIN MODEL########
    ##########################
    # Switch to train mode
    model.train()
    for images, labels in train_dl:
        # Move tensors to the configured device
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backpropagation and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Track the per-batch training loss
        train_losses.append(loss.item())
    
    ##########################    
    #####VALIDATE MODEL#######
    ##########################
    model.eval()
    with torch.no_grad():  # no gradients needed during validation
        for images, labels in val_dl:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)
            valid_losses.append(loss.item())
    
    # Calculate average loss over the epoch
    train_loss = np.average(train_losses)
    valid_loss = np.average(valid_losses)
    avg_train_losses.append(train_loss)
    avg_valid_losses.append(valid_loss)

    print(f'epoch: {epoch} train_loss: {train_loss:.5f} valid_loss: {valid_loss:.5f}')

    # Clear the per-batch lists to track the next epoch
    train_losses = []
    valid_losses = []

    # Check whether validation loss has stopped improving
    early_stopping(valid_loss, model)
        
    if early_stopping.early_stop:
        print("Early stopping")
        break
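For reference, `pytorchtools.EarlyStopping` above comes from the early-stopping-pytorch helper (Bjarten/early-stopping-pytorch on GitHub). If you don't have it installed, here is a minimal sketch with the same call pattern used above; the checkpoint path is an assumption, and the real implementation also supports a minimum-improvement delta:

    import torch

    class EarlyStopping:
        """Stop training when validation loss stops improving (minimal sketch)."""
        def __init__(self, patience=7, verbose=False, path='checkpoint.pt'):
            self.patience = patience
            self.verbose = verbose
            self.path = path  # where the best model weights are saved
            self.counter = 0
            self.best_loss = float('inf')
            self.early_stop = False

        def __call__(self, val_loss, model):
            if val_loss < self.best_loss:
                # Validation loss improved: checkpoint the model, reset counter
                self.best_loss = val_loss
                torch.save(model.state_dict(), self.path)
                self.counter = 0
            else:
                self.counter += 1
                if self.verbose:
                    print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
                if self.counter >= self.patience:
                    self.early_stop = True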
Your code looks OK to me, but this looks like excessive overfitting. You could reduce the number of learnable parameters and see what happens. How many images do you have in the training set? - Umang Gupta
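For what it's worth, one way to act on that suggestion is to shrink the new head to a single linear layer; a hedged sketch, reusing `num_features` from the question:

    model.fc = nn.Linear(num_features, 2)  # ~4k trainable parameters instead of ~1.08M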