from dataset import get_strange_symbol_loader, get_strange_symbols_test_data

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(28*28, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 15)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return F.softmax(x, dim=1)


if __name__ == '__main__':
    net = Net()
    train, test = get_strange_symbol_loader(batch_size=128)
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=1e-3)

    Accuracy = []

    for epoch in range(30):
        print("epoch", epoch)

        # Train
        for data in train:
            img, label = data
            net.zero_grad()
            output = net(img.view(-1, 28*28))
            loss = F.nll_loss(output, label)
            loss.backward()
            optimizer.step()

        # Test
        correct, total = 0, 0
        with torch.no_grad():
            for data in test:
                img, label = data
                output = net(img.view(-1, 784))
                for idx, i in enumerate(output):
                    if torch.argmax(i) == label[idx]:
                        correct += 1
                    total += 1

        Accuracy.append(round(correct / total, 3))

    print("Accuracy: ", Accuracy)
Above is my neural network, made with PyTorch and based on the one by Sentdex. I'm using a dataset given to me by my university course administrators, loaded via the function get_strange_symbol_loader(batch_size=128).
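For reference, this is how I sanity-check what the loaders return (a minimal sketch, assuming both loaders yield (img, label) batches the same way the training loop above consumes them):

    from dataset import get_strange_symbol_loader

    train, test = get_strange_symbol_loader(batch_size=128)

    # Peek at one batch from each loader and print the tensor shapes and the
    # range of label values, to confirm train and test are labelled alike.
    for name, loader in [("train", train), ("test", test)]:
        img, label = next(iter(loader))
        print(name, "img:", tuple(img.shape), "label:", tuple(label.shape),
              "labels in [", label.min().item(), ",", label.max().item(), "]")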
When I run this code, it reports an accuracy of 1.0 in every epoch. However, running the #Test block only once, after the for loop over epoch has finished, gives somewhat more realistic results. Why does this happen?
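To be concrete, the variant that gives the realistic numbers is the same evaluation code, just moved out of the epoch loop and run a single time after training (sketched below; nothing else changes):

    # Same #Test block as above, but executed once after the 30 epochs finish
    # instead of once per epoch.
    correct, total = 0, 0
    with torch.no_grad():
        for data in test:
            img, label = data
            output = net(img.view(-1, 784))
            for idx, i in enumerate(output):
                if torch.argmax(i) == label[idx]:
                    correct += 1
                total += 1

    print("Final accuracy:", round(correct / total, 3))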
My goal here is to plot test accuracy against the number of epochs, to find how many epochs I can train for before the model starts to overfit.
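Roughly, the plot I have in mind looks like this (a sketch assuming matplotlib is installed and Accuracy holds one value per epoch):

    import matplotlib.pyplot as plt

    # One test-accuracy value per epoch; look for the point where the curve
    # flattens or starts dropping, i.e. where overfitting begins.
    plt.plot(range(1, len(Accuracy) + 1), Accuracy, marker="o")
    plt.xlabel("epoch")
    plt.ylabel("test accuracy")
    plt.title("Test accuracy vs. training epochs")
    plt.show()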