import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import torch.nn.functional as F

# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])

# Download training data from open datasets.
train_data = datasets.MNIST(
    root="data",
    train=True,
    download=True,
    transform=transform,
)

# Download test data from open datasets.
test_data = datasets.MNIST(
    root="data",
    train=False,
    download=True,
    transform=transform,
)

# YOUR CODE HERE
# One possible solution: wrap the datasets in dataloaders. The loop below
# expects `train_dataloader` and `test_dataloader`; the batch size is an
# arbitrary choice, not a required value.
batch_size = 64
train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=batch_size)

# YOUR CODE HERE
# One possible solution: the loop below expects `model`, `loss_fn`, and
# `optimizer` to exist. This small fully connected classifier and its
# hyperparameters are one reasonable choice, not the only valid one.
class NeuralNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.layers = nn.Sequential(
            nn.Linear(28 * 28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),  # 10 logits, one per digit class
        )

    def forward(self, x):
        x = self.flatten(x)
        return self.layers(x)

model = NeuralNetwork().to("cpu")
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

# I FILLED THIS IN FOR YOU
num_epochs = 3  # Set this to the number of your choice

for epoch in range(num_epochs):
    print(f"Train/Test Epoch Round {epoch + 1}")
    print("------------------------")

    # Train the model (loop over batches of training examples)
    model.train()
    # Note: len(train_dataloader) counts batches; the dataset holds the samples.
    num_training_samples = len(train_dataloader.dataset)
    for i, (X, y) in enumerate(train_dataloader):
        X = X.to("cpu")
        y = y.to("cpu")

        # Compute prediction error for the batch
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        # Log our progress every 100 batches
        if i % 100 == 0:
            print(f"loss: {loss.item():>7f} [{(i + 1) * len(X):>5d}/{num_training_samples:>5d}]")

    # Test the epoch (loop over batches of testing examples)
    model.eval()
    num_test_samples = len(test_dataloader.dataset)
    num_test_batches = len(test_dataloader)
    total_loss = 0.0
    num_correct = 0
    with torch.no_grad():
        for X, y in test_dataloader:
            X = X.to("cpu")
            y = y.to("cpu")
            pred = model(X)
            total_loss += loss_fn(pred, y).item()
            num_correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    # Evaluate: loss_fn returns a per-batch mean, so average the loss over
    # batches; accuracy is computed per sample.
    avg_loss = total_loss / num_test_batches
    accuracy = num_correct / num_test_samples
    print(f"Test Error: \n Accuracy: {(100 * accuracy):>0.1f}%, Avg loss: {avg_loss:>7f}")

print("Model is done!")

# YOUR CODE HERE

# YOUR BONUS CODE HERE
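
# A minimal sketch of one way the final "# YOUR CODE HERE" / "# YOUR BONUS
# CODE HERE" cells could be completed, assuming they ask for saving the
# trained model and classifying a single test image. The file name
# "model.pth" and the choice of test example are assumptions, not part of
# the original exercise.
torch.save(model.state_dict(), "model.pth")
print("Saved model weights to model.pth")

# Reload the weights into a fresh network and run one prediction.
loaded_model = NeuralNetwork()
loaded_model.load_state_dict(torch.load("model.pth"))
loaded_model.eval()

X, y = test_data[0]  # a single (image, label) pair
with torch.no_grad():
    pred = loaded_model(X.unsqueeze(0))  # add a batch dimension
    print(f"Predicted: {pred.argmax(1).item()}, Actual: {y}")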