Training Loop
Training Loop¶
TO DO - YOUR CODE HERE
Within the training loop, code the four steps to complete the automatic differentiation:
- Forward pass
- Calculate loss
- Send the loss backward through the network
- Use the optimizer to calculate the gradients and update weights
def train_network(model, train_loader, val_loader, optimizer, criterion, max_epochs,
                  checkpoint_path=None):
    """Train ``model`` for ``max_epochs`` epochs, validating after each one.

    For every training batch the standard automatic-differentiation cycle is
    performed: forward pass, loss computation, backward pass, optimizer step.
    After each epoch the model is evaluated on the validation set with
    gradients disabled, and the state dict is checkpointed whenever the
    validation loss reaches a new minimum.

    Args:
        model: ``torch.nn.Module`` to train (already placed on its device).
        train_loader: DataLoader yielding ``(inputs, labels)`` training batches.
        val_loader: DataLoader yielding ``(inputs, labels)`` validation batches.
        optimizer: torch optimizer wrapping ``model.parameters()``.
        criterion: loss function, e.g. ``nn.CrossEntropyLoss()``.
        max_epochs: number of epochs to run.
        checkpoint_path: file path for the best-model checkpoint. When ``None``
            the notebook-level global ``PATH`` is used (original behavior).

    Returns:
        ``(val_loss, val_acc)``: per-epoch lists of mean validation loss and
        validation accuracy.
    """
    # Move batches to wherever the model's parameters live, instead of
    # depending on a notebook-level `device` global.
    device = next(model.parameters()).device
    min_valid_loss = np.inf
    val_loss = []
    val_acc = []
    for epoch in range(max_epochs):
        train_loss = 0.0
        # --- Training loop ---
        model.train()  # enable training-mode behavior (dropout, batchnorm)
        for inputs, labels in train_loader:
            # Transfer data to device
            inputs, labels = inputs.to(device), labels.to(device)
            # Clear gradients accumulated from the previous batch
            optimizer.zero_grad()
            # Forward pass
            outputs = model(inputs)
            # Find the loss
            loss = criterion(outputs, labels)
            # Send the loss backward through the network
            loss.backward()
            # Use the optimizer to apply the gradients and update the weights
            optimizer.step()
            # Accumulate batch loss
            train_loss += loss.item()

        valid_loss = 0.0
        epoch_total = 0
        epoch_correct = 0
        # --- Validation loop: the network weights must not be adjusted ---
        model.eval()  # disable training-mode behavior for evaluation
        with torch.no_grad():
            for inputs, labels in val_loader:
                # Transfer data to device
                inputs, labels = inputs.to(device), labels.to(device)
                # Forward pass
                outputs = model(inputs)
                # Find the loss
                loss = criterion(outputs, labels)
                valid_loss += loss.item()
                # Accuracy: predicted class is the arg-max over the logits
                _, predicted = torch.max(outputs, 1)
                epoch_total += labels.size(0)
                epoch_correct += (predicted == labels).sum().item()

        # NOTE: the original divided by undefined `validloader`/`trainloader`;
        # the actual parameters are val_loader/train_loader.
        val_loss.append(valid_loss / len(val_loader))
        val_acc.append(epoch_correct / epoch_total)
        print(f'Epoch {epoch+1}, Training Loss: {train_loss / len(train_loader):.5f}, '
              f'Validation Loss: {valid_loss / len(val_loader):.5f}')

        if min_valid_loss > valid_loss:
            min_valid_loss = valid_loss
            # Checkpoint the best-so-far weights
            save_path = checkpoint_path if checkpoint_path is not None else PATH
            torch.save(model.state_dict(), save_path)
    return val_loss, val_acc

Step by step
Solved in 2 steps









