📝 Common PyTorch Code Snippets
Notes inspired by d2l’s PyTorch content
Device Switching
Try to use a GPU if one is available:
import torch
from torch import nn  # Also used by the later snippets

def try_gpu(i=0):
    """Return gpu(i) if it exists, otherwise return cpu()"""
    if torch.cuda.device_count() >= i + 1:
        return torch.device(f'cuda:{i}')
    return torch.device('cpu')

def try_all_gpus():
    """Find all available GPUs, or return [cpu()] if there is no GPU"""
    devices = [torch.device(f'cuda:{i}')
               for i in range(torch.cuda.device_count())]
    return devices if devices else [torch.device('cpu')]
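A quick sanity check of these helpers (the printed devices depend on the machine):
print(try_gpu())       # cuda:0 if at least one GPU exists, otherwise cpu
print(try_gpu(10))     # cpu, unless the machine really has 11 or more GPUs
print(try_all_gpus())  # e.g. [device(type='cuda', index=0)] or [device(type='cpu')]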
Accumulator
class Accumulator:
    """Used for accumulating values"""
    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        """Add a list of values to the existing data"""
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
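For example, an Accumulator of length 2 can keep a running total of correct predictions and sample counts across mini-batches (toy numbers, for illustration):
metric = Accumulator(2)
metric.add(8, 10)  # 8 correct out of 10 samples in the first batch
metric.add(9, 10)  # 9 correct out of 10 in the second batch
print(metric[0] / metric[1])  # 0.85, the running accuracy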
Accuracy
To evaluate classification accuracy during training, you can use the following code:
def accuracy(y_hat, y):
    """Compute the number of correct predictions"""
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(axis=1)  # Take the class with the highest score
    cmp = y_hat.type(y.dtype) == y
    return float(cmp.type(y_hat.dtype).sum())
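A toy example (values made up for illustration): the row-wise argmax picks classes [1, 0, 1], two of which match the labels:
y_hat = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # Predicted scores for 3 samples
y = torch.tensor([1, 0, 0])  # Ground-truth labels
print(accuracy(y_hat, y))            # 2.0 correct predictions
print(accuracy(y_hat, y) / len(y))   # ~0.667 accuracy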
def evaluate_accuracy(net, data_iter):
    """Compute the accuracy of a model on a dataset"""
    if isinstance(net, nn.Module):
        net.eval()  # Set to evaluation mode
    # Use an accumulator to store the number of correct predictions and the total number of samples
    metric = Accumulator(2)
    with torch.no_grad():
        for X, y in data_iter:
            metric.add(accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]
When evaluating on a GPU, additional handling is needed to move each mini-batch to the same device as the model:
def evaluate_accuracy_gpu(net, data_iter, device=None):
    """Compute accuracy on a GPU"""
    if isinstance(net, nn.Module):
        net.eval()  # Set to evaluation mode
    if not device:  # If no device is specified, use the device where the network parameters live
        device = next(iter(net.parameters())).device
    # Use an accumulator to store the number of correct predictions and the total number of samples
    metric = Accumulator(2)
    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(X, list):  # When X is a list, move each element to the device separately
                X = [x.to(device) for x in X]
            else:
                X = X.to(device)
            y = y.to(device)
            metric.add(accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]
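The device-inference line above simply reads the device of the first parameter; a quick illustrative check:
net = nn.Sequential(nn.Linear(4, 2)).to(try_gpu())
print(next(iter(net.parameters())).device)  # cuda:0 if a GPU exists, otherwise cpu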
Plotting Images
Define commonly used plotting helpers; they will be reused repeatedly during training:
import matplotlib.pyplot as plt
from IPython import display
from matplotlib_inline import backend_inline
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if legend:
        axes.legend(legend)
    axes.grid()
class Animator:
    """Used for plotting graphs incrementally during training"""
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        if legend is None:
            legend = []
        # display.set_matplotlib_formats was removed in IPython 8;
        # call matplotlib_inline directly instead
        backend_inline.set_matplotlib_formats('svg')  # Set plotting format to svg
        self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # Capture the arguments in a lambda so the axes can be reconfigured on each redraw
        self.config_axes = lambda: set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):
        """Add a set of data points and redraw the figure"""
        if not hasattr(y, "__len__"):
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)
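A minimal usage sketch in a notebook (the decaying curve is made up for illustration):
animator = Animator(xlabel='epoch', ylabel='loss', xlim=[1, 10], legend=['train loss'])
for epoch in range(1, 11):
    animator.add(epoch, 1.0 / epoch)  # Redraws the figure after each new point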
Training
Train a neural network on a single GPU:
def train(net, train_iter, test_iter, num_epochs, lr, device):
    # Initialize the weights of linear and convolutional layers
    def init_weights(m):
        if type(m) == nn.Linear or type(m) == nn.Conv2d:
            nn.init.xavier_uniform_(m.weight)
    net.apply(init_weights)
    # Move the model to the training device
    print('training on', device)
    net.to(device)
    # Define the optimizer and loss function
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    loss = nn.CrossEntropyLoss()
    # Set up the plot for training loss, training accuracy, and test accuracy
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs],
                        legend=['train loss', 'train acc', 'test acc'])
    # Number of mini-batches per epoch
    num_batches = len(train_iter)
    for epoch in range(num_epochs):
        # Accumulator of length 3: summed loss, number of correct predictions, number of samples
        metric = Accumulator(3)
        net.train()  # Set the network to training mode
        for i, (X, y) in enumerate(train_iter):
            optimizer.zero_grad()  # Zero the gradients
            X, y = X.to(device), y.to(device)  # Move the batch to the device
            # Forward pass
            y_hat = net(X)
            l = loss(y_hat, y)
            # Backward pass and parameter update
            l.backward()
            optimizer.step()
            with torch.no_grad():
                # l is the mean loss, so multiply by the batch size to recover the sum
                metric.add(l * X.shape[0], accuracy(y_hat, y), X.shape[0])
            train_l = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            # Update the training curves a few times per epoch
            # (max(1, ...) guards against num_batches < 5)
            if (i + 1) % max(1, num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (train_l, train_acc, None))
        # Compute the test accuracy once per epoch and plot it
        test_acc = evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
    print(f'loss {train_l:.3f}, train acc {train_acc:.3f}, '
          f'test acc {test_acc:.3f}')
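A hedged sketch of how train() might be invoked; the simple MLP and the train_iter/test_iter data loaders are illustrative assumptions, not part of the original notes:
net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(),
                    nn.Linear(256, 10))  # Toy classifier for 28x28 images
# train_iter and test_iter are assumed to be existing DataLoaders
train(net, train_iter, test_iter, num_epochs=10, lr=0.9, device=try_gpu())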
Train with multiple GPUs in parallel:
def train_batch(net, X, y, loss, trainer, devices):
    """Train a model for one mini-batch using multiple GPUs"""
    if isinstance(X, list):  # When X is a list, move each element separately
        X = [x.to(devices[0]) for x in X]
    else:
        X = X.to(devices[0])
    y = y.to(devices[0])
    net.train()  # Switch to training mode
    trainer.zero_grad()  # Zero the gradients
    pred = net(X)
    l = loss(pred, y)  # loss is expected to use reduction='none'
    l.sum().backward()  # Backpropagation
    trainer.step()  # Update the parameters
    # Compute the summed loss and the number of correct predictions
    train_loss_sum = l.sum()
    train_acc_sum = torch.sum(torch.argmax(pred, dim=1) == y).float()
    return train_loss_sum, train_acc_sum
def train(net, train_iter, test_iter, loss, trainer, num_epochs, devices=try_all_gpus()):
    """Train a model with multiple GPUs"""
    num_batches = len(train_iter)
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0, 1],
                        legend=['train loss', 'train acc', 'test acc'])
    # Replicate the model across the GPUs
    net = nn.DataParallel(net, device_ids=devices).to(devices[0])
    for epoch in range(num_epochs):
        # Accumulator of length 4: summed loss, correct predictions, number of examples, number of labels
        metric = Accumulator(4)
        for i, (features, labels) in enumerate(train_iter):
            l, acc = train_batch(net, features, labels, loss, trainer, devices)
            metric.add(l, acc, labels.shape[0], labels.numel())
            if (i + 1) % max(1, num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (metric[0] / metric[2], metric[1] / metric[3],
                              None))
        test_acc = evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
    print(f'loss {metric[0] / metric[2]:.3f}, train acc '
          f'{metric[1] / metric[3]:.3f}, test acc {test_acc:.3f}')
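Note that train_batch() calls l.sum(), so the loss should be constructed with reduction='none' so it returns per-example losses. A hedged sketch of an invocation (train_iter and test_iter are assumed to be existing DataLoaders):
loss = nn.CrossEntropyLoss(reduction='none')  # Per-example losses; summed inside train_batch
trainer = torch.optim.SGD(net.parameters(), lr=0.1)  # Hypothetical hyperparameters
train(net, train_iter, test_iter, loss, trainer, num_epochs=10)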
View Layer Structure
X = torch.rand(size=(1, 1, 224, 224))  # Dummy input; adjust the shape to match your network
for layer in net:  # Assumes net is an iterable container such as nn.Sequential
    X = layer(X)  # Forward pass through one layer
    print(layer.__class__.__name__, 'output shape:', X.shape)
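For example, with a small hypothetical convolutional network the loop prints the following shapes:
net = nn.Sequential(nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.ReLU(),
                    nn.MaxPool2d(kernel_size=2, stride=2),
                    nn.Flatten(), nn.Linear(6 * 112 * 112, 10))
X = torch.rand(size=(1, 1, 224, 224))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:', X.shape)
# Conv2d output shape: torch.Size([1, 6, 224, 224])
# ReLU output shape: torch.Size([1, 6, 224, 224])
# MaxPool2d output shape: torch.Size([1, 6, 112, 112])
# Flatten output shape: torch.Size([1, 75264])
# Linear output shape: torch.Size([1, 10])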