一、前言
首先介绍两个 python 函数:dir 和 help。这两个是 python 中的函数,所以使用时需要进入 python 交互模式,然后使用如下例子。
1
2
3
4
# Explore the torch package interactively: dir() lists the names an
# object exposes, help() prints the documentation for an object.
import torch
dir(torch)
dir(torch.zeros)
help(torch.zeros.__call__)
此外在训练过程中可以使用 TensorBoard 对训练过程进行监视。TensorBoard 是一个强大的可视化工具,最初由 TensorFlow 团队开发,但现在也完全支持 PyTorch。它主要用于监控、分析和可视化机器学习实验的各个方面。使用示例如下:
1
2
3
4
5
6
# Minimal TensorBoard usage: log one image under the tag "Normalize".
from torch.utils.tensorboard import SummaryWriter
# Event files are written into the ./logs directory.
writer = SummaryWriter("logs")
# img is an image tensor (e.g. produced by transforms.ToTensor())
writer.add_image("Normalize", img, 0)
writer.close()
然后在控制台输入指令 `python -m tensorboard.main --logdir=logs`。
二、简单工作流程
PyTorch 是一个基于 Python 的开源机器学习框架,由 Facebook 的 AI 研究团队开发。以图像分类任务为例,一个完整的模型训练流程如下:
-
数据准备
-
模型定义
-
损失函数与优化器
-
模型训练
-
模型评估
模型训练的过程如上,最终会得到一个训练完成的模型,可以用来推理。
三、数据准备
数据准备一般会用到 transforms 、 datasets 、 DataLoader 等工具。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
# transforms usage: load a PIL image, apply common torchvision transforms,
# and log each result to TensorBoard for visual comparison.
from PIL import Image
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("logs")
img_path = "dataset/train/ants_image/0013035.jpg"
img = Image.open(img_path)  # PIL image

# ToTensor: PIL image -> float tensor scaled to [0, 1]
to_tensor = transforms.ToTensor()
img_tensor = to_tensor(img)
writer.add_image("ToTensor", img_tensor)

# Normalize: per-channel (x - mean) / std
normalize = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
writer.add_image("Normalize", normalize(img_tensor), 0)

# Resize to a fixed 512x512, then convert to tensor
resize = transforms.Resize((512, 512))
writer.add_image("Resize", to_tensor(resize(img)), 0)

# Compose chains transforms: shorter-side resize to 512, then ToTensor
pipeline = transforms.Compose([transforms.Resize(512), to_tensor])
writer.add_image("Resize", pipeline(img), 1)

writer.close()
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
# datasets: download CIFAR-10 and log a few test images to TensorBoard.
import torchvision
from torch.utils.tensorboard import SummaryWriter

dataset_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor()
])
# root: data directory; transform: applied to every sample;
# download: fetch the dataset if it is not present locally.
train_set = torchvision.datasets.CIFAR10(root='./dataset', train=True, transform=dataset_transform, download=True)
test_set = torchvision.datasets.CIFAR10(root='./dataset', train=False, transform=dataset_transform, download=True)

writer = SummaryWriter("dataset")
for idx in range(10):
    img, target = test_set[idx]
    writer.add_image("test_set", img, idx)
writer.close()
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
# DataLoader: batch the CIFAR-10 test split and log every batch per epoch.
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

test_data = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor())
# batch_size: samples per batch; shuffle: reshuffle each epoch;
# num_workers: loader worker processes; drop_last: drop a final short batch.
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=True, num_workers=0, drop_last=True)

writer = SummaryWriter("dataloader")
for epoch in range(2):
    for step, (imgs, targets) in enumerate(test_loader):
        writer.add_images("Epoch: {}".format(epoch), imgs, step)
writer.close()
四、模型定义
模型一般都是指神经网络。在模型定义时一般都是通过自定义一个模型类来继承 torch.nn.Module 这个类,并且重写 __init__ 和 forward 函数来定义自己的模型。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import torch
import torch.nn as nn
import torch.nn.functional as F


class SimpleCNN(nn.Module):
    """Small conv net for demoing how CIFAR-10 images flow through a model.

    The final conv layer maps back to 3 channels so the network output can
    be reshaped into (3, 32, 32) images and viewed in TensorBoard.
    Assumes 32x32 RGB input.
    """

    def __init__(self, num_classes=4096):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 3, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        # conv3 outputs 3 channels on an 8x8 map, so each sample flattens to
        # 3*8*8 features. (The original 64*8*8 silently merged samples across
        # the batch dimension and crashed for most batch sizes.)
        self.fc1 = nn.Linear(3 * 8 * 8, 128 * 32)
        self.fc2 = nn.Linear(128 * 32, num_classes)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))  # (B, 32, 16, 16)
        x = self.pool(F.relu(self.conv2(x)))  # (B, 64, 8, 8)
        x = F.relu(self.conv3(x))             # (B, 3, 8, 8)
        x = x.view(x.size(0), -1)             # flatten per sample
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x


def main():
    """Run a CIFAR-10 batch through the model and log input/output images."""
    # Heavy optional deps are imported here so the module can be imported
    # (e.g. for reuse of SimpleCNN) without them.
    import torchvision
    from torch.utils.data import DataLoader
    from torch.utils.tensorboard import SummaryWriter

    dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True,
                                           transform=torchvision.transforms.ToTensor())
    dataloader = DataLoader(dataset, batch_size=64, drop_last=True)
    # num_classes = 3*32*32 so each sample's output reshapes to one 3x32x32 image.
    net = SimpleCNN(num_classes=3 * 32 * 32)
    writer = SummaryWriter(log_dir='./logs')
    step = 0
    for data in dataloader:
        imgs, targets = data
        output = net(imgs)
        writer.add_images("input", imgs, step)
        output = torch.reshape(output, (-1, 3, 32, 32))
        writer.add_images("output", output, step)
        step = step + 1
    writer.close()


if __name__ == "__main__":
    main()
注意:torch.nn.Conv2d 和 torch.nn.functional.conv2d 是两个不同的东西。前者是一个类,会自动创建和管理卷积核;后者是一个函数,需要手动传入卷积核。
除此之外还可以使用 Sequential 的风格来定义模型。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import torch
import torch.nn as nn
from torch.nn import Conv2d, MaxPool2d, Linear, Dropout, Sequential, ReLU


class SimpleCNN(nn.Module):
    """Sequential-style variant of the demo model.

    `features` ends with a conv back to 3 channels so the output can be
    reshaped into (3, 32, 32) images for TensorBoard. Assumes 32x32 input.
    """

    def __init__(self, num_classes=4096):
        super(SimpleCNN, self).__init__()
        self.features = Sequential(
            Conv2d(3, 32, kernel_size=3, padding=1),
            ReLU(),
            MaxPool2d(2, 2),
            Conv2d(32, 64, kernel_size=3, padding=1),
            ReLU(),
            MaxPool2d(2, 2),
            Conv2d(64, 3, kernel_size=3, padding=1),
            ReLU(),
        )
        self.classifier = Sequential(
            # features ends with 3 channels on an 8x8 map -> 3*8*8 per sample.
            # (The original 64*8*8 merged samples across the batch dimension.)
            Linear(3 * 8 * 8, 128 * 32),
            ReLU(),
            Dropout(0.5),
            Linear(128 * 32, num_classes)
        )

    def forward(self, x):
        x = self.features(x)       # (B, 3, 8, 8)
        x = x.view(x.size(0), -1)  # flatten per sample
        x = self.classifier(x)
        return x


def main():
    """Run a CIFAR-10 batch through the model and log input/output images."""
    import torchvision
    from torch.utils.data import DataLoader
    from torch.utils.tensorboard import SummaryWriter

    dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True,
                                           transform=torchvision.transforms.ToTensor())
    dataloader = DataLoader(dataset, batch_size=64, drop_last=True)
    # num_classes = 3*32*32 so each sample's output reshapes to one image.
    net = SimpleCNN(num_classes=3 * 32 * 32)
    writer = SummaryWriter(log_dir='./logs')
    step = 0
    for data in dataloader:
        imgs, targets = data
        output = net(imgs)
        writer.add_images("input", imgs, step)
        output = torch.reshape(output, (-1, 3, 32, 32))
        writer.add_images("output", output, step)
        step = step + 1
    writer.close()


if __name__ == "__main__":
    main()
五、损失函数与优化器
以分类任务为例,CIFAR10 数据集中有十种不同类别的目标。当图像数据经过我们选好的模型时,会输出一个包含十个元素的向量,类似 [0.1, 0.12, ..., 0.43],其中每一项代表该图像属于对应类别的概率。之后使用这个输出与标签计算损失函数:输出中真实标签对应的概率越大,损失值就越小。得到损失值后即可进行反向传播,并使用优化器对网络各层的参数进行优化。
六、模型训练
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
import torch
import torch.nn as nn
from torch.nn import Conv2d, MaxPool2d, Linear, Dropout, Sequential, ReLU


class SimpleCNN(nn.Module):
    """CIFAR-10 classifier: two conv/pool stages plus a small MLP head.

    Assumes 32x32 RGB input, so features produce a (64, 8, 8) map.
    """

    def __init__(self, num_classes=10):
        super(SimpleCNN, self).__init__()
        self.features = Sequential(
            Conv2d(3, 32, kernel_size=3, padding=1),
            ReLU(),
            MaxPool2d(2, 2),
            Conv2d(32, 64, kernel_size=3, padding=1),
            ReLU(),
            MaxPool2d(2, 2),
        )
        self.classifier = Sequential(
            Linear(64 * 8 * 8, 128),
            ReLU(),
            Dropout(0.5),
            Linear(128, num_classes)
        )

    def forward(self, x):
        x = self.features(x)          # (B, 64, 8, 8) for 32x32 input
        x = x.view(-1, 64 * 8 * 8)    # flatten: 64*8*8 features per sample
        x = self.classifier(x)
        return x


def main():
    """Train for a few epochs, then demonstrate saving/loading the model."""
    import torchvision
    from torch.utils.data import DataLoader

    # NOTE(review): the *test* split is used here only to keep the demo small;
    # real training should use train=True.
    dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True,
                                           transform=torchvision.transforms.ToTensor())
    dataloader = DataLoader(dataset, batch_size=3, drop_last=True)

    net = SimpleCNN(num_classes=10)
    loss = nn.CrossEntropyLoss()
    # lr: typically set larger early in training and decayed later.
    optim = torch.optim.SGD(net.parameters(), lr=0.01)

    for epoch in range(10):
        running_loss = 0.0
        net.train()  # training mode; only affects layers such as Dropout/BatchNorm
        for data in dataloader:
            imgs, targets = data
            output = net(imgs)
            result_loss = loss(output, targets)
            optim.zero_grad()
            result_loss.backward()
            optim.step()
            # .item() detaches the scalar; accumulating the loss *tensor*
            # would keep every step's autograd graph alive (memory leak).
            running_loss += result_loss.item()
        print(running_loss)

    # Saving/loading, option 1: parameters only (preferred).
    torch.save(net.state_dict(), 'simplecnn.pth')
    new_net = SimpleCNN(num_classes=10)
    new_net.load_state_dict(torch.load('simplecnn.pth'))
    # Option 2: structure + parameters. NOTE(review): torch >= 2.6 defaults
    # torch.load to weights_only=True, so this load needs weights_only=False.
    torch.save(net, 'simplecnn2.pth')
    new_net2 = torch.load('simplecnn2.pth')


if __name__ == "__main__":
    main()
除了自己训练模型之外还可以使用很多已经定义好甚至训练好的模型。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# Load torchvision's VGG16 twice -- pretrained vs. randomly initialized --
# then demonstrate three ways of modifying an existing model's structure.
import torchvision
from torch import nn
vgg16_true = torchvision.models.vgg16(weights='DEFAULT')
vgg16_false = torchvision.models.vgg16(weights=None)
print(vgg16_true)
print(vgg16_false)
# Modify the model structure:
# 1) register a new top-level submodule. NOTE(review): it is not wired into
#    forward(), so it never runs -- illustration of add_module only.
vgg16_false.add_module('new', nn.Linear(in_features=1000, out_features=10, bias=True))
# 2) append a layer inside the existing `classifier` Sequential
vgg16_false.classifier.add_module('7', nn.Linear(in_features=1000, out_features=1000, bias=True))
# 3) replace a layer in place by index. NOTE(review): features[1] is a ReLU in
#    stock VGG16; swapping in a Conv2d(3, 32, ...) would break a real forward
#    pass -- this only illustrates index assignment.
vgg16_false.features[1] = nn.Conv2d(3, 32, kernel_size=3, padding=1)
print(vgg16_false)
七、模型评估
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
def evaluate_model(model, test_loader, criterion=None):
    """Evaluate *model* on *test_loader*, print loss/accuracy, return accuracy.

    Args:
        model: a trained ``nn.Module``; evaluation runs on whatever device
            the model's parameters live on.
        test_loader: iterable yielding ``(inputs, labels)`` batches.
        criterion: loss function; defaults to ``CrossEntropyLoss``.
            (The original relied on an undefined global ``criterion``,
            which raised NameError.)

    Returns:
        Accuracy over the whole loader, as a percentage in [0, 100].
    """
    if criterion is None:
        criterion = torch.nn.CrossEntropyLoss()
    model.eval()  # evaluation mode; only affects layers such as Dropout/BatchNorm
    device = next(model.parameters()).device
    correct = 0
    total = 0
    test_loss = 0.0
    with torch.no_grad():  # disable gradient tracking for inference
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            test_loss += loss.item()
            # Predicted class = index of the max logit per sample.
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    accuracy = 100 * correct / total
    avg_loss = test_loss / len(test_loader)
    print(f'Test Loss: {avg_loss:.4f}, Test Accuracy: {accuracy:.2f}%')
    return accuracy
八、结尾
pytorch 简单的使用大概就这些,具体细节的话可以看 pytorch 的官网文档。下一步可以考虑了解一下如何使用 GPU 对模型进行训练,然后就可以看一下开源的模型代码。
这里也简单介绍一下如何使用 GPU,首先需要安装指定版本的驱动软件和 cuda,这里不多介绍,可以在网上搜索一些相关的教程。
简单地使用 GPU 只需要把模型、数据(输入和标签)、损失函数 放到 cuda 里边即可,例子:
1
2
3
4
5
6
7
8
9
# Option 1: move the model onto the default GPU when one is available.
if torch.cuda.is_available():
    net = net.cuda()

# Option 2: build explicit torch.device handles and move the model with .to().
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
gpu0 = torch.device('cuda:0')  # first GPU
gpu1 = torch.device('cuda:1')  # second GPU
if torch.cuda.is_available():
    net = net.to(gpu0)
使用 GPU 训练可能比使用 CPU 快10倍,但是需要注意的是在 GPU 上训练保存的模型,在 CPU 上加载时需要指定 CPU,model = torch.load('model.pth', map_location=torch.device('cpu'))。