Implementing Softmax Regression in PyTorch

# A concise implementation of softmax regression
import torch
from torch import nn
from torchvision import transforms
import torchvision
from torch.utils import data

# Function to load the dataset
def load_data_fashion_mnist(b_size, resize=None):
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))  # insert the resize transform at position 0
    trans = transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(root='../data', train=True, transform=trans, download=True)
    mnist_test = torchvision.datasets.FashionMNIST(root='../data', train=False, transform=trans, download=True)
    return (data.DataLoader(mnist_train, b_size, shuffle=True, num_workers=0),
            data.DataLoader(mnist_test, b_size, shuffle=False, num_workers=0))

batch_size = 256
train_iter, test_iter = load_data_fashion_mnist(batch_size)
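As a quick sanity check (not part of the original script), we can peek at one batch to confirm what the loaders produce: ToTensor yields float32 images of shape 1×28×28, and the labels are int64 class indices.

X, y = next(iter(train_iter))
print(X.shape, X.dtype)  # torch.Size([256, 1, 28, 28]) torch.float32
print(y.shape, y.dtype)  # torch.Size([256]) torch.int64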

# Define the model
net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))
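nn.Flatten() reshapes each 1×28×28 image into a length-784 vector, which the linear layer then maps to 10 class scores. A minimal shape check (illustrative only, using a fake batch):

X = torch.rand(2, 1, 28, 28)  # a fake batch of two images
print(net(X).shape)           # torch.Size([2, 10]): one score per class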

# Initialize the model parameters
def init_weights(m):
    if type(m) == nn.Linear:  # apply() below recurses through all submodules, so check the type before initializing
        nn.init.normal_(m.weight, std=0.01)
net.apply(init_weights)
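Since net.apply traverses every submodule, only the Linear layer passes the type check and gets reinitialized. A quick check (illustrative) that the weights now follow the intended distribution:

print(type(net[0]), type(net[1]))  # Flatten, Linear
print(net[1].weight.std())         # should be close to 0.01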

# Define the loss function
loss = nn.CrossEntropyLoss(reduction='none')
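nn.CrossEntropyLoss expects raw (unnormalized) scores and applies log-softmax internally; with reduction='none' it returns one loss value per example instead of averaging. A tiny worked example (the values are illustrative):

logits = torch.tensor([[2.0, 0.5, 0.3]])
label = torch.tensor([0])
print(loss(logits, label))                          # per-example loss
print(-torch.log_softmax(logits, dim=1)[0, label])  # the same value computed by hand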

# Define the optimizer
trainer = torch.optim.SGD(net.parameters(), lr=0.1)
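For plain SGD without momentum or weight decay, trainer.step() amounts to the update w ← w − lr · w.grad for every parameter. A conceptual sketch of a single step (it assumes a backward pass has already populated the gradients; not a replacement for the optimizer):

with torch.no_grad():
    for param in net.parameters():
        param -= 0.1 * param.grad  # lr = 0.1, matching the trainer above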

# Accumulator is a helper class for accumulating sums over multiple variables
class Accumulator:
    def __init__(self, n):
        self.data = [0.] * n
    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]
    def reset(self):
        self.data = [0.] * len(self.data)
    def __getitem__(self, idx):
        return self.data[idx]
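For illustration, the following (not in the original) shows how Accumulator keeps running sums across calls:

acc = Accumulator(2)
acc.add(1, 10)
acc.add(2, 20)
print(acc[0], acc[1])  # 3.0 30.0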

# Function to compute classification accuracy
def accuracy(y_hat, y):
    y_hat_res = y_hat.argmax(axis=1)  # assume the second dimension holds the predicted score for each class
    cmp = y_hat_res.type(y.dtype) == y  # cmp is a 1-D tensor of True/False values
    return float(cmp.type(y.dtype).sum())
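A tiny worked example with illustrative values: of the two samples below, only the second prediction matches its label, so accuracy returns 1.0 correct predictions.

y_hat = torch.tensor([[0.1, 0.8, 0.1],   # predicts class 1
                      [0.6, 0.3, 0.1]])  # predicts class 0
y = torch.tensor([2, 0])                 # true labels
print(accuracy(y_hat, y))                # 1.0: only the second sample is correct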

# Function to evaluate the accuracy of a model on a dataset
def evaluate_accuracy(net, data_iter):
    if isinstance(net, torch.nn.Module):
        net.eval()  # set the model to evaluation mode
    metric = Accumulator(2)  # two running sums: number of correct predictions, total number of predictions
    with torch.no_grad():
        for X, y in data_iter:
            metric.add(accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]
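Before training, the randomly initialized model should score near chance level, i.e. about 0.1 for 10 classes; this makes for a quick sanity check (illustrative, run before the training loop below):

print(evaluate_accuracy(net, test_iter))  # expected to be close to 0.1 before training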

# Function for one training epoch
def train_epoch(net, train_iter, loss, updater):
    net.train()
    metric = Accumulator(3)  # total training loss, total correct predictions, number of examples
    for X, y in train_iter:
        y_hat = net(X)
        l = loss(y_hat, y)
        updater.zero_grad()
        l.mean().backward()
        updater.step()
        metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    return metric[0] / metric[2], metric[1] / metric[2]  # return average training loss and training accuracy
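Because the loss was built with reduction='none', l is a vector of per-example losses: l.mean().backward() gives the same gradients that reduction='mean' would, while l.sum() lets the metric accumulate the exact total loss. A small self-contained check of the equivalence (values are random and illustrative):

y_hat = torch.randn(4, 10)
y = torch.randint(0, 10, (4,))
l_none = nn.CrossEntropyLoss(reduction='none')(y_hat, y)
l_mean = nn.CrossEntropyLoss(reduction='mean')(y_hat, y)
print(torch.isclose(l_none.mean(), l_mean))  # tensor(True)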

# Full training loop
def train(net, train_iter, test_iter, loss, num_epochs, updater):
    for epoch in range(num_epochs):
        train_metrics = train_epoch(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        print(f'epoch {epoch + 1}:')
        print(f'train_loss: {train_metrics[0]:.4f}; train_acc: {train_metrics[1]:.4f}')
        print(f'test_acc: {test_acc:.4f}')

num_epochs = 10
train(net, train_iter, test_iter, loss, num_epochs, trainer)
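As a final, illustrative step (not in the original script), the trained model can predict labels for a few test images; the label-name list below follows the standard Fashion-MNIST class order.

labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
          'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
X, y = next(iter(test_iter))
with torch.no_grad():
    preds = net(X).argmax(axis=1)
for true, pred in zip(y[:5], preds[:5]):
    print(f'true: {labels[true]}, predicted: {labels[pred]}')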
