Implementing Ridge Regression (Weight Decay) in PyTorch
Only the optimizer needs to change:
trainer = torch.optim.SGD([
    {"params": net[0].weight, 'weight_decay': wd},
    {"params": net[0].bias}
], lr=lr)
Here wd is the penalty hyperparameter and net is the model being trained. Because weight_decay is set on its own parameter group, the L2 penalty applies only to the weight; the bias is left unregularized.
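PyTorch's weight_decay does the same thing as adding the L2 penalty to the loss yourself: for SGD it adds wd * w to the weight's gradient, which is exactly the gradient of (wd / 2) * ||w||^2. A minimal from-scratch sketch of that equivalence, reusing the net, loss, lr, wd, and train_iter defined in the full example below:

trainer = torch.optim.SGD(net.parameters(), lr=lr)
for X, y in train_iter:
    trainer.zero_grad()
    # Data loss plus an explicit L2 penalty on the weight only.
    l = loss(net(X), y).mean() + wd / 2 * net[0].weight.pow(2).sum()
    l.backward()
    trainer.step()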
Full code example:
import torch
from torch import nn
from torch.utils import data
import matplotlib.pyplot as plt

# Generate a noisy synthetic dataset (this reimplements d2l.synthetic_data()).
def generate_data(w, b, n):
    X = torch.normal(0, 1, size=(n, len(w)))
    Y = X @ w + b
    Y += torch.normal(0, 0.01, size=Y.shape)  # additive Gaussian noise
    return X, Y.reshape(-1, 1)
# Wrap tensors in a DataLoader.
def load_array(data_arr, batch_size, isTrain=True):
    dataset = data.TensorDataset(*data_arr)
    return data.DataLoader(dataset, batch_size, shuffle=isTrain)
# Problem sizes: few training examples and many inputs, so overfitting is easy.
n_train, n_test, num_inputs, b_size = 20, 100, 200, 5
true_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05
train_data = generate_data(true_w, true_b, n_train)
train_iter = load_array(train_data, b_size)
test_data = generate_data(true_w, true_b, n_test)
test_iter = load_array(test_data, b_size, False)
train_log = []  # per-epoch average training loss
test_log = []   # per-epoch average test loss
# Training function; wd is the penalty (weight-decay) hyperparameter.
def train_concise(wd):
    net = nn.Sequential(nn.Linear(num_inputs, 1))
    for param in net.parameters():
        param.data.normal_()  # initialize all parameters from N(0, 1)
    loss = nn.MSELoss(reduction='none')  # per-element losses, for evaluate_loss
    num_epochs, lr = 100, 0.003
    # Decay only the weight; the bias parameter group gets no weight_decay.
    trainer = torch.optim.SGD([
        {"params": net[0].weight, 'weight_decay': wd},
        {"params": net[0].bias}
    ], lr=lr)
    for epoch in range(num_epochs):
        for X, y in train_iter:
            trainer.zero_grad()
            l = loss(net(X), y)
            l.mean().backward()
            trainer.step()
        with torch.no_grad():
            train_loss, train_num = evaluate_loss(net, train_iter, loss)
            test_loss, test_num = evaluate_loss(net, test_iter, loss)
            train_log.append(train_loss / train_num)
            test_log.append(test_loss / test_num)
        if (epoch + 1) % 10 == 0:  # redraw the loss curves every 10 epochs
            plt.close()
            plt.plot(range(epoch + 1), train_log, color='red', label='train')
            plt.plot(range(epoch + 1), test_log, color='blue', label='test')
            plt.legend()
            plt.show()
# Sum the per-element loss over a dataset and count the elements.
def evaluate_loss(net, data_iter, loss):
    total_loss, cnt = 0.0, 0
    for X, y in data_iter:
        y_hat = net(X)
        y = y.reshape(y_hat.shape)  # the original dropped this assignment
        l = loss(y_hat, y)          # elementwise, since reduction='none'
        total_loss += l.sum().item()
        cnt += l.numel()
    return total_loss, cnt
train_concise(1)
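To see the effect of the penalty, run the same routine with and without weight decay and compare the curves: with 20 samples and 200 features, an unregularized fit leaves the test loss far above the training loss, while a sizable wd shrinks the gap. The values below are example settings, not ones fixed by the code above; since train_log and test_log are module-level lists, clear them between runs:

for wd in (0, 3):
    train_log.clear()
    test_log.clear()
    train_concise(wd)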