This article shows how to implement softmax regression in PyTorch, first from scratch and then more concisely with the framework's built-in layers. The code is practical and easy to follow along with, so let's dive in.
If you build the network by hand (defining the parameters, model, loss and update rule yourself):
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import time
import sys
import numpy as np
import requests

# Download the training and test sets to a local directory
mnist_train = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=False, download=True, transform=transforms.ToTensor())

batch_size = 256
if sys.platform.startswith('win'):
    num_workers = 0  # 0 means no extra worker processes are used to speed up data loading
else:
    num_workers = 4
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)

num_inputs = 784   # each image is 1*28*28 (single channel), so the input size is 28*28 = 784
num_outputs = 10   # there are 10 classes, so the output size is 10

w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, num_outputs)), dtype=torch.float)
b = torch.zeros(num_outputs, dtype=torch.float)
# enable gradient tracking so that w and b can be updated via backpropagation
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
def cross_entropy(y_hat, y):
    return - torch.log(y_hat.gather(1, y.view(-1, 1)))

def accuracy(y_hat, y):
    return (y_hat.argmax(dim=1) == y).float().mean().item()
'''
Note how the accuracy is computed:
y_hat is a batch_size*10 matrix; argmax(dim=1) returns the index of the largest value along dimension 1,
so y_hat.argmax(dim=1) is a vector of length batch_size.
Comparing it element-wise with y and taking the mean gives the accuracy for this batch.
'''

def sgd(params, lr, batch_size):  # this function is also saved in the d2lzh_pytorch package for later reuse
    for param in params:
        param.data -= lr * param.grad / batch_size  # note that param.data is used here so the update itself is not tracked by autograd

def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n
num_epochs, lr = 30, 0.1

def softmax(X):
    X_exp = X.exp()
    partition = X_exp.sum(dim=1, keepdim=True)
    return X_exp / partition  # broadcasting divides each row by its own sum

def net(X):
    return softmax(torch.mm(X.view((-1, num_inputs)), w) + b)
def train(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            # zero the gradients before backpropagation
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)
            else:
                optimizer.step()  # used in the concise implementation of softmax regression below
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
if __name__ == '__main__':
    print(mnist_train)
    X, y = [], []
    '''
    for i in range(10):
        X.append(mnist_train[i][0])
        y.append(mnist_train[i][1])
    show_fashion_mnist(X, get_fashion_mnist_labels(y))
    '''
    train(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size, [w, b], lr)
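To make the two core pieces above concrete, here is a tiny sanity check of softmax's gather-based cross entropy and the accuracy computation on a hand-made batch. This snippet is illustrative only and not part of the original script; the tensors and values are made up.

import torch

# two samples, three classes; rows are already normalized "probabilities"
y_hat = torch.tensor([[0.1, 0.3, 0.6],
                      [0.3, 0.2, 0.5]])
y = torch.tensor([2, 0])                    # true class index for each sample

# gather(1, y.view(-1, 1)) picks, for each row, the probability assigned to the true class
picked = y_hat.gather(1, y.view(-1, 1))     # tensor([[0.6], [0.3]])
loss = - torch.log(picked)                  # per-sample negative log-likelihood, shape (2, 1)

acc = (y_hat.argmax(dim=1) == y).float().mean().item()   # 1.0 here: both predictions are correct
print(picked, loss, acc)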
If you use the framework's standard layers instead, there is a much more concise implementation:
import torch
import torchvision
import torchvision.transforms as transforms
from torch import nn
from torch.nn import init
from collections import OrderedDict
import matplotlib.pyplot as plt
import time
import sys
import numpy as np
import requests

# Download the training and test sets to a local directory
mnist_train = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=False, download=True, transform=transforms.ToTensor())

batch_size = 256
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False)

num_inputs = 784   # each image is 1*28*28 (single channel), so the input size is 28*28 = 784
num_outputs = 10   # there are 10 classes, so the output size is 10

class LinearNet(nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)
    def forward(self, x):
        y = self.linear(x.view(x.shape[0], -1))
        return y
def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n

class FlattenLayer(nn.Module):
    # reshapes each 1*28*28 image into a 1*784 row vector
    def __init__(self):
        super(FlattenLayer, self).__init__()
    def forward(self, x):
        return x.view(x.shape[0], -1)

net = LinearNet(num_inputs, num_outputs)
# Alternatively, build the network with an OrderedDict passed to nn.Sequential:
# the first (flatten) layer turns 1*28*28 into 1*784, the second (linear) layer does the actual work.
# This Sequential replaces the LinearNet instance created above.
net = nn.Sequential(
    OrderedDict(
        [
            ('flatten', FlattenLayer()),
            ('linear', nn.Linear(num_inputs, num_outputs))
        ]
    )
)
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)

loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
num_epochs = 10
def train(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()  # nn.CrossEntropyLoss already averages over the batch; .sum() on the scalar keeps the interface identical to the from-scratch version
            # zero the gradients before backpropagation
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            optimizer.step()  # in this concise version the parameter update is always done by the optimizer
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
if __name__ == '__main__':
    print(mnist_train)
    X, y = [], []
    '''
    for i in range(10):
        X.append(mnist_train[i][0])
        y.append(mnist_train[i][1])
    show_fashion_mnist(X, get_fashion_mnist_labels(y))
    '''
    train(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
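Once training finishes, the Sequential model can be used for prediction directly. A minimal sketch (not part of the original script) that classifies one test batch might look like this:

# Minimal prediction sketch: run the trained net on one test batch (illustrative only)
X, y = next(iter(test_iter))
with torch.no_grad():              # no gradients are needed for inference
    preds = net(X).argmax(dim=1)   # predicted class index for each image in the batch
print(preds[:10])                  # predicted labels for the first 10 test images
print(y[:10])                      # ground-truth labels for comparison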
That is how softmax regression can be implemented in PyTorch, both from scratch and with the built-in layers. Hopefully some of these techniques will be useful in your day-to-day work.
Original article: https://my.oschina.net/u/3013989/blog/5013842