PyTorch Study Notes 10: MNIST Multi-Class Classification in Practice (GPU Version V2)
# libs that need to be imported
import torch
import time
import platform
import cv2
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
# libs that need to be imported
# Runtime environment: Tesla K20 / Python 3.7 / PyTorch 1.2.0
print('——————————Runtime Environment——————————')
print('Python Version:',platform.python_version())
print('Torch Version:',torch.__version__)
print('OpenCV Version:', cv2.__version__)
print('CUDA GPU check:',torch.cuda.is_available())
if torch.cuda.is_available():
    print('CUDA GPU num:', torch.cuda.device_count())
    n = torch.cuda.device_count()
    while n > 0:  # walk through every visible GPU, from last index to first
        print('CUDA GPU name:', torch.cuda.get_device_name(n-1))
        print('CUDA GPU capability:', torch.cuda.get_device_capability(n-1))
        print('CUDA GPU properties:', torch.cuda.get_device_properties(n-1))
        n -= 1
    print('CUDA GPU index:', torch.cuda.current_device())
print('——————————Runtime Environment——————————')
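# Extra environment info (my addition, not in the original notes): the cuDNN
# build often matters as much as the CUDA version for GPU speed; PyTorch
# exposes it via torch.backends.cudnn.version().
if torch.cuda.is_available():
    print('cuDNN Version:', torch.backends.cudnn.version())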
time_start = time.time()
print('——————————MNIST with GPU Acceleration——————————')
# Load the data
batch_size = 200
learning_rate = 0.01
epochs = 10
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(r'c:\mnist', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True  # samples per loaded batch; reshuffle the training data every epoch
)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(r'c:\mnist', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True  # shuffling the test set does not change accuracy, only the batch order
)
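# Sanity check (my addition, not in the original notes): pull one batch and
# confirm the shapes. Images come out as [batch, 1, 28, 28] and labels as
# [batch], which is why the loops below flatten each image to 28*28 = 784.
# The constants (0.1307,) and (0.3081,) above are the mean/std of the MNIST
# training pixels.
sample_data, sample_target = next(iter(train_loader))
print('sample batch:', sample_data.shape, sample_target.shape)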
# Define the network model
class MLP1(torch.nn.Module):
    def __init__(self):
        super(MLP1, self).__init__()
        self.model = torch.nn.Sequential(
            torch.nn.Linear(784, 200),
            torch.nn.LeakyReLU(inplace=True),
            torch.nn.Linear(200, 200),
            torch.nn.LeakyReLU(inplace=True),
            torch.nn.Linear(200, 10),
            torch.nn.LeakyReLU(inplace=True),
        )

    def forward(self, x):  # custom forward pass
        x = self.model(x)
        return x
device = torch.device('cuda:0')
net = MLP1().to(device)
optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)
criteon = torch.nn.CrossEntropyLoss().to(device)
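# Illustrative check (my addition, not in the original notes): count the
# trainable parameters of MLP1. By hand: (784*200+200) + (200*200+200)
# + (200*10+10) = 199,210.
print('trainable params:', sum(p.numel() for p in net.parameters() if p.requires_grad))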
# Training loop
for epoch in range(epochs):
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.view(-1, 28*28)
        data, target = data.to(device), target.to(device)
        logits = net(data)              # feed the batch through the network to get predictions
        loss = criteon(logits, target)  # measure the gap between predictions and targets
        optimizer.zero_grad()           # clear the old gradients
        loss.backward()                 # compute fresh gradients
        optimizer.step()                # update w and b with the new gradients, then iterate
        if batch_idx % 100 == 0:
            #print('epoch:', epoch)
            #print('batch_idx:', batch_idx)
            #print('len data', len(data))
            #print('len(train_loader.dataset)', len(train_loader.dataset))
            #print('len(train_loader)', len(train_loader))
            print(
                'train epoch:{} [{}/{} ({:.0f}%)]\tLoss:{:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()
                )
            )
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        data = data.view(-1, 28*28)
        data, target = data.to(device), target.to(device)
        logits = net(data)                            # feed the test batch through the network
        test_loss += criteon(logits, target).item()   # accumulate the per-batch losses
        pred = logits.data.max(1)[1]                  # index of the largest logit = the digit the network recognizes
        correct += pred.eq(target.data).sum().item()  # count predictions that match the labels
    test_loss /= len(test_loader.dataset)
    #print('len(test_loader.dataset):', len(test_loader.dataset))
    print(
        '\ntest set: avg loss:{:.4f}, accuracy:{}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)
        )
    )
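# Optional (my addition, not in the original notes): persist the trained
# weights once all epochs finish; 'mlp1_mnist.pt' is a placeholder filename.
torch.save(net.state_dict(), 'mlp1_mnist.pt')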
print('——————————MNIST with GPU Acceleration——————————')
time_end = time.time()
print('total time cost:', time_end - time_start, 's')
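# Illustrative extension (my addition, not in the original notes): since cv2
# is imported above but never used, here is a minimal sketch that classifies
# one handwritten digit image with the trained net. 'digit.png' is a
# hypothetical file: a white digit on a black background, like MNIST.
img = cv2.imread('digit.png', cv2.IMREAD_GRAYSCALE)
if img is not None:
    img = cv2.resize(img, (28, 28)).astype('float32') / 255.0  # scale to [0,1]
    x = torch.from_numpy((img - 0.1307) / 0.3081).view(1, 28*28).to(device)  # same Normalize as training
    with torch.no_grad():
        print('predicted digit:', net(x).argmax(dim=1).item())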