• 深度学习(pytorch)——利用GPU给网络训练的2种方法


    Pytorch是torch的python版本,是由Facebook开源的神经网络框架,专门针对 GPU 加速的深度神经网络(DNN)编程。Torch 是一个经典的对多维矩阵数据进行操作的张量
    (tensor )库,在机器学习和其他数学密集型应用有广泛应用。
    Pytorch的计算图是动态的,可以根据计算需要实时改变计算图。
    由于Torch语言采用 Lua,导致在国内一直很小众,并逐渐被支持 Python 的 Tensorflow 抢走用户。作为经典机器学习库 Torch 的端口,PyTorch 为 Python 语言使用者提供了舒适的写代码选择。

    Pytorch是一个基于Python的科学计算包,提供两个高级功能:1、具有强大的GPU加速的张量计算(如NumPy)。2、包含自动求导系统的深度神经网络。

    方法一:

    找到上图的三种变量,然后.cuda()便可 

    网络模型.cuda()和损失函数.cuda()程序如下:

    训练数据(输入.cuda()、标注.cuda())和 测试数据(输入.cuda()、标注.cuda())程序如下:

     电脑没有GPU时可以去Google colab跑程序,设置如下图所示:

     把pycharm的代码复制到Google colab上面去

    具体程序如下:

    1. import torch
    2. import torchvision
    3. from torch import nn
    4. from torch.nn import Conv2d, MaxPool2d, Flatten
    5. from torch.utils.tensorboard import SummaryWriter
    6. # from model import *
    7. from torch.utils.data import DataLoader
    8. train_data = torchvision.datasets.CIFAR10(root='./data_CIFAR10',train=True,
    9. transform=torchvision.transforms.ToTensor(),download=True)
    10. test_data = torchvision.datasets.CIFAR10(root='./data_CIFAR10',train=False,
    11. transform=torchvision.transforms.ToTensor(),download=True)
    12. train_data_size = len(train_data)
    13. test_data_size = len(test_data)
    14. print("训练数据集的长度为:{} " .format(train_data_size))
    15. print("测试数据集的长度为:{} " .format(test_data_size))
    16. train_dataloader = DataLoader(train_data,batch_size=64)
    17. test_dataloader = DataLoader(test_data,batch_size=64)
    18. #搭建神经网络
    19. class Tudui(nn.Module):
    20. def __init__(self):
    21. super(Tudui, self).__init__()
    22. self.model1 = nn.Sequential(
    23. nn.Conv2d(3, 32, 5, padding=2, stride=1),
    24. nn.MaxPool2d(2),
    25. nn.Conv2d(32, 32, 5, padding=2, stride=1),
    26. nn.MaxPool2d(2),
    27. nn.Conv2d(32, 64, 5, padding=2, stride=1),
    28. nn.MaxPool2d(2),
    29. nn.Flatten(),
    30. nn.Linear(1024, 64),
    31. nn.Linear(64, 10)
    32. )
    33. def forward(self, x):
    34. x = self.model1(x)
    35. return x
    36. tudui = Tudui()
    37. # if torch.cuda.is_available():
    38. # tudui = tudui.cuda()
    39. #损失函数
    40. loss_fn = nn.CrossEntropyLoss()
    41. # if torch.cuda.is_available():
    42. # loss_fn = loss_fn.cuda()
    43. #优化器
    44. optimizer = torch.optim.SGD(tudui.parameters(),lr=0.01)
    45. #设置训练网络的一些参数
    46. total_train_step = 0
    47. total_test_step = 0
    48. epoch = 10
    49. writer = SummaryWriter('./logs_train')
    50. for i in range(epoch):
    51. print("------------第 {} 轮训练开始--------------".format(i+1))
    52. tudui.eval()
    53. for data in train_dataloader:
    54. imgs,targets = data
    55. # if torch.cuda.is_available():
    56. # imgs = imgs.cuda()
    57. # targets = targets.cuda()
    58. outputs = tudui(imgs)
    59. loss = loss_fn(outputs,targets)
    60. optimizer.zero_grad()
    61. loss.backward()
    62. optimizer.step()
    63. total_train_step +=1
    64. if total_train_step % 100 ==0:
    65. print("训练次数 : {},Loss : {}".format(total_train_step,loss.item()))
    66. writer.add_scalar("train_loss",loss.item(),total_train_step)
    67. tudui.eval()
    68. total_test_loss = 0
    69. total_accuracy = 0
    70. with torch.no_grad():
    71. for data in test_dataloader:
    72. imgs,targets = data
    73. # if torch.cuda.is_available():
    74. # imgs = imgs.cuda()
    75. # targets = targets.cuda()
    76. outputs = tudui(imgs)
    77. loss = loss_fn(outputs,targets)
    78. total_test_loss += loss.item()
    79. accuracy = (outputs.argmax(1) == targets).sum()
    80. total_accuracy = total_accuracy +accuracy
    81. print("整体测试数据集的Loss:{}".format(total_test_loss))
    82. print("整体测试集上的正确率:{}".format(total_accuracy/test_data_size))
    83. writer.add_scalar('test_loss',total_test_loss,total_test_step)
    84. writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    85. total_test_step += 1
    86. torch.save(tudui, 'tudui_{}.pth'.format(i))
    87. print("模型已保存")
    88. writer.close()

    方法二:

     电脑上有多张不同的显卡时,可以用该方法指定模型去哪张显卡运行。具体方法如下:

     device更加常用的写法如下

    具体程序如下:

    1. import torch
    2. import torchvision
    3. from torch import nn
    4. from torch.nn import Conv2d, MaxPool2d, Flatten
    5. from torch.utils.tensorboard import SummaryWriter
    6. # from model import *
    7. from torch.utils.data import DataLoader
    8. device = torch.device("cpu")
    9. # device = torch.device("cuda")
    10. # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    11. train_data = torchvision.datasets.CIFAR10(root='./data_CIFAR10',train=True,
    12. transform=torchvision.transforms.ToTensor(),download=True)
    13. test_data = torchvision.datasets.CIFAR10(root='./data_CIFAR10',train=False,
    14. transform=torchvision.transforms.ToTensor(),download=True)
    15. train_data_size = len(train_data)
    16. test_data_size = len(test_data)
    17. print("训练数据集的长度为:{} " .format(train_data_size))
    18. print("测试数据集的长度为:{} " .format(test_data_size))
    19. train_dataloader = DataLoader(train_data,batch_size=64)
    20. test_dataloader = DataLoader(test_data,batch_size=64)
    21. #搭建神经网络
    22. class Tudui(nn.Module):
    23. def __init__(self):
    24. super(Tudui, self).__init__()
    25. self.model1 = nn.Sequential(
    26. nn.Conv2d(3, 32, 5, padding=2, stride=1),
    27. nn.MaxPool2d(2),
    28. nn.Conv2d(32, 32, 5, padding=2, stride=1),
    29. nn.MaxPool2d(2),
    30. nn.Conv2d(32, 64, 5, padding=2, stride=1),
    31. nn.MaxPool2d(2),
    32. nn.Flatten(),
    33. nn.Linear(1024, 64),
    34. nn.Linear(64, 10)
    35. )
    36. def forward(self, x):
    37. x = self.model1(x)
    38. return x
    39. tudui = Tudui()
    40. tudui = tudui.to(device)
    41. # if torch.cuda.is_available():
    42. # tudui = tudui.cuda()
    43. #损失函数
    44. loss_fn = nn.CrossEntropyLoss()
    45. loss_fn = loss_fn.to(device)
    46. # if torch.cuda.is_available():
    47. # loss_fn = loss_fn.cuda()
    48. #优化器
    49. optimizer = torch.optim.SGD(tudui.parameters(),lr=0.01)
    50. #设置训练网络的一些参数
    51. total_train_step = 0
    52. total_test_step = 0
    53. epoch = 10
    54. writer = SummaryWriter('./logs_train')
    55. for i in range(epoch):
    56. print("------------第 {} 轮训练开始--------------".format(i+1))
    57. tudui.eval()
    58. for data in train_dataloader:
    59. imgs,targets = data
    60. imgs = imgs.to(device)
    61. targets = targets.to(device)
    62. # if torch.cuda.is_available():
    63. # imgs = imgs.cuda()
    64. # targets = targets.cuda()
    65. outputs = tudui(imgs)
    66. loss = loss_fn(outputs,targets)
    67. optimizer.zero_grad()
    68. loss.backward()
    69. optimizer.step()
    70. total_train_step +=1
    71. if total_train_step % 100 ==0:
    72. print("训练次数 : {},Loss : {}".format(total_train_step,loss.item()))
    73. writer.add_scalar("train_loss",loss.item(),total_train_step)
    74. tudui.eval()
    75. total_test_loss = 0
    76. total_accuracy = 0
    77. with torch.no_grad():
    78. for data in test_dataloader:
    79. imgs,targets = data
    80. imgs = imgs.to(device)
    81. targets = targets.to(device)
    82. # if torch.cuda.is_available():
    83. # imgs = imgs.cuda()
    84. # targets = targets.cuda()
    85. outputs = tudui(imgs)
    86. loss = loss_fn(outputs,targets)
    87. total_test_loss += loss.item()
    88. accuracy = (outputs.argmax(1) == targets).sum()
    89. total_accuracy = total_accuracy +accuracy
    90. print("整体测试数据集的Loss:{}".format(total_test_loss))
    91. print("整体测试集上的正确率:{}".format(total_accuracy/test_data_size))
    92. writer.add_scalar('test_loss',total_test_loss,total_test_step)
    93. writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    94. total_test_step += 1
    95. torch.save(tudui, 'tudui_{}.pth'.format(i))
    96. print("模型已保存")
    97. writer.close()
  • 相关阅读:
    数据清洗:相似重复记录检测算法SNM及其Python实现
    [做题] 滑动窗口
    反爬虫机制与反爬虫技术(一)
    第一次汇报yandex广告数据时,应该展示哪些数据
    区间信息维护与查询【分块】 - 原理 分块详解
    Leo赠书活动-02期 【信息科技风险管理:合规管理、技术防控与数字化】
    LeetCode 318. 最大单词长度乘积
    [每周一更]-(第67期):docker-compose 部署php的laravel项目
    详细介绍如何使用 NeRF 进行 3D 体积渲染-附源码下载
    数字IC前端学习笔记:数字乘法器的优化设计(Dadda Tree乘法器)
  • 原文地址:https://blog.csdn.net/qq_42233059/article/details/126677566