• [PyTorch Deep Learning Development Practice] Course notes for 刘二大人's Bilibili lectures, Lecture 11: Advanced CNN, implementing GoogLeNet and ResNet


    Code:

    Implementing GoogLeNet in PyTorch

    import torch
    from torchvision import datasets, transforms
    from torch.utils.data import DataLoader
    import torch.nn as nn
    import torch.nn.functional as F
    
    batch_size = 64
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])  # convert images to tensors and normalize with the MNIST mean and std
    
    train_set = datasets.MNIST(root='./data/mnist', train=True, download=True, transform=transform)
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    
    test_set = datasets.MNIST(root='./data/mnist', train=False, download=True, transform=transform)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)  # no need to shuffle the test set
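
    # Inception module (GoogLeNet style): four parallel branches (a 1x1 conv; 1x1 then 5x5;
    # 1x1 then two 3x3 convs; average pooling then 1x1). All branches keep the spatial size,
    # and their outputs are concatenated along the channel dimension: 16 + 24 + 24 + 24 = 88 channels.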
    class Inception(torch.nn.Module):
        def __init__(self,in_channels):
            super(Inception, self).__init__()
            self.branchpool = nn.Conv2d(in_channels, 24, kernel_size=1)
    
            self.branch1x1 = nn.Conv2d(in_channels, 16, kernel_size=1)
    
            self.branch5x5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
            self.branch5x5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)

            self.branch3x3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
            self.branch3x3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
            self.branch3x3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)
    
    
        def forward(self, x):
            branch1x1 = self.branch1x1(x)
    
            branch5x5 = self.branch5x5_1(x)
            branch5x5 = self.branch5x5_2(branch5x5)
    
            branch3x3 = self.branch3x3_1(x)
            branch3x3 = self.branch3x3_2(branch3x3)
            branch3x3 = self.branch3x3_3(branch3x3)
    
            branchpool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
            branchpool = self.branchpool(branchpool)

            outputs = torch.cat((branch1x1, branch5x5, branch3x3, branchpool), dim=1)
            return outputs
    
    class Net(torch.nn.Module):
        def __init__(self):
            super(Net,self).__init__()
            self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
            self.conv2 = nn.Conv2d(88, 20, kernel_size=5)
    
            self.incep1 = Inception(10)
            self.incep2 = Inception(20)
    
            self.fc = nn.Linear(1408, 10)
            self.maxpool = nn.MaxPool2d(kernel_size=2)
    
        def forward(self, x):
            in_size = x.size(0)
            x = F.relu(self.maxpool(self.conv1(x)))
            x = self.incep1(x)
            x = F.relu(self.maxpool(self.conv2(x)))
            x = self.incep2(x)
            x = x.view(in_size, -1)
            x = self.fc(x)
            return x
    
    model = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # use the GPU if one is available
    model = model.to(device)  # move the model to the selected device
    
    def train(epoch):
        running_loss = 0.0
        for i, data in enumerate(train_loader, 0):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)  # move the training batch to the device
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if i % 300 == 299:    # print every 300 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 300))
                running_loss = 0.0
    
    def test(epoch):
        correct = 0
        total = 0
        with torch.no_grad():
            for data in test_loader:
                images, labels = data
                images, labels = images.to(device), labels.to(device)  # move the test batch to the device
                outputs = model(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
    
        print('Accuracy of the network on the 10000 test images: %d %%' % (
            100 * correct / total))
    
    if __name__ == '__main__':
        for epoch in range(100):
            train(epoch)
            if epoch % 10 == 0:
                test(epoch)
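
    Where do the hard-coded values 88 (the input channels of conv2) and 1408 (the input features of fc) come from? Pushing dummy tensors through the blocks and printing the shapes makes it clear. The snippet below is a quick sanity check, not part of the original lecture code; it reuses the Inception and Net classes defined above.

    # Shape check (not in the original notes), reusing the classes defined above
    block = Inception(in_channels=10)
    y = block(torch.randn(2, 10, 12, 12))     # feature-map size after conv1 + maxpool
    print(y.shape)                            # torch.Size([2, 88, 12, 12]): 16 + 24 + 24 + 24 channels
    net = Net()
    out = net(torch.randn(2, 1, 28, 28))      # a fake MNIST batch
    print(out.shape)                          # torch.Size([2, 10]); fc sees 88 * 4 * 4 = 1408 features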
    

    Implementing ResNet in PyTorch

    import torch
    from torchvision import datasets, transforms
    from torch.utils.data import DataLoader
    import torch.nn as nn
    import torch.nn.functional as F
    
    batch_size = 64
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])  # convert images to tensors and normalize with the MNIST mean and std
    
    train_set = datasets.MNIST(root='./data/mnist', train=True, download=True, transform=transform)
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    
    test_set = datasets.MNIST(root='./data/mnist', train=False, download=True, transform=transform)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)  # no need to shuffle the test set
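
    # Residual block: two 3x3 convolutions with padding=1 whose output is added to the input
    # (skip connection) before the final ReLU, so spatial size and channel count are preserved.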
    
    class ResidualBlock(torch.nn.Module):
        def __init__(self, channels):
            super(ResidualBlock, self).__init__()
            self.channels = channels
            self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
            self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
    
        def forward(self, x):
            y = F.relu(self.conv1(x))
            y = self.conv2(y)
            return F.relu(x + y)
    
    
    class Net(torch.nn.Module):
        def __init__(self):
            super(Net,self).__init__()
            self.conv1 = nn.Conv2d(1, 16, kernel_size=5)
            self.conv2 = nn.Conv2d(16, 32, kernel_size=5)
    
            self.rblock1 = ResidualBlock(16)
            self.rblock2 = ResidualBlock(32)
    
            self.maxpool = nn.MaxPool2d(kernel_size=2)
            self.fc = nn.Linear(512, 10)
    
        def forward(self, x):
            in_size = x.size(0)
            x = self.maxpool(F.relu(self.conv1(x)))
            x = self.rblock1(x)
            x = self.maxpool(F.relu(self.conv2(x)))
            x = self.rblock2(x)
            x = x.view(in_size, -1)
            x = self.fc(x)
            return x
    
    model = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # use the GPU if one is available
    model = model.to(device)  # move the model to the selected device
    
    def train(epoch):
        running_loss = 0.0
        for i, data in enumerate(train_loader, 0):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)  # move the training batch to the device
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if i % 300 == 299:    # print every 300 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 300))
                running_loss = 0.0
    
    def test(epoch):
        correct = 0
        total = 0
        with torch.no_grad():
            for data in test_loader:
                images, labels = data
                images, labels = images.to(device), labels.to(device)  # move the test batch to the device
                outputs = model(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
    
        print('Accuracy of the network on the 10000 test images: %d %%' % (
            100 * correct / total))
    
    if __name__ == '__main__':
        for epoch in range(100):
            train(epoch)
            if epoch % 10 == 0:
                test(epoch)
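
    In the residual block, both 3x3 convolutions use padding=1, so the output keeps the input's spatial size and channel count and the skip connection x + y is well defined. The snippet below is a quick sanity check, not part of the original lecture code; it reuses the classes defined above and also shows where the 512 input features of fc come from.

    # Shape check (not in the original notes), reusing the classes defined above
    x = torch.randn(2, 16, 12, 12)
    print(ResidualBlock(16)(x).shape)             # torch.Size([2, 16, 12, 12]), same shape as the input
    net = Net()
    print(net(torch.randn(2, 1, 28, 28)).shape)   # torch.Size([2, 10]); fc sees 32 * 4 * 4 = 512 features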
    

    Selected lecture slides:

    (slide screenshots from the original post)

  • Original post: https://blog.csdn.net/weixin_44184852/article/details/136253824