• 【pytorch】MNIST: using gradient ascent to find an input that maximizes the probability of a chosen class


    Goal: train a CNN on MNIST, then use gradient ascent to generate an image that the model classifies as an 8.
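
    The model's weights stay fixed; only the input pixels are updated. Written as an update rule (with step size η, target class c = 8, and f_c(x) denoting the network's output score for class c):

    x ← x + η · ∇x f_c(x)

    Repeating this step pushes the image toward whatever pattern the network scores most highly as an 8.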

    
    import numpy as np
    import torch 
    import torch.nn as nn
    import torch.nn.functional as F
    import torch.optim as optim
    import torchvision
    import torchvision.transforms as transforms
    import matplotlib.pyplot as plt
    
    

    Training the CNN

    # Download the MNIST dataset
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]) # pixels are normalized to [-1, 1]
    trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=256, shuffle=True, num_workers=2)
    testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=256, shuffle=False, num_workers=2)
    classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
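
    A quick sanity check (a minimal sketch added here, not in the original post): pull one batch and confirm the shapes and the [-1, 1] value range.

    # Inspect one batch: shapes and value range after normalization
    images, labels = next(iter(trainloader))
    print(images.shape)                              # torch.Size([256, 1, 28, 28])
    print(images.min().item(), images.max().item())  # approximately -1.0 and 1.0
    print(labels[:10])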
    
    
    # Define the model
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv2d(1, 6, 5)
            self.pool = nn.MaxPool2d(2, 2)
            self.conv2 = nn.Conv2d(6, 16, 5)
            self.fc1 = nn.Linear(16 * 4 * 4, 120)
            self.fc2 = nn.Linear(120, 84)
            self.fc3 = nn.Linear(84, 10)
    
        def forward(self, x):
            x = self.pool(F.relu(self.conv1(x)))
            x = self.pool(F.relu(self.conv2(x)))
            x = x.view(-1, 16 * 4 * 4)
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.fc3(x)
            return x
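
    Why fc1 expects 16 * 4 * 4 features: each 5×5 convolution (no padding) reduces the spatial size by 4 and each 2×2 max-pool halves it, so 28×28 → 24×24 → 12×12 → 8×8 → 4×4 with 16 channels at the end. A small shape check with a dummy input (added here for illustration):

    # Trace the feature-map sizes through the network with a dummy input
    _net = Net()
    with torch.no_grad():
        dummy = torch.zeros(1, 1, 28, 28)
        x = _net.pool(F.relu(_net.conv1(dummy)))   # -> torch.Size([1, 6, 12, 12])
        x = _net.pool(F.relu(_net.conv2(x)))       # -> torch.Size([1, 16, 4, 4])
        print(x.shape)
        print(_net(dummy).shape)                   # -> torch.Size([1, 10])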
    
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = Net()
    net.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    
    
    # Start training
    epochs = 10
    
    from tqdm import tqdm
    
    for epoch in range(epochs):
        avg_loss = 0
        for i, data in enumerate(tqdm(trainloader)):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            avg_loss += loss.item()
        avg_loss = avg_loss / (i + 1)
        print('epoch: %d, loss: %.4f' % (epoch + 1, avg_loss))
    print('Finished Training')
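
    The testloader defined above is never used in the original post; a minimal evaluation sketch (assuming the trained net and device from above) to check test accuracy:

    # Evaluate accuracy on the test set
    net.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in testloader:
            images, labels = images.to(device), labels.to(device)
            predicted = net(images).argmax(dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Test accuracy: %.2f%%' % (100 * correct / total))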
    
    
    # Save the model
    PATH = './mnist_net.pth'
    torch.save(net.state_dict(), PATH)
    
    # Load the model
    PATH = './mnist_net.pth'
    net = Net()
    net.load_state_dict(torch.load(PATH, map_location=device))  # map_location avoids device-mismatch errors when no GPU is available
    

    Generating an image with gradient ascent

    net = net.to(device)
    
    # Freeze the network's parameters
    for param in net.parameters():
        param.requires_grad = False
        
    net.eval()
    
    # Gradient ascent: generate an image that the model predicts as class 8
    img_gen = torch.randn(1, 1, 28, 28, requires_grad=True)
    
    img_gen = img_gen.to(device)
    
    epochs = 200
    for epoch in range(epochs):
    
        output = net(img_gen)
        value_to_max = output[0][8]  # maximize the output score (logit) for class 8
        
        # Compute the gradient of the class score with respect to the image
        grad = torch.autograd.grad(value_to_max, img_gen)[0]
        # Step in the ascent direction; grad / sqrt(grad * grad) is the elementwise sign of the gradient
        img_gen = img_gen.data + 0.1 * grad.data / torch.sqrt(grad.data * grad.data)  # torch.Size([1, 1, 28, 28])
    
        # Where the gradient was zero, the division produced NaN; set those pixels to 0
        img_gen.data[img_gen.data != img_gen.data] = 0
        
        # Re-attach the image to the graph so the gradient can be recomputed next iteration
        img_gen = img_gen.clone().detach().requires_grad_(True).to(device)
    
        if epoch % 20 == 0:
            print('epoch: {}, loss: {}'.format(epoch, value_to_max.item()))
            plt.imshow(img_gen[0][0].cpu().detach().numpy(), cmap='gray')
            plt.show()
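
    The explicit sign-of-gradient step above works, but the NaN handling is only needed because of the elementwise division. A cleaner alternative (a sketch, not the original author's code) is to put the image itself into an optimizer and minimize the negative class score:

    # Alternative sketch: gradient ascent on the image via an optimizer
    img_opt = torch.randn(1, 1, 28, 28, device=device, requires_grad=True)
    optimizer_img = optim.Adam([img_opt], lr=0.1)
    
    for step in range(200):
        optimizer_img.zero_grad()
        score = net(img_opt)[0][8]   # logit of the target class 8
        (-score).backward()          # minimizing -score maximizes the class-8 score
        optimizer_img.step()
        if step % 20 == 0:
            print('step: {}, class-8 score: {:.4f}'.format(step, score.item()))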
            
    

    epoch: 0, loss: 1.4248332977294922
    (figure: generated image at epoch 0)

    epoch: 180, loss: 259.0355224609375
    (figure: generated image at epoch 180)

    # Rescale img_gen to [-1, 1] (the network's input range) and check the prediction
    # Map the minimum to -1 and the maximum to 1
    img_gen = img_gen - torch.min(img_gen)
    img_gen = img_gen / torch.max(img_gen)
    img_gen = img_gen * 2 - 1
    # Display the image
    plt.imshow(img_gen[0][0].cpu().detach().numpy(), cmap='gray')
    
    # Feed it to the network and check the prediction and per-class scores
    output = net(img_gen)
    # Print the score for each class (these are raw logits; no softmax in Net.forward)
    for i in range(10):
        print('{}: {}'.format(classes[i], output[0][i].item()))
    

    0: -5.7123026847839355
    1: -0.5687944889068604
    2: -1.5327638387680054
    3: 0.04780220612883568
    4: -2.2129156589508057
    5: 2.809201955795288
    6: -3.1844711303710938
    7: -7.135143280029297
    8: 13.538104057312012
    9: -0.9435712099075317
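
    Class 8 dominates by a wide margin. Since these values are raw logits, a softmax (a small added sketch) converts them into probabilities:

    # Convert the logits to class probabilities
    probs = F.softmax(output, dim=1)
    for i in range(10):
        print('{}: {:.6f}'.format(classes[i], probs[0][i].item()))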

  • Original article: https://blog.csdn.net/qq_18846849/article/details/127964336