• Pytorch_basics_main


    [PyTorch] First day

    # import necessary packages
    import torch
    import torchvision
    import torch.nn as nn
    import numpy as np
    import torchvision.transforms as transforms
    
    Content one
    # Basic autograd example 1
    
    # Create tensors.
    x = torch.tensor(1., requires_grad=True)
    w = torch.tensor(2., requires_grad=True)
    b = torch.tensor(3., requires_grad=True)
    
    print(type(x), type(w), type(b))
    
    <class 'torch.Tensor'> <class 'torch.Tensor'> <class 'torch.Tensor'>
    
    # Build a computational graph.
    y = w * x + b   # y = 2 * x + 3
    
    # Compute gradients.
    y.backward()
    
    # Print out the gradients.
    print(x.grad)   # x.grad = 2
    print(w.grad)   # w.grad = 1
    print(b.grad)   # b.grad = 1
    
    tensor(2.)
    tensor(1.)
    tensor(1.)
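
    A note that is not in the original post: .grad accumulates across backward() calls, so a second backward pass adds to the existing gradients. A minimal sketch, continuing with x, w, b from above:

    # Build the same graph again and backpropagate a second time.
    y = w * x + b
    y.backward()
    print(x.grad)   # tensor(4.) -- the new gradient (2) is added to the old one (2)

    # Clear the accumulated gradient before the next backward pass.
    x.grad.zero_()
    print(x.grad)   # tensor(0.)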
    
    Content two
    # Basic autograd example 2
    
    # Create tensors of shape (10, 3) and (10, 2).
    x = torch.randn(10, 3)
    y = torch.randn(10, 2)
    
    # Build a fully connected layer.
    linear = nn.Linear(3, 2)
    print('w: ', linear.weight)
    print('b: ', linear.bias)
    
    w:  Parameter containing:
    tensor([[ 0.2271,  0.4796, -0.4287],
            [ 0.3378, -0.5249,  0.2095]], requires_grad=True)
    b:  Parameter containing:
    tensor([0.4186, 0.1931], requires_grad=True)
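
    As a sanity check (a sketch, not part of the original post), nn.Linear computes x @ W^T + b, which we can reproduce by hand with the tensors above:

    # Manually apply the affine map and compare with the layer's output.
    manual = x.matmul(linear.weight.t()) + linear.bias
    print(torch.allclose(manual, linear(x)))   # True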
    
    # Build loss function and optimizer.
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)
    
    # Forward pass.
    pred = linear(x)
    
    # Compute loss.
    loss = criterion(pred, y)
    print('loss:', loss.item())
    
    loss: 1.1817594766616821
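
    For reference (a sketch, not in the original post), nn.MSELoss with the default reduction='mean' is simply the mean of the squared errors, so the value above can be reproduced directly:

    # Mean squared error computed by hand; matches loss.item() above.
    manual_loss = ((pred - y) ** 2).mean()
    print(manual_loss.item())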
    
    # Backward pass.
    loss.backward()
    
    # Print out the gradients.
    print('dl/dw: ', linear.weight.grad)
    print('dl/db: ', linear.bias.grad)
    
    dl/dw:  tensor([[-0.5055,  0.2931, -0.8895],
            [ 0.0444,  0.0985,  0.4994]])
    dl/db:  tensor([ 0.6998, -0.0333])
    
    # 1-step gradient descent.
    optimizer.step()
    
    # We can also perform gradient descent at the low level.
    # linear.weight.data.sub_(0.01 * linear.weight.grad.data)
    # linear.bias.data.sub_(0.01 * linear.bias.grad.data)
    
    # Print out the loss after 1-step gradient descent.
    pred = linear(x)
    loss = criterion(pred, y)
    print('loss after 1 step optimization: ', loss.item())
    
    loss after 1 step optimization:  1.1630728244781494
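
    To make the training pattern explicit, here is a minimal sketch (not from the original post) that repeats the forward / backward / step cycle, reusing x, y, linear, criterion, and optimizer from above. optimizer.zero_grad() is needed because gradients accumulate between iterations:

    for step in range(5):
        optimizer.zero_grad()        # clear gradients from the previous step
        pred = linear(x)             # forward pass
        loss = criterion(pred, y)    # compute loss
        loss.backward()              # backward pass
        optimizer.step()             # update the parameters
        print('step {}, loss: {:.4f}'.format(step + 1, loss.item()))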
    
    Content three
    # Loading data from numpy
    
    # Create a numpy array.
    x = np.array([[1, 2], [3, 4]])
    
    # Convert the numpy array to a torch tensor.
    y = torch.from_numpy(x)
    
    # Convert the torch tensor to a numpy array.
    z = y.numpy()
    
    print(type(x), type(y), type(z))
    
    <class 'numpy.ndarray'> <class 'torch.Tensor'> <class 'numpy.ndarray'>
    
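    A detail worth knowing (a sketch, not in the original post): torch.from_numpy() and Tensor.numpy() share memory with the underlying array, so in-place changes on one side are visible on the other:

    a = np.ones(3)
    t = torch.from_numpy(a)
    a[0] = 100
    print(t)    # the change to the numpy array is visible in the tensor
    t[1] = -1
    print(a)    # and changes to the tensor are visible in the array
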
    Content four
    # Download and construct CIFAR-10 dataset.
    train_dataset = torchvision.datasets.CIFAR10(root='../../data',
                                                 train=True,
                                                 transform=transforms.ToTensor(),
                                                 download=True)
    # Fetch one data pair (read data from disk).
    print(type(train_dataset))   # <class 'torchvision.datasets.cifar.CIFAR10'>
    image, label = train_dataset[0]
    
    print(image.size())   # each image is a 3x32x32 tensor after ToTensor()
    print(label)
    
    Files already downloaded and verified
    <class 'torchvision.datasets.cifar.CIFAR10'>
    torch.Size([3, 32, 32])
    6
    
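    A couple of quick checks on the dataset (a sketch, not in the original post). CIFAR-10 has 50,000 training images in 10 classes, and the dataset object exposes the class names:

    print(len(train_dataset))             # 50000
    print(train_dataset.classes[label])   # class name for label 6 ('frog')
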
    # Data loader (this provides queues and threads in a very simple way).
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=64,
                                               shuffle=True)
    
    # When iteration starts, queue and thread start to load data from files.
    data_iter = iter(train_loader)
    
    # Mini-batch images and labels.
    images, labels = next(data_iter)
    
    # Actual usage of the data loader is as below.
    for images, labels in train_loader:
        # Training code should be written here.
        pass
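
    A quick shape check (a sketch, not in the original post): each mini-batch from this loader stacks 64 CIFAR-10 images of shape 3x32x32 together with their labels:

    images, labels = next(iter(train_loader))
    print(images.size())   # torch.Size([64, 3, 32, 32])
    print(labels.size())   # torch.Size([64])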
    
    Content five
    # Input pipeline for custom dataset
    
    # We should build our custom dataset as below.
    class CustomDataset(torch.utils.data.Dataset):
        def __init__(self):
            # TODO
        # 1. Initialize file paths or a list of file names.
            pass
        def __getitem__(self, index):
            # TODO
        # 1. Read one data item from file (e.g. using numpy.fromfile, PIL.Image.open).
        # 2. Preprocess the data (e.g. torchvision.transforms).
        # 3. Return a data pair (e.g. image and label).
            pass
        def __len__(self):
            # We should change 0 to the total size of our dataset.
            return 0
        
    # You can then use the prebuilt data loader.
    custom_dataset = CustomDataset()
    train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
                                               batch_size=64,
                                               shuffle=False)
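
    For illustration only, a hypothetical concrete version of the skeleton above, assuming the data is given as a list of (image_path, label) pairs; the PIL usage, the samples format, and the transform argument are all assumptions, not part of the original post:

    from PIL import Image

    class ImageListDataset(torch.utils.data.Dataset):
        def __init__(self, samples, transform=None):
            # samples: list of (image_path, label) tuples (hypothetical input format).
            self.samples = samples
            self.transform = transform

        def __getitem__(self, index):
            # Read one image from disk and preprocess it.
            path, label = self.samples[index]
            image = Image.open(path).convert('RGB')
            if self.transform is not None:
                image = self.transform(image)   # e.g. transforms.ToTensor()
            return image, label

        def __len__(self):
            return len(self.samples)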
    
    Content six
    # Pretrained model
    
    # Download and load the pretrained ResNet-18.
    resnet = torchvision.models.resnet18(pretrained=True)
    
    # If we want to finetune only the top layer of the model, set as below.
    for param in resnet.parameters():
        param.requires_grad = False
    
    # Replace the top layer for finetuning.
    resnet.fc = nn.Linear(resnet.fc.in_features, 100)   # 100 is an example.
    
    # Forward pass
    images = torch.randn(64, 3, 224, 224)
    outputs = resnet(images)
    
    print(outputs.size())   # result is torch.Size([64, 100])
    
    /home/wsl_ubuntu/anaconda3/envs/xy_trans/lib/python3.8/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.
      warnings.warn(
    /home/wsl_ubuntu/anaconda3/envs/xy_trans/lib/python3.8/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=ResNet18_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet18_Weights.DEFAULT` to get the most up-to-date weights.
      warnings.warn(msg)
    Downloading: "https://download.pytorch.org/models/resnet18-f37072fd.pth" to /home/wsl_ubuntu/.cache/torch/hub/checkpoints/resnet18-f37072fd.pth
    100%|██████████| 44.7M/44.7M [00:20<00:00, 2.34MB/s]
    
    
    torch.Size([64, 100])
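
    The deprecation warnings above point at the newer torchvision API; for torchvision >= 0.13 the equivalent call uses a weights enum instead of pretrained=True (a sketch based on the warning text):

    from torchvision.models import resnet18, ResNet18_Weights

    resnet = resnet18(weights=ResNet18_Weights.IMAGENET1K_V1)   # same pretrained weights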
    
    Content seven
    # Save and load the entire model.
    torch.save(resnet, 'model.ckpt')
    model = torch.load('model.ckpt')
    
    # Save and load only the model parameters (recommended).
    torch.save(resnet.state_dict(), 'params.ckpt')
    resnet.load_state_dict(torch.load('params.ckpt'))
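
    A hypothetical follow-up sketch (not in the original post) for the recommended state_dict route: rebuild the same architecture, including the replaced fc layer, and then load the saved parameters into it:

    # Rebuild the model with the same shapes as the saved parameters.
    model = torchvision.models.resnet18()
    model.fc = nn.Linear(model.fc.in_features, 100)
    model.load_state_dict(torch.load('params.ckpt', map_location='cpu'))
    model.eval()   # switch to evaluation mode for inference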
    
    
    
  • Original article: https://blog.csdn.net/AIHUBEI/article/details/134528002