• VQ-VAE PyTorch Implementation


    model

    import torch
    import torch.nn as nn
    
    class ResidualBlock(nn.Module):
    
        def __init__(self, dim):
            super().__init__()
            self.relu = nn.ReLU()
            self.conv1 = nn.Conv2d(dim, dim, 3, 1, 1)
            self.conv2 = nn.Conv2d(dim, dim, 1)
    
        def forward(self, x):
            tmp = self.relu(x)
            tmp = self.conv1(tmp)
            tmp = self.relu(tmp)
            tmp = self.conv2(tmp)
            return x + tmp
    
    class VQVAE(nn.Module):
    
        def __init__(self, input_dim, dim, n_embedding):
            super().__init__()
            # two stride-2 convolutions downsample the input by 4x per spatial
            # dimension (hence n_downsample = 2 below)
            self.encoder = nn.Sequential(nn.Conv2d(input_dim, dim, 4, 2, 1),
                                         nn.ReLU(), nn.Conv2d(dim, dim, 4, 2, 1),
                                         nn.ReLU(), nn.Conv2d(dim, dim, 3, 1, 1),
                                         ResidualBlock(dim), ResidualBlock(dim))
            # codebook: n_embedding vectors of dimension dim, uniformly
            # initialized in [-1/n_embedding, 1/n_embedding]
            self.vq_embedding = nn.Embedding(n_embedding, dim)
            self.vq_embedding.weight.data.uniform_(-1.0 / n_embedding,
                                                   1.0 / n_embedding)
            # decoder mirrors the encoder, upsampling 4x with two transposed
            # convolutions
            self.decoder = nn.Sequential(
                nn.Conv2d(dim, dim, 3, 1, 1),
                ResidualBlock(dim), ResidualBlock(dim),
                nn.ConvTranspose2d(dim, dim, 4, 2, 1), nn.ReLU(),
                nn.ConvTranspose2d(dim, input_dim, 4, 2, 1))
            self.n_downsample = 2
    
        def forward(self, x):
            # encode
            ze = self.encoder(x)
    
            # ze: [N, C, H, W]
            # embedding: [K, C]
            embedding = self.vq_embedding.weight.data
            N, C, H, W = ze.shape
            K, _ = embedding.shape
            embedding_broadcast = embedding.reshape(1, K, C, 1, 1)
            ze_broadcast = ze.reshape(N, 1, C, H, W)
            # squared L2 distance from each spatial vector to every codebook
            # entry: [N, K, H, W]
            distance = torch.sum((embedding_broadcast - ze_broadcast)**2, 2)
            # index of the nearest codebook entry per position: [N, H, W]
            nearest_neighbor = torch.argmin(distance, 1)
            # look up the codebook and move C back to the second dim: [N, C, H, W]
            zq = self.vq_embedding(nearest_neighbor).permute(0, 3, 1, 2)
            # straight-through estimator: quantization is non-differentiable,
            # so gradients flow from the decoder directly into the encoder
            decoder_input = ze + (zq - ze).detach()
    
            # decode
            x_hat = self.decoder(decoder_input)
            return x_hat, ze, zq
    
        @torch.no_grad()
        def encode(self, x):
            ze = self.encoder(x)
            embedding = self.vq_embedding.weight.data
    
            # ze: [N, C, H, W]
            # embedding: [K, C]
            N, C, H, W = ze.shape
            K, _ = embedding.shape
            embedding_broadcast = embedding.reshape(1, K, C, 1, 1)
            ze_broadcast = ze.reshape(N, 1, C, H, W)
            distance = torch.sum((embedding_broadcast - ze_broadcast)**2, 2)
            # same nearest-neighbor lookup as in forward; returns codes [N, H, W]
            nearest_neighbor = torch.argmin(distance, 1)
            return nearest_neighbor
    
        @torch.no_grad()
        def decode(self, discrete_latent):
            # discrete_latent: [N, H, W] integer codebook indices
            zq = self.vq_embedding(discrete_latent).permute(0, 3, 1, 2)
            x_hat = self.decoder(zq)
            return x_hat
    
        # input_shape: [C, H, W]; returns the (H, W) of the discrete latent map
        def get_latent_HW(self, input_shape):
            C, H, W = input_shape
            return (H // 2**self.n_downsample, W // 2**self.n_downsample)
    
    

    main

    import time

    import torch
    import torch.nn as nn

    # get_dataloader and USE_LMDB are data utilities defined elsewhere in the
    # original project and are not shown in this post.

    def train_vqvae(model: VQVAE,
                    img_shape=None,
                    device='cuda',
                    ckpt_path='dldemos/VQVAE/model.pth',
                    batch_size=64,
                    dataset_type='MNIST',
                    lr=1e-3,
                    n_epochs=100,
                    l_w_embedding=1,
                    l_w_commitment=0.25):
        print('batch size:', batch_size)
        dataloader = get_dataloader(dataset_type,
                                    batch_size,
                                    img_shape=img_shape,
                                    use_lmdb=USE_LMDB)
        model.to(device)
        model.train()
        optimizer = torch.optim.Adam(model.parameters(), lr)
        mse_loss = nn.MSELoss()
        tic = time.time()
        for e in range(n_epochs):
            total_loss = 0
    
            for x in dataloader:
                current_batch_size = x.shape[0]
                x = x.to(device)
    
                x_hat, ze, zq = model(x)
                # reconstruction loss
                l_reconstruct = mse_loss(x, x_hat)
                # codebook loss: pull the chosen codebook vectors toward the
                # detached encoder outputs
                l_embedding = mse_loss(ze.detach(), zq)
                # commitment loss: keep encoder outputs close to the detached
                # codebook vectors
                l_commitment = mse_loss(ze, zq.detach())
                loss = l_reconstruct + \
                    l_w_embedding * l_embedding + l_w_commitment * l_commitment
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                total_loss += loss.item() * current_batch_size
            total_loss /= len(dataloader.dataset)
            toc = time.time()
            torch.save(model.state_dict(), ckpt_path)
            print(f'epoch {e} loss: {total_loss} elapsed {(toc - tic):.2f}s')
        print('Done')
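
    The three loss terms follow the original VQ-VAE paper: a reconstruction
    term, a codebook term that updates the selected codebook vectors, and a
    commitment term (weighted 0.25, as in the paper) that keeps encoder
    outputs committed to the codebook. Below is a minimal invocation sketch;
    the hyperparameters are illustrative assumptions, and get_dataloader /
    USE_LMDB must be supplied by the surrounding project.

    # invocation sketch (assumed hyperparameters)
    if __name__ == '__main__':
        model = VQVAE(input_dim=1, dim=32, n_embedding=32)
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        train_vqvae(model, device=device, dataset_type='MNIST', n_epochs=20)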
    
  • Original article: https://blog.csdn.net/qq_42363032/article/details/134080868