    • Li Hongyi - Machine Learning HW4 - self-attention model - identifying which of 600 speakers is talking


    一、Working through the PyTorch modules one by one: parameter meanings, usage, and functionality:
    1. The nhead parameter of the encoder layer:

    self.encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, dim_feedforward=256, nhead=2)

    So what does nhead mean? It is not a window size (every output b still attends to all inputs a): nhead is the number of attention heads in the multi-head self-attention sublayer. d_model is split across the nhead heads (so d_model must be divisible by nhead), each head computes attention over the whole sequence, and the heads' outputs are concatenated back to d_model. A quick check follows below.
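    As a quick sanity check (a standalone sketch, not part of the homework code), the encoder layer's output shape matches its input shape no matter how many heads are used:

    import torch
    import torch.nn as nn

    # nhead only controls how many attention heads split d_model;
    # with the default batch_first=False, input and output are both (length, batch size, d_model).
    layer = nn.TransformerEncoderLayer(d_model=80, dim_feedforward=256, nhead=2)
    x = torch.randn(128, 4, 80)   # (length, batch size, d_model)
    y = layer(x)
    print(y.shape)                # torch.Size([128, 4, 80])
    # d_model must be divisible by nhead; e.g. d_model=80 with nhead=3 raises an error at construction.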

    2. Using tensor.permute to reorder dimensions, with an example:

    # Quick test of permute: it can be called directly on a tensor
    import torch
    import numpy as np
    x = np.array([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
    y = torch.tensor(x)
    # y.shape
    z = y.permute(2, 1, 0)
    z.shape
    print(z)  # after permute the shape becomes 3 x 2 x 2
    print(y)  # originally 2 x 2 x 3, from the outermost to the innermost dimension
    3. tensor.mean for averaging: reducing a whole vector to a single value (see the short sketch below):
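    A minimal standalone sketch of mean pooling over one dimension, which is exactly what the forward pass does later with out.mean(dim=1):

    import torch
    x = torch.arange(12, dtype=torch.float32).reshape(2, 3, 2)  # (batch, length, d_model)
    pooled = x.mean(dim=1)                                       # average over the length dimension
    print(pooled.shape)                                          # torch.Size([2, 2])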

    4. Using dictionaries (mappings) in Python (a small example follows):
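    Later sections use two such mappings, speaker2id and id2speaker. A tiny illustration (the entries here are made up, apart from "id00464", which is a speaker name that appears in the dataset):

    speaker2id = {"id00464": 0, "id10001": 1}                 # speaker name -> integer id (second entry is hypothetical)
    id2speaker = {str(v): k for k, v in speaker2id.items()}   # reverse mapping, keyed by the id as a string
    print(speaker2id["id00464"])   # 0
    print(id2speaker["0"])         # id00464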

    二、Designing the model (neural network):
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class Classifier(nn.Module):
        def __init__(self, d_model=80, n_spks=600, dropout=0.1):
            super().__init__()
            # Project the dimension of features from that of input into d_model.
            self.prenet = nn.Linear(40, d_model)  # a linear input layer that maps the 40 input dims to d_model dims
            # For now there is no need to use Conformer for this experiment.
            # TODO:
            #   Change Transformer to Conformer.
            #   https://arxiv.org/abs/2005.08100
            # No need to design the self-attention layer ourselves: the Transformer encoder layer already contains one.
            self.encoder_layer = nn.TransformerEncoderLayer(
                d_model=d_model, dim_feedforward=256, nhead=2
                # d_model is both the input and output dim; dim_feedforward=256 is the hidden size of the
                # feed-forward sublayer; nhead=2 is the number of attention heads
            )
            # Not used for now:
            # self.encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=2)
            # Project the dimension of features from d_model into speaker nums.
            # prediction layer
            self.pred_layer = nn.Sequential(  # effectively a linear output head that produces an n_spks(=600)-dim vector
                nn.Linear(d_model, d_model),
                nn.ReLU(),
                nn.Linear(d_model, n_spks),
            )

        def forward(self, mels):
            """
            args:
                mels: (batch size, length, 40)  # a 3-D tensor of preprocessed audio; the innermost dim is 40
            return:
                out: (batch size, n_spks)  # for each utterance in the batch, one score per speaker
            """
            # out: (batch size, length, d_model)
            out = self.prenet(mels)  # after the prenet, the innermost dimension becomes d_model
            # out: (length, batch size, d_model)
            out = out.permute(1, 0, 2)  # swap dim 0 and dim 1 with permute
            # The encoder layer expects features in the shape of (length, batch size, d_model).
            out = self.encoder_layer(out)
            # out: (batch size, length, d_model)
            out = out.transpose(0, 1)  # restore the original order; transpose here is equivalent to the permute above
            # mean pooling
            stats = out.mean(dim=1)  # average over dim 1 (length), collapsing each sequence into a single vector
            # result: (batch, d_model) -- averaging over the length dimension is the "mean pooling"
            # out: (batch, n_spks)
            out = self.pred_layer(stats)  # n_spks scores per utterance; this is not yet a one-hot vector
            return out
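    To sanity-check the shapes, here is a minimal sketch (batch size 4 and length 128 are arbitrary choices for illustration, assuming the Classifier class above has been defined):

    import torch
    model = Classifier(d_model=80, n_spks=600)
    dummy_mels = torch.randn(4, 128, 40)   # (batch size, length, 40)
    out = model(dummy_mels)
    print(out.shape)                       # torch.Size([4, 600])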
    三、Designing the learning-rate warm-up:
    import math
    import torch
    from torch.optim import Optimizer
    from torch.optim.lr_scheduler import LambdaLR

    # This code looked a bit cryptic to me at first: it builds a learning-rate schedule with a warm-up phase.
    # I'll come back and read it again later if needed.
    def get_cosine_schedule_with_warmup(
        optimizer: Optimizer,
        num_warmup_steps: int,
        num_training_steps: int,
        num_cycles: float = 0.5,
        last_epoch: int = -1,
    ):
        """
        Create a schedule with a learning rate that decreases following the values of the cosine function between the
        initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
        initial lr set in the optimizer.

        Args:
            optimizer (:class:`~torch.optim.Optimizer`):
                The optimizer for which to schedule the learning rate.
            num_warmup_steps (:obj:`int`):
                The number of steps for the warmup phase.
            num_training_steps (:obj:`int`):
                The total number of training steps.
            num_cycles (:obj:`float`, `optional`, defaults to 0.5):
                The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
                following a half-cosine).
            last_epoch (:obj:`int`, `optional`, defaults to -1):
                The index of the last epoch when resuming training.

        Return:
            :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
        """

        def lr_lambda(current_step):
            # Warmup: the learning rate ramps up linearly from 0 to the initial lr.
            if current_step < num_warmup_steps:
                return float(current_step) / float(max(1, num_warmup_steps))
            # Decay: cosine decay from the initial lr towards 0.
            progress = float(current_step - num_warmup_steps) / float(
                max(1, num_training_steps - num_warmup_steps)
            )
            return max(
                0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))
            )

        return LambdaLR(optimizer, lr_lambda, last_epoch)
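    A minimal usage sketch of the scheduler above (the dummy parameter, optimizer, and step counts are made up for illustration): the learning rate rises linearly to the initial lr during warm-up, then follows a half-cosine back towards 0.

    import torch
    from torch.optim import AdamW

    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = AdamW(params, lr=1e-3)
    scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)
    for step in range(100):
        optimizer.step()
        scheduler.step()
        if step in (0, 9, 50, 99):
            print(step, scheduler.get_last_lr())  # lr reaches 1e-3 after 10 steps, then decays towards 0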
    四、Per-batch processing during training:
    import torch

    # This is simply the part of the original training code that handles one batch.
    def model_fn(batch, model, criterion, device):  # arguments: one batch of data, the model, the loss function, the device
        """Forward a batch through the model."""
        mels, labels = batch  # unpack the mels and the labels
        mels = mels.to(device)
        labels = labels.to(device)

        outs = model(mels)  # model outputs
        loss = criterion(outs, labels)  # compare with the labels to get the loss

        # Get the speaker id with highest probability.
        preds = outs.argmax(1)  # index of the maximum along dim 1 (over the speaker classes)
        # Compute accuracy.
        accuracy = torch.mean((preds == labels).float())  # fraction of predictions that match the labels
        return loss, accuracy
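    A tiny standalone illustration of the argmax-and-compare step above, with made-up numbers:

    import torch
    outs = torch.tensor([[0.1, 2.0, -1.0],
                         [1.5, 0.2,  0.3]])      # (batch size, n_spks) with n_spks = 3 here
    labels = torch.tensor([1, 2])
    preds = outs.argmax(1)                        # tensor([1, 0])
    accuracy = torch.mean((preds == labels).float())
    print(preds, accuracy)                        # tensor([1, 0]) tensor(0.5000)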

    五、The validation function:
    from tqdm import tqdm
    import torch

    def valid(dataloader, model, criterion, device):  # runs over the entire validation set
        """Validate on validation set."""
        model.eval()  # switch to evaluation mode
        running_loss = 0.0
        running_accuracy = 0.0
        pbar = tqdm(total=len(dataloader.dataset), ncols=0, desc="Valid", unit=" uttr")  # progress bar for visualization

        for i, batch in enumerate(dataloader):  # i is the batch index, batch holds the batch data
            with torch.no_grad():  # no gradients are tracked during validation
                loss, accuracy = model_fn(batch, model, criterion, device)  # reuse the per-batch function above to get loss and acc
                running_loss += loss.item()
                running_accuracy += accuracy.item()

            pbar.update(dataloader.batch_size)  # progress-bar bookkeeping; can be ignored for now
            pbar.set_postfix(
                loss=f"{running_loss / (i+1):.2f}",
                accuracy=f"{running_accuracy / (i+1):.2f}",
            )

        pbar.close()
        model.train()

        return running_accuracy / len(dataloader)  # average accuracy over all batches
    六、The main function for training:
    from tqdm import tqdm
    import torch
    import torch.nn as nn
    from torch.optim import AdamW
    from torch.utils.data import DataLoader, random_split

    def parse_args():  # a function that fills in the config values
        """arguments"""
        config = {
            "data_dir": "./Dataset",
            "save_path": "model.ckpt",
            "batch_size": 32,
            "n_workers": 1,  # when this value is too large, my machine errors out
            "valid_steps": 2000,
            "warmup_steps": 1000,
            "save_steps": 10000,
            "total_steps": 70000,
        }
        return config

    def main(  # the config entries defined above become the parameters of main
        data_dir,
        save_path,
        batch_size,
        n_workers,
        valid_steps,
        warmup_steps,
        total_steps,
        save_steps,
    ):
        """Main function."""
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"[Info]: Use {device} now!")

        train_loader, valid_loader, speaker_num = get_dataloader(data_dir, batch_size, n_workers)  # build the dataloaders via get_dataloader
        train_iterator = iter(train_loader)  # an iterator over the training data
        print(f"[Info]: Finish loading data!", flush=True)

        model = Classifier(n_spks=speaker_num).to(device)  # instantiate the model
        criterion = nn.CrossEntropyLoss()  # instantiate the loss function and the optimizer
        optimizer = AdamW(model.parameters(), lr=1e-3)
        scheduler = get_cosine_schedule_with_warmup(optimizer, warmup_steps, total_steps)  # instantiate the warm-up scheduler
        print(f"[Info]: Finish creating model!", flush=True)

        best_accuracy = -1.0
        best_state_dict = None

        pbar = tqdm(total=valid_steps, ncols=0, desc="Train", unit=" step")  # progress-bar bookkeeping; can be ignored

        for step in range(total_steps):  # loop over the total number of training steps
            # Get data
            try:
                batch = next(train_iterator)  # fetch the next batch from the training data
            except StopIteration:
                train_iterator = iter(train_loader)
                batch = next(train_iterator)

            loss, accuracy = model_fn(batch, model, criterion, device)  # run one batch through the model to get its loss and acc
            batch_loss = loss.item()
            batch_accuracy = accuracy.item()

            # Update model
            loss.backward()
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()  # gradient-descent update, then clear the gradients

            # Log
            pbar.update()  # progress-bar bookkeeping
            pbar.set_postfix(
                loss=f"{batch_loss:.2f}",
                accuracy=f"{batch_accuracy:.2f}",
                step=step + 1,
            )

            # Do validation
            if (step + 1) % valid_steps == 0:
                pbar.close()

                valid_accuracy = valid(valid_loader, model, criterion, device)  # compute the validation accuracy with valid()

                # keep the best model
                if valid_accuracy > best_accuracy:  # always keep the best validation accuracy so far
                    best_accuracy = valid_accuracy
                    best_state_dict = model.state_dict()

                pbar = tqdm(total=valid_steps, ncols=0, desc="Train", unit=" step")

            # Save the best model so far.
            if (step + 1) % save_steps == 0 and best_state_dict is not None:
                torch.save(best_state_dict, save_path)  # save the best model parameters
                pbar.write(f"Step {step + 1}, best model saved. (accuracy={best_accuracy:.4f})")

        pbar.close()

    if __name__ == "__main__":  # call main with the config
        main(**parse_args())

    七、The inference (test) part:

    import os
    import json
    import torch
    from pathlib import Path
    from torch.utils.data import Dataset

    class InferenceDataset(Dataset):
        def __init__(self, data_dir):
            testdata_path = Path(data_dir) / "testdata.json"
            metadata = json.load(testdata_path.open())
            self.data_dir = data_dir
            self.data = metadata["utterances"]

        def __len__(self):
            return len(self.data)

        def __getitem__(self, index):
            utterance = self.data[index]
            feat_path = utterance["feature_path"]
            mel = torch.load(os.path.join(self.data_dir, feat_path))
            return feat_path, mel

    def inference_collate_batch(batch):
        """Collate a batch of data."""
        feat_paths, mels = zip(*batch)
        return feat_paths, torch.stack(mels)
    import json
    import csv
    from pathlib import Path
    from tqdm.notebook import tqdm
    import torch
    from torch.utils.data import DataLoader

    def parse_args():
        """arguments"""
        config = {
            "data_dir": "./Dataset",
            "model_path": "./model.ckpt",
            "output_path": "./output.csv",
        }
        return config

    def main(
        data_dir,
        model_path,
        output_path,
    ):
        """Main function."""
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"[Info]: Use {device} now!")

        mapping_path = Path(data_dir) / "mapping.json"
        mapping = json.load(mapping_path.open())

        dataset = InferenceDataset(data_dir)
        dataloader = DataLoader(
            dataset,
            batch_size=1,
            shuffle=False,
            drop_last=False,
            num_workers=8,
            collate_fn=inference_collate_batch,
        )
        print(f"[Info]: Finish loading data!", flush=True)

        speaker_num = len(mapping["id2speaker"])
        model = Classifier(n_spks=speaker_num).to(device)
        model.load_state_dict(torch.load(model_path))
        model.eval()
        print(f"[Info]: Finish creating model!", flush=True)

        results = [["Id", "Category"]]
        for feat_paths, mels in tqdm(dataloader):
            with torch.no_grad():
                mels = mels.to(device)
                outs = model(mels)  # run the model to get the outputs
                preds = outs.argmax(1).cpu().numpy()  # argmax over the outputs; the resulting indices are the predictions
                for feat_path, pred in zip(feat_paths, preds):
                    results.append([feat_path, mapping["id2speaker"][str(pred)]])  # append each prediction to results

        with open(output_path, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerows(results)

    if __name__ == "__main__":
        main(**parse_args())

    For now I am only reading through the inference code, since the 2022 version of the data 404s on GitHub...

    八、The Dataset class:
    import os
    import json
    import torch
    import random
    from pathlib import Path
    from torch.utils.data import Dataset
    from torch.nn.utils.rnn import pad_sequence

    class myDataset(Dataset):
        def __init__(self, data_dir, segment_len=128):
            self.data_dir = data_dir
            self.segment_len = segment_len

            # Load the mapping from speaker name to their corresponding id.
            mapping_path = Path(data_dir) / "mapping.json"
            mapping = json.load(mapping_path.open())  # load the json file into the variable mapping
            self.speaker2id = mapping["speaker2id"]  # speaker2id is simply the mapping stored in that file:
            # it maps the original speaker names such as "id00464" to the ids 0-599 used for the 600 speakers here

            # Load metadata of training data.
            metadata_path = Path(data_dir) / "metadata.json"
            metadata = json.load(open(metadata_path))["speakers"]
            # Similar to the above: metadata is the content of that json file.
            # Following the lecture, n_mels means each audio frame is represented by 40 mel features (if I understand correctly).
            # The json file lists, for each speaker id, the paths of their utterance feature files and each mel_len.

            # Get the total number of speaker.
            self.speaker_num = len(metadata.keys())
            self.data = []  # the data held by this class
            for speaker in metadata.keys():  # iterate over every speaker
                for utterances in metadata[speaker]:  # iterate over every utterance of that speaker
                    self.data.append([utterances["feature_path"], self.speaker2id[speaker]])  # store each utterance as (path, new id)

        def __len__(self):
            return len(self.data)  # total number of utterances

        def __getitem__(self, index):
            feat_path, speaker = self.data[index]  # get this utterance's path and speaker id from the index
            # Load preprocessed mel-spectrogram.
            mel = torch.load(os.path.join(self.data_dir, feat_path))  # load the mel feature file from its path
            # Segment mel-spectrogram into "segment_len" frames.
            if len(mel) > self.segment_len:  # if it is longer than segment_len (128), crop it:
                # Randomly get the starting point of the segment.
                start = random.randint(0, len(mel) - self.segment_len)
                # Get a segment with "segment_len" frames.
                mel = torch.FloatTensor(mel[start:start+self.segment_len])
            else:
                mel = torch.FloatTensor(mel)
            # Turn the speaker id into long for computing loss later.
            speaker = torch.FloatTensor([speaker]).long()  # convert the speaker id to long type
            return mel, speaker  # return the mel features and the corresponding speaker id

        def get_speaker_number(self):
            return self.speaker_num
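    The listing imports pad_sequence, but the collate function and the get_dataloader used in main are not shown in this post. As a rough sketch of how such a collate might pad the variable-length mel segments to a common length (the function name, the padding value -20, and the label handling are my assumptions, not the original code):

    import torch
    from torch.nn.utils.rnn import pad_sequence

    def collate_batch(batch):
        """Pad the mel segments in a batch so they can be stacked into one tensor (hypothetical sketch)."""
        mels, speakers = zip(*batch)                                     # tuples from myDataset.__getitem__
        mels = pad_sequence(mels, batch_first=True, padding_value=-20)   # (batch size, max length, 40); -20 is an assumed pad value
        speakers = torch.cat(speakers)                                   # (batch size,) speaker ids, already long
        return mels, speakers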

    Here are the sources of the files I downloaded:

    ML2022Spring-hw4 | Kaggle

    The Dropbox links below do work:

    !wget https://www.dropbox.com/s/vw324newiku0sz0/Dataset.tar.gz.aa?d1=0
    !wget https://www.dropbox.com/s/z840g69e71nkayo/Dataset.tar.gz.ab?d1=0
    !wget https://www.dropbox.com/s/h1081e1ggonio81/Dataset.tar.gz.ac?d1=0
    !wget https://www.dropbox.com/s/fh3zd8ow668c4th/Dataset.tar.gz.ad?d1=0
    !wget https://www.dropbox.com/s/ydzygoy2pv6gw9d/Dataset.tar.gz.ae?d1=0
    !cat Dataset.tar.gz.* | tar zxvf -

    Only this way can you download the data you need.

    How to put it... in the end, the content downloaded from Dropbox was still incomplete; a few files were missing.

    One workaround is to download the 5.2 GB archive directly from Kaggle. After extraction it may take around 70 GB, which is rather large, but once downloaded you just extract everything into the Dataset folder and the code runs.

    Method three: try the files on Google Drive:

    That failed; never mind, I'll just download the data myself and upload it.

    !gdown --id '1CtHZhJ-mTpNsO-MqvAPIi4Yrt3oSBXYV' --output Dataset.zip
    !gdown --id '14hmoMgB1fe6v50biIceKyndyeYABGrRq' --output Dataset.zip
    !gdown --id '1e9x-Pj13n7-9tK9LS_WjiMo21ru4UBH9' --output Dataset.zip
    !gdown --id '10TC0g46bcAz_jkiM165zNmwttT4RiRgY' --output Dataset.zip
    !gdown --id '1MUGBvG_Jjq00C2JYHuyV3B01vaf1kWIm' --output Dataset.zip
    !gdown --id '18M91P5DHwILNy01ssZ57AiPOR0OwutOM' --output Dataset.zip
    !unzip Dataset.zip

    • Original post: https://blog.csdn.net/xiao_ZHEDA/article/details/132715083