• 李宏毅 (Hung-yi Lee) HW9: Explainable ML


    —— Haste makes waste. I've already been very lucky; I just need to cherish that luck, not fret over gains and losses, and take it slow.

    ---- Gaps to fill in (each one is illustrated in the short sketch after this list):

    1. How os.listdir is used.

    2. Extracting the numeric value from a filename of the form 'num_filename.jpg'.

    3. What the slic image-segmentation labeling function does.

    4. The zip function, which pairs up two arrays element-by-element into one.

    5. astype, which converts an array's data type.

    6. Concatenating lists with [] and then calling enumerate on the result.
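
    The sketch below runs through all six items; the file name '3_20.jpg' and the directory are made-up examples, not the homework data.

    import os
    import numpy as np
    from skimage.data import astronaut
    from skimage.segmentation import slic

    # 1. os.listdir returns the entry names (not full paths) inside a directory.
    names = os.listdir('.')

    # 2. Extract the numbers from a name like '3_20.jpg' (class 3, index 20).
    #    Note: int() accepts '_' as a digit separator, so int('3_20') == 320,
    #    which is exactly the trick the sort key in get_paths_labels relies on.
    name = '3_20.jpg'
    label = int(name.split('_')[0])                       # 3
    index = int(name.replace('.jpg', '').split('_')[1])   # 20

    # 3. slic over-segments an image into roughly n_segments superpixels and
    #    returns an integer label map with the same height and width.
    segments = slic(astronaut(), n_segments=200, compactness=1, sigma=1)

    # 4. zip pairs two sequences element-by-element.
    pairs = list(zip([1, 2, 3], ['a', 'b', 'c']))   # [(1, 'a'), (2, 'b'), (3, 'c')]

    # 5. astype converts the dtype of a numpy array.
    x = np.array([1, 2, 3]).astype(np.double)

    # 6. Concatenate two lists with + (or build one with []) and enumerate the result.
    for i, item in enumerate([10, 20] + [30]):
        print(i, item)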

    PART 1: The explainability part of the 11-class food classification problem:

    I. A review of the CNN code:
    1. Preliminaries: importing libraries and setting the arguments
    import os
    import sys
    import argparse
    import numpy as np
    from PIL import Image
    import matplotlib.pyplot as plt
    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    from torch.optim import Adam
    from torch.utils.data import Dataset
    import torchvision.transforms as transforms
    from skimage.segmentation import slic
    from lime import lime_image
    from pdb import set_trace
    from torch.autograd import Variable

    args = {
        'ckptpath': './checkpoint.pth',
        'dataset_dir': './food/'
    }
    args = argparse.Namespace(**args)
    2. Defining the model structure:

    (1) cnn is a series of convolutional blocks; five MaxPool2d(2) layers shrink the 128×128 input to 4×4, so the final feature map is 512×4×4.

    (2) After flattening, a series of linear layers produces the 11-way output vector.

    # Model definition: the structure of the model
    class Classifier(nn.Module):
        def __init__(self):
            super(Classifier, self).__init__()

            def building_block(indim, outdim):
                return [
                    nn.Conv2d(indim, outdim, 3, 1, 1),
                    nn.BatchNorm2d(outdim),
                    nn.ReLU(),
                ]

            def stack_blocks(indim, outdim, block_num):
                layers = building_block(indim, outdim)
                for i in range(block_num - 1):
                    layers += building_block(outdim, outdim)
                layers.append(nn.MaxPool2d(2, 2, 0))
                return layers

            cnn_list = []
            cnn_list += stack_blocks(3, 128, 3)
            cnn_list += stack_blocks(128, 128, 3)
            cnn_list += stack_blocks(128, 256, 3)
            cnn_list += stack_blocks(256, 512, 1)
            cnn_list += stack_blocks(512, 512, 1)
            self.cnn = nn.Sequential(*cnn_list)  # all the helper functions above exist just to build this CNN stack

            dnn_list = [
                nn.Linear(512 * 4 * 4, 1024),
                nn.ReLU(),
                nn.Dropout(p=0.3),
                nn.Linear(1024, 11),
            ]
            self.fc = nn.Sequential(*dnn_list)

        def forward(self, x):
            out = self.cnn(x)
            out = out.reshape(out.size()[0], -1)
            return self.fc(out)

    Instantiating the model and loading the trained weights:

    # Load trained model
    model = Classifier().cuda()
    checkpoint = torch.load(args.ckptpath)
    model.load_state_dict(checkpoint['model_state_dict'])
    # It should display: <All keys matched successfully>
    3. Defining the FoodDataset. Note that mode='eval' here only selects the evaluation transforms (no augmentation); whether the model is trained is determined by the checkpoint loaded above, not by this flag:
    # It might take some time; if it takes too long, try reloading it.
    # Dataset definition
    class FoodDataset(Dataset):
        def __init__(self, paths, labels, mode):
            # mode: 'train' or 'eval'
            self.paths = paths
            self.labels = labels
            trainTransform = transforms.Compose([
                transforms.Resize(size=(128, 128)),
                transforms.RandomHorizontalFlip(),
                transforms.RandomRotation(15),
                transforms.ToTensor(),
            ])
            evalTransform = transforms.Compose([
                transforms.Resize(size=(128, 128)),
                transforms.ToTensor(),
            ])
            self.transform = trainTransform if mode == 'train' else evalTransform

        # pytorch dataset class
        def __len__(self):
            return len(self.paths)

        def __getitem__(self, index):
            X = Image.open(self.paths[index])
            X = self.transform(X)
            Y = self.labels[index]
            return X, Y

        # helper to get a batch of images for visualization
        def getbatch(self, indices):
            images = []
            labels = []
            for index in indices:
                image, label = self.__getitem__(index)
                images.append(image)
                labels.append(label)
            return torch.stack(images), torch.tensor(labels)

    # helper to get the data paths and labels
    # analyze this function first, then the dataset above
    def get_paths_labels(path):
        # Sort key for names like '3_20.jpg': int() accepts '_' as a digit
        # separator, so int('3_20') == 320; adding 1000000 * class keeps the
        # files grouped by class and ordered by index within each class.
        def my_key(name):
            return int(name.replace(".jpg", "")) + 1000000 * int(name.split("_")[0])
        imgnames = os.listdir(path)
        imgnames.sort(key=my_key)  # sort with the key function above
        imgpaths = []
        labels = []
        for name in imgnames:
            imgpaths.append(os.path.join(path, name))
            labels.append(int(name.split('_')[0]))
        return imgpaths, labels

    train_paths, train_labels = get_paths_labels(args.dataset_dir)  # the key only affects sorting; the names themselves are unchanged
    train_set = FoodDataset(train_paths, train_labels, mode='eval')  # 'eval' just disables the training-time augmentation

    Pull 10 images from the dataset for manual inspection:

    img_indices = [i for i in range(10)]
    images, labels = train_set.getbatch(img_indices)
    fig, axs = plt.subplots(1, len(img_indices), figsize=(15, 8))
    for i, img in enumerate(images):
        axs[i].imshow(img.cpu().permute(1, 2, 0))
    # print(labels)

    II. Using LIME to explain which regions of each image drive the prediction
    1. Definition of Local Interpretable Model-Agnostic Explanations: LIME perturbs the input (here by toggling superpixel segments on and off), queries the black-box model on those perturbed copies, and fits a simple, interpretable (linear) model locally around the instance; the weights of that local model tell us which segments push the prediction toward or away from the class.

    2. Using LIME concretely:
    # put the model in eval mode and run the whole batch of inputs through it to get a batch of predictions
    def predict(input):
        # input: numpy array, (batches, height, width, channels)
        model.eval()
        input = torch.FloatTensor(input).permute(0, 3, 1, 2)
        # pytorch tensor, (batches, channels, height, width)
        output = model(input.cuda())
        return output.detach().cpu().numpy()

    # segment the input image and label the segments
    def segmentation(input):
        # split the image into 200 pieces with the help of segmentation from skimage
        return slic(input, n_segments=200, compactness=1, sigma=1)

    # set up the figure
    fig, axs = plt.subplots(1, len(img_indices), figsize=(15, 8))
    # fix the random seed to make it reproducible
    np.random.seed(16)
    for idx, (image, label) in enumerate(zip(images.permute(0, 2, 3, 1).numpy(), labels)):
        x = image.astype(np.double)
        # numpy array for lime
        # call explainer.explain_instance, passing the image x, the predict function, and the segmentation function
        explainer = lime_image.LimeImageExplainer()
        explaination = explainer.explain_instance(image=x, classifier_fn=predict, segmentation_fn=segmentation)
        # doc: https://lime-ml.readthedocs.io/en/latest/lime.html?highlight=explain_instance#lime.lime_image.LimeImageExplainer.explain_instance
        # call get_image_and_mask on the explanation; the main arguments are the label
        # and num_features, plus flags such as whether to hide the unimportant regions
        lime_img, mask = explaination.get_image_and_mask(
            label=label.item(),
            positive_only=False,
            hide_rest=False,
            num_features=11,
            min_weight=0.05
        )
        # turn the result from the explainer into an image
        # doc: https://lime-ml.readthedocs.io/en/latest/lime.html?highlight=get_image_and_mask#lime.lime_image.ImageExplanation.get_image_and_mask
        axs[idx].imshow(lime_img)  # place lime_img at position idx of axs
    # show the LIME-annotated images
    plt.show()
    plt.close()
    III. Saliency maps: highlight the parts of the image that contribute most to the predicted class

    (This is just the plain gradient method.)

    The heatmaps highlight the pixels of the input image that contribute the most to the classification task.

    In short: compute the gradient of the loss with respect to each pixel, and use that gradient as the pixel value of the new image.

    # normalize each pixel value of the image
    def normalize(image):
        return (image - image.min()) / (image.max() - image.min())
        # return torch.log(image)/torch.log(image.max())

    # function that computes the saliency maps
    def compute_saliency_maps(x, y, model):  # x: the images, y: the labels, model: the classifier
        model.eval()
        x = x.cuda()
        # we want the gradient of the input x
        x.requires_grad_()
        y_pred = model(x)
        loss_func = torch.nn.CrossEntropyLoss()
        loss = loss_func(y_pred, y.cuda())
        loss.backward()
        # saliencies = x.grad.abs().detach().cpu()
        saliencies, _ = torch.max(x.grad.data.abs().detach().cpu(), dim=1)  # at each pixel position, take the largest absolute gradient across the 3 channels as the new pixel value
        # We need to normalize each image, because the gradients may vary in scale, but we only care about the relations within each image
        saliencies = torch.stack([normalize(item) for item in saliencies])
        return saliencies

    # images, labels = train_set.getbatch(img_indices)
    saliencies = compute_saliency_maps(images, labels, model)
    # visualize
    fig, axs = plt.subplots(2, len(img_indices), figsize=(15, 8))
    for row, target in enumerate([images, saliencies]):
        for column, img in enumerate(target):
            if row == 0:
                axs[row][column].imshow(img.permute(1, 2, 0).numpy())  # first row: the original images
                # What is permute?
                # In pytorch, the dimensions of an image tensor mean (channels, height, width)
                # In matplotlib, the dimensions of an image array mean (height, width, channels)
                # permute is a tool for permuting the dimensions of tensors
                # For example, img.permute(1, 2, 0) means that
                # - dimension 0 is dimension 1 of the original tensor, which is height
                # - dimension 1 is dimension 2 of the original tensor, which is width
                # - dimension 2 is dimension 0 of the original tensor, which is channels
            else:
                axs[row][column].imshow(img.numpy(), cmap=plt.cm.hot)  # second row: the heatmaps
    plt.show()
    plt.close()
    IV. SmoothGrad: viewing the heatmaps

    SmoothGrad works by repeatedly adding random noise to the image, computing a heatmap for each noisy copy, and averaging those heatmaps; the average is far more robust to noisy gradients than a single saliency map. In formula form: SmoothGrad(x) ≈ (1/n) Σᵢ |∂L/∂(x + εᵢ)|, with each εᵢ drawn from a Gaussian.

    # Smooth grad
    # the same normalize function as before
    def normalize(image):
        return (image - image.min()) / (image.max() - image.min())

    # compute something analogous to the saliencies in the saliency-map section:
    def smooth_grad(x, y, model, epoch, param_sigma_multiplier):  # epoch: total number of noisy samples; param_sigma_multiplier: a constant
        model.eval()
        # x = x.cuda().unsqueeze(0)
        mean = 0
        sigma = param_sigma_multiplier / (torch.max(x) - torch.min(x)).item()  # sigma is just a scalar
        smooth = np.zeros(x.cuda().unsqueeze(0).size())  # a zero array the same size as the batched x
        for i in range(epoch):
            # call Variable to generate random noise
            noise = Variable(x.data.new(x.size()).normal_(mean, sigma ** 2))  # sigma**2 is used as the standard deviation of the normal distribution; the noise sample has the same shape as x
            x_mod = (x + noise).unsqueeze(0).cuda()
            x_mod.requires_grad_()
            y_pred = model(x_mod)
            loss_func = torch.nn.CrossEntropyLoss()
            loss = loss_func(y_pred, y.cuda().unsqueeze(0))
            loss.backward()
            # like the method in the saliency map
            smooth += x_mod.grad.abs().detach().cpu().data.numpy()  # smooth accumulates the gradient of every epoch
        smooth = normalize(smooth / epoch)  # don't forget to normalize; just take the mean
        # smooth = smooth / epoch
        return smooth

    # images, labels = train_set.getbatch(img_indices)
    smooth = []
    for i, l in zip(images, labels):
        smooth.append(smooth_grad(i, l, model, 500, 0.4))
    smooth = np.stack(smooth)
    print(smooth.shape)

    fig, axs = plt.subplots(2, len(img_indices), figsize=(15, 8))  # 2 rows!
    for row, target in enumerate([images, smooth]):
        for column, img in enumerate(target):
            axs[row][column].imshow(np.transpose(img.reshape(3, 128, 128), (1, 2, 0)))
    V. Filter explanation: observing the intermediate convolutional layers
    1. What a hook function does: a forward hook registered on a module is called on every forward pass of that module with (module, input, output), so it can capture the module's output (the activation map) as it flows by; a minimal sketch follows.
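
    A toy example of register_forward_hook (standalone; the two-layer net here is made up for illustration, not the homework classifier):

    import torch
    import torch.nn as nn

    captured = {}

    def hook(module, input, output):
        # called every time the hooked module runs forward
        captured['activation'] = output

    net = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
    handle = net[0].register_forward_hook(hook)

    net(torch.randn(1, 3, 32, 32))        # the forward pass triggers the hook
    print(captured['activation'].shape)   # torch.Size([1, 8, 32, 32])
    handle.remove()                       # remove the hook once you're done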

    2. Outputting only the activation of the filter with the given filterid:

    3. The code itself
    # define normalization
    def normalize(image):
        return (image - image.min()) / (image.max() - image.min())

    layer_activations = None

    # filter observation function; returns the activations and the visualization
    def filter_explanation(x, model, cnnid, filterid, iteration=100, lr=1):  # cnnid: index of the conv layer; filterid: index of the filter
        # x: input image
        # cnnid, filterid: cnn layer id, which filter
        model.eval()

        def hook(model, input, output):  # the hook stores the layer output in the global layer_activations
            global layer_activations
            layer_activations = output

        hook_handle = model.cnn[cnnid].register_forward_hook(hook)  # the hook handle; the comments below explain this line
        # When the model forwards through layer[cnnid], it calls the hook function first
        # The hook function saves the output of layer[cnnid]
        # After forwarding, we'll have the loss and the layer activation

        # Filter activation: x passing through the filter will generate the activation map
        model(x.cuda())  # forward
        # Based on the filterid given as argument, pick that specific filter's activation map
        # We just need to plot it, so we can detach it from the graph and save it as a cpu tensor
        filter_activations = layer_activations[:, filterid, :, :].detach().cpu()

        # Filter visualization: find the image that activates the filter the most
        x = x.cuda()
        x.requires_grad_()
        # input image gradient
        optimizer = Adam([x], lr=lr)
        # Use the optimizer to modify the input image so as to amplify the filter activation
        for iter in range(iteration):  # iteration == 100
            optimizer.zero_grad()
            model(x)
            objective = -layer_activations[:, filterid, :, :].sum()
            # We want to maximize the summation of the filter activation,
            # so we add a negative sign
            objective.backward()
            # Compute the partial derivative of the filter activation w.r.t. the input image
            optimizer.step()
            # Modify the input image to maximize the filter activation
        filter_visualizations = x.detach().cpu().squeeze()

        # Don't forget to remove the hook
        hook_handle.remove()
        # The hook stays registered on the model until removed, so remove it after use;
        # just register a new hook whenever you need one again
        return filter_activations, filter_visualizations

    images, labels = train_set.getbatch(img_indices)
    # from the arguments below: get the activations and visualizations of filter 0 in conv layer cnnid=6
    filter_activations, filter_visualizations = filter_explanation(images, model, cnnid=6, filterid=0, iteration=100, lr=0.1)

    # Three rows of plots are drawn: the original images, the filter activations, and the filter visualizations
    fig, axs = plt.subplots(3, len(img_indices), figsize=(15, 8))
    for i, img in enumerate(images):
        axs[0][i].imshow(img.permute(1, 2, 0))
    # Plot filter activations
    for i, img in enumerate(filter_activations):
        axs[1][i].imshow(normalize(img))
    # Plot filter visualization
    for i, img in enumerate(filter_visualizations):
        axs[2][i].imshow(normalize(img.permute(1, 2, 0)))
    plt.show()
    plt.close()
    # In the resulting plots, the activated regions correspond to object boundaries, especially edges with strong color contrast

    images, labels = train_set.getbatch(img_indices)
    # same as above, but for filter 0 in conv layer cnnid=23
    filter_activations, filter_visualizations = filter_explanation(images, model, cnnid=23, filterid=0, iteration=100, lr=0.1)

    # Plot the original images, the filter activations, and the filter visualizations
    fig, axs = plt.subplots(3, len(img_indices), figsize=(15, 8))
    for i, img in enumerate(images):
        axs[0][i].imshow(img.permute(1, 2, 0))
    for i, img in enumerate(filter_activations):
        axs[1][i].imshow(normalize(img))
    for i, img in enumerate(filter_visualizations):
        axs[2][i].imshow(normalize(img.permute(1, 2, 0)))
    plt.show()
    plt.close()
    VI. Using the Integrated Gradients technique from XAI:
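
    For orientation (this paragraph is my own summary, not from the homework handout): Integrated Gradients attributes a prediction to the input pixels by accumulating the gradient of the target-class score along a straight-line path from a baseline x' (here the all-black image) to the input x, i.e. IG(x) = (x − x') ⊙ ∫₀¹ ∂F(x' + α(x − x'))/∂x dα. The class below approximates the integral with a Riemann sum over `steps` scaled copies of the image; note that it simply averages the gradients and omits the final (x − x') scaling.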

    # (Note to self: at 5:45 go to Xiyuan for something light to eat, then go out and relax: a movie or whatever.)
    class IntegratedGradients():
        def __init__(self, model):  # initialize the class
            self.model = model
            self.gradients = None
            # Put the model in evaluation mode
            self.model.eval()

        def generate_images_on_linear_path(self, input_image, steps):
            # Generate scaled xbar images
            xbar_list = [input_image * step / steps for step in range(steps)]
            return xbar_list

        def generate_gradients(self, input_image, target_class):  # compute the gradient for one image
            # We want the gradients of the input image
            input_image.requires_grad = True
            # Forward
            model_output = self.model(input_image)
            # Zero grads
            self.model.zero_grad()
            # Target for backprop
            one_hot_output = torch.FloatTensor(1, model_output.size()[-1]).zero_().cuda()
            one_hot_output[0][target_class] = 1
            # Backward
            model_output.backward(gradient=one_hot_output)
            self.gradients = input_image.grad
            # Convert the Pytorch tensor to a numpy array
            # [0] to get rid of the batch dimension (1,3,128,128)
            gradients_as_arr = self.gradients.data.cpu().numpy()[0]
            return gradients_as_arr

        def generate_integrated_gradients(self, input_image, target_class, steps):  # average the gradients over the list of scaled images
            # Generate xbar images
            xbar_list = self.generate_images_on_linear_path(input_image, steps)
            # Initialize an image composed of zeros
            integrated_grads = np.zeros(input_image.size())
            for xbar_image in xbar_list:
                # Generate gradients from the xbar images
                single_integrated_grad = self.generate_gradients(xbar_image, target_class)
                # Add the rescaled grads from the xbar images
                integrated_grads = integrated_grads + single_integrated_grad / steps
            # [0] to get rid of the batch dimension (1,3,128,128)
            return integrated_grads[0]

    def normalize(image):
        return (image - image.min()) / (image.max() - image.min())

    # put the images on cuda
    images, labels = train_set.getbatch(img_indices)
    images = images.cuda()

    IG = IntegratedGradients(model)
    integrated_grads = []
    for i, img in enumerate(images):
        img = img.unsqueeze(0)
        integrated_grads.append(IG.generate_integrated_gradients(img, labels[i], 10))

    fig, axs = plt.subplots(2, len(img_indices), figsize=(15, 8))
    for i, img in enumerate(images):  # first row: the original images
        axs[0][i].imshow(img.cpu().permute(1, 2, 0))
    for i, img in enumerate(integrated_grads):  # second row: the integrated-gradients maps
        axs[1][i].imshow(np.moveaxis(normalize(img), 0, -1))
    plt.show()
    plt.close()

    PART 2: Explainability for BERT models

    (1) Getting a feel for what happens in each of BERT's layers on this site:

    exBERT

    This tool lets you inspect the attention heads and related internals; I'll set it aside for now and explore it slowly later.

    (2) Visualizing BERT's embeddings:

    The task: Tom has 3 pre-trained models, but he forgot whether each of them has been fine-tuned on a reading-comprehension task.

    By observing where each token's embedding lands, we can judge whether a model has been fine-tuned for reading comprehension.

    ... I didn't manage to finish this one; it's a bit hard.

    Still, the idea of its code is simple: take the embedding output of each of BERT's layers and project every token into 2D coordinates for analysis; a sketch of that projection follows.
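
    A minimal sketch of that idea (my own illustration, not the homework's code: the checkpoint name is a placeholder, and PCA stands in for whatever 2D projection the notebook actually uses):

    import torch
    from sklearn.decomposition import PCA
    from transformers import AutoModel, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-chinese')
    model = AutoModel.from_pretrained('bert-base-chinese', output_hidden_states=True)

    inputs = tokenizer("今天買了蘋果來吃", return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)

    layer = 12                                # which layer to visualize (0 = input embeddings)
    hidden = outputs.hidden_states[layer][0]  # (seq_len, 768)
    points = PCA(n_components=2).fit_transform(hidden.numpy())  # (seq_len, 2)
    tokens = tokenizer.convert_ids_to_tokens(inputs['input_ids'][0])
    for token, (px, py) in zip(tokens, points):
        print(f"{token}: ({px:.2f}, {py:.2f})")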

    (3) Comparing the embedding distances of "蘋果" as the fruit you eat versus "蘋果" as in the iPhone maker
    # Sentences for visualization
    sentences = []
    sentences += ["今天買了蘋果來吃"]
    sentences += ["進口蘋果(富士)平均每公斤下跌12.3%"]
    sentences += ["蘋果茶真難喝"]
    sentences += ["老饕都知道智利的蘋果季節即將到來"]
    sentences += ["進口蘋果因防止水分流失故添加人工果糖"]
    sentences += ["蘋果即將於下月發振新款iPhone"]
    sentences += ["蘋果獲新Face ID專利"]
    sentences += ["今天買了蘋果手機"]
    sentences += ["蘋果的股價又跌了"]
    sentences += ["蘋果押寶指紋辨識技術"]
    # Index of the word selected for embedding comparison. E.g. for the sentence "蘋果茶真難喝", if the index is 0, "蘋" is selected
    select_word_index = [4, 2, 0, 8, 2, 0, 0, 4, 0, 0]  # the position of the word "蘋果" in each sentence above

    # compute the Euclidean distance between vectors a and b
    def euclidean_distance(a, b):
        # Compute euclidean distance (L2 norm) between two numpy vectors a and b
        return np.linalg.norm(a - b)

    # compute the cosine similarity between vectors a and b
    def cosine_similarity(a, b):
        # cosine_similarity = (A · B) / (||A|| * ||B||)
        return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

    # Metric for comparison. Choose from euclidean_distance, cosine_similarity
    # METRIC has two options: either Euclidean distance or cosine similarity
    METRIC = euclidean_distance

    def get_select_embedding(output, tokenized_sentence, select_word_index):
        # The layer to visualize, choose from 0 to 12
        LAYER = 12
        # Get the selected layer's hidden state
        hidden_state = output.hidden_states[LAYER][0]
        # Convert select_word_index in the sentence to select_token_index in the tokenized sentence
        select_token_index = tokenized_sentence.word_to_tokens(select_word_index).start
        # Return the embedding of the selected word
        return hidden_state[select_token_index].numpy()
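
    The snippet below also assumes some setup that this post omits: a HuggingFace tokenizer and model loaded with output_hidden_states=True (so that output.hidden_states exists), sklearn's pairwise_distances, and myfont, a matplotlib FontProperties pointing at a Chinese font so the tick labels render. A hedged sketch of that setup (checkpoint name and font path are placeholders):

    from matplotlib.font_manager import FontProperties
    from sklearn.metrics import pairwise_distances
    from transformers import AutoModel, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-chinese')
    model = AutoModel.from_pretrained('bert-base-chinese', output_hidden_states=True)
    myfont = FontProperties(fname='./NotoSansTC-Regular.otf')  # any Chinese font file works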

    # Tokenize and encode the sentences into the model's input format
    tokenized_sentences = [tokenizer(sentence, return_tensors='pt') for sentence in sentences]

    # Feed the encoded sentences into the model and get the outputs
    with torch.no_grad():
        outputs = [model(**tokenized_sentence) for tokenized_sentence in tokenized_sentences]

    # Get the embedding of the selected word "蘋果" in each sentence.
    # "embeddings" has shape (len(sentences), 768), where 768 is the dimension of BERT's hidden state
    embeddings = [get_select_embedding(outputs[i], tokenized_sentences[i], select_word_index[i]) for i in range(len(outputs))]

    # Pairwise comparison of the sentences' embeddings using the metric defined above.
    # "similarity_matrix" has shape (len(sentences), len(sentences))
    similarity_matrix = pairwise_distances(embeddings, metric=METRIC)

    ##### Plot the similarity matrix #####
    plt.rcParams['figure.figsize'] = [12, 10]  # Change the figure size of the plot
    plt.imshow(similarity_matrix)  # Display the matrix as an image
    plt.colorbar()  # Add a colorbar to the plot
    plt.yticks(ticks=range(len(sentences)), labels=sentences, fontproperties=myfont)  # Set the tick locations and labels (sentences) of the y-axis
    plt.title('Comparison of BERT Word Embeddings')  # Add a title to the plot
    for (i, j), label in np.ndenumerate(similarity_matrix):  # np.ndenumerate is the 2D version of enumerate
        plt.text(i, j, '{:.2f}'.format(label), ha='center', va='center')  # Write each value of similarity_matrix at the corresponding position in the plot
    plt.show()  # Show the plot

• Original post: https://blog.csdn.net/xiao_ZHEDA/article/details/133218349