• Step 93 Deep Learning Image Segmentation: PSPNet Modeling


    Demonstrated on a 64-bit Windows 10 system

    1. Foreword

    In this installment of the deep learning image segmentation series, we move on to another model: PSPNet.

    2. A Brief Introduction to PSPNet

    (1) Pyramid Pooling Module

    The core of PSPNet is its pyramid pooling module, which captures context from different regions of the image. This is done by pooling the feature map at several different scales and applying 1x1 convolutions. The resulting multi-scale feature maps are then upsampled back to the original resolution and concatenated with the original feature map.
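
    To make the idea concrete, here is a minimal shape-level sketch (the channel count of 64 is just illustrative; the full model in section 4 wraps the same steps into an nn.Module):

    import torch
    import torch.nn.functional as F

    # Sketch of pyramid pooling: pool the feature map to several grids, then upsample and concatenate
    x = torch.randn(1, 64, 32, 32)              # a backbone feature map
    branches = [x]
    for scale in (1, 2, 3, 6):                  # PSPNet's four pooling grids
        p = F.adaptive_avg_pool2d(x, scale)     # coarse context, e.g. 1 x 64 x 6 x 6
        # (a 1x1 convolution would normally reduce the channels of each branch here)
        branches.append(F.interpolate(p, size=x.shape[2:], mode='bilinear', align_corners=True))
    fused = torch.cat(branches, dim=1)
    print(fused.shape)                          # torch.Size([1, 320, 32, 32]) = 64 + 4 * 64 channels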

    (2) A Deep Backbone Network

    To obtain rich feature representations, PSPNet uses a deep network (such as ResNet) as its backbone, which gives the model strong feature extraction capability.
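
    For reference, such a backbone could be built from a torchvision ResNet roughly as follows (just a sketch; the demo model later in this post uses a small hand-rolled encoder instead, and the weights argument may differ between torchvision versions):

    import torch
    import torchvision

    # Keep only the convolutional trunk of a ResNet-50 (drop the average pooling and the fc head)
    resnet = torchvision.models.resnet50(weights=None)
    backbone = torch.nn.Sequential(*list(resnet.children())[:-2])
    features = backbone(torch.randn(1, 3, 224, 224))
    print(features.shape)                       # torch.Size([1, 2048, 7, 7])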

    (3) Auxiliary Loss

    To stabilize training in such a deep network, PSPNet introduces an auxiliary loss. It is usually attached to an intermediate layer and helps gradients flow through the network.
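
    The small demo model built below does not include an auxiliary head, but the idea looks roughly like this (a sketch only; mid_features, main_logits and aux_logits are hypothetical names):

    import torch.nn as nn
    import torch.nn.functional as F

    class AuxHead(nn.Module):
        """Auxiliary classifier attached to an intermediate feature map (sketch only)."""
        def __init__(self, in_channels, num_classes):
            super().__init__()
            self.classify = nn.Conv2d(in_channels, num_classes, kernel_size=1)

        def forward(self, mid_features, target_size):
            logits = self.classify(mid_features)
            # upsample so the auxiliary prediction can be compared with the full-size mask
            return F.interpolate(logits, size=target_size, mode='bilinear', align_corners=True)

    # During training the auxiliary loss is added with a small weight, for example:
    # loss = criterion(main_logits, masks) + 0.4 * criterion(aux_logits, masks)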

    3. Data Source

    The data come from a public dataset; the goal is to use PSPNet to segment cell boundaries in electron microscope images:

    The data are split into a training set (train), the cell-boundary masks for the training set (label), and a validation set (test). Note that no cell-boundary masks are provided for the validation set, so we will not be able to compute performance metrics on it later.

    4. Hands-on with PSPNet:

    Since PyTorch does not ship a pretrained PSPNet model, we have to build one from scratch.

    On to the code:

    (a) Data loading and data augmentation

    import os
    import numpy as np
    from skimage.io import imread
    from torchvision import transforms
    from torch.utils.data import DataLoader, Dataset
    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    import matplotlib.pyplot as plt
    from sklearn.metrics import roc_curve, auc, accuracy_score, recall_score, precision_score, f1_score

    # Set the file paths (raw string so the backslash is not treated as an escape)
    data_folder = r'U-net-master\data_set'
    train_images_folder = os.path.join(data_folder, 'train')
    label_images_folder = os.path.join(data_folder, 'label')
    test_images_folder = os.path.join(data_folder, 'test')

    train_images = sorted(os.listdir(train_images_folder))
    label_images = sorted(os.listdir(label_images_folder))
    test_images = sorted(os.listdir(test_images_folder))

    # Define the dataset class
    class CustomDataset(Dataset):
        def __init__(self, image_paths, mask_paths, transform=None):
            self.image_paths = image_paths
            self.mask_paths = mask_paths
            self.transform = transform

        def __len__(self):
            return len(self.image_paths)

        def __getitem__(self, idx):
            image = imread(self.image_paths[idx])
            mask = imread(self.mask_paths[idx])
            mask[mask == 255] = 1  # map the foreground value 255 to class index 1

            # Ensure the image has 3 channels
            if len(image.shape) == 2:
                image = np.stack((image,) * 3, axis=-1)

            sample = {'image': image, 'mask': mask}
            if self.transform:
                sample = self.transform(sample)
            return sample


    class ToTensorAndNormalize(object):
        def __init__(self, mean, std):
            self.mean = mean
            self.std = std

        def __call__(self, sample):
            image, mask = sample['image'], sample['mask']
            # Swap the color axis:
            # numpy image: H x W x C  ->  torch image: C x H x W
            image = image.transpose((2, 0, 1))
            image = torch.from_numpy(image).float()
            mask = torch.from_numpy(mask).float()
            # Normalize the image channel by channel
            # (note: these ImageNet statistics assume inputs scaled to [0, 1];
            #  raw 0-255 images may need to be divided by 255 first)
            for t, m, s in zip(image, self.mean, self.std):
                t.sub_(m).div_(s)
            return {'image': image, 'mask': mask}


    # Use the transform above for normalization
    transform = ToTensorAndNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    # Build the training dataset and loader
    train_dataset = CustomDataset(
        image_paths=[os.path.join(train_images_folder, img) for img in train_images],
        mask_paths=[os.path.join(label_images_folder, img) for img in label_images],
        transform=transform
    )
    train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)

    Interpretation:

    There is not much to add here, except one thing to watch out for: the code above expects you to arrange the training and test data manually. Place the files strictly according to the layout below, and do not rename any of the folders:
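
    Based on the paths used in the code, the expected layout looks roughly like this (the image file names themselves do not matter, but the files in train/ and label/ must pair up after sorting):

    U-net-master/
        data_set/
            train/   <- training images
            label/   <- cell-boundary masks paired with train/
            test/    <- validation images (no masks provided)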

    (b) Building the PSPNet model

    # Define the PSPNet model
    class PSPNet(nn.Module):
        def __init__(self, input_channels, output_channels):
            super(PSPNet, self).__init__()
            # Encoder (feature extraction)
            self.enc = nn.Sequential(
                nn.Conv2d(input_channels, 64, kernel_size=3, padding=1),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(2)
            )
            # Pyramid Pooling Module: pool the features to 1x1, 2x2, 3x3 and 6x6 grids
            self.ppm = nn.ModuleList([
                nn.AdaptiveAvgPool2d(output_size=(1, 1)),
                nn.AdaptiveAvgPool2d(output_size=(2, 2)),
                nn.AdaptiveAvgPool2d(output_size=(3, 3)),
                nn.AdaptiveAvgPool2d(output_size=(6, 6))
            ])
            self.ppm_conv = nn.ModuleList([
                nn.Conv2d(64, 64, kernel_size=1),
                nn.Conv2d(64, 64, kernel_size=1),
                nn.Conv2d(64, 64, kernel_size=1),
                nn.Conv2d(64, 64, kernel_size=1)
            ])
            # Decoder: 320 = 64 (encoder) + 4 * 64 (pyramid branches) input channels
            self.decoder = nn.Sequential(
                nn.Conv2d(320, 128, kernel_size=3, padding=1),
                nn.BatchNorm2d(128),
                nn.ReLU(inplace=True),
                nn.ConvTranspose2d(128, output_channels, kernel_size=3, stride=2, padding=1, output_padding=1)
            )

        def forward(self, x):
            x = self.enc(x)
            ppm_out = [x]
            for pool, conv in zip(self.ppm, self.ppm_conv):
                ppm_out.append(F.interpolate(conv(pool(x)), size=x.size()[2:], mode='bilinear', align_corners=True))
            x = torch.cat(ppm_out, dim=1)
            return self.decoder(x)


    # Instantiate the PSPNet model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = PSPNet(input_channels=3, output_channels=2).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    criterion = torch.nn.CrossEntropyLoss()


    def calc_iou(pred, target):
        # Convert prediction and target to boolean values and flatten
        pred = (pred > 0.5).view(-1)
        target = target.view(-1).bool()
        # Calculate intersection and union
        intersection = torch.sum(pred & target)
        union = torch.sum(pred | target)
        # Avoid division by zero
        iou = (intersection + 1e-8) / (union + 1e-8)
        return iou.item()


    # History lists for the loss and IoU
    losses_history = []
    ious_history = []

    # Train the model
    epochs = 100
    for epoch in range(epochs):
        model.train()
        running_loss = 0.0
        total_iou = 0.0
        for samples in train_loader:
            images = samples['image'].to(device)
            masks = samples['mask'].long().to(device)
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, masks)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            # Calculate the IoU from the foreground-class probability
            pred_masks = F.softmax(outputs, dim=1)[:, 1]
            total_iou += calc_iou(pred_masks, masks)
        avg_loss = running_loss / len(train_loader)
        avg_iou = total_iou / len(train_loader)
        print(f"Epoch {epoch+1}/{epochs}, Loss: {avg_loss}, IOU: {avg_iou}")
        # Append to history
        losses_history.append(avg_loss)
        ious_history.append(avg_iou)
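
    A quick sanity check: the decoder's stride-2 transposed convolution exactly undoes the encoder's 2x2 max pooling, so the predicted mask should have the same spatial size as the input image.

    # Optional shape check on a dummy input (512x512 is just an example size)
    with torch.no_grad():
        model.eval()
        dummy = torch.randn(1, 3, 512, 512).to(device)
        print(model(dummy).shape)               # expected: torch.Size([1, 2, 512, 512])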

    Training iterations run noticeably more slowly than with the previous models:

    (c) Printing and visualizing the performance metrics

    ###################################  Loss and IoU curves  #######################################
    import matplotlib.pyplot as plt

    # Configure matplotlib to display Chinese characters in the plot labels
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False

    # Plot the training loss and IoU
    plt.figure(figsize=(12, 5))

    # Loss curve
    plt.subplot(1, 2, 1)
    plt.plot(losses_history, label='训练损失')
    plt.title('损失随迭代次数的变化')
    plt.xlabel('迭代次数')
    plt.ylabel('损失')
    plt.legend()

    # IoU curve
    plt.subplot(1, 2, 2)
    plt.plot(ious_history, label='训练IoU')
    plt.title('IoU随迭代次数的变化')
    plt.xlabel('迭代次数')
    plt.ylabel('IoU')
    plt.legend()

    plt.tight_layout()
    plt.show()

    Let's look at the results:

    Judging from the loss and IoU curves, this model converges less well than the previous ones; after 100 epochs the IoU has not reached 90%.

    ##############################  Evaluation metrics for a single sample  #######################################
    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn.metrics import roc_curve, auc, accuracy_score, recall_score, precision_score, f1_score


    def calc_iou(y_true, y_pred):
        intersection = np.logical_and(y_true, y_pred)
        union = np.logical_or(y_true, y_pred)
        return np.sum(intersection) / np.sum(union)


    # Take one sample from the dataset
    sample = train_dataset[0]
    sample_img = sample['image'].unsqueeze(0).to(device)
    sample_mask = sample['mask'].cpu().numpy()

    # Run the model
    with torch.no_grad():
        model.eval()
        prediction = model(sample_img)              # raw logits, shape [1, 2, H, W]
        prediction = F.softmax(prediction, dim=1)   # convert to per-class probabilities

    # Take the foreground class, move to CPU and threshold
    predicted_mask = prediction[0, 1].cpu().numpy()
    predicted_mask = (predicted_mask > 0.5).astype(np.uint8)

    # ROC curve
    fpr_train, tpr_train, _ = roc_curve(sample_mask.ravel(), predicted_mask.ravel())
    # AUC
    auc_train = auc(fpr_train, tpr_train)

    # Other evaluation metrics
    pixel_accuracy_train = accuracy_score(sample_mask.ravel(), predicted_mask.ravel())
    iou_train = calc_iou(sample_mask, predicted_mask)
    accuracy_train = accuracy_score(sample_mask.ravel(), predicted_mask.ravel())
    recall_train = recall_score(sample_mask.ravel(), predicted_mask.ravel())
    precision_train = precision_score(sample_mask.ravel(), predicted_mask.ravel())
    f1_train = f1_score(sample_mask.ravel(), predicted_mask.ravel())

    # Plot the ROC curve
    plt.figure()
    plt.plot(fpr_train, tpr_train, color='blue', lw=2, label='Train ROC curve (area = %0.2f)' % auc_train)
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.legend(loc='lower right')
    plt.show()

    # List of metrics
    metrics = [
        ("Pixel Accuracy", pixel_accuracy_train),
        ("IoU", iou_train),
        ("Accuracy", accuracy_train),
        ("Recall", recall_train),
        ("Precision", precision_train),
        ("F1 Score", f1_train)
    ]

    # Print the table header
    print("+-----------------+------------+")
    print("| Metric          | Value      |")
    print("+-----------------+------------+")
    # Print each metric
    for metric_name, metric_value in metrics:
        print(f"| {metric_name:15} | {metric_value:.6f} |")
    print("+-----------------+------------+")

    Note that this code only reports results for a single sample:

    ROC curve: I have some doubts here; it does not feel very meaningful, and the curve itself looks off: it is just a three-point polyline.
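
    The reason is that roc_curve is fed the already-thresholded 0/1 mask, so there are only two distinct score values and the curve collapses to three points. Feeding it the foreground probability map instead gives a proper curve, roughly like this:

    # Sketch: compute the ROC curve from the foreground probabilities rather than the binary mask
    prob_map = prediction[0, 1].cpu().numpy()   # probabilities in [0, 1]; prediction was softmaxed above
    fpr, tpr, _ = roc_curve(sample_mask.ravel(), prob_map.ravel())
    print('AUC from probabilities:', auc(fpr, tpr))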

    A brief explanation of the performance metrics, focusing mainly on the first two:

    A) Pixel Accuracy:

    Definition: the ratio of correctly classified pixels to the total number of pixels in the image.

    Computation: (number of correctly predicted pixels) / (total number of pixels).

    Notes: this metric measures accuracy at the pixel level, but in some scenarios (especially when the classes are highly imbalanced) it may not fully reflect the model's performance.

    B) IoU (Intersection over Union):

    Definition: for each class, IoU is the ratio of the intersection to the union between the pixels predicted as that class and the ground-truth pixels of that class.

    Computation: (intersection of prediction and ground truth) / (union of prediction and ground truth).

    Notes: it is a good metric because it accounts for both false positives and false negatives, which is especially important when the classes are imbalanced.

    C) Accuracy:

    Definition: the proportion of correctly classified pixels out of all pixels; here it is essentially the same as Pixel Accuracy.

    Computation: (number of correctly predicted pixels) / (total number of pixels).

    D) Recall (or Sensitivity, or True Positive Rate):

    Definition: the proportion of actual positive pixels that are correctly predicted as positive.

    Computation: (true positives) / (true positives + false negatives).

    Notes: a high recall means that few of the minority positive pixels are missed.

    E) Precision:

    Definition: the proportion of pixels predicted as positive that are actually positive.

    Computation: (true positives) / (true positives + false positives).

    Notes: a high precision means there are few false positives.

    F) F1 Score:

    Definition: the harmonic mean of precision and recall. It accounts for both false positives and false negatives and tries to balance the two.

    Computation: 2 × (precision × recall) / (precision + recall).

    Notes: with imbalanced classes, the F1 score is usually more informative than precision or recall alone.
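
    A tiny made-up example shows why IoU is more informative than pixel accuracy when the foreground is rare:

    import numpy as np

    # Toy 4x4 mask with only 2 foreground pixels, and a model that predicts all background
    y_true = np.array([[0, 0, 0, 0],
                       [0, 1, 1, 0],
                       [0, 0, 0, 0],
                       [0, 0, 0, 0]])
    y_pred = np.zeros_like(y_true)

    pixel_acc = (y_true == y_pred).mean()                 # 14/16 = 0.875, looks respectable
    intersection = np.logical_and(y_true, y_pred).sum()   # 0
    union = np.logical_or(y_true, y_pred).sum()           # 2
    iou = intersection / union if union else 0.0          # 0.0, reveals that nothing was segmented
    print(pixel_acc, iou)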

    ##############################  Evaluation metrics over the whole training set  #######################################
    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn.metrics import roc_curve, auc, accuracy_score, recall_score, precision_score, f1_score

    # Initialize accumulators for the metrics and lists for the true labels and predictions
    total_pixel_accuracy = 0
    total_iou = 0
    total_accuracy = 0
    total_recall = 0
    total_precision = 0
    total_f1 = 0
    total_auc = 0
    all_true_masks = []
    all_predicted_masks = []

    # Loop over the whole training set
    for sample in train_dataset:
        sample_img = sample['image'].unsqueeze(0).to(device)
        sample_mask = sample['mask'].cpu().numpy()

        # Run the model
        with torch.no_grad():
            model.eval()
            prediction = model(sample_img)              # raw logits
            prediction = F.softmax(prediction, dim=1)   # per-class probabilities

        # Take the foreground class, move to CPU and threshold
        predicted_mask = prediction[0, 1].cpu().numpy()
        predicted_mask = (predicted_mask > 0.5).astype(np.uint8)

        # Collect the true labels and predictions
        all_true_masks.extend(sample_mask.ravel())
        all_predicted_masks.extend(predicted_mask.ravel())

    # ROC curve and AUC over the pooled pixels
    fpr_train, tpr_train, _ = roc_curve(all_true_masks, all_predicted_masks)
    avg_auc = auc(fpr_train, tpr_train)

    # Other evaluation metrics
    avg_pixel_accuracy = accuracy_score(all_true_masks, all_predicted_masks)
    avg_iou = calc_iou(np.array(all_true_masks), np.array(all_predicted_masks))
    avg_accuracy = accuracy_score(all_true_masks, all_predicted_masks)
    avg_recall = recall_score(all_true_masks, all_predicted_masks)
    avg_precision = precision_score(all_true_masks, all_predicted_masks)
    avg_f1 = f1_score(all_true_masks, all_predicted_masks)

    # Plot the ROC curve
    plt.figure()
    plt.plot(fpr_train, tpr_train, color='blue', lw=2, label='Train ROC curve (area = %0.2f)' % avg_auc)
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.legend(loc='lower right')
    plt.show()

    # Print the averaged metrics
    metrics = [
        ("Pixel Accuracy", avg_pixel_accuracy),
        ("IoU", avg_iou),
        ("Accuracy", avg_accuracy),
        ("Recall", avg_recall),
        ("Precision", avg_precision),
        ("F1 Score", avg_f1),
        ("AUC", avg_auc)
    ]
    print("+-----------------+------------+")
    print("| Metric          | Value      |")
    print("+-----------------+------------+")
    for metric_name, metric_value in metrics:
        print(f"| {metric_name:15} | {metric_value:.6f} |")
    print("+-----------------+------------+")

    This result is computed over the entire training set:

    (d) Inspecting the actual segmentation results on the training and validation sets

    #######################  Segmentation result on a training image  ###########################################
    import matplotlib.pyplot as plt
    import numpy as np

    # Pick one training image
    img_index = 20
    sample = train_dataset[img_index]
    train_img = sample['image'].unsqueeze(0).to(device)  # add a batch dimension

    with torch.no_grad():
        model.eval()
        prediction = model(train_img)                    # raw logits
        prediction = F.softmax(prediction, dim=1)        # per-class probabilities

    # Threshold the predicted mask
    mask_threshold = 0.5
    pred_mask = prediction[0, 1].cpu().numpy()           # foreground class
    pred_mask = (pred_mask > mask_threshold).astype(np.uint8)

    # Show the original image, the true mask and the predicted segmentation
    plt.figure(figsize=(15, 5))

    plt.subplot(1, 3, 1)
    plt.title("Original Image")
    # Rescale the normalized image back into the [0, 1] range for display
    denorm_img = train_img[0].permute(1, 2, 0).cpu().numpy()
    denorm_img = denorm_img - denorm_img.min()
    denorm_img = denorm_img / denorm_img.max()
    plt.imshow(denorm_img.clip(0, 1))

    plt.subplot(1, 3, 2)
    plt.title("True Segmentation")
    true_mask = sample['mask'].cpu().numpy()
    if len(true_mask.shape) == 1:  # ensure the mask is 2D
        true_mask = true_mask.reshape(int(np.sqrt(true_mask.shape[0])), -1)
    plt.imshow(true_mask, cmap='gray')

    plt.subplot(1, 3, 3)
    plt.title("Predicted Segmentation")
    plt.imshow(pred_mask, cmap='gray')
    plt.show()

    #######################  Segmentation result on a test image  ###########################################
    import matplotlib.pyplot as plt

    test_dataset = CustomDataset(
        image_paths=[os.path.join(test_images_folder, img) for img in test_images],
        mask_paths=[os.path.join(label_images_folder, img) for img in label_images],  # assuming the test labels would also live in label_images_folder
        transform=transform
    )

    # Pick one test image
    img_index = 20
    sample = test_dataset[img_index]
    test_img = sample['image'].unsqueeze(0).to(device)

    with torch.no_grad():
        model.eval()
        prediction = model(test_img)                     # raw logits
        prediction = F.softmax(prediction, dim=1)        # per-class probabilities

    # Threshold the predicted mask
    mask_threshold = 0.5
    pred_mask = prediction[0, 1].cpu().numpy()
    pred_mask = (pred_mask > mask_threshold).astype(np.uint8)

    # Show the original image and the predicted segmentation
    plt.figure(figsize=(15, 5))

    plt.subplot(1, 3, 1)
    plt.title("Original Image")
    # Rescale the normalized image back into the [0, 1] range for display
    denorm_img = test_img[0].permute(1, 2, 0).cpu().numpy()
    denorm_img = denorm_img - denorm_img.min()
    denorm_img = denorm_img / denorm_img.max()
    plt.imshow(denorm_img.clip(0, 1))

    plt.subplot(1, 3, 3)
    plt.title("Predicted Segmentation")
    plt.imshow(pred_mask, cmap='gray')
    plt.show()

    Segmentation result on the training set:

    Segmentation result on the validation set (the validation set has no labels, so the middle panel is left empty):

    Overall, neither result looks good! I will not bother tuning the hyperparameters.

    5. Closing Remarks

    Omitted~

    6. Data

    Link: https://pan.baidu.com/s/1Cb78MwfSBfLwlpIT0X3q9Q?pwd=u1q1

    Extraction code: u1q1

• Original article: https://blog.csdn.net/qq_30452897/article/details/134497050