• To build a car, first build a three-wheeler - Part 3: a homemade dataset (6x6 dataset) 230103


    Building and recognizing the 6*6 dataset:

    # Build, recognize, test, and output the 6*6 dataset
    import torch
    import torch.nn as nn
    import torch.optim as optim

    # Define the model
    class NeuralNet(nn.Module):
        def __init__(self, input_size, hidden_size, num_classes):
            super(NeuralNet, self).__init__()
            self.fc1 = nn.Linear(input_size, hidden_size)
            self.fc2 = nn.Linear(hidden_size, hidden_size)
            self.fc3 = nn.Linear(hidden_size, num_classes)
            self.relu = nn.ReLU()

        def forward(self, x):
            out = self.relu(self.fc1(x))
            out = self.relu(self.fc2(out))
            out = self.fc3(out)
            return out

    # Prepare the data
    train_data = torch.tensor([
        [[0,0,1,0,0,0],[0,0,1,0,0,0],[0,0,1,0,0,0],[0,0,1,0,0,0],[0,0,1,0,0,0],[0,0,1,0,0,0]],
        [[0,0,0,0,0,0],[0,0,0,0,0,0],[1,1,1,1,1,1],[1,1,1,1,1,1],[0,0,0,0,0,0],[0,0,0,0,0,0]],
        # ... remaining training samples
    ], dtype=torch.float32)
    train_labels = torch.tensor([
        [1,0,0,0,0,0,0],
        [0,1,0,0,0,0,0],
        # ... remaining training labels
    ], dtype=torch.float32)
    test_data = torch.tensor([
        [[0,0,0,0,0,0],[0,1,1,1,1,0],[1,1,1,1,1,1],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]],
        [[0,0,1,0,0,0],[0,0,1,0,0,0],[0,0,1,0,0,0],[0,0,1,0,0,0],[0,0,1,0,0,0],[0,0,0,1,0,0]],
        # ... remaining test samples
    ], dtype=torch.float32)

    # Hyperparameters
    input_size = 6 * 6   # input dimension
    hidden_size = 50     # number of hidden units
    num_classes = 7      # output dimension (number of classes)
    learning_rate = 0.001
    num_epochs = 1000

    model = NeuralNet(input_size, hidden_size, num_classes)

    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Train the model
    for epoch in range(num_epochs):
        # Forward pass
        outputs = model(train_data.view(-1, 36))
        loss = criterion(outputs, torch.argmax(train_labels, 1))
        # Backward pass and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 100 == 0:
            print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))

    # Save the model
    torch.save(model.state_dict(), 'model.ckpt')

    # Test
    with torch.no_grad():
        test_outputs = model(test_data.view(-1, 36))
        predicted = torch.argmax(test_outputs.data, 1)
        for i, prediction in enumerate(predicted):
            print(f"Test data {i+1}: Predicted label: {prediction.item()}")

    # Use a TrueType font as the training set to recognize characters
    import torch
    import torch.nn as nn
    import torch.optim as optim
    from PIL import Image, ImageDraw, ImageFont
    from torchvision import transforms
    import matplotlib.pyplot as plt

    Times2000 = 1000  # number of training epochs

    # Parameters
    font_path = "arial.ttf"  # e.g. "e:\\arial.ttf"
    siz28e28 = 28            # side length of the rendered character image
    characters = ["2", "4"] + [str(i) for i in range(8, 10)] + ["A", "Z"]  # e.g. ["A", "B"]
    print(characters)

    # 1. Render the characters and digits to bitmap data
    def render_char(char, siz28e28):
        image = Image.new("L", (siz28e28, siz28e28), "white")
        draw = ImageDraw.Draw(image)
        font = ImageFont.truetype(font_path, siz28e28)
        # -----------------------------------------
        w03 = draw.textlength(char, font=font)
        h03 = siz28e28
        print("{[w3", w03, "h3", h03, "]} ")
        # -----------------------------------------
        draw.text(((siz28e28 - w03) / 2, (siz28e28 - h03) / 2), char, font=font, fill="black")
        return image

    data = []
    labels = []
    for i, char in enumerate(characters):
        img = render_char(char, siz28e28)
        data.append(transforms.ToTensor()(img))
        labels.append(i)

    # 2. Train the neural network model
    class SimpleNet(nn.Module):
        def __init__(self, num_classes):
            super(SimpleNet, self).__init__()
            self.fc = nn.Linear(siz28e28 * siz28e28, num_classes)

        def forward(self, x):
            x = x.view(-1, siz28e28 * siz28e28)
            x = self.fc(x)
            return x

    model = SimpleNet(len(characters))
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01)

    for epoch in range(Times2000):  # also tried 8000 and 1000
        inputs = torch.stack(data)
        targets = torch.tensor(labels)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = loss_function(outputs, targets)
        loss.backward()
        optimizer.step()

    # 3. Use the model for prediction
    def predict_image(img_path):
        model.eval()
        img = Image.open(img_path).convert("L").resize((siz28e28, siz28e28))
        img_tensor = transforms.ToTensor()(img).unsqueeze(0)
        output = model(img_tensor)
        _, predicted = output.max(1)
        return characters[predicted[0]]

    im = "f:\\22letter23r1002\\8_16x16.png"
    predicted_char = predict_image(im)
    print(f"Predicted character: {predicted_char}")

    # Display the result with matplotlib
    plt.imshow(Image.open(im))
    # plt.title(f"Predicted: {predicted_char}")
    # plt.axis("off")
    # plt.show()
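
    The script above only predicts one external image. As a quick sanity check, here is a sketch of my own (not from the original post; it reuses data, labels, characters, and model defined above) that runs the trained SimpleNet back over the rendered training characters and reports its accuracy:

    # Sketch (assumption, not in the original post): evaluate the trained model
    # on the characters it was rendered and trained on.
    with torch.no_grad():
        model.eval()
        inputs = torch.stack(data)            # shape: (N, 1, 28, 28)
        preds = model(inputs).argmax(dim=1)   # predicted class indices
        for target, pred in zip(labels, preds):
            print(f"expected {characters[target]!r}, got {characters[pred]!r}")
        accuracy = (preds == torch.tensor(labels)).float().mean().item()
        print(f"Training-set accuracy: {accuracy:.2%}")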

  • Original article: https://blog.csdn.net/aw344/article/details/134047275