• Build a three-wheeler before building the car, Part 2: A homemade dataset (5x5 dataset) 230102


    # Homemade dataset: 5x5 dot-matrix inputs, digits encoded as 9 one-hot outputs ... Jupyter Notebook 231001
    import torch                    # PyTorch
    import torch.nn as nn
    import torch.optim as optim     # optimizer

    # Define the model
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.fc1 = nn.Linear(25, 50)
            self.fc2 = nn.Linear(50, 9)

        def forward(self, x):
            x = x.view(-1, 25)               # flatten each 5x5 grid to a 25-dim vector
            x = torch.relu(self.fc1(x))
            x = self.fc2(x)
            return x

    # Training data: one 5x5 grid per class 0-8
    i = [
        # 0
        torch.tensor([[1,1,1,1,1],
                      [1,0,0,0,1],
                      [1,0,0,0,1],
                      [1,0,0,0,1],
                      [1,1,1,1,1]], dtype=torch.float32),
        # 1
        torch.tensor([[0,0,1,0,0],
                      [0,0,1,0,0],
                      [0,0,1,0,0],
                      [0,0,1,0,0],
                      [0,0,1,0,0]], dtype=torch.float32),
        # 2
        torch.tensor([[0,0,0,0,0],
                      [0,0,0,0,0],
                      [1,1,1,1,1],
                      [0,0,0,0,0],
                      [0,0,0,0,0]], dtype=torch.float32),
        # 3
        torch.tensor([[0,0,1,0,0],
                      [0,0,1,0,0],
                      [1,1,1,1,1],
                      [0,0,1,0,0],
                      [0,0,1,0,0]], dtype=torch.float32),
        # 4
        torch.tensor([[1,0,0,0,1],
                      [0,1,0,1,0],
                      [0,0,1,0,0],
                      [0,1,0,1,0],
                      [1,0,0,0,1]], dtype=torch.float32),
        # 5
        torch.tensor([[1,1,1,1,1],
                      [1,0,0,0,0],
                      [1,1,1,1,1],
                      [0,0,0,0,1],
                      [1,1,1,1,1]], dtype=torch.float32),
        # 6
        torch.tensor([[1,1,0,0,0],
                      [1,0,0,0,0],
                      [1,1,1,1,0],
                      [1,0,0,0,1],
                      [1,1,1,1,1]], dtype=torch.float32),
        # 7
        torch.tensor([[0,1,1,1,1],
                      [0,0,0,1,1],
                      [0,0,0,0,1],
                      [0,0,0,0,1],
                      [0,0,0,1,1]], dtype=torch.float32),
        # digit 8
        torch.tensor([[1,1,1,1,1],
                      [1,0,0,0,1],
                      [1,1,1,1,1],
                      [1,0,0,0,1],
                      [1,1,1,1,1]], dtype=torch.float32),
        # ... further samples
    ]

    # One-hot targets for classes 0-8
    target = [torch.tensor([1,0,0,0,0,0,0,0,0]),  # 0
              torch.tensor([0,1,0,0,0,0,0,0,0]),  # 1
              torch.tensor([0,0,1,0,0,0,0,0,0]),  # 2
              torch.tensor([0,0,0,1,0,0,0,0,0]),  # 3
              torch.tensor([0,0,0,0,1,0,0,0,0]),  # 4
              torch.tensor([0,0,0,0,0,1,0,0,0]),  # 5
              torch.tensor([0,0,0,0,0,0,1,0,0]),  # 6
              torch.tensor([0,0,0,0,0,0,0,1,0]),  # 7
              torch.tensor([0,0,0,0,0,0,0,0,1]),  # 8
              # ... further targets
              ]

    # Test data: noisy, perturbed versions of the training grids
    ib = [
        # expected digit: "0"
        torch.tensor([[1,1,1,1,1],
                      [1,0,0,0,1],
                      [1,0,0,0,1],
                      [1,0,0,1,1],
                      [1,1,1,1,2]], dtype=torch.float32),
        # expected 6
        torch.tensor([[6,1,0,0,0],
                      [6,0,0,0,0],
                      [6,1,1,0,0],
                      [6,0,0,1,0],
                      [6,6,6,1,0]], dtype=torch.float32),
        torch.tensor([[0,0,0,0,0],
                      [0,1,1,1,1],
                      [1,1,1,1,1],
                      [0,0,0,0,0],
                      [0,0,0,0,0]], dtype=torch.float32),
        torch.tensor([[0,0,1,0,0],
                      [0,0,1,0,0],
                      [0,0,1,0,0],
                      [0,0,1,1,0],
                      [0,0,1,0,0]], dtype=torch.float32),
        torch.tensor([[0,0,1,0,0],
                      [0,0,1,0,0],
                      [1,1,1,1,1],
                      [0,0,1,0,0],
                      [0,0,1,0,0]], dtype=torch.float32),
        torch.tensor([[1,0,0,0,1],
                      [0,1,0,1,0],
                      [0,0,1,0,0],
                      [0,1,0,1,0],
                      [1,0,0,0,1]], dtype=torch.float32),
        torch.tensor([[1,1,1,1,1],
                      [1,0,0,0,1],
                      [1,0,0,0,1],
                      [1,0,0,0.5,1],
                      [1,1,1,1,2]], dtype=torch.float32),
        # expected "7"
        torch.tensor([[0,1,1,1,0],
                      [0,0,0,1,0],
                      [0,0,0.05,0,0],
                      [0,0,0,1,0],
                      [0,0,1,0,0]], dtype=torch.float32),
        # expected digit: "8"
        torch.tensor([[1,1,1,1,1],
                      [1,0,0,0,1],
                      [1,1,1,1,1],
                      [1,0,0.5,0.9,1.2],
                      [1,1,1,2,3]], dtype=torch.float32),
    ]

    # Set up the network, loss function and optimizer
    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.01)

    # Train the model
    for epoch in range(100):
        for inputs, labels in zip(i, target):
            optimizer.zero_grad()
            inputs = inputs.unsqueeze(0)   # add a batch dimension
            outputs = net(inputs)
            # CrossEntropyLoss expects a class index, so convert the one-hot label
            loss = criterion(outputs, torch.argmax(labels.unsqueeze(0), 1))
            loss.backward()
            optimizer.step()
        print(f"Epoch {epoch+1}, Loss: {loss.item()}")
    print("Training complete!")

    # Run the test data through the trained network
    with torch.no_grad():
        for test_input in ib:
            test_input = test_input.unsqueeze(0)   # add a batch dimension
            outputs = net(test_input)
            prediction = torch.argmax(outputs, 1)
            print(f"Test input:\n{test_input.squeeze(0)}\nPredicted class: {prediction.item()}\n")

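Because nn.CrossEntropyLoss expects an integer class index rather than a one-hot vector, the training loop above converts each label with torch.argmax on every step. The same data can also be trained full-batch with the labels stored as plain indices. A minimal sketch, assuming the list i and the model Net defined above (the names batch_x and batch_y are mine, not from the original post):

    # Sketch: full-batch training with integer class labels.
    # This re-initialises a fresh network; batch_x / batch_y are illustrative names.
    batch_x = torch.stack(i)        # shape (9, 5, 5); forward() flattens it to (9, 25)
    batch_y = torch.arange(9)       # class indices 0..8 instead of one-hot vectors

    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.01)

    for epoch in range(100):
        optimizer.zero_grad()
        outputs = net(batch_x)               # one forward pass over all nine samples
        loss = criterion(outputs, batch_y)   # CrossEntropyLoss takes indices directly
        loss.backward()
        optimizer.step()

With only nine samples the full-batch version behaves much like the per-sample loop, but it removes the repeated one-hot-to-index conversion.
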
    # Jupyter Notebook 231001
    # A 6-class variant: vertical line, horizontal line, cross, X, box, and blank grid
    import torch
    import torch.nn as nn
    import torch.optim as optim

    # Define the model
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.fc1 = nn.Linear(25, 50)
            self.fc2 = nn.Linear(50, 6)

        def forward(self, x):
            x = x.view(-1, 25)               # flatten each 5x5 grid to a 25-dim vector
            x = torch.relu(self.fc1(x))
            x = self.fc2(x)
            return x

    # Training data: six 5x5 patterns
    i = [torch.tensor([[0,0,1,0,0],
                       [0,0,1,0,0],
                       [0,0,1,0,0],
                       [0,0,1,0,0],
                       [0,0,1,0,0]], dtype=torch.float32),
         torch.tensor([[0,0,0,0,0],
                       [0,0,0,0,0],
                       [1,1,1,1,1],
                       [0,0,0,0,0],
                       [0,0,0,0,0]], dtype=torch.float32),
         torch.tensor([[0,0,1,0,0],
                       [0,0,1,0,0],
                       [1,1,1,1,1],
                       [0,0,1,0,0],
                       [0,0,1,0,0]], dtype=torch.float32),
         torch.tensor([[1,0,0,0,1],
                       [0,1,0,1,0],
                       [0,0,1,0,0],
                       [0,1,0,1,0],
                       [1,0,0,0,1]], dtype=torch.float32),
         torch.tensor([[1,1,1,1,1],
                       [1,0,0,0,1],
                       [1,0,0,0,1],
                       [1,0,0,0,1],
                       [1,1,1,1,1]], dtype=torch.float32),
         torch.tensor([[0,0,0,0,0],
                       [0,0,0,0,0],
                       [0,0,0,0,0],
                       [0,0,0,0,0],
                       [0,0,0,0,0]], dtype=torch.float32),
         # ... further samples
         ]

    # One-hot targets for the six classes
    target = [torch.tensor([1,0,0,0,0,0]),
              torch.tensor([0,1,0,0,0,0]),
              torch.tensor([0,0,1,0,0,0]),
              torch.tensor([0,0,0,1,0,0]),
              torch.tensor([0,0,0,0,1,0]),
              torch.tensor([0,0,0,0,0,1]),
              # ... further targets
              ]

    # Test data: perturbed versions of the training patterns
    ib = [torch.tensor([[0,0,0,0,0],
                        [0,1,1,1,1],
                        [1,1,1,1,1],
                        [0,0,0,0,0],
                        [0,0,0,0,0]], dtype=torch.float32),
          torch.tensor([[0,0,1,0,0],
                        [0,0,1,0,0],
                        [0,0,1,0,0],
                        [0,0,1,1,0],
                        [0,0,1,0,0]], dtype=torch.float32),
          torch.tensor([[0,0,1,0,0],
                        [0,0,1,0,0],
                        [1,1,1,1,1],
                        [0,0,1,0,0],
                        [0,0,1,0,0]], dtype=torch.float32),
          torch.tensor([[1,0,0,0,1],
                        [0,1,0,1,0],
                        [0,0,1,0,0],
                        [0,1,0,1,0],
                        [1,0,0,0,1]], dtype=torch.float32),
          torch.tensor([[1,1,1,1,1],
                        [1,0,0,0,1],
                        [1,0,0,0,1],
                        [1,0,0,0.5,1],
                        [1,1,1,1,2]], dtype=torch.float32),
          torch.tensor([[0,0,0,0,0],
                        [0,0,0,0,0],
                        [0,0,0.05,0,0],
                        [0,0,0,0,0],
                        [0,0,0,0,0]], dtype=torch.float32),
          ]

    # Set up the network, loss function and optimizer
    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.01)

    # Train the model
    for epoch in range(100):
        for inputs, labels in zip(i, target):
            optimizer.zero_grad()
            inputs = inputs.unsqueeze(0)   # add a batch dimension
            outputs = net(inputs)
            # CrossEntropyLoss expects a class index, so convert the one-hot label
            loss = criterion(outputs, torch.argmax(labels.unsqueeze(0), 1))
            loss.backward()
            optimizer.step()
        print(f"Epoch {epoch+1}, Loss: {loss.item()}")
    print("Training complete!")

    # Run the test data through the trained network
    with torch.no_grad():
        for test_input in ib:
            test_input = test_input.unsqueeze(0)   # add a batch dimension
            outputs = net(test_input)
            prediction = torch.argmax(outputs, 1)
            print(f"Test input:\n{test_input.squeeze(0)}\nPredicted class: {prediction.item()}\n")

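The test loop in both listings only prints the argmax of the raw logits. If you also want to see how confident the network is in each prediction, a small sketch like the one below (reusing net and ib from the listing above) applies softmax to the logits first; this is an optional addition, not part of the original post:

    # Sketch: report each prediction together with its softmax probability.
    with torch.no_grad():
        for test_input in ib:
            logits = net(test_input.unsqueeze(0))     # shape (1, num_classes)
            probs = torch.softmax(logits, dim=1)      # normalise logits to probabilities
            conf, pred = torch.max(probs, dim=1)
            print(f"Predicted class: {pred.item()}  (confidence {conf.item():.2f})")
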
  • Original article: https://blog.csdn.net/aw344/article/details/134025828