# -*- coding: utf-8 -*-
"""
# @file name : lesson-05-Logsitic-Regression.py
# @author    : tingsongyu
# @date      : 2019-09-03 10:08:00
# @brief     : Logistic regression model training
"""
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np

torch.manual_seed(10)


# ============================ step 1/5 Generate data ============================
sample_nums = 100
mean_value = 1.7
bias = 1
n_data = torch.ones(sample_nums, 2)
x0 = torch.normal(mean_value * n_data, 1) + bias    # class 0 samples, shape=(100, 2)
y0 = torch.zeros(sample_nums)                       # class 0 labels, shape=(100,)
x1 = torch.normal(-mean_value * n_data, 1) + bias   # class 1 samples, shape=(100, 2)
y1 = torch.ones(sample_nums)                        # class 1 labels, shape=(100,)
train_x = torch.cat((x0, x1), 0)                    # shape=(200, 2)
train_y = torch.cat((y0, y1), 0)                    # shape=(200,)

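# (Added note, not part of the original lesson code) class 0 is sampled around
# (mean_value + bias, mean_value + bias) ≈ (2.7, 2.7) and class 1 around
# (-mean_value + bias, -mean_value + bias) ≈ (-0.7, -0.7), so the two clusters
# are roughly linearly separable. Optional shape check:
print(train_x.shape, train_y.shape)   # torch.Size([200, 2]) torch.Size([200])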

# ============================ step 2/5 Choose the model: logistic regression (nn.Module) ============================
class LR(nn.Module):
    def __init__(self):
        super(LR, self).__init__()
        self.features = nn.Linear(2, 1)   # linear layer: 2 input features -> 1 output
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.features(x)
        x = self.sigmoid(x)
        return x


lr_net = LR()   # instantiate the logistic regression model

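# (Added sanity check, not in the original script) even untrained, the model should
# map a batch of inputs to probabilities in (0, 1), with output shape (batch, 1).
with torch.no_grad():
    init_probs = lr_net(train_x[:5])
print(init_probs.shape)   # torch.Size([5, 1])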

# ============================ step 3/5 Choose the loss function: binary cross-entropy ============================
loss_fn = nn.BCELoss()
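# For reference (added note): per sample, nn.BCELoss computes
#     loss = -[ y * log(p) + (1 - y) * log(1 - p) ]
# where p is the sigmoid output above. nn.BCEWithLogitsLoss is a common alternative
# that fuses the sigmoid with the BCE for better numerical stability; the model
# would then return raw logits instead of probabilities.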

# ============================ step 4/5 Choose the optimizer: stochastic gradient descent ============================
lr = 0.01   # learning rate
optimizer = torch.optim.SGD(lr_net.parameters(), lr=lr, momentum=0.9)
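# For reference (added note): with momentum and the default dampening=0,
# torch.optim.SGD keeps a velocity buffer v per parameter and updates roughly as
#     v <- momentum * v + grad
#     p <- p - lr * v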

# ============================ step 5/5 Train the model ============================
for iteration in range(1000):

    # forward pass
    y_pred = lr_net(train_x)

    # compute the loss
    loss = loss_fn(y_pred.squeeze(), train_y)

    # backward pass
    loss.backward()

    # update the parameters
    optimizer.step()

    # clear the gradients
    optimizer.zero_grad()

    # plotting
    if iteration % 20 == 0:

        mask = y_pred.ge(0.5).float().squeeze()   # classify with a threshold of 0.5
        correct = (mask == train_y).sum()         # number of correctly predicted samples
        acc = correct.item() / train_y.size(0)    # classification accuracy

        plt.scatter(x0.data.numpy()[:, 0], x0.data.numpy()[:, 1], c='r', label='class 0')
        plt.scatter(x1.data.numpy()[:, 0], x1.data.numpy()[:, 1], c='b', label='class 1')

        w0, w1 = lr_net.features.weight[0]
        w0, w1 = float(w0.item()), float(w1.item())
        plot_b = float(lr_net.features.bias[0].item())
        plot_x = np.arange(-6, 6, 0.1)
        plot_y = (-w0 * plot_x - plot_b) / w1   # decision boundary: w0*x + w1*y + b = 0

        plt.xlim(-5, 7)
        plt.ylim(-7, 7)
        plt.plot(plot_x, plot_y)

        plt.text(-5, 5, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
        plt.title("Iteration: {}\nw0:{:.2f} w1:{:.2f} b: {:.2f} accuracy:{:.2%}".format(iteration, w0, w1, plot_b, acc))
        plt.legend()

        plt.show()
        plt.pause(0.5)

        if acc > 0.99:
            break
Note: the script is organized into five parts: data, model, loss function, optimizer, and the training loop.
Data: randomly generated with torch.normal (two Gaussian clusters).
Model: a logistic regression class built on nn.Module (a linear layer followed by a sigmoid).
Loss function: binary cross-entropy (nn.BCELoss).
Optimizer: stochastic gradient descent (SGD with momentum).
Training loop: forward pass, loss computation, backward pass, parameter update, and clearing the gradients.
Inside the loop, a mask thresholds the sigmoid outputs at 0.5 (predictions >= 0.5 are treated as class 1), the correctly classified samples are counted, and training stops once the accuracy exceeds 0.99; a usage sketch for the trained model follows below.
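
As a minimal usage sketch (my addition, assuming the training loop above has finished and lr_net holds the fitted weights), new points can be classified with the same 0.5 threshold; the test points here are hypothetical:

    new_points = torch.tensor([[3.0, 3.0], [-1.0, -1.0]])   # hypothetical test points
    with torch.no_grad():
        probs = lr_net(new_points).squeeze()   # predicted probability of class 1
        preds = (probs >= 0.5).long()          # 0 or 1, using the 0.5 threshold
    print(probs, preds)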