• Modern C++: a hand-rolled minimal neural network (2309, version 230901)


    Input data in C++: vector<vector<float>> inputs = { {1, 1}, {1, 0} }; with targets = { 0, 1 };. Test data: vector<vector<float>> inputs22 = { {1, 0}, {1, 1} };. Build the network with, for example, NeuralNetwork nn({ 2, 4, 1 }); this gives a three-layer network with 2 input nodes, 4 hidden nodes, and 1 output node, trained with gradient descent, backpropagation, and so on.
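
    As a quick usage sketch (assuming the four-argument train() of the NeuralNetwork class defined in the listings below; the epoch count and learning rate here are illustrative):

        NeuralNetwork nn({ 2, 4, 1 });                        // 2 input, 4 hidden, 1 output node
        vector<vector<float>> inputs = { {1, 1}, {1, 0} };
        vector<float> targets = { 0, 1 };
        for (int epoch = 0; epoch < 10000; epoch++)           // repeated gradient-descent updates
            for (size_t j = 0; j < inputs.size(); j++)
                nn.train(inputs[j], { targets[j] }, 0.1f, false);
        vector<vector<float>> inputs22 = { {1, 0}, {1, 1} };  // test set
        for (size_t j = 0; j < inputs22.size(); j++)
            cout << nn.feedforward(inputs22[j])[0] << endl;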

    Below is a basic neural network built in C++, with forward propagation and gradient-descent-based backpropagation.
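
    For reference, the update rule the code implements is plain stochastic gradient descent on the output error (a sketch of the standard formulas; note the listings below evaluate \sigma' at the already-activated output rather than at the pre-activation sum):

        e_k = t_k - o_k                                       (output-layer error)
        \Delta w_{jk} = \eta \, e_k \, \sigma'(net_k) \, o_j,  with  \sigma'(x) = \sigma(x)(1 - \sigma(x))
        e_j = \sum_k e_k \, w_{jk}                            (error carried back to the previous layer)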

    In this example:

    1. We create a neural network with 2 input nodes, 4 hidden nodes, and 1 output node.
    2. We train on two input vectors ({1, 1} and {1, 0}), whose target outputs are 0 and 1, respectively.
    3. We run 10000 training iterations.
    4. After training, we test the network's performance on two further input vectors ({1, 0} and {1, 1}).

    You can adjust the network's hyperparameters as needed, such as the learning rate and the number of training iterations, to get better performance.
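
    For instance, a minimal tuning sketch (the candidate values are illustrative, and inputs/targets are assumed to be in scope as above):

        for (float lr : { 0.01f, 0.05f, 0.1f }) {        // candidate learning rates
            NeuralNetwork nn({ 2, 4, 1 });               // fresh random weights per trial
            for (int epoch = 0; epoch < 10000; epoch++)  // candidate iteration count
                for (size_t j = 0; j < inputs.size(); j++)
                    nn.train(inputs[j], { targets[j] }, lr, false);
            // evaluate on the test inputs here and keep the best-performing lr
        }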

    Second (improved) version:

    #include <iostream>
    #include <vector>
    #include <cmath>    // exp, fabs
    #include <cstdlib>  // rand, srand
    #include <ctime>    // time
    using namespace std;

    bool whetherOutputFlag = 0;  // global flag (shadowed by the train() parameter below)

    float sigmoid(float x) {
        return 1 / (1 + exp(-x));
    }

    float sigmoid_derivative(float x) {
        float s = sigmoid(x);
        return s * (1 - s);
    }

    class NeuralNetwork {
    private:
        std::vector<std::vector<std::vector<float>>> weights;  // weights[layer][from][to]
        std::vector<std::vector<float>> layer_outputs;         // activations of every layer, filled by feedforward
    public:
        NeuralNetwork(std::vector<int> topology) {
            std::srand(std::time(0));
            for (int i = 0; i < topology.size() - 1; i++) {
                weights.push_back(std::vector<std::vector<float>>(topology[i], std::vector<float>(topology[i + 1])));
                for (int j = 0; j < topology[i]; j++) {
                    for (int k = 0; k < topology[i + 1]; k++) {
                        weights[i][j][k] = (std::rand() % 2000 - 1000) / 1000.0f;  // random init in [-1, 1)
                    }
                }
            }
        }

        std::vector<float> feedforward(std::vector<float> input) {
            layer_outputs.clear();
            layer_outputs.push_back(input);
            for (int i = 0; i < weights.size(); i++) {
                std::vector<float> output(weights[i][0].size(), 0);
                for (int j = 0; j < weights[i][0].size(); j++) {
                    for (int k = 0; k < input.size(); k++) {
                        output[j] += input[k] * weights[i][k][j];
                    }
                    output[j] = sigmoid(output[j]);
                }
                layer_outputs.push_back(output);
                input = output;
            }
            return input;
        }//feedforward

        void train(std::vector<float> input, std::vector<float> targets, float learning_rate, bool whetherOutputFlag) {
            //--------------------------------------------- forward pass; loss printed only when the flag is set
            std::vector<float> outputs = feedforward(input);
            // initialize the output-layer errors
            std::vector<float> errors(outputs.size(), 0);
            float sumA = 0.0;
            for (int i = 0; i < targets.size(); i++) {
                errors[i] = (targets[i] - outputs[i]);
                if (true == whetherOutputFlag) {//if220
                    sumA += fabs(errors[i]);
                    cout << "i" << i << "[" << errors[i] << "]";
                }//if220
            }//for110i
            if (true == whetherOutputFlag) std::cout << sumA;  // print the summed absolute error only when the flag is set
            //=============================================
            // backpropagation
            for (int i = weights.size() - 1; i >= 0; i--) {
                // update the weights of layer i
                for (int j = 0; j < weights[i].size(); j++) {
                    for (int k = 0; k < weights[i][j].size(); k++) {
                        // note: layer_outputs already holds activated values, so this applies sigmoid twice;
                        // the exact derivative would be layer_outputs[i + 1][k] * (1 - layer_outputs[i + 1][k])
                        float derivative = sigmoid_derivative(layer_outputs[i + 1][k]);
                        weights[i][j][k] += learning_rate * errors[k] * derivative * layer_outputs[i][j];
                    }
                }
                // carry the error back to the previous layer
                std::vector<float> next_errors(weights[i].size(), 0);
                for (int j = 0; j < weights[i].size(); j++) {
                    for (int k = 0; k < weights[i][j].size(); k++) {
                        next_errors[j] += errors[k] * weights[i][j][k];
                    }
                }
                errors = next_errors;
            }
        }//train
    };//NeuralNetwork

    int main() {
        NeuralNetwork nn({ 2, 8, 6, 1 });
        // training data: the XOR truth table
        std::vector<std::vector<float>> inputs = { {1, 0}, {1, 1}, {0, 1}, {0, 0} };
        std::vector<float> targets = { 1, 0, 1, 0 };
        for (int i = 0; i < 50000; i++) {
            for (size_t j = 0; j < inputs.size(); j++) {
                nn.train(inputs[j], { targets[j] }, 0.05, false);  // false: do not print the loss (earlier trials: 0.1, 0.05)
                if (0 == (i % 10000)) {
                    nn.train(inputs[j], { targets[j] }, 0.001, true);  // true: print the loss
                    std::cout << "] ";
                    std::cout << std::endl;
                }
            }
            if (0 == (i % 10000)) std::cout << std::endl;
        }//for110i
        std::vector<std::vector<float>> inputs22 = { {0, 1}, {0, 0}, {1, 0}, {1, 1} };
        for (size_t j = 0; j < inputs22.size(); j++) {
            std::vector<float> output = nn.feedforward(inputs22[j]);
            std::cout << "Output for [" << inputs22[j][0] << ", " << inputs22[j][1] << "]: " << output[0] << std::endl;
        }
        return 0;
    }//main
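
    After training, the raw sigmoid outputs can be turned into hard 0/1 predictions by thresholding; a minimal sketch (the 0.5 cutoff is an assumption, not part of the original code):

        for (size_t j = 0; j < inputs22.size(); j++) {
            float y = nn.feedforward(inputs22[j])[0];
            int predicted = (y > 0.5f) ? 1 : 0;  // threshold the sigmoid activation
            std::cout << "[" << inputs22[j][0] << ", " << inputs22[j][1] << "] -> " << predicted << std::endl;
        }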

    First version (differs from the second mainly in the network topology):

    #include <iostream>
    #include <vector>
    #include <cmath>    // exp, fabs
    #include <cstdlib>  // rand, srand
    #include <ctime>    // time
    using namespace std;

    bool whetherOutputFlag = 0;  // global flag (shadowed by the train() parameter below)

    float sigmoid(float x) {
        return 1 / (1 + exp(-x));
    }

    float sigmoid_derivative(float x) {
        float s = sigmoid(x);
        return s * (1 - s);
    }

    class NeuralNetwork {
    private:
        std::vector<std::vector<std::vector<float>>> weights;  // weights[layer][from][to]
        std::vector<std::vector<float>> layer_outputs;         // activations of every layer, filled by feedforward
    public:
        NeuralNetwork(std::vector<int> topology) {
            std::srand(std::time(0));
            for (int i = 0; i < topology.size() - 1; i++) {
                weights.push_back(std::vector<std::vector<float>>(topology[i], std::vector<float>(topology[i + 1])));
                for (int j = 0; j < topology[i]; j++) {
                    for (int k = 0; k < topology[i + 1]; k++) {
                        weights[i][j][k] = (std::rand() % 2000 - 1000) / 1000.0f;  // random init in [-1, 1)
                    }
                }
            }
        }

        std::vector<float> feedforward(std::vector<float> input) {
            layer_outputs.clear();
            layer_outputs.push_back(input);
            for (int i = 0; i < weights.size(); i++) {
                std::vector<float> output(weights[i][0].size(), 0);
                for (int j = 0; j < weights[i][0].size(); j++) {
                    for (int k = 0; k < input.size(); k++) {
                        output[j] += input[k] * weights[i][k][j];
                    }
                    output[j] = sigmoid(output[j]);
                }
                layer_outputs.push_back(output);
                input = output;
            }
            return input;
        }//feedforward

        void train(std::vector<float> input, std::vector<float> targets, float learning_rate, bool whetherOutputFlag) {
            //--------------------------------------------- forward pass; loss not printed by default
            std::vector<float> outputs = feedforward(input);
            // initialize the output-layer errors
            std::vector<float> errors(outputs.size(), 0);
            float sumA = 0.0;
            for (int i = 0; i < targets.size(); i++) {
                errors[i] = (targets[i] - outputs[i]);
                if (true == whetherOutputFlag) {//if220
                    sumA += fabs(targets[i] - outputs[i]);
                    cout << "i" << i << "[" << errors[i] << "]";
                }//if220
            }//for110i
            if (true == whetherOutputFlag) std::cout << sumA;
            //=============================================
            // backpropagation
            for (int i = weights.size() - 1; i >= 0; i--) {
                // update the weights of layer i
                for (int j = 0; j < weights[i].size(); j++) {
                    for (int k = 0; k < weights[i][j].size(); k++) {
                        float derivative = sigmoid_derivative(layer_outputs[i + 1][k]);
                        weights[i][j][k] += learning_rate * errors[k] * derivative * layer_outputs[i][j];
                    }
                }
                // carry the error back to the previous layer
                std::vector<float> next_errors(weights[i].size(), 0);
                for (int j = 0; j < weights[i].size(); j++) {
                    for (int k = 0; k < weights[i][j].size(); k++) {
                        next_errors[j] += errors[k] * weights[i][j][k];
                    }
                }
                errors = next_errors;
            }
        }//train
    };//NeuralNetwork

    int main() {
        NeuralNetwork nn({ 2, 4, 1 });
        // training data: the XOR truth table
        std::vector<std::vector<float>> inputs = { {1, 0}, {1, 1}, {0, 1}, {0, 0} };
        std::vector<float> targets = { 1, 0, 1, 0 };
        for (int i = 0; i < 50000; i++) {
            for (size_t j = 0; j < inputs.size(); j++) {
                nn.train(inputs[j], { targets[j] }, 0.05, false);  // false: do not print the loss (earlier trials: 0.1, 0.05)
                if (0 == (i % 10000)) {
                    nn.train(inputs[j], { targets[j] }, 0.001, true);  // true: print the loss
                    std::cout << "] ";
                    std::cout << std::endl;
                }
            }
            if (0 == (i % 10000)) std::cout << std::endl;
        }//for110i
        std::vector<std::vector<float>> inputs22 = { {0, 1}, {0, 0}, {1, 0}, {1, 1} };
        for (size_t j = 0; j < inputs22.size(); j++) {
            std::vector<float> output = nn.feedforward(inputs22[j]);
            std::cout << "Output for [" << inputs22[j][0] << ", " << inputs22[j][1] << "]: " << output[0] << std::endl;
        }
        return 0;
    }//main

    The following code produces a "vector subscript out of range" error:

        float sigmoid(float x) {
            return 1 / (1 + exp(-x));
        }
        float sigmoid_derivative(float x) {
            float s = sigmoid(x);
            return s * (1 - s);
        }
        class NeuralNetwork {
        private:
            std::vector<std::vector<std::vector<float>>> weights;
            std::vector<std::vector<float>> layer_outputs;
        public:
            NeuralNetwork(std::vector<int> topology) {
                std::srand(std::time(0));
                for (int i = 0; i < topology.size() - 1; i++) {
                    weights.push_back(std::vector<std::vector<float>>(topology[i], std::vector<float>(topology[i + 1])));
                    for (int j = 0; j < topology[i]; j++) {
                        for (int k = 0; k < topology[i + 1]; k++) {
                            weights[i][j][k] = (std::rand() % 2000 - 1000) / 1000.0f;
                        }
                    }
                }
            }
            std::vector<float> feedforward(std::vector<float> input) {
                layer_outputs.clear();
                layer_outputs.push_back(input);
                for (int i = 0; i < weights.size(); i++) {
                    std::vector<float> output(weights[i][0].size(), 0);
                    for (int j = 0; j < weights[i][0].size(); j++) {
                        for (int k = 0; k < input.size(); k++) {
                            output[j] += input[k] * weights[i][k][j];
                        }
                        output[j] = sigmoid(output[j]);
                    }
                    layer_outputs.push_back(output);
                    input = output;
                }
                return input;
            }
            void train(std::vector<float> input, std::vector<float> targets, float learning_rate) {
                std::vector<float> outputs = feedforward(input);
                // backpropagation
                for (int i = weights.size() - 1; i >= 0; i--) {
                    std::vector<float> errors(targets.size(), 0);
                    for (int j = 0; j < targets.size(); j++) {
                        errors[j] = targets[j] - outputs[j];
                    }
                    if (i != weights.size() - 1) {
                        std::vector<float> prev_errors = errors;
                        errors.clear();
                        errors.resize(weights[i + 1].size(), 0);
                        for (int j = 0; j < weights[i + 1].size(); j++) {
                            for (int k = 0; k < weights[i + 1][j].size(); k++) {
                                errors[j] += prev_errors[k] * weights[i + 1][j][k];
                            }
                        }
                    }
                    for (int j = 0; j < weights[i].size(); j++) {
                        for (int k = 0; k < weights[i][j].size(); k++) {
                            float derivative = sigmoid_derivative(layer_outputs[i + 1][k]);
                            weights[i][j][k] += learning_rate * errors[k] * derivative * layer_outputs[i][j];
                        }
                    }
                    targets = layer_outputs[i];
                }
            }
        };
        int main() {
            NeuralNetwork nn({ 2, 4, 1 });
            std::vector<std::vector<float>> inputs = { {1, 1}, {1, 0} };
            std::vector<float> targets = { 0, 1 };
            for (int i = 0; i < 10000; i++) {
                for (size_t j = 0; j < inputs.size(); j++) {
                    nn.train(inputs[j], { targets[j] }, 0.1);
                }
            }
            std::vector<std::vector<float>> inputs22 = { {1, 0}, {1, 1} };
            for (size_t j = 0; j < inputs22.size(); j++) {
                std::vector<float> output = nn.feedforward(inputs22[j]);
                std::cout << "Output for [" << inputs22[j][0] << ", " << inputs22[j][1] << "]: " << output[0] << std::endl;
            }
            return 0;
        }

    Please fix the error!

    Diagnosis: at the end of each backpropagation step, train() reassigns targets = layer_outputs[i], so when the loop moves to the earlier layer, errors[j] = targets[j] - outputs[j] runs j up to the hidden-layer width (4) while outputs still holds only the single final output -- reading outputs[j] then goes out of range. The corrected train() in the two versions above computes the output error once, before the layer loop, and carries it backwards in next_errors instead of rebuilding it from targets.
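
    Concretely, the fix (as applied in the listings above) computes the error vector once and propagates it backwards; a minimal sketch of the repaired portion of train():

        // after feedforward(): compute the output-layer error once, outside the layer loop
        std::vector<float> errors(outputs.size(), 0);
        for (int i = 0; i < targets.size(); i++)
            errors[i] = targets[i] - outputs[i];
        for (int i = weights.size() - 1; i >= 0; i--) {
            // gradient-descent update for layer i
            for (int j = 0; j < weights[i].size(); j++)
                for (int k = 0; k < weights[i][j].size(); k++)
                    weights[i][j][k] += learning_rate * errors[k]
                        * sigmoid_derivative(layer_outputs[i + 1][k]) * layer_outputs[i][j];
            // carry the error back to the previous layer
            std::vector<float> next_errors(weights[i].size(), 0);
            for (int j = 0; j < weights[i].size(); j++)
                for (int k = 0; k < weights[i][j].size(); k++)
                    next_errors[j] += errors[k] * weights[i][j][k];
            errors = next_errors;
        }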

  • Original article: https://blog.csdn.net/aw344/article/details/133485771