• Implementing a hand-rolled neural network with C dynamic arrays (no C++ vector) 230901


    Implement in C with dynamic arrays (no C++ vector): input data inputs = { {1, 1}, {0,0},{1, 0},{0,1} }; target data targets = {0,0,1,1}; test data inputs22 = { {1, 0}, {1,1},{0,1} }. Build a neural network, e.g. NeuralNetwork nn({ 2, 4,3,1 }); the network then has four layers: 2 nodes in the input layer, 1 node in the output layer, 4 nodes in the first hidden layer, and 3 nodes in the second hidden layer. The network performs backpropagation, gradient descent with basic optimization, etc.
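    Before the listing, a quick orientation on the memory layout (a sketch; the comments describe the code below): the topology { 2, 4, 3, 1 } produces three jagged weight blocks, indexed as weights[layer][from][to]:

    /* Topology { 2, 4, 3, 1 } => three weight blocks:
         weights[0]: 2 x 4   (input    -> hidden 1)
         weights[1]: 4 x 3   (hidden 1 -> hidden 2)
         weights[2]: 3 x 1   (hidden 2 -> output)
       weights[i][j][k] is the weight from node j of layer i
       to node k of layer i + 1. */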

    #include <stdio.h>
    #include <stdlib.h>
    #include <math.h>
    #include <time.h>
    #include <stdbool.h>

    #define LEARNING_RATE 0.05f
    #define LOSS_PRINT_INTERVAL 10000

    // Sigmoid activation
    float sigmoid(float x) {
        return 1.0f / (1.0f + expf(-x));
    }

    // Derivative of the sigmoid, written in terms of its output a = sigmoid(x)
    float sigmoid_derivative(float a) {
        return a * (1.0f - a);
    }

    typedef struct {
        float*** weights;      // weights[i][j][k]: layer i node j -> layer i+1 node k
        int num_layers;
        int* layer_sizes;
        float** layer_outputs; // activations per layer; [0] aliases the current input
        float** deltas;        // deltas[i] belongs to layer i+1
    } NeuralNetwork;

    NeuralNetwork initialize_nn(int* topology, int num_layers) {
        NeuralNetwork nn;
        nn.num_layers = num_layers;
        nn.layer_sizes = topology;
        // Allocate memory for weights, layer outputs, and deltas
        nn.weights = (float***)malloc((num_layers - 1) * sizeof(float**));
        nn.layer_outputs = (float**)malloc(num_layers * sizeof(float*));
        nn.deltas = (float**)malloc((num_layers - 1) * sizeof(float*));
        srand(time(NULL));
        for (int i = 0; i < num_layers - 1; i++) {
            nn.weights[i] = (float**)malloc(topology[i] * sizeof(float*));
            nn.deltas[i] = (float*)malloc(topology[i + 1] * sizeof(float));
            // Activation buffers are allocated once here and reused by every
            // forward pass, so repeated calls to feedforward do not leak memory
            nn.layer_outputs[i + 1] = (float*)malloc(topology[i + 1] * sizeof(float));
            for (int j = 0; j < topology[i]; j++) {
                nn.weights[i][j] = (float*)malloc(topology[i + 1] * sizeof(float));
                for (int k = 0; k < topology[i + 1]; k++) {
                    nn.weights[i][j][k] = ((float)rand() / RAND_MAX) * 2.0f - 1.0f; // random weights between -1 and 1
                }
            }
        }
        return nn;
    }

    float* feedforward(NeuralNetwork* nn, float* input) {
        nn->layer_outputs[0] = input; // layer 0 aliases the caller's input buffer
        for (int i = 0; i < nn->num_layers - 1; i++) {
            for (int j = 0; j < nn->layer_sizes[i + 1]; j++) {
                float sum = 0;
                for (int k = 0; k < nn->layer_sizes[i]; k++) {
                    sum += nn->layer_outputs[i][k] * nn->weights[i][k][j];
                }
                nn->layer_outputs[i + 1][j] = sigmoid(sum);
            }
        }
        return nn->layer_outputs[nn->num_layers - 1];
    }

    // Print the current per-output error; does not modify the network
    void feedLoss(NeuralNetwork* nn, float* target) {
        int last = nn->num_layers - 1;
        for (int i = 0; i < nn->layer_sizes[last]; i++) {
            float error = target[i] - nn->layer_outputs[last][i];
            printf("_[i:%d_:%f] ", i, error);
        }
    }

    void backpropagate(NeuralNetwork* nn, float* target) {
        int last = nn->num_layers - 1;
        // Calculate output layer deltas
        for (int i = 0; i < nn->layer_sizes[last]; i++) {
            float error = target[i] - nn->layer_outputs[last][i];
            nn->deltas[last - 1][i] = error * sigmoid_derivative(nn->layer_outputs[last][i]);
        }
        // Calculate hidden layer deltas, propagating backwards
        for (int i = last - 1; i > 0; i--) {
            for (int j = 0; j < nn->layer_sizes[i]; j++) {
                float sum = 0;
                for (int k = 0; k < nn->layer_sizes[i + 1]; k++) {
                    sum += nn->weights[i][j][k] * nn->deltas[i][k];
                }
                nn->deltas[i - 1][j] = sum * sigmoid_derivative(nn->layer_outputs[i][j]);
            }
        }
        // Adjust weights (gradient descent)
        for (int i = 0; i < last; i++) {
            for (int j = 0; j < nn->layer_sizes[i]; j++) {
                for (int k = 0; k < nn->layer_sizes[i + 1]; k++) {
                    nn->weights[i][j][k] += LEARNING_RATE * nn->deltas[i][k] * nn->layer_outputs[i][j];
                }
            }
        }
    }

    void train(NeuralNetwork* nn, float inputs[][2], float* targets, int num_samples, int num_epochs) {
        bool print_loss = false;
        for (int epoch = 0; epoch < num_epochs; epoch++) {
            if (epoch % LOSS_PRINT_INTERVAL == 0) { print_loss = true; }
            for (int i = 0; i < num_samples; i++) {
                feedforward(nn, inputs[i]);
                if (print_loss) { feedLoss(nn, &targets[i]); }
                backpropagate(nn, &targets[i]);
            }
            if (print_loss) {
                printf("\n");
                print_loss = false;
            }
        }
    }

    int main() {
        int topology[] = { 2, 4, 3, 1 };
        NeuralNetwork nn = initialize_nn(topology, 4);
        float inputs[4][2] = { {1, 1}, {0, 0}, {1, 0}, {0, 1} };
        float targets[4] = { 0, 0, 1, 1 };
        train(&nn, inputs, targets, 4, 200000);
        float test_inputs[4][2] = { {0, 0}, {1, 0}, {1, 1}, {0, 1} };
        for (int i = 0; i < 4; i++) {
            float* output = feedforward(&nn, test_inputs[i]);
            printf("Output for [%f, %f]: %f\n", test_inputs[i][0], test_inputs[i][1], output[0]);
        }
        // Free memory; the pointer returned by feedforward is owned by the
        // network, so it is released here rather than inside the test loop
        for (int i = 0; i < nn.num_layers - 1; i++) {
            for (int j = 0; j < nn.layer_sizes[i]; j++) {
                free(nn.weights[i][j]);
            }
            free(nn.weights[i]);
            free(nn.deltas[i]);
            free(nn.layer_outputs[i + 1]);
        }
        free(nn.weights);
        free(nn.deltas);
        free(nn.layer_outputs);
        return 0;
    }
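    The listing should build as C99 with the math library linked; assuming the file is saved as nn.c (the name is illustrative):

    gcc -std=c99 -O2 nn.c -lm -o nn && ./nn

    The targets here are XOR, so after training the printed outputs should move toward 1 for {1, 0} and {0, 1} and toward 0 for {0, 0} and {1, 1}. Note that the network has no bias terms, so convergence from a given random seed is plausible but not guaranteed.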

    Implement in C with dynamic arrays (no C++ vector): inputs = { {1, 1}, {1, 0} }; target data targets = {0,1}; test data inputs22 = { {1, 0}, {1,1} }. Build a neural network, e.g. NeuralNetwork nn({ 2, 4, 1 }); the network then has three layers: 2 nodes in the input layer, 1 node in the output layer, and 4 nodes in the hidden layer. The network performs gradient descent, backpropagation, etc.
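    This second version folds the backward pass and the weight update into a single loop. For reference, the update it applies is the basic delta rule (a sketch; the symbols mirror the code below):

    /* For the weight from node j (layer i) to node k (layer i + 1):
         w[i][j][k] += learning_rate
                     * error_k                   // target - output, propagated back
                     * sigmoid_derivative(a_k)   // slope at node k's activation a_k
                     * a_j;                      // node j's output
       The error fed back to layer i is then accumulated as
         next_error_j = sum over k of error_k * w[i][j][k]. */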

    #include <stdio.h>
    #include <stdlib.h>
    #include <math.h>
    #include <time.h>

    float sigmoid(float x) {
        return 1.0f / (1.0f + expf(-x));
    }

    // Derivative of the sigmoid, written in terms of its output s = sigmoid(x)
    float sigmoid_derivative(float s) {
        return s * (1.0f - s);
    }

    typedef struct {
        float*** weights;
        int num_layers;
        int* layer_sizes;
        float** layer_outputs;
    } NeuralNetwork;

    NeuralNetwork initialize_nn(int* topology, int num_layers) {
        NeuralNetwork nn;
        nn.num_layers = num_layers;
        nn.layer_sizes = topology;
        nn.weights = (float***)malloc((num_layers - 1) * sizeof(float**));
        nn.layer_outputs = (float**)malloc(num_layers * sizeof(float*));
        srand(time(NULL));
        for (int i = 0; i < num_layers - 1; i++) {
            nn.weights[i] = (float**)malloc(topology[i] * sizeof(float*));
            // Activation buffers are allocated once and reused by feedforward
            nn.layer_outputs[i + 1] = (float*)malloc(topology[i + 1] * sizeof(float));
            for (int j = 0; j < topology[i]; j++) {
                nn.weights[i][j] = (float*)malloc(topology[i + 1] * sizeof(float));
                for (int k = 0; k < topology[i + 1]; k++) {
                    nn.weights[i][j][k] = (rand() % 2000 - 1000) / 1000.0f; // random weights in [-1, 1)
                }
            }
        }
        return nn;
    }

    float* feedforward(NeuralNetwork* nn, float* input) {
        nn->layer_outputs[0] = input; // layer 0 aliases the caller's input buffer
        for (int i = 0; i < nn->num_layers - 1; i++) {
            float* output = nn->layer_outputs[i + 1];
            for (int j = 0; j < nn->layer_sizes[i + 1]; j++) {
                output[j] = 0;
                for (int k = 0; k < nn->layer_sizes[i]; k++) {
                    output[j] += nn->layer_outputs[i][k] * nn->weights[i][k][j];
                }
                output[j] = sigmoid(output[j]);
            }
        }
        return nn->layer_outputs[nn->num_layers - 1];
    }

    void train(NeuralNetwork* nn, float* input, float* targets, float learning_rate) {
        float* outputs = feedforward(nn, input);
        // The error buffer is sized to the output layer, not the input
        int output_size = nn->layer_sizes[nn->num_layers - 1];
        float* errors = (float*)malloc(output_size * sizeof(float));
        for (int i = 0; i < output_size; i++) {
            errors[i] = targets[i] - outputs[i];
        }
        // Backpropagation combined with the weight update (delta rule)
        for (int i = nn->num_layers - 2; i >= 0; i--) {
            // calloc zero-initializes the accumulator for the next layer's errors
            float* next_errors = (float*)calloc(nn->layer_sizes[i], sizeof(float));
            for (int j = 0; j < nn->layer_sizes[i]; j++) {
                for (int k = 0; k < nn->layer_sizes[i + 1]; k++) {
                    float derivative = sigmoid_derivative(nn->layer_outputs[i + 1][k]);
                    nn->weights[i][j][k] += learning_rate * errors[k] * derivative * nn->layer_outputs[i][j];
                    next_errors[j] += errors[k] * nn->weights[i][j][k];
                }
            }
            free(errors);
            errors = next_errors;
        }
        free(errors);
    }

    int main() {
        int topology[] = { 2, 4, 1 };
        NeuralNetwork nn = initialize_nn(topology, 3);
        float inputs[2][2] = { {1, 1}, {1, 0} };
        float targets[] = { 0, 1 };
        int epochs = 10000;
        for (int i = 0; i < epochs; i++) {
            for (int j = 0; j < 2; j++) {
                train(&nn, inputs[j], &targets[j], 0.05f);
            }
        }
        float test_inputs[2][2] = { {1, 0}, {1, 1} };
        for (int i = 0; i < 2; i++) {
            float* output = feedforward(&nn, test_inputs[i]);
            printf("Output for [%f, %f]: %f\n", test_inputs[i][0], test_inputs[i][1], output[0]);
        }
        // Free memory
        for (int i = 0; i < nn.num_layers - 1; i++) {
            for (int j = 0; j < nn.layer_sizes[i]; j++) {
                free(nn.weights[i][j]);
            }
            free(nn.weights[i]);
            free(nn.layer_outputs[i + 1]);
        }
        free(nn.weights);
        free(nn.layer_outputs);
        return 0;
    }
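    To watch convergence instead of only the final outputs, a small helper can report the training loss. A minimal sketch (the mse helper is not part of the original listings; its name and shape are illustrative, matching the second listing's feedforward):

    /* Hypothetical helper: mean squared error over the training set */
    float mse(NeuralNetwork* nn, float inputs[][2], float* targets, int n) {
        float sum = 0;
        for (int i = 0; i < n; i++) {
            float* out = feedforward(nn, inputs[i]); // forward pass only
            float e = targets[i] - out[0];           // single output node
            sum += e * e;
        }
        return sum / n;
    }

    Printing mse(&nn, inputs, targets, 2) every few thousand epochs inside the training loop gives a quick view of whether the loss is still falling.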

  • Original article: https://blog.csdn.net/aw344/article/details/133545964