Using C dynamic arrays (no C++ vector), implement the following: input data inputs = { {1, 1}, {0,0}, {1, 0}, {0,1} }; target data targets = {0, 0, 1, 1}; test data inputs22 = { {1, 0}, {1,1}, {0,1} }. Build a neural network such that, for example, NeuralNetwork nn({ 2, 4, 3, 1 }) creates a four-layer network with 2 input nodes, 1 output node, 4 nodes in the first hidden layer, and 3 nodes in the second; the network should support backpropagation, gradient descent, and similar basic optimization.
```c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <time.h>

#define LEARNING_RATE 0.05f
// Sigmoid activation.
float sigmoid(float x) {
    return 1.0f / (1.0f + expf(-x));
}

// Derivative of the sigmoid expressed in terms of the already-activated
// output y = sigmoid(z): sigmoid'(z) = y * (1 - y). Every call site below
// passes post-activation values, so the function must not apply sigmoid again.
float sigmoid_derivative(float y) {
    return y * (1.0f - y);
}
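
// Optional sanity check, not part of the original program: compare the
// closed-form derivative against a central finite difference. The helper
// name check_sigmoid_derivative is ours, added purely for illustration.
void check_sigmoid_derivative(float z) {
    float h = 1e-3f;
    float numeric = (sigmoid(z + h) - sigmoid(z - h)) / (2.0f * h);
    float analytic = sigmoid_derivative(sigmoid(z)); // pass the activated value
    printf("z=%f numeric=%f analytic=%f\n", z, numeric, analytic);
}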

// weights[i][j][k] is the weight from node j of layer i to node k of layer i+1.
typedef struct {
    float*** weights;      // (num_layers - 1) weight matrices
    int num_layers;        // total layer count, including input and output
    int* layer_sizes;      // node count per layer
    float** layer_outputs; // post-activation output of every layer
    float** deltas;        // deltas[i] holds the error terms of layer i + 1
} NeuralNetwork;

NeuralNetwork initialize_nn(int* topology, int num_layers) {
    NeuralNetwork nn;
    nn.num_layers = num_layers;
    nn.layer_sizes = topology;

    // Allocate memory for weights, layer outputs, and deltas.
    nn.weights = (float***)malloc((num_layers - 1) * sizeof(float**));
    nn.layer_outputs = (float**)malloc(num_layers * sizeof(float*));
    nn.deltas = (float**)malloc((num_layers - 1) * sizeof(float*));

    srand((unsigned)time(NULL));
    for (int i = 0; i < num_layers - 1; i++) {
        nn.weights[i] = (float**)malloc(topology[i] * sizeof(float*));
        nn.deltas[i] = (float*)malloc(topology[i + 1] * sizeof(float));
        // Each layer's output buffer is allocated once here, so feedforward
        // can be called repeatedly without leaking memory.
        nn.layer_outputs[i + 1] = (float*)malloc(topology[i + 1] * sizeof(float));
        for (int j = 0; j < topology[i]; j++) {
            nn.weights[i][j] = (float*)malloc(topology[i + 1] * sizeof(float));
            for (int k = 0; k < topology[i + 1]; k++) {
                // Random weights between -1 and 1.
                nn.weights[i][j][k] = ((float)rand() / RAND_MAX) * 2.0f - 1.0f;
            }
        }
    }
    return nn;
}
float* feedforward(NeuralNetwork* nn, float* input) {
    nn->layer_outputs[0] = input; // layer 0 simply aliases the caller's input
    for (int i = 0; i < nn->num_layers - 1; i++) {
        for (int j = 0; j < nn->layer_sizes[i + 1]; j++) {
            float sum = 0.0f;
            for (int k = 0; k < nn->layer_sizes[i]; k++) {
                sum += nn->layer_outputs[i][k] * nn->weights[i][k][j];
            }
            nn->layer_outputs[i + 1][j] = sigmoid(sum);
        }
    }
    return nn->layer_outputs[nn->num_layers - 1];
}
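// Shape walkthrough (added note): with topology { 2, 4, 3, 1 } a single
// forward pass computes
//   layer_outputs[1] = sigmoid(input            x weights[0]) -> 4 values
//   layer_outputs[2] = sigmoid(layer_outputs[1] x weights[1]) -> 3 values
//   layer_outputs[3] = sigmoid(layer_outputs[2] x weights[2]) -> 1 value (returned)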

// Print the per-output error of the most recent forward pass; used for
// occasional progress reporting during training.
void feedLoss(NeuralNetwork* nn, float* target) {
    int last = nn->num_layers - 1;
    for (int i = 0; i < nn->layer_sizes[last]; i++) {
        float error = target[i] - nn->layer_outputs[last][i];
        printf("_[i:%d_:%f] ", i, error);
    }
}
void backpropagate(NeuralNetwork* nn, float* target) {
    int last = nn->num_layers - 1;

    // Output-layer deltas: error times the sigmoid derivative of the output.
    for (int i = 0; i < nn->layer_sizes[last]; i++) {
        float error = target[i] - nn->layer_outputs[last][i];
        nn->deltas[last - 1][i] = error * sigmoid_derivative(nn->layer_outputs[last][i]);
    }

    // Hidden-layer deltas, propagated backwards layer by layer.
    for (int i = last - 1; i > 0; i--) {
        for (int j = 0; j < nn->layer_sizes[i]; j++) {
            float sum = 0.0f;
            for (int k = 0; k < nn->layer_sizes[i + 1]; k++) {
                sum += nn->weights[i][j][k] * nn->deltas[i][k];
            }
            nn->deltas[i - 1][j] = sum * sigmoid_derivative(nn->layer_outputs[i][j]);
        }
    }

    // Gradient-descent update: w += learning_rate * delta * activation.
    for (int i = 0; i < last; i++) {
        for (int j = 0; j < nn->layer_sizes[i]; j++) {
            for (int k = 0; k < nn->layer_sizes[i + 1]; k++) {
                nn->weights[i][j][k] += LEARNING_RATE * nn->deltas[i][k] * nn->layer_outputs[i][j];
            }
        }
    }
}
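
// Optional helper, not in the original program: mean squared error of the
// most recent forward pass, handy for logging a proper loss curve instead of
// the raw per-output errors printed by feedLoss.
float mse(NeuralNetwork* nn, float* target) {
    int last = nn->num_layers - 1;
    float sum = 0.0f;
    for (int i = 0; i < nn->layer_sizes[last]; i++) {
        float e = target[i] - nn->layer_outputs[last][i];
        sum += e * e;
    }
    return sum / nn->layer_sizes[last];
}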

#define REPORT_EVERY 10000

void train(NeuralNetwork* nn, float inputs[][2], float* targets, int num_samples, int num_epochs) {
    bool whetherOutputLoss = false;
    for (int epoch = 0; epoch < num_epochs; epoch++) {
        if (epoch % REPORT_EVERY == 0) { whetherOutputLoss = true; }
        for (int i = 0; i < num_samples; i++) {
            feedforward(nn, inputs[i]);
            if (whetherOutputLoss) { feedLoss(nn, &targets[i]); }
            backpropagate(nn, &targets[i]);
        }
        if (whetherOutputLoss) {
            printf("\n");
            whetherOutputLoss = false;
        }
    }
}

int main() {
    int topology[] = { 2, 4, 3, 1 };
    NeuralNetwork nn = initialize_nn(topology, 4);

    float inputs[4][2] = { {1, 1}, {0, 0}, {1, 0}, {0, 1} };
    float targets[4] = { 0, 0, 1, 1 };

    train(&nn, inputs, targets, 4, 200000);

    #define NUM_TESTS 4
    float test_inputs[NUM_TESTS][2] = { {0, 0}, {1, 0}, {1, 1}, {0, 1} };
    for (int i = 0; i < NUM_TESTS; i++) {
        float* output = feedforward(&nn, test_inputs[i]);
        printf("Output for [%f, %f]: %f\n", test_inputs[i][0], test_inputs[i][1], output[0]);
        // Do not free(output): it points into nn.layer_outputs, freed below.
    }

    // Free memory.
    for (int i = 0; i < nn.num_layers - 1; i++) {
        for (int j = 0; j < nn.layer_sizes[i]; j++) {
            free(nn.weights[i][j]);
        }
        free(nn.weights[i]);
        free(nn.deltas[i]);
        free(nn.layer_outputs[i + 1]); // layer_outputs[0] aliases a stack array
    }
    free(nn.weights);
    free(nn.deltas);
    free(nn.layer_outputs);

    return 0;
}
```
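A note on building and scope: assuming the listing is saved as nn.c, something like `gcc -O2 nn.c -o nn -lm` should compile it (`-lm` links the math library for expf). Also note that the network has no bias terms; every node computes a pure weighted sum of its inputs, so adding a per-node bias would be a natural extension.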
Using C dynamic arrays (no C++ vector), implement the following: inputs = { {1, 1}, {1, 0} }; target data targets = {0, 1}; test data inputs22 = { {1, 0}, {1,1} }. Build a neural network such that, for example, NeuralNetwork nn({ 2, 4, 1 }) creates a three-layer network with 2 input nodes, 1 output node, and 4 hidden nodes; the network should include gradient descent, backpropagation, and so on.
```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
float sigmoid(float x) {
    return 1.0f / (1.0f + expf(-x));
}

// As in the first listing: expects the already-activated output
// y = sigmoid(z) and returns y * (1 - y).
float sigmoid_derivative(float y) {
    return y * (1.0f - y);
}

typedef struct {
    float*** weights;      // weights[i][j][k]: layer i node j -> layer i+1 node k
    int num_layers;
    int* layer_sizes;
    float** layer_outputs; // post-activation outputs, one buffer per layer
} NeuralNetwork;

NeuralNetwork initialize_nn(int* topology, int num_layers) {
    NeuralNetwork nn;
    nn.num_layers = num_layers;
    nn.layer_sizes = topology;

    nn.weights = (float***)malloc((num_layers - 1) * sizeof(float**));
    nn.layer_outputs = (float**)malloc(num_layers * sizeof(float*));

    srand((unsigned)time(NULL));
    for (int i = 0; i < num_layers - 1; i++) {
        nn.weights[i] = (float**)malloc(topology[i] * sizeof(float*));
        // Allocate each layer's output buffer once, so repeated feedforward
        // calls do not leak memory.
        nn.layer_outputs[i + 1] = (float*)malloc(topology[i + 1] * sizeof(float));
        for (int j = 0; j < topology[i]; j++) {
            nn.weights[i][j] = (float*)malloc(topology[i + 1] * sizeof(float));
            for (int k = 0; k < topology[i + 1]; k++) {
                // Uniform random weights in [-1, 1).
                nn.weights[i][j][k] = (rand() % 2000 - 1000) / 1000.0f;
            }
        }
    }
    return nn;
}
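
// Added note: srand is seeded inside initialize_nn, so two networks created
// within the same second start from identical weights. Seeding once at the
// top of main and dropping the call above is a common alternative.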

float* feedforward(NeuralNetwork* nn, float* input) {
    nn->layer_outputs[0] = input; // layer 0 aliases the caller's input
    for (int i = 0; i < nn->num_layers - 1; i++) {
        for (int j = 0; j < nn->layer_sizes[i + 1]; j++) {
            float sum = 0.0f;
            for (int k = 0; k < nn->layer_sizes[i]; k++) {
                sum += nn->layer_outputs[i][k] * nn->weights[i][k][j];
            }
            nn->layer_outputs[i + 1][j] = sigmoid(sum);
        }
    }
    return nn->layer_outputs[nn->num_layers - 1];
}

void train(NeuralNetwork* nn, float* input, float* targets, float learning_rate) {
    float* outputs = feedforward(nn, input);

    // The error buffer is sized to the OUTPUT layer, not the input.
    int out_size = nn->layer_sizes[nn->num_layers - 1];
    float* errors = (float*)malloc(out_size * sizeof(float));
    for (int i = 0; i < out_size; i++) {
        errors[i] = targets[i] - outputs[i];
    }

    // Backpropagation, one layer at a time.
    for (int i = nn->num_layers - 2; i >= 0; i--) {
        // calloc, because next_errors is accumulated with +=.
        float* next_errors = (float*)calloc(nn->layer_sizes[i], sizeof(float));

        for (int j = 0; j < nn->layer_sizes[i]; j++) {
            for (int k = 0; k < nn->layer_sizes[i + 1]; k++) {
                float derivative = sigmoid_derivative(nn->layer_outputs[i + 1][k]);
                // Propagate the error through the weight before updating it,
                // and include the derivative so the propagated value is the delta.
                next_errors[j] += errors[k] * derivative * nn->weights[i][j][k];
                nn->weights[i][j][k] += learning_rate * errors[k] * derivative * nn->layer_outputs[i][j];
            }
        }

        free(errors);
        errors = next_errors;
    }

    free(errors);
}
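
// Optional monitoring helper, not in the original: absolute error of the
// network on one sample, so main could occasionally print progress. Assumes
// a single output node, as in the main below.
float sample_error(NeuralNetwork* nn, float* input, float target) {
    float* out = feedforward(nn, input);
    return fabsf(target - out[0]);
}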

int main() {
    int topology[] = { 2, 4, 1 };
    NeuralNetwork nn = initialize_nn(topology, 3);

    float inputs[2][2] = { {1, 1}, {1, 0} };
    float targets[] = { 0, 1 };

    int epochs = 10000;
    for (int i = 0; i < epochs; i++) {
        for (int j = 0; j < 2; j++) {
            train(&nn, inputs[j], &targets[j], 0.05f);
        }
    }

    float test_inputs[2][2] = { {1, 0}, {1, 1} };
    for (int i = 0; i < 2; i++) {
        float* output = feedforward(&nn, test_inputs[i]);
        printf("Output for [%f, %f]: %f\n", test_inputs[i][0], test_inputs[i][1], output[0]);
    }

    // Free memory.
    for (int i = 0; i < nn.num_layers - 1; i++) {
        for (int j = 0; j < nn.layer_sizes[i]; j++) {
            free(nn.weights[i][j]);
        }
        free(nn.weights[i]);
        free(nn.layer_outputs[i + 1]); // layer_outputs[0] aliases a stack array
    }
    free(nn.weights);
    free(nn.layer_outputs);

    return 0;
}
```