• A hand-rolled neural network in C, built on dynamic arrays (with weight-matrix visualization), 230902


    The architecture rests on one idea: a variable is memory, and a pointer holds an address into it:

    1. Record the network architecture in a struct, e.g. float*** ws as the pointer (an address) to the weight matrices;

    2. Use = (float*)malloc(Num * sizeof(float)) to allocate memory for the concrete values (a 1-D array);

    3. Use = (float**)malloc(Num * sizeof(float*)) to allocate an array of pointers, each slot holding the address of one of those 1-D arrays.

    …see the minimal sketch below, then the full programs.
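    A minimal sketch of that three-level pattern (the names alloc_ws, rows, and cols are illustrative only, not taken from the programs below):

    #include <cstdlib>

    // Three-level allocation: ws[i] is a rows[i] x cols[i] weight matrix.
    // Level 3: an array of float** (one entry per matrix);
    // level 2: per matrix, an array of float* (one entry per row);
    // level 1: per row, the actual float storage.
    float*** alloc_ws(int num_matrices, const int* rows, const int* cols) {
        float*** ws = (float***)malloc(num_matrices * sizeof(float**));
        for (int i = 0; i < num_matrices; ++i) {
            ws[i] = (float**)malloc(rows[i] * sizeof(float*));
            for (int j = 0; j < rows[i]; ++j) {
                ws[i][j] = (float*)malloc(cols[i] * sizeof(float));
            }
        }
        return ws; // the caller frees in reverse order: rows, then row arrays, then ws
    }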

    // test22动态数组22多维数组23三维随机数230101.cpp : This file contains the "main" function. Program execution begins and ends there.
    #include <iostream>
    #include <cstdlib>   // malloc, rand, srand
    #include <ctime>     // time
    #include <cstdio>    // printf
    using namespace std;

    typedef struct {
        float*** ws;           // pointer to the weight matrices (one 2-D matrix per layer gap)
        int num1;              // number of layers
        float** layer_outputs; // unused in this demo
    } NeuralN;

    // initialize the neural network's weight matrices with random values
    NeuralN init(int* t01, int num02) {
        NeuralN nn;
        nn.num1 = num02;
        nn.ws = (float***)malloc((num02 - 1) * sizeof(float**));
        srand(time(NULL));
        cout << " [num02:" << num02 << endl;
        for (int i = 0; i < (num02 - 1); ++i) {
            nn.ws[i] = (float**)malloc(t01[i] * sizeof(float*));          // allocate the array of row pointers
            for (int j = 0; j < t01[i]; ++j) {
                nn.ws[i][j] = (float*)malloc(t01[i + 1] * sizeof(float)); // allocate the actual values
                for (int k = 0; k < t01[i + 1]; k++) {
                    // the next statement uses the variable, i.e. uses its memory
                    nn.ws[i][j][k] = (float)rand() / RAND_MAX;
                }//for330k
            }//for220j
        }//for110i
        return nn;
    }//init

    int main()
    {
        int t001[] = { 2, 8, 7, 6, 1, 2, 1 };
        // count out the dynamic array's length with a range-for (for-each) loop
        int Len_t001 = 0; for (int ii : t001) { ++Len_t001; }
        int Numm = Len_t001;
        cout << "Numm:" << Numm << endl;
        NeuralN nn = init(t001, Numm);
        // print the 3-D tensor (i.e. the contents of the 3-D array)
        for (int i = 0; i < Numm - 1; ++i) {
            printf("_{ i%d_", i);
            for (int j = 0; j < t001[i + 1]; ++j) {
                printf("[j%d", j);
                for (int k = 0; k < t001[i]; ++k) {
                    printf("(k%d(%.1f,", k, nn.ws[i][k][j]);
                }//
                printf("_} \n");
            }//for220j
            printf("\n");
        }//for110i
        std::cout << "Hello World!\n";
    }//main
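    The demo above never frees what it allocates. A matching teardown (a sketch, assuming the NeuralN struct and the t01 topology array from the code above) mirrors the allocation loops in reverse:

    void free_ws(NeuralN* nn, const int* t01) {
        for (int i = 0; i < nn->num1 - 1; ++i) {   // one weight matrix per layer gap
            for (int j = 0; j < t01[i]; ++j) {
                free(nn->ws[i][j]);                // level 1: the float rows
            }
            free(nn->ws[i]);                       // level 2: the row-pointer array
        }
        free(nn->ws);                              // level 3: the matrix-pointer array
    }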

    Second version, 231001

    #include <stdio.h>
    #include <stdlib.h>
    #include <math.h>
    #include <time.h>
    #include <windows.h> // SetConsoleTextAttribute / GetStdHandle

    #define LEARNING_RATE 0.05

    // Sigmoid and its derivative
    float sigmoid(float x) { return 1 / (1 + exp(-x)); }
    float sigmoid_derivative(float x) {
        //float sig = sigmoid(x);
        float sig = 1.0 / (exp(-x) + 1);
        return sig * (1 - sig);
    }

    typedef struct {
        float*** weights;      // weights[i][j][k]: layer i neuron j -> layer i+1 neuron k
        int num_layers;
        int* layer_sizes;
        float** layer_outputs; // activations; layer_outputs[0] is the input
        float** deltas;        // deltas[i] belongs to layer i+1
    } NeuralNetwork;

    NeuralNetwork initialize_nn(int* topology, int num_layers) {
        NeuralNetwork nn;
        nn.num_layers = num_layers;
        nn.layer_sizes = topology;
        // Allocate memory for weights, layer outputs, and deltas
        nn.weights = (float***)malloc((num_layers - 1) * sizeof(float**));
        nn.layer_outputs = (float**)malloc(num_layers * sizeof(float*));
        nn.deltas = (float**)malloc((num_layers - 1) * sizeof(float*));
        srand(time(NULL));
        for (int i = 0; i < num_layers - 1; i++) {
            nn.weights[i] = (float**)malloc(topology[i] * sizeof(float*));
            nn.deltas[i] = (float*)malloc(topology[i + 1] * sizeof(float));
            for (int j = 0; j < topology[i]; j++) {
                nn.weights[i][j] = (float*)malloc(topology[i + 1] * sizeof(float));
                for (int k = 0; k < topology[i + 1]; k++) {
                    nn.weights[i][j][k] = ((float)rand() / RAND_MAX) * 2.0f - 1.0f; // random weights between -1 and 1
                }
            }//for220j
        }//for110i
        return nn;
    }//initialize_nn

    float* feedforward(NeuralNetwork* nn, float* input) {
        nn->layer_outputs[0] = input;
        for (int i = 0; i < nn->num_layers - 1; i++) {
            // note: re-allocated on every call and never freed here; this demo leaks
            nn->layer_outputs[i + 1] = (float*)malloc(nn->layer_sizes[i + 1] * sizeof(float));
            for (int j = 0; j < nn->layer_sizes[i + 1]; j++) {
                nn->layer_outputs[i + 1][j] = 0;
                for (int k = 0; k < nn->layer_sizes[i]; k++) {
                    nn->layer_outputs[i + 1][j] += nn->layer_outputs[i][k] * nn->weights[i][k][j];
                }//for330k
                nn->layer_outputs[i + 1][j] = sigmoid(nn->layer_outputs[i + 1][j]);
            }//for220j
        }//for110i
        return nn->layer_outputs[nn->num_layers - 1];
    }//feedforward

    void feedLoss(NeuralNetwork* nn, float* target) {
        // print the weight matrices: positive entries in blue, negative in red
        for (int i = 0; i < nn->num_layers - 1; i++) {
            nn->layer_outputs[i + 1] = (float*)malloc(nn->layer_sizes[i + 1] * sizeof(float)); // leaks, as in feedforward
            for (int j = 0; j < nn->layer_sizes[i + 1]; j++) {
                nn->layer_outputs[i + 1][j] = 0;
                for (int k = 0; k < nn->layer_sizes[i]; k++) {
                    //nn->layer_outputs[i + 1][j] += nn->layer_outputs[i][k] * nn->weights[i][k][j];
                    if (0 < nn->weights[i][k][j]) {
                        SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), FOREGROUND_BLUE);
                    }
                    else {
                        SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), FOREGROUND_RED);
                    }
                    printf("(%.4f,", nn->weights[i][k][j]);
                }
                printf("] \n");
                // the accumulation above is commented out, so this ends up sigmoid(0) = 0.5
                nn->layer_outputs[i + 1][j] = sigmoid(nn->layer_outputs[i + 1][j]);
            }//for220j
            SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE);
            printf("};\n");
        }//for110i
        printf("_]};\n \n");
        // print the output-layer error (always +/-0.5 here, because the activations were overwritten above)
        int Last01 = nn->num_layers - 1;
        for (int i = 0; i < nn->layer_sizes[Last01]; ++i) {
            float error = target[i] - nn->layer_outputs[Last01][i];
            printf("[i%d:%f] ", i, error);
        }
    }//feedLoss

    void backpropagate(NeuralNetwork* nn, float* target) {
        int Last01 = nn->num_layers - 1;
        // Calculate output layer deltas
        for (int i = 0; i < nn->layer_sizes[Last01]; i++) {
            float error = target[i] - nn->layer_outputs[Last01][i];
            nn->deltas[Last01 - 1][i] = error * sigmoid_derivative(nn->layer_outputs[Last01][i]);
        }
        // Calculate hidden layer deltas
        for (int i = Last01 - 1; i > 0; i--) {
            for (int j = 0; j < nn->layer_sizes[i]; j++) {
                float sum = 0;
                for (int k = 0; k < nn->layer_sizes[i + 1]; k++) {
                    sum += nn->weights[i][j][k] * nn->deltas[i][k];
                }
                nn->deltas[i - 1][j] = sum * sigmoid_derivative(nn->layer_outputs[i][j]);
            }
        }
        // Adjust weights
        for (int i = 0; i < Last01; i++) {
            for (int j = 0; j < nn->layer_sizes[i]; j++) {
                for (int k = 0; k < nn->layer_sizes[i + 1]; k++) {
                    nn->weights[i][j][k] += LEARNING_RATE * nn->deltas[i][k] * nn->layer_outputs[i][j];
                }
            }
        }
    }//backpropagate

    void train(NeuralNetwork* nn, float inputs[][2], float* targets, int num_samples, int num_epochs) {
        bool whetherOutputLoss = false;
    #define Num10000 100000 // sampling interval for the weight-matrix printout
        for (int epoch = 0; epoch < num_epochs; epoch++) {
            if (0 == (epoch % Num10000)) { whetherOutputLoss = true; }
            for (int i = 0; i < num_samples; i++) {
                feedforward(nn, inputs[i]);
                if (whetherOutputLoss) { feedLoss(nn, &targets[i]); } // only print when the sampling epoch comes around
                backpropagate(nn, &targets[i]);
            }
            if (whetherOutputLoss) {
                printf("\n");
                whetherOutputLoss = false;
            }
        }//for epoch
    }//train

    int main() {
    #define numLayer5 4 // number of entries in topology[]
        int topology[] = { 2, /*128,*/ /*64,*/ /*32,*/ /*16,*/ /*8,*/ 3, 2, 1 };
        NeuralNetwork nn = initialize_nn(topology, numLayer5);
    #define Num4 4
        float inputs[Num4][2] = { {1, 1}, {0, 0}, {1, 0}, {0, 1} };
        float targets[Num4] = { 0, 0, 1, 1 }; // XOR
    #define Num200000 200000
        train(&nn, inputs, targets, Num4, Num200000);
        float test_inputs[Num4][2] = { {0,0}, {1, 0}, {1, 1}, {0, 1} };
        for (int i = 0; i < Num4; i++) {
            float* output = feedforward(&nn, test_inputs[i]);
            printf("Output for [%f, %f]: %f\n", test_inputs[i][0], test_inputs[i][1], output[0]);
            free(output); // frees the last layer's buffer allocated by feedforward
        }
        // Free memory
        for (int i = 0; i < nn.num_layers - 1; i++) {
            for (int j = 0; j < nn.layer_sizes[i]; j++) {
                free(nn.weights[i][j]);
            }
            free(nn.weights[i]);
            free(nn.deltas[i]);
        }
        free(nn.weights);
        free(nn.deltas);
        free(nn.layer_outputs);
        return 0;
    }//main
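    For reference, backpropagate implements the classic delta rule, written here in LaTeX with \sigma the sigmoid, \eta = LEARNING_RATE, and superscripts indexing layers (L is the output layer). Note that the code passes the already-activated outputs into sigmoid_derivative, which applies \sigma once more; the network still converges, as the output further below shows.

    \delta^{(L)}_i = \big(t_i - o^{(L)}_i\big)\,\sigma'\!\big(o^{(L)}_i\big), \qquad
    \delta^{(l)}_j = \Big(\sum_k w^{(l)}_{jk}\,\delta^{(l+1)}_k\Big)\,\sigma'\!\big(o^{(l)}_j\big), \qquad
    w^{(l)}_{jk} \leftarrow w^{(l)}_{jk} + \eta\,\delta^{(l+1)}_k\,o^{(l)}_j,
    \quad \text{where } \sigma'(x) = \sigma(x)\,\big(1 - \sigma(x)\big).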

    First version, 230901

    Essentially the same program as the second version above; apart from a few commented-out scratch lines, the only difference is the weight-matrix sampling interval in train(): #define Num10000 50000, i.e. the matrices are printed every 50000 epochs instead of every 100000.
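    Program output (the weight matrices sampled during training, positive entries shown in blue and negative in red on the console, followed by the four test predictions):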

    (-0.1291,(0.7803,]
    (-0.6326,(0.5078,]
    };
    (-0.1854,(-0.5262,(0.8464,]
    (0.4913,(0.0774,(0.1000,]
    };
    (0.7582,(-0.7756,]
    };
    _]};
    [i0:-0.500000] (0.5459,(0.0427,]
    (-0.1289,(0.7804,]
    (-0.6327,(0.5076,]
    };
    (-0.1859,(-0.5268,(0.8458,]
    (0.4919,(0.0780,(0.1005,]
    };
    (0.7553,(-0.7786,]
    };
    _]};
    [i0:-0.500000] (0.5459,(0.0427,]
    (-0.1289,(0.7804,]
    (-0.6327,(0.5076,]
    };
    (-0.1864,(-0.5273,(0.8453,]
    (0.4924,(0.0785,(0.1011,]
    };
    (0.7524,(-0.7815,]
    };
    _]};
    [i0:0.500000] (0.5458,(0.0427,]
    (-0.1291,(0.7804,]
    (-0.6326,(0.5076,]
    };
    (-0.1859,(-0.5268,(0.8458,]
    (0.4919,(0.0780,(0.1005,]
    };
    (0.7553,(-0.7786,]
    };
    _]};
    [i0:0.500000]
    (0.5679,(-0.3593,]
    (-0.8321,(1.1025,]
    (-0.5647,(0.1703,]
    };
    (-0.5384,(-1.1479,(0.8445,]
    (0.2658,(0.1725,(-0.1653,]
    };
    (1.1137,(-0.7693,]
    };
    _]};
    [i0:-0.500000] (0.5682,(-0.3590,]
    (-0.8317,(1.1029,]
    (-0.5651,(0.1699,]
    };
    (-0.5391,(-1.1487,(0.8437,]
    (0.2663,(0.1730,(-0.1647,]
    };
    (1.1107,(-0.7722,]
    };
    _]};
    [i0:-0.500000] (0.5682,(-0.3590,]
    (-0.8317,(1.1029,]
    (-0.5651,(0.1699,]
    };
    (-0.5399,(-1.1495,(0.8429,]
    (0.2668,(0.1735,(-0.1642,]
    };
    (1.1078,(-0.7751,]
    };
    _]};
    [i0:0.500000] (0.5679,(-0.3590,]
    (-0.8321,(1.1029,]
    (-0.5647,(0.1699,]
    };
    (-0.5391,(-1.1487,(0.8437,]
    (0.2663,(0.1730,(-0.1647,]
    };
    (1.1107,(-0.7722,]
    };
    _]};
    [i0:0.500000]
    (6.5241,(-6.2462,]
    (-6.5361,(6.8406,]
    (0.2226,(0.6834,]
    };
    (-3.2613,(-3.6355,(2.0290,]
    (0.8144,(0.6639,(-0.7503,]
    };
    (4.2499,(-0.6959,]
    };
    _]};
    [i0:-0.500000] (6.5288,(-6.2415,]
    (-6.5309,(6.8458,]
    (0.2196,(0.6804,]
    };
    (-3.2642,(-3.6385,(2.0261,]
    (0.8149,(0.6644,(-0.7498,]
    };
    (4.2469,(-0.6989,]
    };
    _]};
    [i0:-0.500000] (6.5288,(-6.2415,]
    (-6.5309,(6.8458,]
    (0.2196,(0.6804,]
    };
    (-3.2671,(-3.6414,(2.0231,]
    (0.8154,(0.6649,(-0.7494,]
    };
    (4.2440,(-0.7018,]
    };
    _]};
    [i0:0.500000] (6.5241,(-6.2415,]
    (-6.5361,(6.8458,]
    (0.2226,(0.6804,]
    };
    (-3.2642,(-3.6385,(2.0260,]
    (0.8149,(0.6644,(-0.7498,]
    };
    (4.2469,(-0.6989,]
    };
    _]};
    [i0:0.500000]
    (114.9971,(-113.4876,]
    (-112.8603,(114.3747,]
    (0.6990,(0.7116,]
    };
    (-31.6319,(-31.7725,(45.2379,]
    (11.9645,(11.6226,(-25.5372,]
    };
    (22.2722,(-15.6809,]
    };
    _]};
    [i0:-0.500000] (115.2866,(-113.1981,]
    (-112.5715,(114.6635,]
    (0.2422,(0.2548,]
    };
    (-31.6473,(-31.7879,(45.2226,]
    (11.9753,(11.6335,(-25.5264,]
    };
    (22.2693,(-15.6838,]
    };
    _]};
    [i0:-0.500000] (115.2866,(-113.1981,]
    (-112.5715,(114.6635,]
    (0.2422,(0.2548,]
    };
    (-31.6626,(-31.8033,(45.2072,]
    (11.9861,(11.6443,(-25.5155,]
    };
    (22.2663,(-15.6867,]
    };
    _]};
    [i0:0.500000] (114.9968,(-113.1981,]
    (-112.8605,(114.6635,]
    (0.6987,(0.2548,]
    };
    (-31.6473,(-31.7879,(45.2226,]
    (11.9753,(11.6335,(-25.5264,]
    };
    (22.2693,(-15.6838,]
    };
    _]};
    [i0:0.500000]
    Output for [0.000000, 0.000000]: 0.005787
    Output for [1.000000, 0.000000]: 0.993864
    Output for [1.000000, 1.000000]: 0.011066
    Output for [0.000000, 1.000000]: 0.993822
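    The learned mapping matches the XOR targets: outputs near 0 for {0, 0} and {1, 1}, and near 1 for {1, 0} and {0, 1}.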

  • Original post: https://blog.csdn.net/aw344/article/details/133579523