Inputting data in C++: vector
Below is a basic neural network written in C++, with forward propagation and gradient-descent backpropagation.
In this example, the network is trained on the four XOR samples ({1, 0}, {1, 1}, {0, 1}, {0, 0}) with target outputs 1, 0, 1, 0, and the same four inputs are then fed through the trained network to test its performance. You can adjust the network's parameters, such as the learning rate and the number of training iterations, to get better results.
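For reference, the update both versions below apply is the classic delta rule for sigmoid units: with learning rate η, target t_k, and output activation o_k, the output-layer error signal is δ_k = (t_k − o_k) · o_k · (1 − o_k), and the weight from unit j to unit k changes by Δw_jk = η · δ_k · o_j, where o_j is the activation feeding that weight. Hidden-layer error signals are obtained by pushing the δ values back through the same weights, which is exactly what the backpropagation loop in train() does.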
Second (improved) version:
#include <iostream>
#include <vector>
#include <cmath>    // exp, fabs
#include <cstdlib>  // rand, srand
#include <ctime>    // time

using namespace std;

float sigmoid(float x) {
    return 1 / (1 + exp(-x));
}

// Derivative of the sigmoid in terms of a pre-activation x.
float sigmoid_derivative(float x) {
    float s = sigmoid(x);
    return s * (1 - s);
}

class NeuralNetwork {
private:
    std::vector<std::vector<std::vector<float>>> weights;  // weights[layer][from][to]
    std::vector<std::vector<float>> layer_outputs;         // activations saved by the last feedforward

public:
    NeuralNetwork(std::vector<int> topology) {
        std::srand((unsigned)std::time(0));
        for (size_t i = 0; i + 1 < topology.size(); i++) {
            weights.push_back(std::vector<std::vector<float>>(topology[i], std::vector<float>(topology[i + 1])));
            for (int j = 0; j < topology[i]; j++) {
                for (int k = 0; k < topology[i + 1]; k++) {
                    // Random initial weight in [-1, 1)
                    weights[i][j][k] = (std::rand() % 2000 - 1000) / 1000.0f;
                }
            }
        }
    }

    std::vector<float> feedforward(std::vector<float> input) {
        // input.size() must equal the first topology entry,
        // or the indexing below goes out of range.
        layer_outputs.clear();
        layer_outputs.push_back(input);
        for (size_t i = 0; i < weights.size(); i++) {
            std::vector<float> output(weights[i][0].size(), 0);
            for (size_t j = 0; j < weights[i][0].size(); j++) {
                for (size_t k = 0; k < input.size(); k++) {
                    output[j] += input[k] * weights[i][k][j];
                }
                output[j] = sigmoid(output[j]);
            }
            layer_outputs.push_back(output);
            input = output;
        }
        return input;
    }//feedforward

    void train(std::vector<float> input, std::vector<float> targets, float learning_rate, bool whetherOutputFlag) {
        //--------------------------------------------- forward pass; optionally print the loss
        std::vector<float> outputs = feedforward(input);
        // Output-layer error; targets.size() must not exceed outputs.size()
        std::vector<float> errors(outputs.size(), 0);
        float sumA = 0.0;
        for (size_t i = 0; i < targets.size(); i++) {
            errors[i] = targets[i] - outputs[i];
            if (whetherOutputFlag) {
                sumA += fabs(errors[i]);
                cout << "i" << i << "[" << errors[i] << "]";
            }
        }
        if (whetherOutputFlag) std::cout << sumA;  // the flag decides whether the loss is printed
        //=============================================

        // Backpropagation
        for (int i = (int)weights.size() - 1; i >= 0; i--) {
            // Update weights. layer_outputs holds post-sigmoid activations,
            // so the derivative is s * (1 - s) on the stored value directly
            // (passing it through sigmoid_derivative would apply sigmoid twice).
            for (size_t j = 0; j < weights[i].size(); j++) {
                for (size_t k = 0; k < weights[i][j].size(); k++) {
                    float s = layer_outputs[i + 1][k];
                    float derivative = s * (1 - s);
                    weights[i][j][k] += learning_rate * errors[k] * derivative * layer_outputs[i][j];
                }
            }

            // Propagate the error to the previous layer
            std::vector<float> next_errors(weights[i].size(), 0);
            for (size_t j = 0; j < weights[i].size(); j++) {
                for (size_t k = 0; k < weights[i][j].size(); k++) {
                    next_errors[j] += errors[k] * weights[i][j][k];
                }
            }
            errors = next_errors;
        }
    }//train
};

int main() {
    NeuralNetwork nn({ 2, 8, 6, 1 });
    std::vector<std::vector<float>> inputs = { {1, 0}, {1, 1}, {0, 1}, {0, 0} };
    std::vector<float> targets = { 1, 0, 1, 0 };

    for (int i = 0; i < 50000; i++) {
        for (size_t j = 0; j < inputs.size(); j++) {
            nn.train(inputs[j], { targets[j] }, 0.05, false);  // false: train silently (rates 0.1 and 0.05 were also tried)
            if (0 == (i % 10000)) {
                nn.train(inputs[j], { targets[j] }, 0.001, true);  // true: print the current error
                std::cout << "] ";
                std::cout << std::endl;
            }
        }
        if (0 == (i % 10000)) std::cout << std::endl;
    }

    std::vector<std::vector<float>> inputs22 = { {0, 1}, {0, 0}, {1, 0}, {1, 1} };
    for (size_t j = 0; j < inputs22.size(); j++) {
        std::vector<float> output = nn.feedforward(inputs22[j]);
        std::cout << "Output for [" << inputs22[j][0] << ", " << inputs22[j][1] << "]: " << output[0] << std::endl;
    }

    return 0;
}//main
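Assuming the improved version is saved as nn.cpp (the file name here is just an example), it builds with any C++11 compiler, e.g.:

g++ -std=c++11 -O2 nn.cpp -o nn
./nn

After training, the printed outputs should move toward the XOR targets (close to 1 for {1, 0} and {0, 1}, close to 0 for {1, 1} and {0, 0}), though the exact numbers vary with the random initialization.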
First version (same code, with a smaller topology of { 2, 4, 1 }):
#include <iostream>
#include <vector>
#include <cmath>    // exp, fabs
#include <cstdlib>  // rand, srand
#include <ctime>    // time

using namespace std;

float sigmoid(float x) {
    return 1 / (1 + exp(-x));
}

// Derivative of the sigmoid in terms of a pre-activation x.
float sigmoid_derivative(float x) {
    float s = sigmoid(x);
    return s * (1 - s);
}

class NeuralNetwork {
private:
    std::vector<std::vector<std::vector<float>>> weights;  // weights[layer][from][to]
    std::vector<std::vector<float>> layer_outputs;         // activations saved by the last feedforward

public:
    NeuralNetwork(std::vector<int> topology) {
        std::srand((unsigned)std::time(0));
        for (size_t i = 0; i + 1 < topology.size(); i++) {
            weights.push_back(std::vector<std::vector<float>>(topology[i], std::vector<float>(topology[i + 1])));
            for (int j = 0; j < topology[i]; j++) {
                for (int k = 0; k < topology[i + 1]; k++) {
                    // Random initial weight in [-1, 1)
                    weights[i][j][k] = (std::rand() % 2000 - 1000) / 1000.0f;
                }
            }
        }
    }

    std::vector<float> feedforward(std::vector<float> input) {
        layer_outputs.clear();
        layer_outputs.push_back(input);
        for (size_t i = 0; i < weights.size(); i++) {
            std::vector<float> output(weights[i][0].size(), 0);
            for (size_t j = 0; j < weights[i][0].size(); j++) {
                for (size_t k = 0; k < input.size(); k++) {
                    output[j] += input[k] * weights[i][k][j];
                }
                output[j] = sigmoid(output[j]);
            }
            layer_outputs.push_back(output);
            input = output;
        }
        return input;
    }//feedforward

    void train(std::vector<float> input, std::vector<float> targets, float learning_rate, bool whetherOutputFlag) {
        //--------------------------------------------- forward pass; loss printed only when the flag is set
        std::vector<float> outputs = feedforward(input);
        // Output-layer error
        std::vector<float> errors(outputs.size(), 0);
        float sumA = 0.0;
        for (size_t i = 0; i < targets.size(); i++) {
            errors[i] = targets[i] - outputs[i];
            if (whetherOutputFlag) {
                sumA += fabs(targets[i] - outputs[i]);
                cout << "i" << i << "[" << errors[i] << "]";
            }
        }
        if (whetherOutputFlag) std::cout << sumA;
        //=============================================

        // Backpropagation
        for (int i = (int)weights.size() - 1; i >= 0; i--) {
            // Update weights; layer_outputs holds post-sigmoid activations,
            // so the derivative is s * (1 - s) on the stored value directly.
            for (size_t j = 0; j < weights[i].size(); j++) {
                for (size_t k = 0; k < weights[i][j].size(); k++) {
                    float s = layer_outputs[i + 1][k];
                    float derivative = s * (1 - s);
                    weights[i][j][k] += learning_rate * errors[k] * derivative * layer_outputs[i][j];
                }
            }

            // Propagate the error to the previous layer
            std::vector<float> next_errors(weights[i].size(), 0);
            for (size_t j = 0; j < weights[i].size(); j++) {
                for (size_t k = 0; k < weights[i][j].size(); k++) {
                    next_errors[j] += errors[k] * weights[i][j][k];
                }
            }
            errors = next_errors;
        }
    }//train
};

int main() {
    NeuralNetwork nn({ 2, 4, 1 });
    std::vector<std::vector<float>> inputs = { {1, 0}, {1, 1}, {0, 1}, {0, 0} };
    std::vector<float> targets = { 1, 0, 1, 0 };

    for (int i = 0; i < 50000; i++) {
        for (size_t j = 0; j < inputs.size(); j++) {
            nn.train(inputs[j], { targets[j] }, 0.05, false);  // false: train silently (rates 0.1 and 0.05 were also tried)
            if (0 == (i % 10000)) {
                nn.train(inputs[j], { targets[j] }, 0.001, true);  // true: print the current error
                std::cout << "] ";
                std::cout << std::endl;
            }
        }
        if (0 == (i % 10000)) std::cout << std::endl;
    }

    std::vector<std::vector<float>> inputs22 = { {0, 1}, {0, 0}, {1, 0}, {1, 1} };
    for (size_t j = 0; j < inputs22.size(); j++) {
        std::vector<float> output = nn.feedforward(inputs22[j]);
        std::cout << "Output for [" << inputs22[j][0] << ", " << inputs22[j][1] << "]: " << output[0] << std::endl;
    }

    return 0;
}//main
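As a side note, <random> gives better-behaved initial weights than the rand() % 2000 trick used in both versions. Below is a minimal sketch of such an initializer; init_weights is a hypothetical stand-alone helper, not part of either version above:

#include <random>
#include <vector>

// Hypothetical helper: uniform initial weights in [-1, 1) via <random>.
std::vector<std::vector<std::vector<float>>> init_weights(const std::vector<int>& topology) {
    std::mt19937 gen(std::random_device{}());
    std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
    std::vector<std::vector<std::vector<float>>> w;
    for (size_t i = 0; i + 1 < topology.size(); i++) {
        w.emplace_back(topology[i], std::vector<float>(topology[i + 1]));
        for (auto& row : w.back())
            for (auto& v : row)
                v = dist(gen);  // one independent draw per weight
    }
    return w;
}

The same generator could replace the srand/rand pair in the constructors above.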
Original question: the following code threw "vector subscript out of range" at runtime (the listing is truncated in the source; its structure matches the sigmoid/NeuralNetwork code shown in the versions above).
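In code of this shape, "vector subscript out of range" (an MSVC debug-runtime assertion) typically means one of the index assumptions was violated: feedforward was called with an input whose size differs from topology[0], or train received more targets than the network has output units. A minimal defensive check, as a sketch (placement and wording are just an illustration):

// At the top of feedforward: the input must match the first layer's width.
if (input.size() != weights[0].size()) {
    std::cerr << "feedforward: expected " << weights[0].size()
              << " inputs, got " << input.size() << std::endl;
    return {};
}

This needs only <iostream> (already included) and turns a hard crash into a diagnosable message.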