I am a coding rookie, and I have written a C++ program that uses the Eigen library to perform linear matrix operations (a small neural-network forward pass). Please help me improve the efficiency of the main loop.

`#include <iostream> #include <fstream> #include <Eigen/Dense> #include <time.h> using namespace std; int main() { // Weight coefficient matrix Eigen::MatrixXd wi_1,wi_2,wi_3,wi_4; wi_1.resize(100,2); wi_2.resize(100,100); wi_3.resize(100,100); wi_4.resize(5,100); wi_1.setOnes(); wi_2.setOnes(); wi_3.setOnes(); wi_4.setOnes(); // Bias vector Eigen::VectorXd bias_1,bias_2,bias_3,bias_4,Y; bias_1.resize(100); bias_2.resize(100); bias_3.resize(100); bias_4.resize(5); bias_1.setOnes(); bias_2.setOnes(); bias_3.setOnes(); bias_4.setOnes(); Eigen::Matrix<double,5,1> y_mean; Eigen::Matrix<double,5,1> y_scale; Eigen::Matrix<double,2,1> x_mean; Eigen::Matrix<double,2,1> x_scale; y_mean.setOnes(); y_scale.setOnes(); y_mean.setOnes(); x_scale.setOnes(); int n = 0; int layer; clock_t start,finish; double totaltime; start=clock(); while (n<10000) { Y.resize(2); layer = 0; Y << 0.185, 0.285;//inputx[1], x[0]; Y = (Y.array() - x_mean.array()) / x_scale.array(); //ANN forward while (layer < 4) { layer++; switch (layer) { case 1:{ Y = wi_1 * Y + bias_1; // Info << "ANN forward layer1" << endl; break; } case 2:{ Y = wi_2 * Y + bias_2; // Info << "ANN forward layer2" << endl; break; } case 3:{ Y = wi_3 * Y + bias_3; // Info << "ANN forward layer3" << endl; break; } case 4:{ Y = wi_4 * Y + bias_4; // Info << "ANN forward layer4" << endl; break; } default:{ cout<<"error"<<endl; break; } } //Relu activation function if (layer < 4) { for (int i = 0; i < Y.size(); i++) { Y(i) = ((Y(i) > 0) ? Y(i) : 0); } } } //inverse standardization Y = Y.array() * y_scale.array() + y_mean.array(); n++; } finish=clock(); totaltime=(double)(finish-start)/CLOCKS_PER_SEC*1000; cout<<"\n Running time is "<<totaltime<<"ms！"<<endl; } `