Data Mining: The BP Algorithm for Artificial Neural Networks
/*
The idea behind the BP (backpropagation) algorithm for artificial neural networks: a neural network is usually organised in 3 layers (more are possible): an input layer, a hidden layer, and an output layer. It fits a nonlinear function through supervised learning. Suppose the input layer has 3 neurons, the hidden layer 5, and the output layer 1. "Supervised" means each training sample supplies both the input and the desired output. The network can then be viewed as mapping a 3-dimensional input vector to a 1-dimensional output. Every neuron is fully connected to the neurons of the adjacent layers; neurons within the same layer are not connected.
For example, sample A has input {1,2,3} and output {1}, sample B has input {2,1,3} and output {2}, and so on for the other samples. Forward propagation first computes each neuron's actual output; the squared error between the expected value (the output given by the sample) and the actual output is then evaluated. If the error is very small, no adjustment is needed; otherwise the connection weights are adjusted by backpropagation (the adjustment involves some differential calculus). Training ends once the fit is good enough. Given a new input sample whose output is unknown, the trained network can then predict that output.
*/
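To make the update rule concrete before the full listing, here is a minimal self-contained sketch (not part of the original post) of one training step for a single sigmoid neuron with two inputs: the forward pass with the threshold subtracted, the output differential Ok*(1-Ok)*(Ok-dk), and the gradient-descent step. All numeric values and the names w, x, d, lr are illustrative assumptions.

#include <cmath>
#include <cstdio>

/* One gradient-descent step for a single sigmoid neuron with two inputs. */
int main() {
    double w[2] = {0.3, -0.1}, bias = 0.2, lr = 0.5;  /* illustrative weights, threshold, learning rate */
    double x[2] = {0.5, 0.8}, d = 0.7;                /* one sample: inputs and desired output */
    double net = w[0]*x[0] + w[1]*x[1] - bias;        /* same convention as the article: threshold subtracted */
    double Ok = 1.0 / (1.0 + std::exp(-net));         /* sigmoid activation */
    double delta = Ok * (1 - Ok) * (Ok - d);          /* Ok*(1-Ok)*(Ok-dk) */
    for (int k = 0; k < 2; k++)
        w[k] -= lr * delta * x[k];                    /* weight update */
    bias += lr * delta;                               /* threshold update: its input is effectively -1 */
    std::printf("output %.4f, delta %.4f\n", Ok, delta);
    return 0;
}

Running one such step nudges the output toward d; the full program below applies exactly this rule layer by layer, with the hidden-layer differentials accumulated back through the weights.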
#include <cstdio>
#include <iostream>
#include <cmath>
#include <ctime>
#include <fstream>
#include <cstdlib>
using namespace std;

#define RANDOM (rand()/(double)RAND_MAX)        /* random double in [0,1]; the original divided by 32767.0,
                                                   which overflows past 1 when RAND_MAX is larger */

const int Layer_Max = 5;                        /* number of layers in the network */
const double PI = 3.1415927;                    /* pi */
const int Layer_number[Layer_Max] = {2,4,4,2,1};/* neurons per layer */
const int Neural_Max = 4;                       /* maximum neurons in any layer */
const int InMax = 21;                           /* sample points per input dimension */
ofstream Out_W_File("All_W.txt", ios::out);
ofstream Out_Error("Error.txt", ios::out);

/* the BP network class */
class BP {
public:
    BP();
    void BP_Print();                            /* print the weights */
    double F(double x);                         /* activation function */
    double Y(double x1, double x2);             /* the function to approximate */
    double NetWorkOut(int x1, int x2);          /* network output for the sample indexed by (x1, x2) */
    void AllLayer_D(int x1, int x2);            /* error differentials of all neurons */
    void Change_W();                            /* update the weights */
    void Train();                               /* training loop */
    void After_Train_Out();
    double Cost(double out, double Exp);        /* cost function */
private:
    double W[Layer_Max][Neural_Max][Neural_Max+1]; /* W[i][j][k]: weight from neuron k of layer i-1 to
                                                      neuron j of layer i; the extra slot
                                                      k == Layer_number[i-1] stores the threshold
                                                      (the original Neural_Max-sized dimension overflowed) */
    double Input_Net[2][InMax];                 /* inputs x1, x2 of the i-th sample */
    double Out_Exp[InMax][InMax];               /* expected outputs */
    double Layer_Node[Layer_Max][Neural_Max];   /* output of each neuron */
    double D[Layer_Max][Neural_Max];            /* error differential of each neuron */
    double Study_Speed;                         /* learning rate */
    double e;                                   /* error tolerance */
};

BP::BP() {
    srand(time(NULL));
    for (int i = 1; i < Layer_Max; i++) {
        for (int j = 0; j < Layer_number[i]; j++) {
            /* random initial weights; the last slot initialises the threshold */
            for (int k = 0; k < Layer_number[i-1]+1; k++) W[i][j][k] = RANDOM;
        }
    }
    /* build the input grid and normalise the expected outputs */
    for (int l = 0; l < InMax; l++) {           /* inputs x1, x2 on a grid over [0,1] */
        Input_Net[0][l] = l * 0.05;
        Input_Net[1][l] = 1 - l * 0.05;
    }
    for (int i = 0; i < InMax; i++) {
        for (int j = 0; j < InMax; j++) {
            Out_Exp[i][j] = Y(Input_Net[0][i], Input_Net[1][j]);  /* expected output */
            Out_Exp[i][j] = Out_Exp[i][j] / 3.0; /* normalise into (0,1): the max of Y on the grid is 3 */
        }
    }
    Study_Speed = 0.5;                          /* learning rate */
    e = 0.0001;                                 /* error tolerance */
}

double BP::F(double x) { return 1.0 / (1 + exp(-x)); }   /* sigmoid */

double BP::Y(double x1, double x2) {
    return pow(x1-1, 4) + 2 * pow(x2, 2);
}

double BP::Cost(double Out, double Exp) {
    return pow(Out - Exp, 2);                   /* (Ok - dk)^2 */
}

double BP::NetWorkOut(int x1, int x2) {
    int i, j, k;
    double N_node[Layer_Max][Neural_Max];       /* total input of neuron j in layer i */
    /* layer 0 just passes the inputs through: no weights or thresholds */
    N_node[0][0] = Input_Net[0][x1]; Layer_Node[0][0] = Input_Net[0][x1];
    N_node[0][1] = Input_Net[1][x2]; Layer_Node[0][1] = Input_Net[1][x2];
    for (i = 1; i < Layer_Max; i++) {
        for (j = 0; j < Layer_number[i]; j++) {
            N_node[i][j] = 0.0;
            for (k = 0; k < Layer_number[i-1]; k++)
                N_node[i][j] += Layer_Node[i-1][k] * W[i][j][k];
            N_node[i][j] -= W[i][j][k];         /* subtract the threshold (k == Layer_number[i-1] here) */
            Layer_Node[i][j] = F(N_node[i][j]); /* neuron output */
        }
    }
    return Layer_Node[Layer_Max-1][0];          /* output of the last layer */
}

void BP::AllLayer_D(int x1, int x2) {
    int i, j, k;
    double temp;
    /* output layer: Ok * (1 - Ok) * (Ok - dk) */
    D[Layer_Max-1][0] = Layer_Node[Layer_Max-1][0] * (1 - Layer_Node[Layer_Max-1][0])
                        * (Layer_Node[Layer_Max-1][0] - Out_Exp[x1][x2]);
    /* propagate the differentials backwards through the hidden layers */
    for (i = Layer_Max-1; i > 0; i--) {
        for (j = 0; j < Layer_number[i-1]; j++) {
            temp = 0;
            for (k = 0; k < Layer_number[i]; k++) temp += W[i][k][j] * D[i][k];
            D[i-1][j] = Layer_Node[i-1][j] * (1 - Layer_Node[i-1][j]) * temp;
        }
    }
}

void BP::Change_W() {
    int i, j, k;
    for (i = 1; i < Layer_Max; i++) {
        for (j = 0; j < Layer_number[i]; j++) {
            for (k = 0; k < Layer_number[i-1]; k++)
                W[i][j][k] -= Study_Speed * D[i][j] * Layer_Node[i-1][k];
            /* update the threshold: its input is effectively -1,
               so this is -(-1) * Study_Speed * D[i][j] */
            W[i][j][k] += Study_Speed * D[i][j];
        }
    }
}

void BP::Train() {
    int i, j;
    int ok = 0;
    double Out;
    long int count = 0;
    double err;
    ofstream Out_count("Out_count.txt", ios::out);
    /* trajectories of a few individual weights are saved to files */
    ofstream outWFile1("W[2][0][0].txt", ios::out);
    ofstream outWFile2("W[2][1][1].txt", ios::out);
    ofstream outWFile3("W[1][0][0].txt", ios::out);
    ofstream outWFile4("W[1][1][0].txt", ios::out);
    ofstream outWFile5("W[3][0][1].txt", ios::out);
    /* train on the 21*21 samples; stop only when every sample meets the tolerance */
    while (ok < InMax * InMax) {
        count++;
        for (i = 0, ok = 0; i < InMax; i++) {
            for (j = 0; j < InMax; j++) {
                Out = NetWorkOut(i, j);
                AllLayer_D(i, j);
                err = Cost(Out, Out_Exp[i][j]); /* squared error of this sample */
                if (err < e) ok++;              /* within tolerance? */
                else Change_W();                /* otherwise update the weights */
            }
        }
        if (count % 1000 == 0) {
            cout << count << " " << err << endl;
            Out_count << count << ",";
            Out_Error << err << ",";
            outWFile1 << W[2][0][0] << ",";
            outWFile2 << W[2][1][1] << ",";
            outWFile3 << W[1][0][0] << ",";
            outWFile4 << W[1][1][0] << ",";
            outWFile5 << W[3][0][1] << ",";
            for (int p = 1; p < Layer_Max; p++) {
                for (int j = 0; j < Layer_number[p]; j++) {
                    for (int k = 0; k < Layer_number[p-1]+1; k++) {
                        Out_W_File << "W[" << p << "][" << j << "][" << k << "]="
                                   << W[p][j][k] << "  ";
                    }
                }
            }
            Out_W_File << "\n\n";
        }
    }
    cout << err << endl;
}

void BP::BP_Print() {
    cout << "Weights after training" << endl;
    for (int i = 1; i < Layer_Max; i++) {
        for (int j = 0; j < Layer_number[i]; j++) {
            for (int k = 0; k < Layer_number[i-1]+1; k++) cout << W[i][j][k] << " ";
            cout << endl;
        }
    }
    cout << endl << endl;
}

void BP::After_Train_Out() {
    int i, j;
    ofstream Out_x1("Out_x1.txt", ios::out);
    ofstream Out_x2("Out_x2.txt", ios::out);
    ofstream Out_Net("Out_Net.txt", ios::out);
    ofstream Out_Exp_File("Out_Exp.txt", ios::out); /* renamed: the original shadowed the Out_Exp member */
    ofstream W_End("W_End.txt", ios::out);
    ofstream Q_End("Q_End.txt", ios::out);
    ofstream Array("Array.txt", ios::out);
    ofstream Out_x11("x1.txt", ios::out);
    ofstream Out_x22("x2.txt", ios::out);
    ofstream Result1("result1.txt", ios::out);
    ofstream Out_x111("x11.txt", ios::out);
    ofstream Out_x222("x22.txt", ios::out);
    ofstream Result2("result2.txt", ios::out);
    for (i = 0; i < InMax; i++) {
        for (j = 0; j < InMax; j++) {
            Out_x11 << Input_Net[0][i] << ",";
            Out_x22 << Input_Net[1][j] << ",";
            Result1 << 3 * NetWorkOut(i, j) << ",";  /* the factor 3 undoes the normalisation */
            Out_x1 << Input_Net[0][i] << ",";
            Array << Input_Net[0][i] << " ";
            Out_x2 << Input_Net[1][j] << ",";
            Array << Input_Net[1][j] << " ";
            Out_Net << 3 * NetWorkOut(i, j) << ",";
            Array << Y(Input_Net[0][i], Input_Net[1][j]) << " ";
            Out_Exp_File << Y(Input_Net[0][i], Input_Net[1][j]) << ",";
            Array << 3 * NetWorkOut(i, j) << " ";
            Array << '\n';
        }
        Out_x1 << '\n'; Out_x2 << '\n';
        Out_x11 << '\n'; Out_x22 << '\n'; Result1 << '\n';
    }
    for (j = 0; j < InMax; j++) {
        for (i = 0; i < InMax; i++) {
            Out_x111 << Input_Net[0][i] << ",";
            Out_x222 << Input_Net[1][j] << ",";
            Result2 << 3 * NetWorkOut(i, j) << ",";
        }
        Out_x111 << '\n'; Out_x222 << '\n'; Result2 << '\n';
    }
    /* dump the final weights; start at layer 1, since layer 0 has no incoming
       weights and Layer_number[i-1] is invalid for i == 0 (the original began at 0) */
    for (i = 1; i < Layer_Max; i++) {
        for (j = 0; j < Layer_number[i]; j++) {
            for (int k = 0; k < Layer_number[i-1]+1; k++) W_End << W[i][j][k] << ",";
        }
    }
}

int main(void) {
    /* the posted main() had this body commented out; enabled so the demo actually runs */
    BP B;
    B.Train();
    B.BP_Print();
    B.After_Train_Out();
    return 0;
}
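Note that NetWorkOut(i, j) takes indices into the 21*21 training grid, not raw coordinates, so the listing as posted can only evaluate the network on its own training samples. To predict the output of a genuinely new sample, as the opening comment promises, the class needs a forward pass over arbitrary inputs. The helper below is a hypothetical addition, not in the original code: it assumes a matching declaration double PredictRaw(double, double); is added to class BP, reuses the trained W and the activation F, and multiplies by 3 to undo the output normalisation.

/* Hypothetical member function: forward-propagate raw inputs (x1, x2)
   in [0,1] and rescale the sigmoid output back to the range of Y. */
double BP::PredictRaw(double x1, double x2) {
    double node[Layer_Max][Neural_Max];
    node[0][0] = x1;                       /* layer 0 passes the inputs through */
    node[0][1] = x2;
    for (int i = 1; i < Layer_Max; i++) {
        for (int j = 0; j < Layer_number[i]; j++) {
            double net = 0.0;
            int k;
            for (k = 0; k < Layer_number[i-1]; k++)
                net += node[i-1][k] * W[i][j][k];
            net -= W[i][j][k];             /* subtract the stored threshold */
            node[i][j] = F(net);
        }
    }
    return 3 * node[Layer_Max-1][0];       /* undo the /3 normalisation */
}

After training, B.PredictRaw(0.25, 0.75) should approximate Y(0.25, 0.75) = (0.25-1)^4 + 2*0.75^2 ≈ 1.44, provided both inputs lie in the trained range [0, 1].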
Reprinted from: https://www.cnblogs.com/wust-ouyangli/p/6642086.html