Running 3D face reconstruction (VRN) in C++
Workflow:
Define the data structures:
1. Residual block (殘差塊)
struct 殘差塊   // standard residual block
{
    BN層數據 * bn0;
    層數據 * conv1;
    BN層數據 * bn1;
    層數據 * conv2;
    BN層數據 * bn2;
    層數據 * conv3;
};
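For orientation, the sketch below shows how one such block would be applied with the pre-activation pattern (BN, then ReLU, then conv, three times, then the saved input added back) that the main routine in step 3 uses. It is my own illustration, not code from the project: it assumes the helpers called there (批正則前傳, vl_nnrelu, 卷積前傳無RELU, 卷積層復制, 卷積層相加, del卷積層) act on the current feature map 源 exactly as they do in step 3, and note that the main routine actually indexes sr.conv / sr.bn directly rather than going through this struct.

// Hypothetical sketch: applying one 殘差塊 (not code from the original project)
void 殘差塊前傳草圖(殘差塊 & 塊, 卷積層 * 源)
{
    // keep the block input for the identity skip connection
    卷積層 跳連(源->width, 源->height, 源->depth);
    跳連.data = new float[源->width * 源->height * 源->depth];
    卷積層復制(源, &跳連);

    // pre-activation pattern: BN -> ReLU -> conv, repeated for the three convolutions
    批正則前傳(塊.bn0);  vl_nnrelu(源);  卷積前傳無RELU(塊.conv1);
    批正則前傳(塊.bn1);  vl_nnrelu(源);  卷積前傳無RELU(塊.conv2);
    批正則前傳(塊.bn2);  vl_nnrelu(源);  卷積前傳無RELU(塊.conv3);

    卷積層相加(&跳連, 源);   // add the saved input back onto the block output
    del卷積層(跳連);
}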
2. Overall model (RVN模型)

struct RVN模型
{
    //層數據 * conv0;       //3->64    0
    //BN層數據 * bn0;       //64       1
    //殘差塊64 * res64;     //64->128  3-17   SpatialMaxPooling 18
    //殘差塊128 * res128;   //128->64->128  19-32
    //殘差塊256 * res256;   //128->256  34-48
    ////殘差半場 * half1;   //48-688
    //殘差半場 * half2;     //689-1329
    //層數據 * conv1;       //256->256  1333
    //BN層數據 * bn1;       //156       1334
    //層數據 * conv2;       //256->200  1336
    //SpatialUpSamplingBilinear  1337
    //Sigmoid  1338

    BN層數據 * bn;       // 273 of these
    層數據 * conv;       // 276 of these

    // constructor
    RVN模型();
};

RVN模型::RVN模型()
{
    int size;
    // allocate memory for all 276 convolution layers and 273 batch-norm layers
    size = sizeof(層數據) * 276;
    conv = (層數據 *)malloc(size);
    size = sizeof(BN層數據) * 273;
    bn = (BN層數據 *)malloc(size);
}
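A minimal usage sketch of how these pieces fit together, under two assumptions of mine: the input photo has already been decoded into the global bitmap bmp that RVN() (step 3 below) reads, and some loader fills the weight arrays with the converted Keras weights. Neither step is shown in this post, so the 加載權重 call is only a placeholder name.

int main()
{
    RVN模型 模型;            // constructor reserves the 276 conv and 273 BN weight slots
    //加載權重(模型);        // placeholder: fill 模型.conv / 模型.bn with the converted Keras weights
    // ...decode the input photo into the global bmp that RVN() reads...
    char 輸出名[] = "face";
    RVN(輸出名, 模型);       // runs the network, saves gray.jpg and the point cloud
    return 0;
}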
3. Main function:

void RVN(char * savefilename, RVN模型 & sr)
{
    int wid = bmp.width;
    int hei = bmp.height;
    int wh  = wid * hei;
    cout<<"輸入圖像寬度:"<<wid<<endl;
    cout<<" 高度:"<<hei<<endl;

    卷積層 rgb(wid, hei, 3);   // brightness buffer
    rgb.data = new float[wid * hei * 3];
    bmp2RGB(rgb);
    //卷積層乘以(rgb,1.0f/255.f);
    層數據 * 層;
    int pad;

    // scale to 192x192
    卷積層 di(192, 192, 3);
    di.data = new float[192*192*3];
    wid = 192; hei = 192; wh = wid * hei;
    卷積層雙三次插值(rgb, di);
    //卷積層復制(&rgb,&di);
    //save_卷積層2jpg(&di,"di");

    卷積層 *源 = &di;
    卷積層 *目標 = &rgb;
    層數據 * conv;
    BN層數據 * bn;

    wid /= 2; hei /= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    //load_mat2卷積層2("me/input_1.txt",源);
    //save_卷積層2jpg(源,"input_1");

    //conv_1
    conv = sr.conv - 1 + 1;
    卷積前傳2步長無RELU(conv);
    //load_mat2卷積層2("me/conv_1.txt",源);
    //save_卷積層2jpg(源,"conv_1");
    //instance_norm(*源);
    //卷積層乘以(*源,1.0f/255.f);

    //batch_norm_1
    bn = sr.bn - 1 + 1;
    //instance_norm(*源);
    //vl_Scale(源,bn->權重,bn->偏移);
    批正則前傳(bn);
    //load_mat2卷積層2("me/batch_norm_1.txt",源);
    //save_卷積層2txt(源,"batch_norm_1-me.txt");
    vl_nnrelu(源);

    cout<<"add_1..."<<endl;
    // non-standard residual block --------------------------
    卷積層 activation_1(wid, hei, 源->depth);
    activation_1.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &activation_1);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;

    //batch_norm_2 (BatchNorm)  (None, 64, 96, 96)   256    activation_1[0][0]
    bn = sr.bn - 1 + 2;
    批正則前傳(bn);
    vl_nnrelu(源);
    //conv_2 (Conv)             (None, 64, 96, 96)   4160   activation_2[0][0]
    conv = sr.conv - 1 + 2;
    卷積前傳無RELU(conv);
    //batch_norm_3 (BatchNorm)  (None, 64, 96, 96)   256    conv_2[0][0]
    bn = sr.bn - 1 + 3;
    批正則前傳(bn);
    vl_nnrelu(源);
    //conv_3 (Conv)             (None, 64, 96, 96)   36928  activation_3[0][0]
    conv = sr.conv - 1 + 3;
    卷積前傳無RELU(conv);
    //batch_norm_4 (BatchNorm)  (None, 64, 96, 96)   256    conv_3[0][0]
    bn = sr.bn - 1 + 4;
    批正則前傳(bn);
    vl_nnrelu(源);
    //conv_4 (Conv)             (None, 128, 96, 96)  8320   activation_4[0][0]
    conv = sr.conv - 1 + 4;
    卷積前傳無RELU(conv);
    卷積層 conv_4(wid, hei, 源->depth);
    conv_4.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &conv_4);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;

    //- - - - - -> jump ahead to later
    Resize卷積層(*源, wid, hei, activation_1.depth);
    卷積層復制(&activation_1, 源);
    del卷積層(activation_1);
    //conv_5 (Conv)             (None, 128, 96, 96)  8320   activation_1[0][0]
    conv = sr.conv - 1 + 5;
    卷積前傳無RELU(conv);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;

    //add_1 (Add)               (None, 128, 96, 96)  0      conv_4[0][0]
    //                                                      conv_5[0][0]
    卷積層相加(&conv_4, 源);   //4+5
    del卷積層(conv_4);

    cout<<"最大池化 1..."<<endl;
    //add_1 -> max_pooling2d_1
    wid /= 2; hei /= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    vl_nnpool(源, 目標);
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;

    cout<<"add_2..."<<endl;
    //max_pooling2d_1 -> add_2
    //起個頭殘差塊前傳(*源,*目標,5,sr);
    殘差塊順次前傳(*源, *目標, 5, 6, sr);

    cout<<"add_3..."<<endl;
    // non-standard residual block --------------------------
    卷積層 add_2(wid, hei, 源->depth);
    add_2.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_2);
    //batch_norm_8 (BatchNorm)  (None, 128, 48, 48)  512    add_2[0][0]
    bn = sr.bn - 1 + 8;
    批正則前傳(bn);
    vl_nnrelu(源);
    //conv_9 (Conv)             (None, 128, 48, 48)  16512  activation_8[0][0]
    conv = sr.conv - 1 + 9;
    卷積前傳無RELU(conv);
    //batch_norm_9 (BatchNorm)  (None, 128, 48, 48)  512    conv_9[0][0]
    bn = sr.bn - 1 + 9;
    批正則前傳(bn);
    vl_nnrelu(源);
    //conv_10 (Conv)            (None, 128, 48, 48)  147584 activation_9[0][0]
    conv = sr.conv - 1 + 10;
    卷積前傳無RELU(conv);
    //batch_norm_10 (BatchNorm) (None, 128, 48, 48)  512    conv_10[0][0]
    bn = sr.bn - 1 + 10;
    批正則前傳(bn);
    vl_nnrelu(源);
    //conv_11 (Conv)            (None, 256, 48, 48)  33024  activation_10[0][0]
    conv = sr.conv - 1 + 11;
    卷積前傳無RELU(conv);
    卷積層 conv_11(wid, hei, 源->depth);
    conv_11.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &conv_11);

    //- - - - - -> jump ahead to later
    Resize卷積層(*源, wid, hei, add_2.depth);
    卷積層復制(&add_2, 源);
    del卷積層(add_2);
    //add_2 -> conv_12
    conv = sr.conv - 1 + 12;
    卷積前傳無RELU(conv);
    //conv_11 + conv_12 -> add_3
    卷積層相加(&conv_11, 源);
    del卷積層(conv_11);
    //load_mat2卷積層2("me/add_3.txt",源);

    cout<<"上半場..."<<endl;
    wid = 48; hei = 48;
    Resize卷積層(*源, wid, hei, 256);
    卷積層 add_3(wid, hei, 源->depth);
    add_3.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_3);

    cout<<"add_3->add_6"<<endl;
    //add_3->add_4
    //起個頭殘差塊前傳(*源,*目標,11,sr);
    殘差塊順次前傳(*源, *目標, 11, 13, sr);
    //add_4->add_5
    //起個頭殘差塊前傳(*源,*目標,14,sr);
    殘差塊順次前傳(*源, *目標, 14, 16, sr);
    //add_5->add_6
    //起個頭殘差塊前傳(*源,*目標,17,sr);
    殘差塊順次前傳(*源, *目標, 17, 19, sr);
    卷積層 add_6(wid, hei, 源->depth);
    add_6.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_6);

    cout<<"add_6->add_9"<<endl;
    //add_6->add_7
    //起個頭殘差塊前傳(*源,*目標,20,sr);
    殘差塊順次前傳(*源, *目標, 20, 22, sr);
    //add_7->add_8
    殘差塊順次前傳(*源, *目標, 23, 25, sr);
    //add_8->add_9
    殘差塊順次前傳(*源, *目標, 26, 28, sr);
    卷積層 add_9(wid, hei, 源->depth);
    add_9.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_9);

    cout<<"add_9->add_12"<<endl;
    //add_9->add_10
    殘差塊順次前傳(*源, *目標, 29, 31, sr);
    //add_10->add_11
    殘差塊順次前傳(*源, *目標, 32, 34, sr);
    //add_11->add_12
    殘差塊順次前傳(*源, *目標, 35, 37, sr);
    卷積層 add_12(wid, hei, 源->depth);
    add_12.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_12);

    cout<<"add_12->add_15"<<endl;
    //add_12->add_13
    殘差塊前傳(*源, *目標, 66, 71, 76, 68, 73, 78, sr);
    //->add_14
    殘差塊前傳(*源, *目標, 81, 86, 91, 83, 88, 93, sr);
    //->add_15
    殘差塊前傳(*源, *目標, 96, 101, 106, 98, 103, 108, sr);
    卷積層 add_15(wid, hei, 源->depth);
    add_15.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_15);

    //- - - - - -> jump ahead to later
    卷積層復制(&add_12, 源);
    del卷積層(add_12);

    cout<<"最大池化 2..."<<endl;
    //add_12->max_pooling2d_2
    wid /= 2; hei /= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    vl_nnpool(源, 目標);
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;

    //max_pooling2d_2->add_16
    殘差塊順次前傳(*源, *目標, 38, 40, sr);
    cout<<"add_16->add_17"<<endl;
    //add_16->add_17
    殘差塊順次前傳(*源, *目標, 41, 43, sr);
    //add_17->add_18
    殘差塊順次前傳(*源, *目標, 44, 46, sr);
    //->add_19
    殘差塊順次前傳(*源, *目標, 47, 49, sr);
    cout<<"add_19->add_20..."<<endl;
    //->add_20
    殘差塊跳X前傳(*源, *目標, 50, 52, sr, 2);   //???
    cout<<"add_20->add_21..."<<endl;
    //->add_21
    殘差塊跳X前傳(*源, *目標, 56, 58, sr, 3);
    cout<<"add_21->add_22..."<<endl;
    //->add_22
    殘差塊前傳(*源, *目標, 65, 69, 74, 67, 71, 76, sr);
    //->add_23
    殘差塊前傳(*源, *目標, 80, 84, 89, 82, 86, 91, sr);
    cout<<"add_23->add_24..."<<endl;
    //->add_24
    殘差塊前傳(*源, *目標, 95, 99, 104, 97, 101, 106, sr);

    cout<<"鄰近插值..."<<endl;
    //add_24->up_sampling2d_1
    wid *= 2; hei *= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    最近鄰插值(*源, *目標);   // up_sampling2d_8
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;
    //add_15 + up_sampling2d_1 -> add_25
    卷積層相加(&add_15, 源);
    del卷積層(add_15);

    cout<<"add_25->add_26"<<endl;
    //->add_26
    //起個頭殘差塊前傳(*源,*目標,111,sr);
    殘差塊跳X前傳(*源, *目標, 111, 113, sr, 4);
    卷積層 add_26(wid, hei, 源->depth);
    add_26.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_26);

    //- - - - - -> jump ahead to later
    卷積層復制(&add_9, 源);
    del卷積層(add_9);

    cout<<"最大池化3"<<endl;
    //add_9->max_pooling2d_3
    wid /= 2; hei /= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    vl_nnpool(源, 目標);
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;

    //max_pooling2d_3->add_27
    殘差塊跳X前傳(*源, *目標, 51, 53, sr, 2);
    cout<<"add_27->add_28"<<endl;
    //->add_28
    殘差塊跳X前傳(*源, *目標, 57, 59, sr, 3);
    //->add_29
    殘差塊跳X前傳(*源, *目標, 67, 69, sr, 5);
    cout<<"add_29->add_30"<<endl;
    //->add_30
    殘差塊跳X前傳(*源, *目標, 82, 84, sr, 5);
    cout<<"add_30->add_31"<<endl;
    //->add_31
    殘差塊跳X前傳(*源, *目標, 97, 99, sr, 5);
    //->add_32
    殘差塊跳X前傳(*源, *目標, 110, 112, sr, 4);

    cout<<"鄰近插值..."<<endl;
    //add_32->up_sampling2d_2
    wid *= 2; hei *= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    最近鄰插值(*源, *目標);   // up_sampling2d_8
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;
    //add_26 + up_sampling2d_2 -> add_33
    卷積層相加(&add_26, 源);
    del卷積層(add_26);
    //->add_34
    殘差塊跳X前傳(*源, *目標, 123, 125, sr, 3);
    卷積層 add_34(wid, hei, 源->depth);
    add_34.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_34);

    //- - - - - -> jump ahead to later
    卷積層復制(&add_6, 源);
    del卷積層(add_6);

    cout<<"最大池化4"<<endl;
    //add_6->max_pooling2d_4
    wid /= 2; hei /= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    vl_nnpool(源, 目標);
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;

    //max_pooling2d_4->add_35
    殘差塊跳X前傳(*源, *目標, 58, 60, sr, 3);
    //->add_36
    殘差塊跳X前傳(*源, *目標, 68, 70, sr, 5);
    cout<<"add_36->add_37..."<<endl;
    //->add_37
    殘差塊跳X前傳(*源, *目標, 83, 85, sr, 5);
    //->add_38
    殘差塊跳X前傳(*源, *目標, 98, 100, sr, 5);
    cout<<"add_38->add_39..."<<endl;
    //->add_39
    殘差塊跳X前傳(*源, *目標, 112, 114, sr, 4);
    cout<<"add_39->add_40..."<<endl;
    //->add_40
    殘差塊跳X前傳(*源, *目標, 122, 124, sr, 3);

    cout<<"鄰近插值..."<<endl;
    //add_40->up_sampling2d_3
    wid *= 2; hei *= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    最近鄰插值(*源, *目標);   // up_sampling2d_8
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;
    //add_34 + up_sampling2d_3 -> add_41
    卷積層相加(&add_34, 源);
    del卷積層(add_34);
    //add_41->add_42
    殘差塊跳X前傳(*源, *目標, 132, 134, sr, 2);
    卷積層 add_42(wid, hei, 源->depth);
    add_42.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_42);

    //- - - - - -> jump ahead to later
    卷積層復制(&add_3, 源);
    //add_3->max_pooling2d_5
    cout<<"最大池化5"<<endl;
    wid /= 2; hei /= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    vl_nnpool(源, 目標);
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;

    //max_pooling2d_5->add_43
    殘差塊前傳(*源, *目標, 70, 75, 79, 72, 77, 81, sr);
    cout<<"add_43->add_44..."<<endl;
    //->add_44
    殘差塊前傳(*源, *目標, 85, 90, 94, 87, 92, 96, sr);
    //->add_45
    殘差塊前傳(*源, *目標, 100, 105, 109, 102, 107, 111, sr);
    cout<<"add_45->add_46..."<<endl;
    //->add_46
    殘差塊跳X前傳(*源, *目標, 113, 115, sr, 4);
    //->add_47
    殘差塊跳X前傳(*源, *目標, 124, 126, sr, 3);
    cout<<"add_47->add_48..."<<endl;
    //->add_48
    殘差塊跳X前傳(*源, *目標, 131, 133, sr, 2);

    cout<<"鄰近插值..."<<endl;
    //add_48->up_sampling2d_4
    wid *= 2; hei *= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    最近鄰插值(*源, *目標);
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;
    //add_42 + up_sampling2d_4 -> add_49
    卷積層相加(&add_42, 源);
    del卷積層(add_42);
    //add_49->add_50
    殘差塊跳X前傳(*源, *目標, 137, 139, sr, 1);

    // appended layers
    //add_50->conv_142
    conv = sr.conv - 1 + 142;
    卷積前傳無RELU(conv);
    //batch_norm_140
    bn = sr.bn - 1 + 140;
    批正則前傳(bn);
    vl_nnrelu(源);
    //conv_143
    conv = sr.conv - 1 + 143;
    卷積前傳無RELU(conv);
    //batch_norm_141
    bn = sr.bn - 1 + 141;
    批正則前傳(bn);
    vl_nnrelu(源);
    //add_3 + activation_141 -> add_51
    卷積層相加(&add_3, 源);   //add_99
    del卷積層(add_3);

    cout<<"下半場..."<<endl;
    卷積層 add_51(wid, hei, 源->depth);
    add_51.data = new float[wid * hei * 源->depth];
    //load_mat2卷積層2("me/add_51.txt",源);
    //save_卷積層2jpg(源,"add_51");
    卷積層復制(源, &add_51);

    cout<<"add_51->add_54..."<<endl;
    //add_51->add_52
    殘差塊前傳(*源, *目標, 142, 143, 144, 144, 145, 146, sr);
    //add_52->add_53
    殘差塊前傳(*源, *目標, 145, 146, 147, 147, 148, 149, sr);
    //add_53->add_54
    殘差塊前傳(*源, *目標, 148, 149, 150, 150, 151, 152, sr);
    卷積層 add_54(wid, hei, 源->depth);
    add_54.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_54);

    cout<<"add_54->add_57..."<<endl;
    //add_54->add_55
    殘差塊前傳(*源, *目標, 151, 152, 153, 153, 154, 155, sr);
    //add_55->add_56
    殘差塊前傳(*源, *目標, 154, 155, 156, 156, 157, 158, sr);
    //add_56->add_57
    殘差塊前傳(*源, *目標, 157, 158, 159, 159, 160, 161, sr);
    //load_mat2卷積層2("me/add_57.txt",源);
    卷積層 add_57(wid, hei, 源->depth);
    add_57.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_57);

    cout<<"add_57->add_60..."<<endl;
    //add_57->add_58
    殘差塊順次前傳(*源, *目標, 160, 162, sr);
    //add_58->add_59
    殘差塊順次前傳(*源, *目標, 163, 165, sr);
    //add_59->add_60
    殘差塊順次前傳(*源, *目標, 166, 168, sr);
    卷積層 add_60(wid, hei, 源->depth);
    add_60.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_60);

    cout<<"add_60->add_63..."<<endl;
    //add_60->add_61
    //殘差塊前傳(*源,*目標,197,202,207,199,204,209,sr);
    殘差塊跳X前傳(*源, *目標, 197, 199, sr, 5);
    //add_61->add_62
    殘差塊前傳(*源, *目標, 212, 217, 222, 214, 219, 224, sr);
    //add_62->add_63
    殘差塊前傳(*源, *目標, 227, 232, 237, 229, 234, 239, sr);
    卷積層 add_63(wid, hei, 源->depth);
    add_63.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_63);

    //- - - - - -> jump ahead to later
    cout<<"第6最大池化"<<endl;
    //add_60->max_pooling2d_6
    卷積層復制(&add_60, 源);
    del卷積層(add_60);
    wid /= 2; hei /= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    vl_nnpool(源, 目標);
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;

    cout<<"池化后9殘差塊"<<endl;
    //max_pooling2d_6->add_64
    殘差塊順次前傳(*源, *目標, 169, 171, sr);
    cout<<"add_64->add_72..."<<endl;
    //add_64->add_65
    殘差塊順次前傳(*源, *目標, 172, 174, sr);
    //add_65->add_66
    殘差塊順次前傳(*源, *目標, 175, 177, sr);
    //add_66->add_67
    殘差塊順次前傳(*源, *目標, 178, 180, sr);
    //add_67->add_68
    殘差塊前傳(*源, *目標, 181, 183, 185, 183, 185, 187, sr);
    //add_68->add_69
    殘差塊前傳(*源, *目標, 187, 190, 193, 189, 192, 195, sr);
    //add_69->add_70
    殘差塊前傳(*源, *目標, 196, 200, 205, 198, 202, 207, sr);
    //add_70->add_71
    殘差塊前傳(*源, *目標, 211, 215, 220, 213, 217, 222, sr);
    //add_71->add_72
    殘差塊前傳(*源, *目標, 226, 230, 235, 228, 232, 237, sr);

    cout<<"鄰近插值..."<<endl;
    //add_72->up_sampling2d_5
    wid *= 2; hei *= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    最近鄰插值(*源, *目標);   // up_sampling2d_5
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;
    //add_63 + up_sampling2d_5 -> add_73
    卷積層相加(&add_63, 源);
    del卷積層(add_63);
    //add_73->add_74
    殘差塊順次前傳(*源, *目標, 160, 162, sr);
    //load_mat2卷積層2("me/add_74.txt",源);
    卷積層 add_74(wid, hei, 源->depth);
    add_74.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_74);

    //- - - - - -> jump ahead to later
    卷積層復制(&add_57, 源);
    del卷積層(add_57);
    //add_57->max_pooling2d_7
    cout<<"第7最大池化"<<endl;
    wid /= 2; hei /= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    vl_nnpool(源, 目標);
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;

    cout<<"第一6殘差塊"<<endl;
    //conv_188 + max_pooling2d_7 -> add_75
    殘差塊前傳(*源, *目標, 182, 184, 186, 184, 186, 188, sr);
    //起個頭殘差塊前傳(*源,*目標,182,sr);
    //add_75 - - - - - - -> add_80
    //add_75->add_76
    殘差塊前傳(*源, *目標, 188, 191, 194, 190, 193, 196, sr);
    //起個頭殘差塊前傳(*源,*目標,188,sr);
    //add_76->add_77
    殘差塊前傳(*源, *目標, 198, 203, 208, 200, 205, 210, sr);
    //起個頭殘差塊前傳(*源,*目標,198,sr);
    //load_mat2卷積層2("me/add_77.txt",源);
    //add_77->add_78
    殘差塊前傳(*源, *目標, 213, 218, 223, 215, 220, 225, sr);
    //add_78->add_79
    殘差塊前傳(*源, *目標, 228, 233, 238, 230, 235, 240, sr);
    //add_79->add_80
    殘差塊前傳(*源, *目標, 241, 245, 249, 243, 247, 251, sr);
    //load_mat2卷積層2("me/add_80.txt",源);

    cout<<"鄰近插值..."<<endl;
    //add_80->up_sampling2d_6
    wid *= 2; hei *= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    最近鄰插值(*源, *目標);   // up_sampling2d_6
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;
    //add_74 + up_sampling2d_6 -> add_81
    卷積層相加(&add_74, 源);
    del卷積層(add_74);
    //add_81->add_82
    殘差塊前傳(*源, *目標, 254, 257, 260, 256, 259, 262, sr);
    //load_mat2卷積層2("me/add_82.txt",源);
    //--------- everything after this point already verified
    卷積層 add_82(wid, hei, 源->depth);
    add_82.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_82);

    //- - - - - -> jump ahead to later
    卷積層復制(&add_54, 源);
    //add_54->max_pooling2d_8
    cout<<"第8最大池化"<<endl;
    wid /= 2; hei /= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    vl_nnpool(源, 目標);
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;

    cout<<"第二6殘差塊"<<endl;
    //max_pooling2d_8 -> add_83
    殘差塊前傳(*源, *目標, 189, 192, 195, 191, 194, 197, sr);
    //add_83 -------------> add_88
    //add_83->add_84
    殘差塊前傳(*源, *目標, 199, 204, 209, 201, 206, 211, sr);
    //add_84->add_85
    殘差塊前傳(*源, *目標, 214, 219, 224, 216, 221, 226, sr);
    //add_85->add_86
    殘差塊前傳(*源, *目標, 229, 234, 239, 231, 236, 241, sr);
    //add_86->add_87
    殘差塊前傳(*源, *目標, 243, 247, 251, 245, 249, 253, sr);
    //add_87->add_88
    殘差塊前傳(*源, *目標, 253, 256, 259, 255, 258, 261, sr);
    //load_mat2卷積層2("me/add_88.txt",源);

    cout<<"鄰近插值..."<<endl;
    //add_88->up_sampling2d_7
    wid *= 2; hei *= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    最近鄰插值(*源, *目標);   // up_sampling2d_7
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;
    //save_卷積層2txt(源,"up_sampling2d_7-me.txt");
    //add_82 + up_sampling2d_7 -> add_89
    卷積層相加(&add_82, 源);
    del卷積層(add_82);
    //add_89->add_90
    殘差塊前傳(*源, *目標, 263, 265, 267, 265, 267, 269, sr);
    //load_mat2卷積層2("me/add_90.txt",源);
    卷積層 add_90(wid, hei, 源->depth);
    add_90.data = new float[wid * hei * 源->depth];
    卷積層復制(源, &add_90);

    //- - - - - -> jump ahead to later
    //--------- everything after this point already verified
    卷積層復制(&add_51, 源);
    //add_51->max_pooling2d_9
    cout<<"第9最大池化"<<endl;
    wid /= 2; hei /= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    vl_nnpool(源, 目標);
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;
    //wid =24;hei =24;
    //load_mat2卷積層2("me/max_pooling2d_9.txt",源);
    //save_卷積層2jpg(源,"max_pooling2d_9");

    cout<<"第三6殘差塊"<<endl;
    //conv_212 + max_pooling2d_9 -> add_91
    殘差塊前傳(*源, *目標, 201, 206, 210, 203, 208, 212, sr);   // residual block after pooling
    //add_91->add_92
    殘差塊前傳(*源, *目標, 216, 221, 225, 218, 223, 227, sr);
    //add_92->add_93
    殘差塊前傳(*源, *目標, 231, 236, 240, 233, 238, 242, sr);
    //load_mat2卷積層2("me/add_93.txt",源);
    //add_93->add_94
    殘差塊前傳(*源, *目標, 244, 248, 252, 246, 250, 254, sr);
    //load_mat2卷積層2("me/add_94.txt",源);
    //add_94->add_95
    殘差塊前傳(*源, *目標, 255, 258, 261, 257, 260, 263, sr);
    //load_mat2卷積層2("me/add_95.txt",源);
    //add_95->add_96
    殘差塊前傳(*源, *目標, 262, 264, 266, 264, 266, 268, sr);
    //wid =48;hei =48;
    //load_mat2卷積層2("me/add_96.txt",源);

    cout<<"鄰近插值..."<<endl;
    wid *= 2; hei *= 2;
    Resize卷積層(*目標, wid, hei, 源->depth);
    最近鄰插值(*源, *目標);   // up_sampling2d_8
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;
    //load_mat2卷積層2("me/add_90.txt",目標);
    //add_90 + up_sampling2d_8 -> add_97
    卷積層相加(&add_90, 源);
    del卷積層(add_90);
    //load_mat2卷積層2("me/add_97.txt",源);   // verified

    cout<<"后1殘差塊"<<endl;
    //add_97->add_98
    殘差塊前傳(*源, *目標, 268, 269, 270, 270, 271, 272, sr);

    // appended layers
    //load_mat2卷積層2("me/add_98.txt",源);
    conv = sr.conv - 1 + 273;
    卷積前傳無RELU(conv);
    bn = sr.bn - 1 + 271;
    批正則前傳(bn);
    vl_nnrelu(源);
    //load_mat2卷積層2("me/activation_271.txt",源);
    conv = sr.conv - 1 + 274;
    卷積前傳無RELU(conv);
    bn = sr.bn - 1 + 272;
    批正則前傳(bn);
    vl_nnrelu(源);
    卷積層相加(&add_51, 源);   //add_99
    del卷積層(add_51);
    //load_mat2卷積層2("me/add_99.txt",源);
    conv = sr.conv - 1 + 275;   //275
    卷積前傳無RELU(conv);
    //load_mat2卷積層2("me/conv_275.txt",源);
    bn = sr.bn - 1 + 273;   //273
    批正則前傳(bn);
    vl_nnrelu(源);
    load_mat2卷積層2("me/activation_273.txt",源);
    //save_卷積層2jpg(源,"activation_273");
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;
    conv = sr.conv - 1 + 276;   //276
    卷積前傳無RELU(conv);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;
    //cout<<*(sr.conv2->權重_數據)<<","<<*(sr.conv2->偏移_數據)<<","<<sr.conv2->偏移長度<<endl;
    //cout<<*(conv->權重_數據)<<","<<*(conv->偏移_數據)<<","<<conv->偏移長度<<endl;
    //load_mat2卷積層2("me/conv_276.txt",源);
    //save_卷積層2txt(源,"out.txt");
    //save_卷積層2jpg(源,"out");

    //*
    //------------------ upscale x4 ------------------------------
    wid *= 4; hei *= 4;
    Resize卷積層(*目標, wid, hei, 源->depth);
    雙線性插值(*源, *目標);   // 1137
    std::swap(目標, 源);
    cout<<源->width<<","<<源->height<<","<<源->depth<<endl;

    vl_sigmoid(源);
    卷積層乘以(*源, 255.f);

    卷積層 *灰度 = 轉換體素到灰度圖(*源);
    cout<<"保存灰度圖: gray0.jpg"<<endl;
    save_卷積層2jpg(灰度,"gray");
    //save_卷積層2txt(灰度,"gray.txt");
    /*/
    cout<<"保存體素..."<<endl;
    save_卷積層2txt(源,"vol.txt");
    cout<<"轉換文件已經保存為: vol.txt"<<endl;
    //save_卷積層2txt(源,"tishu.txt");
    //cout<<"轉換文件已經保存為: tishu.txt"<<endl;
    //*/

    體素到點云并保存(*源);
}

Afterwards, post-process the saved output in MeshLab as described in the earlier article.
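The last call, 體素到點云并保存, is what produces the point cloud that MeshLab then turns into a mesh. Its implementation is not shown in this post, so the sketch below is only my guess at the idea: threshold the sigmoid voxel volume (scaled to 0-255 above) and write every occupied voxel as an XYZ point. Both the threshold value and the channel-major memory layout are assumptions.

#include <fstream>

// Hypothetical voxel-to-point-cloud sketch (not the original 體素到點云并保存)
void 體素轉點雲草圖(const 卷積層 & 體素, const char * 路徑, float 閾值 = 1.0f)
{
    std::ofstream 輸出(路徑);
    for (int z = 0; z < 體素.depth;  ++z)
    for (int y = 0; y < 體素.height; ++y)
    for (int x = 0; x < 體素.width;  ++x)
    {
        // assumed layout: channel-major, data[(z*height + y)*width + x]
        float v = 體素.data[(z * 體素.height + y) * 體素.width + x];
        if (v > 閾值)                    // keep occupied voxels only
            輸出 << x << " " << y << " " << z << "\n";
    }
}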
Result images:
Input image
For reasons I have not yet identified, the results do not match the original model; I would put them at roughly 70-80% of what the Python version achieves.
Download:
3D face reconstruction Win32 program
A 3D face reconstruction RVN build for Windows, adapted from vrn-unguided-keras.h5 in VRN-Keras-master. It does not fully reach the quality of the Python version, so treat it as a starting point rather than a finished port.
https://download.csdn.net/download/juebai123/11284687