一.部署流程
流程:開始 → 點擊「選擇圖片」按鈕 → 讀取一張圖片 → 圖片灰度化處理 → 設置網絡的輸入大小 → 圖片輸入 LeNet 深度卷積神經網絡 → 獲得 softMax 層的值 → 取 softMax 最大值的下標作為預測結果 → 結束
代碼詳細介紹
QT += core gui
greaterThan(QT_MAJOR_VERSION, 4): QT += widgets

TARGET = Lenet-5
TEMPLATE = app

SOURCES += main.cpp \
    mainwindow.cpp \
    lenet.cpp
HEADERS += mainwindow.h \
    lenet.h
FORMS += mainwindow.ui

# --- OpenCV (installed under /usr/local) ---
INCLUDEPATH += /usr/local/include \
    /usr/local/include/opencv \
    /usr/local/include/opencv2
LIBS += /usr/local/lib/libopencv_calib3d.so \
    /usr/local/lib/libopencv_core.so \
    /usr/local/lib/libopencv_features2d.so \
    /usr/local/lib/libopencv_flann.so \
    /usr/local/lib/libopencv_highgui.so \
    /usr/local/lib/libopencv_imgcodecs.so \
    /usr/local/lib/libopencv_imgproc.so \
    /usr/local/lib/libopencv_ml.so \
    /usr/local/lib/libopencv_objdetect.so \
    /usr/local/lib/libopencv_photo.so \
    /usr/local/lib/libopencv_shape.so \
    /usr/local/lib/libopencv_stitching.so \
    /usr/local/lib/libopencv_superres.so \
    /usr/local/lib/libopencv_videoio.so \
    /usr/local/lib/libopencv_video.so \
    /usr/local/lib/libopencv_videostab.so

# --- Caffe (copied into the project tree so the build is relocatable) ---
INCLUDEPATH += ../include/caffe/include \
    ../include/caffe/src
# Prefer the in-tree copy; the absolute build path below is machine-specific.
# LIBS += /home/jinshan/caffe/build/lib/libcaffe.so
LIBS += ../lib/libcaffe.so.1.0.0-rc3
LIBS += -L../lib

# Caffe runtime dependencies.
LIBS += -lglog -lgflags -lprotobuf -lboost_system -lboost_thread \
    -llmdb -lleveldb -lstdc++ -lcblas -latlas

# Caffe headers require C++11.
QMAKE_CXXFLAGS += -std=c++0x
CONFIG += c++11
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- lenet.h:創建一個 Lenet 類,包含網絡的具體調用和數據預處理(核心代碼)。主要成員:
  - Lenet(std::string model_file, std::string delay_file); // 初始化網絡,加載權重
  - void WrapInputLayer(const vector<cv::Mat> imgs, std::vector<cv::Mat> *input_channels); // 多張圖片,h*w*3 -> 3*h*w
  - void WrapInputLayer(const cv::Mat imgs, std::vector<cv::Mat> *input_channels); // 單張圖片,h*w*3 -> 3*h*w
  - int Predict(cv::Mat img); // 返回預測結果
  - std::shared_ptr<Net<float>> net; // 定義網絡
#ifndef LENET_H
#define LENET_H

// Build/consume Caffe in CPU-only mode (no CUDA required for deployment).
// NOTE: this must be defined BEFORE including caffe.hpp, on its own line —
// fusing it with the #include (as in the original paste) breaks the include.
#define CPU_ONLY
#include <caffe/caffe.hpp>
#include <opencv2/opencv.hpp>
#include <string>
#include <vector>
#include <math.h>
#include <QMessageBox>
#include <qfile.h>
#include <QTextStream>
#include <QStatusBar>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>

using namespace std;
using namespace caffe;

// Lenet wraps a Caffe LeNet-5 network for digit classification:
// it builds the net from a deploy prototxt, loads trained weights,
// and exposes Predict() which returns the argmax of the softmax output.
class Lenet
{
public:
    Lenet();
    ~Lenet();
    // model_file: path to the *.caffemodel weights.
    // delay_file: path to the deploy prototxt describing the net.
    Lenet(std::string model_file, std::string delay_file);
    // Wrap a batch of images onto the network input blob:
    // each HxWxC image is split into C planar HxW channel Mats that
    // alias the blob's CPU memory (h*w*3 -> 3*h*w layout).
    void WrapInputLayer(const vector<cv::Mat> imgs,
                        std::vector<cv::Mat> *input_channels);
    // Single-image overload of the above.
    void WrapInputLayer(const cv::Mat imgs,
                        std::vector<cv::Mat> *input_channels);
    // Run a forward pass on img; returns the predicted class index.
    int Predict(cv::Mat img);

    std::shared_ptr< Net<float> > net;  // the underlying Caffe network
};

#endif // LENET_H
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
#include "lenet.h"Lenet::Lenet(){}Lenet::~Lenet(){}Lenet::Lenet(
std::
string model_file,
std::
string delay_file)
{
#ifdef CPU_ONLYCaffe::set_mode(Caffe::CPU);
#elseCaffe::set_mode(Caffe::GPU);
#endifnet.reset(
new Net<
float>(delay_file,TEST)); net->CopyTrainedLayersFrom(model_file);
}
void Lenet:: WrapInputLayer(
const vector<cv::Mat> imgs,
std::
vector<cv::Mat> *input_channels)
{Blob<
float> *input_layer = net->input_blobs()[
0];
int width = input_layer->width();
int height = input_layer->height();
int num = input_layer->num();
float *input_data = input_layer->mutable_cpu_data();
for (
int j =
0; j < num; j++) {
for (
int k =
0; k < input_layer->channels(); ++k){cv::Mat channel(height, width, CV_32FC1, input_data);input_channels->push_back(channel);input_data += width * height;}cv::Mat img = imgs[j];cv::split(img, *input_channels);input_channels->clear();}
}
// Single-image variant: wrap the input blob's CPU memory as one
// CV_32FC1 Mat per channel, then split the image into those Mats so
// its pixels are written straight into the blob (h*w*c -> c*h*w).
void Lenet::WrapInputLayer(const cv::Mat imgs,
                           std::vector<cv::Mat> *input_channels)
{
    Blob<float> *blob = net->input_blobs()[0];
    const int w = blob->width();
    const int h = blob->height();
    const int plane = w * h;
    float *dst = blob->mutable_cpu_data();
    const int nchannels = blob->channels();
    for (int c = 0; c < nchannels; ++c) {
        // Header over blob memory — split() fills it in place, no copy.
        input_channels->push_back(cv::Mat(h, w, CV_32FC1, dst));
        dst += plane;
    }
    cv::split(imgs, *input_channels);
}
// Classify a digit image: preprocess to the network's 28x28 grayscale
// float input, run a forward pass, and return the index of the highest
// softmax probability. Returns -1 if img is empty (e.g. failed imread).
int Lenet::Predict(cv::Mat img)
{
    if (img.empty())
        return -1;  // guard: avoid crashing on an unloadable image

    // Convert to single-channel grayscale. The original crashed on
    // images that were ALREADY grayscale: grayImg stayed empty and
    // cv::resize threw on it. Pass such images through instead.
    cv::Mat grayImg;
    if (img.channels() == 3)
        cv::cvtColor(img, grayImg, cv::COLOR_BGR2GRAY);
    else
        grayImg = img;

    // LeNet expects 28x28 input scaled to [0,1] (0.00392157 = 1/255).
    // BUG FIX: the interpolation flag is the 6th parameter of cv::resize;
    // passing INTER_CUBIC in the 4th position made it the fx scale factor
    // (ignored since dsize is non-zero), silently using INTER_LINEAR.
    cv::resize(grayImg, grayImg, cv::Size(28, 28), 0, 0, cv::INTER_CUBIC);
    grayImg.convertTo(grayImg, CV_32FC1, 0.00392157);

    // Reshape the input blob to a single 1x1x28x28 image.
    Blob<float> *input_layer = net->input_blobs()[0];
    input_layer->Reshape(1, 1, grayImg.rows, grayImg.cols);
    net->Reshape();

    // Copy pixels into the blob and run the forward pass.
    std::vector<cv::Mat> channels;
    WrapInputLayer(grayImg, &channels);
    net->Forward();

    // Read back the softmax output (removed leftover "sfds" debug prints).
    Blob<float> *out_layer = net->output_blobs()[0];
    const int count = out_layer->count();
    const float *start_feature = out_layer->cpu_data();
    const float *end_feature = start_feature + count;
    std::vector<float> softMax(start_feature, end_feature);
    if (softMax.empty())
        return -1;  // guard: original read softMax[0] unconditionally

    // Argmax over the class probabilities.
    int index = 0;
    float best = softMax[0];
    for (size_t i = 1; i < softMax.size(); ++i) {
        if (best < softMax[i]) {
            best = softMax[i];
            index = static_cast<int>(i);
        }
    }
    return index;
}
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- 56
- 57
- 58
- 59
- 60
- 61
- 62
- 63
- 64
- 65
- 66
- 67
- 68
- 69
- 70
- 71
- 72
- 73
- 74
- 75
- 76
- 77
- 78
- 79
- 80
- 81
- 82
- 83
- 84
- 85
- 86
- 87
- 88
- 89
- 90
- 91
- 92
- 93
- 94
- 95
- 96
- 97
- 98
- 99
- 100
- 101
- 102
- 103
- 104
- 105
- 106
- 107
- mainwindow.cpp,網絡調用和初始化,選擇圖片進行處理得出預測
#include "mainwindow.h"
#include "ui_mainwindow.h"
#include "lenet.h"

// Build the UI, maximize the window, and initialize the LeNet network
// from model files in the current working directory.
MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);
    this->showMaximized();
    ui->srcImage->setScaledContents(true);

    // NOTE(review): paths are relative to the working directory, not the
    // executable's location — confirm how the app is launched.
    string model_file = "./lenet_iter_10000.caffemodel";
    string delay_file = "./lenet_delay.prototxt";
    lenetCNN = Lenet(model_file, delay_file);
}

MainWindow::~MainWindow()
{
    delete ui;
}
void MainWindow::on_selectImage_clicked()
{QString fileName = QFileDialog::getOpenFileName(
this,
"open image",QDir::currentPath(),
"All files(*.jpg *.png *.bmp)");cv::Mat img;
int resultNumber; img = cv::imread(fileName.toStdString()); resultNumber = lenetCNN.Predict(img); ui->reseult->setText(
"number: " + QString::number(resultNumber)); cv::cvtColor(img, img, CV_BGR2RGB);QImage qImg = QImage((
const unsigned
char*)(img.data), img.cols, img.rows, img.cols * img.channels(), QImage::Format_RGB888);ui->srcImage->clear();ui->srcImage->setPixmap(QPixmap::fromImage(qImg));
}
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
#include "mainwindow.h"
#include <QApplication>

// Application entry point: create the Qt application and show the
// main window, then hand control to the Qt event loop.
int main(int argc, char *argv[])
{
    QApplication app(argc, argv);
    MainWindow window;
    window.show();
    return app.exec();
}
- mainwindow.ui是界面文件,自行下載源碼查看
版權聲明:本文為博主原創文章,未經博主允許不得轉載。 https://blog.csdn.net/jmu201521121021/article/details/79048220
總結
以上是生活随笔為你收集整理的深度学习入门篇(二)Lenet网络在caffe+QtCreator上部署应用的全部內容,希望文章能夠幫你解決所遇到的問題。
如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。