生活随笔
收集整理的這篇文章主要介紹了
深度学习(三)theano学习笔记(2)基础函数-未完待续
小编觉得挺不错的,现在分享给大家,也给大家做个参考。
theano學(xué)習(xí)筆記(2)基礎(chǔ)函數(shù)
1、隨機(jī)函數(shù)庫(kù)的調(diào)用
2、卷積神經(jīng)網(wǎng)絡(luò)
[python] view plain copy
import theano
import numpy as np
import matplotlib.pyplot as plt
from loaddata import loadmnist
import theano.tensor as T


class softmax:
    """Softmax (logistic-regression) output layer.

    Builds symbolic expressions for the negative-log-likelihood loss,
    the argmax prediction, and the mean misclassification error over
    `outdata` (integer class labels).
    """

    def __init__(self, hiddata, outdata, nin, nout):
        # Weights/bias start at zero — fine for a softmax layer, since the
        # loss is convex in (w, b) for fixed inputs.
        self.w = theano.shared(
            value=np.zeros((nin, nout), dtype=theano.config.floatX), name='w')
        self.b = theano.shared(
            value=np.zeros((nout,), dtype=theano.config.floatX), name='b')

        prey = T.nnet.softmax(T.dot(hiddata, self.w) + self.b)
        # Mean negative log-probability of the correct class:
        # index row i at column outdata[i].
        self.loss = -T.mean(T.log(prey)[T.arange(outdata.shape[0]), outdata])
        self.para = [self.w, self.b]
        self.predict = T.argmax(prey, axis=1)
        self.error = T.mean(T.neq(T.argmax(prey, axis=1), outdata))


class HiddenLayer:
    """Fully-connected tanh layer with Glorot-style uniform init."""

    def __init__(self, inputx, nin, nout):
        # Xavier/Glorot range sqrt(6/(fan_in+fan_out)) for tanh units.
        bound = np.sqrt(6. / (nin + nout))
        ranmatrix = np.random.uniform(-bound, bound, (nin, nout))
        self.w = theano.shared(
            value=np.asarray(ranmatrix, dtype=theano.config.floatX), name='w')
        self.b = theano.shared(
            value=np.zeros((nout,), dtype=theano.config.floatX), name='b')
        self.out = T.tanh(T.dot(inputx, self.w) + self.b)
        self.para = [self.w, self.b]


class mlp:
    """One-hidden-layer MLP trained with plain SGD (fixed lr = 0.1)."""

    def __init__(self, nin, nhid, nout):
        x = T.fmatrix('x')
        y = T.ivector('y')

        hlayer = HiddenLayer(x, nin, nhid)
        olayer = softmax(hlayer.out, y, nhid, nout)

        paras = hlayer.para + olayer.para
        dparas = T.grad(olayer.loss, paras)
        updates = [(para, para - 0.1 * dpara)
                   for para, dpara in zip(paras, dparas)]
        self.trainfunction = theano.function(
            inputs=[x, y], outputs=olayer.loss, updates=updates)

    def train(self, trainx, trainy):
        """Run one SGD step on a minibatch; returns the batch loss."""
        return self.trainfunction(trainx, trainy)


class LeNetConvPoolLayer:
    """Convolution + max-pool + tanh layer (LeNet-style).

    img_shape:    (batch, in_channels, h, w)
    filter_shape: (out_channels, in_channels, fh, fw)
    """

    def __init__(self, inputx, img_shape, filter_shape, poolsize=(2, 2)):
        # Input channel count must match between image and filters.
        assert img_shape[1] == filter_shape[1]

        bound = np.sqrt(6. / (filter_shape[0] + filter_shape[1]))
        v = np.random.uniform(low=-bound, high=bound, size=filter_shape)
        wvalue = np.asarray(v, dtype=theano.config.floatX)
        self.w = theano.shared(value=wvalue, name='w')
        bvalue = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=bvalue, name='b')

        # Valid-mode convolution followed by non-overlapping max pooling.
        covout = T.nnet.conv2d(inputx, self.w)
        # NOTE(review): downsample.max_pool_2d is the old Theano API,
        # removed in later releases in favour of theano.tensor.signal.pool_2d
        # — confirm against the installed Theano version.
        covpool = T.signal.downsample.max_pool_2d(covout, poolsize)

        # Broadcast the per-channel bias over batch and spatial dims.
        self.out = T.tanh(covpool + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.para = [self.w, self.b]


# ---------------------------------------------------------------------------
# Training script: LeNet-style CNN on MNIST (28x28 grayscale digits).
# ---------------------------------------------------------------------------
trainx, trainy = loadmnist()
trainx = trainx.reshape(-1, 1, 28, 28)
batch_size = 30
m = trainx.shape[0]
# Fix: use floor division so the batch count is an int (true division
# returns a float in Python 3, which breaks range(ne)).
ne = m // batch_size

batchx = T.tensor4(name='batchx', dtype=theano.config.floatX)
batchy = T.ivector('batchy')

# 28x28 -conv5-> 24x24 -pool2-> 12x12 -conv5-> 8x8 -pool2-> 4x4
cov1_layer = LeNetConvPoolLayer(inputx=batchx,
                                img_shape=(batch_size, 1, 28, 28),
                                filter_shape=(20, 1, 5, 5))
cov2_layer = LeNetConvPoolLayer(inputx=cov1_layer.out,
                                img_shape=(batch_size, 20, 12, 12),
                                filter_shape=(50, 20, 5, 5))
cov2out = cov2_layer.out.flatten(2)
hlayer = HiddenLayer(cov2out, 4 * 4 * 50, 500)
olayer = softmax(hlayer.out, batchy, 500, 10)

paras = cov1_layer.para + cov2_layer.para + hlayer.para + olayer.para
dparas = T.grad(olayer.loss, paras)
updates = [(para, para - 0.1 * dpara) for para, dpara in zip(paras, dparas)]

train_function = theano.function(inputs=[batchx, batchy],
                                 outputs=olayer.loss, updates=updates)
test_function = theano.function(inputs=[batchx, batchy],
                                outputs=[olayer.error, olayer.predict])

testx, testy = loadmnist(True)
testx = testx.reshape(-1, 1, 28, 28)

train_history = []
test_history = []

for it in range(20):
    # Fix: renamed accumulator (was `sum`, shadowing the builtin) and
    # reuse the sliced minibatch instead of slicing twice.
    epoch_loss = 0.0
    for i in range(ne):
        xb = trainx[i * batch_size:(i + 1) * batch_size]
        yb = trainy[i * batch_size:(i + 1) * batch_size]
        epoch_loss += train_function(xb, yb)
    epoch_loss /= ne
    print('train_loss:', epoch_loss)
    test_error, predict = test_function(testx, testy)
    print('test_error:', test_error)

    train_history.append(epoch_loss)
    test_history.append(test_error)

n = len(train_history)
fig1 = plt.subplot(111)
fig1.set_ylim(0.001, 0.2)
fig1.plot(np.arange(n), train_history, '-')
# NOTE(review): the original never calls plt.show()/savefig, so the plot is
# only visible in interactive backends — kept as-is to preserve behavior.
from:?http://blog.csdn.net/hjimce/article/details/46806923
《新程序员》:云原生和全面数字化实践,50位技术专家共同创作,文字、视频、音频交互阅读
總結(jié)
以上是生活随笔为你收集整理的深度学习(三)theano学习笔记(2)基础函数-未完待续的全部内容,希望文章能够帮你解决所遇到的问题。
如果覺得生活随笔網(wǎng)站內(nèi)容還不錯(cuò),歡迎將生活随笔推薦給好友。