生活随笔
收集整理的這篇文章主要介紹了
tensorflow就该这么学--6(多层神经网络)
小編覺得挺不錯的,現在分享給大家,幫大家做個參考.
一、線性問題和非線性問題
1、線性問題
某醫院想用神經網絡對已經有的病例進行分類,數據樣本特征x包括病人的年齡x1和腫瘤的大小x2,(x[x1,x2]),對應的標簽為良性或惡性(0、1)
二分類:
(1)生成數據集
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils import shuffle


def generate(sample_size, mean, cov, diff, regression):
    """Generate a 2-D Gaussian dataset for binary classification.

    Args:
        sample_size: total number of samples to draw.
        mean: mean vector of class 0.
        cov: covariance matrix shared by all classes.
        diff: list of offsets added to `mean`, one per additional class.
        regression: if True, return integer class labels; if False,
            return one-hot encoded labels.

    Returns:
        (X, Y): shuffled feature matrix and labels.
    """
    num_classes = 2
    samples_per_class = int(sample_size / 2)

    X0 = np.random.multivariate_normal(mean, cov, samples_per_class)
    Y0 = np.zeros(samples_per_class)

    for ci, d in enumerate(diff):
        X1 = np.random.multivariate_normal(mean + d, cov, samples_per_class)
        Y1 = (ci + 1) * np.ones(samples_per_class)
        X0 = np.concatenate((X0, X1))
        Y0 = np.concatenate((Y0, Y1))

    if regression == False:
        # BUG FIX: the original referenced the undefined name `Y` here
        # (NameError when regression=False) and then discarded the one-hot
        # result by shuffling (X0, Y0). Encode Y0 in place instead.
        class_ind = [Y0 == class_number for class_number in range(num_classes)]
        Y0 = np.asarray(np.hstack(class_ind), dtype=np.float32)

    X, Y = shuffle(X0, Y0)
    return X, Y


input_dim = 2

np.random.seed(10)
num_classes = 2
mean = np.random.randn(num_classes)
cov = np.eye(num_classes)
X, Y = generate(1000, mean, cov, [3.0], True)

# Plot the two classes: red = label 0 (benign), blue = label 1 (malignant).
colors = ['r' if l == 0 else 'b' for l in Y[:]]
plt.scatter(X[:, 0], X[:, 1], c=colors)
plt.xlabel("Scaled age (in yrs)")
plt.ylabel("Tumor size (in cm)")
plt.show()
(2)構建網絡模型
# --- Logistic-regression graph: one sigmoid unit over the two features ---
lab_dim = 1

# Placeholders for a minibatch of features and its 0/1 labels.
input_features = tf.placeholder(tf.float32, [None, input_dim])
input_lables = tf.placeholder(tf.float32, [None, lab_dim])

# Trainable parameters: a 2x1 weight matrix and a scalar bias.
W = tf.Variable(tf.random_normal([input_dim, lab_dim]), name="weight")
b = tf.Variable(tf.zeros([lab_dim]), name="bias")

# Forward pass: affine transform followed by a sigmoid squashing.
logits = tf.matmul(input_features, W) + b
output = tf.nn.sigmoid(logits)

# Manual binary cross-entropy, plus a squared-error metric for reporting.
cross_entropy = -(input_lables * tf.log(output)
                  + (1 - input_lables) * tf.log(1 - output))
ser = tf.square(input_lables - output)
loss = tf.reduce_mean(cross_entropy)
err = tf.reduce_mean(ser)

# Adam with a fixed 0.04 learning rate drives the training op.
optimizer = tf.train.AdamOptimizer(0.04)
train = optimizer.minimize(loss)
(3)訓練
# --- Train the logistic model for 50 epochs over minibatches of 25 ---
maxEpochs = 50
minibatchSize = 25

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for epoch in range(maxEpochs):
        sumerr = 0
        num_batches = np.int32(len(Y) / minibatchSize)
        for i in range(num_batches):
            x1 = X[i * minibatchSize:(i + 1) * minibatchSize, :]
            # Labels come out of `generate` as a flat vector; reshape to
            # the (batch, 1) shape the placeholder expects.
            y1 = np.reshape(Y[i * minibatchSize:(i + 1) * minibatchSize], [-1, 1])
            # NOTE: the original also called tf.reshape(y1, [-1, 1]) here,
            # which only added a new unused op to the graph on every
            # iteration (graph bloat) -- removed.
            _, lossval, outputval, errval = sess.run(
                [train, loss, output, err],
                feed_dict={input_features: x1, input_lables: y1})
            sumerr = sumerr + errval

        # BUG FIX: the per-batch errors must be averaged over the number of
        # batches; the original divided by minibatchSize, mislabeling the
        # reported mean error.
        print("Epoch:", '%04d' % (epoch + 1),
              "cost=", "{:.9f}".format(lossval),
              "err=", sumerr / num_batches)
(4)可視化
# --- Visualize a fresh sample and the learned decision line ---
# NOTE(review): this code calls sess.run(), so it must execute while the
# training Session above is still open (i.e. inside its `with` block);
# run at top level the session has already been closed.
train_X, train_Y = generate(100, mean, cov, [3.0], True)
colors = ['r' if l == 0 else 'b' for l in train_Y[:]]
plt.scatter(train_X[:, 0], train_X[:, 1], c=colors)

# Hoisted: the original re-ran sess.run(W) four times and sess.run(b)
# once inside the line equation; read each tensor exactly once.
w_val = sess.run(W)
b_val = sess.run(b)

# Decision boundary of the sigmoid unit: w0*x + w1*y + b = 0
# => y = -x*(w0/w1) - b/w1.
x = np.linspace(-1, 8, 200)
y = -x * (w_val[0] / w_val[1]) - b_val / w_val[1]
plt.plot(x, y, label='Fitted line')
plt.legend()
plt.show()
多分類:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

from sklearn.utils import shuffle
from matplotlib.colors import colorConverter, ListedColormap
from sklearn.preprocessing import OneHotEncoder


def onehot(y, start, end):
    """One-hot encode the integer column vector `y` for classes [start, end)."""
    ohe = OneHotEncoder()
    a = np.linspace(start, end - 1, end - start)
    b = np.reshape(a, [-1, 1]).astype(np.int32)
    ohe.fit(b)
    c = ohe.transform(y).toarray()
    return c


def generate(sample_size, num_classes, diff, regression=False):
    """Generate a 2-D Gaussian dataset with `num_classes` classes.

    Args:
        sample_size: total number of samples to draw.
        num_classes: number of classes to generate.
        diff: list of offsets added to the base mean, one per extra class.
        regression: if True, return integer labels; if False, one-hot labels.

    Returns:
        (X, Y): shuffled feature matrix and labels.
    """
    np.random.seed(10)
    mean = np.random.randn(2)
    cov = np.eye(2)
    samples_per_class = int(sample_size / num_classes)

    X0 = np.random.multivariate_normal(mean, cov, samples_per_class)
    Y0 = np.zeros(samples_per_class)

    for ci, d in enumerate(diff):
        X1 = np.random.multivariate_normal(mean + d, cov, samples_per_class)
        Y1 = (ci + 1) * np.ones(samples_per_class)
        X0 = np.concatenate((X0, X1))
        Y0 = np.concatenate((Y0, Y1))

    if regression == False:
        Y0 = np.reshape(Y0, [-1, 1])
        Y0 = onehot(Y0.astype(np.int32), 0, num_classes)

    X, Y = shuffle(X0, Y0)
    return X, Y


np.random.seed(10)

input_dim = 2
num_classes = 3
X, Y = generate(2000, num_classes, [[3.0], [3.0, 0]], False)

# Scatter the three classes: red / blue / yellow by argmax of the one-hot row.
aa = [np.argmax(l) for l in Y]
colors = ['r' if l == 0 else 'b' if l == 1 else 'y' for l in aa[:]]
plt.scatter(X[:, 0], X[:, 1], c=colors)
plt.xlabel("Scaled age (in yrs)")
plt.ylabel("Tumor size (in cm)")
plt.show()

# --- Softmax-regression graph for 3-way classification ---
lab_dim = num_classes

input_features = tf.placeholder(tf.float32, [None, input_dim])
input_lables = tf.placeholder(tf.float32, [None, lab_dim])
W = tf.Variable(tf.random_normal([input_dim, lab_dim]), name="weight")
b = tf.Variable(tf.zeros([lab_dim]), name="bias")
output = tf.matmul(input_features, W) + b   # raw logits

z = tf.nn.softmax(output)
a1 = tf.argmax(tf.nn.softmax(output), axis=1)   # predicted class
b1 = tf.argmax(input_lables, axis=1)            # true class
err = tf.count_nonzero(a1 - b1)                 # misclassified count per batch

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
    labels=input_lables, logits=output)
loss = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(0.04)
train = optimizer.minimize(loss)

maxEpochs = 50
minibatchSize = 25

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_batches = np.int32(len(Y) / minibatchSize)

    for epoch in range(maxEpochs):
        sumerr = 0
        for i in range(num_batches):
            x1 = X[i * minibatchSize:(i + 1) * minibatchSize, :]
            y1 = Y[i * minibatchSize:(i + 1) * minibatchSize, :]

            _, lossval, outputval, errval = sess.run(
                [train, loss, output, err],
                feed_dict={input_features: x1, input_lables: y1})
            sumerr = sumerr + (errval / minibatchSize)

        # BUG FIX: average the per-batch error rates over the number of
        # batches (the original divided by minibatchSize a second time).
        print("Epoch:", '%04d' % (epoch + 1),
              "cost=", "{:.9f}".format(lossval),
              "err=", sumerr / num_batches)

    # --- Visualize the three pairwise decision lines on a fresh sample ---
    train_X, train_Y = generate(200, num_classes, [[3.0], [3.0, 0]], False)
    aa = [np.argmax(l) for l in train_Y]
    colors = ['r' if l == 0 else 'b' if l == 1 else 'y' for l in aa[:]]
    plt.scatter(train_X[:, 0], train_X[:, 1], c=colors)

    # Hoisted: read the trained weights and bias once instead of calling
    # sess.run(W)/sess.run(b) inside every line equation.
    w_val = sess.run(W)
    b_val = sess.run(b)

    x = np.linspace(-1, 8, 200)

    y = -x * (w_val[0][0] / w_val[1][0]) - b_val[0] / w_val[1][0]
    plt.plot(x, y, label='first line', lw=3)

    y = -x * (w_val[0][1] / w_val[1][1]) - b_val[1] / w_val[1][1]
    plt.plot(x, y, label='second line', lw=2)

    y = -x * (w_val[0][2] / w_val[1][2]) - b_val[2] / w_val[1][2]
    plt.plot(x, y, label='third line', lw=1)

    plt.legend()
    plt.show()
    print(w_val, b_val)

    # --- Visualize the full classification regions over a grid ---
    train_X, train_Y = generate(200, num_classes, [[3.0], [3.0, 0]], False)
    aa = [np.argmax(l) for l in train_Y]
    colors = ['r' if l == 0 else 'b' if l == 1 else 'y' for l in aa[:]]
    plt.scatter(train_X[:, 0], train_X[:, 1], c=colors)

    nb_of_xs = 200
    xs1 = np.linspace(-1, 8, num=nb_of_xs)
    xs2 = np.linspace(-1, 8, num=nb_of_xs)
    xx, yy = np.meshgrid(xs1, xs2)

    # PERF FIX: classify every grid point in a single session call instead
    # of 200*200 separate sess.run invocations; result is identical.
    grid = np.column_stack([xx.ravel(), yy.ravel()])
    classification_plane = sess.run(
        a1, feed_dict={input_features: grid}).reshape(nb_of_xs, nb_of_xs)

    cmap = ListedColormap([
        colorConverter.to_rgba('r', alpha=0.30),
        colorConverter.to_rgba('b', alpha=0.30),
        colorConverter.to_rgba('y', alpha=0.30)])
    plt.contourf(xx, yy, classification_plane, cmap=cmap)
    plt.show()
2、非線性問題:利用隱藏層的神經網絡擬合操作
import tensorflow as tf
import numpy as np

# --- Fit the non-linear XOR function with one hidden layer ---
learning_rate = 1e-4
n_input = 2    # two binary inputs per sample
n_label = 1    # single output
n_hidden = 2   # hidden layer width

x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_label])

weights = {
    'h1': tf.Variable(tf.truncated_normal([n_input, n_hidden], stddev=0.1)),
    'h2': tf.Variable(tf.random_normal([n_hidden, n_label], stddev=0.1))
}
biases = {
    'h1': tf.Variable(tf.zeros([n_hidden])),
    'h2': tf.Variable(tf.zeros([n_label]))
}

# Hidden layer with ReLU, then an affine output layer.
layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['h1']))
layer2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['h2'])
# Leaky-ReLU on the output: max(z, 0.01*z).
y_pred = tf.maximum(layer2, 0.01 * layer2)

loss = tf.reduce_mean((y_pred - y) ** 2)
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)

# The four XOR truth-table rows and their targets.
X = [[0, 0], [0, 1], [1, 0], [1, 1]]
Y = [[0], [1], [1], [0]]
X = np.array(X).astype('float32')
Y = np.array(Y).astype('int16')

# FIX: use a context-managed Session instead of the original
# tf.InteractiveSession() that was never closed (resource leak).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for i in range(10000):
        sess.run(train_step, feed_dict={x: X, y: Y})

    print(sess.run(y_pred, feed_dict={x: X}))
    print(sess.run(layer_1, feed_dict={x: X}))
二、網絡模型訓練過程中可能存在的問題
1、欠擬合
擬合效果沒有完全擬合到想要得到的真實數據情況
解決辦法:增加節點或增加神經層
2、過擬合
模型對訓練數據擬合程度過高,導致在新數據上的泛化能力變差
解決辦法:early stopping、數據集擴增、正則化、dropout
總結
以上是生活随笔為你收集整理的tensorflow就该这么学--6(多层神经网络)的全部內容,希望文章能夠幫你解決所遇到的問題。
如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。