Implementing an Autoencoder with TensorFlow
I've recently been helping a senior classmate with his graduation project, which involves TensorFlow and some machine learning. The code below walks through the encoding and decoding stages of an autoencoder, and it also covers saving the model and swapping in a different data file. The model is first trained on the data in true.txt, then tested on the false data, and the two cost values are compared. The full code is as follows:
""" Created on Wed Jan 23 12:29:56 2019@author: 肖俊怡 """import tensorflow as tf #import matplotlib.pyplot as plt import numpy as np#from tensorflow.examples.tutorials.mnist import input_data #mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)train_data = np.loadtxt("true.txt") #train_label = np.loadtxt("true.txt") test_data = np.loadtxt("true.txt") #test_label = np.loadtxt("true.txt") #test_xs = np.ones((test_data.shape[0],28*28)) #test_ys = np.ones((test_data.shape[0],2)) #test_ys = test_label#學(xué)習(xí)率決定了參數(shù)移動(dòng)到最優(yōu)值的速度快慢 learning_rate = 0.01 #指數(shù)據(jù)集中每一個(gè)樣本都跑過一遍 training_epochs = 1 #每次選取的樣本個(gè)數(shù) batch_size = 200display_step = 1 n_input = 18 #分配必要的內(nèi)存,占位;類型:float;行數(shù)不定,列數(shù)為784 X = tf.placeholder("float", [None, n_input])n_hidden_1 = 16 n_hidden_2 = 8 n_hidden_3 = 4 n_hidden_4 = 2 #設(shè)置權(quán)重值,并存入字典#從截?cái)嗟恼龖B(tài)分布中輸出隨機(jī)值 weights = {'encoder_h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1],)),'encoder_h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2],)),'encoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3],)),'encoder_h4': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4],)),'decoder_h1': tf.Variable(tf.truncated_normal([n_hidden_4, n_hidden_3],)),'decoder_h2': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_2],)),'decoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_1],)),'decoder_h4': tf.Variable(tf.truncated_normal([n_hidden_1, n_input],)), } #設(shè)置偏重值,并存入字典,從正態(tài)分布中生成隨機(jī)值 biases = {'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3])),'encoder_b4': tf.Variable(tf.random_normal([n_hidden_4])),'decoder_b1': tf.Variable(tf.random_normal([n_hidden_3])),'decoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),'decoder_b3': tf.Variable(tf.random_normal([n_hidden_1])),'decoder_b4': tf.Variable(tf.random_normal([n_input])), } #進(jìn)行函數(shù)運(yùn)算,構(gòu)建編碼器 def encoder(x):layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['encoder_h1']),biases['encoder_b1']))layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['encoder_h2']),biases['encoder_b2']))layer_3 = tf.nn.relu(tf.add(tf.matmul(layer_2, weights['encoder_h3']),biases['encoder_b3']))# 為了便于編碼層的輸出,編碼層隨后一層不使用激活函數(shù)layer_4 = tf.add(tf.matmul(layer_3, weights['encoder_h4']),biases['encoder_b4'])return layer_4 #構(gòu)建解碼器 def decoder(x):layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['decoder_h1']),biases['decoder_b1']))layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['decoder_h2']),biases['decoder_b2']))layer_3 = tf.nn.relu(tf.add(tf.matmul(layer_2, weights['decoder_h3']),biases['decoder_b3']))layer_4 = tf.nn.relu(tf.add(tf.matmul(layer_3, weights['decoder_h4']),biases['decoder_b4']))return layer_4 #構(gòu)建模型 encoder_op = encoder(X) decoder_op = decoder(encoder_op)#預(yù)測 y_pred = decoder_op y_true = X#定義代價(jià)函數(shù)和優(yōu)化器 cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2)) optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)with tf.Session() as sess:# tf.initialize_all_variables() no long valid from# 2017-03-02 if using tensorflow >= 0.12if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:init = tf.initialize_all_variables()else:init = tf.global_variables_initializer()sess.run(init)# 首先計(jì)算總批數(shù),保證每次循環(huán)訓(xùn)練集中的每個(gè)樣本都參與訓(xùn)練,不同于批量訓(xùn)練 train_writer = 
tf.summary.FileWriter("E://logs//train",sess.graph)total_batch = int(60000/batch_size)#保存is_train=Falsesaver=tf.train.Saver(max_to_keep=1)if is_train: for epoch in range(training_epochs):start = 0for i in range(total_batch):batch_xs = batch_xs = np.ones((batch_size,18)) j = 0 while j<batch_size:batch_xs[j][:] = train_data[i*batch_size+j][:]#epoch*total_batch+j = j+1start+=1#batch_ys = np.ones((batch_size,2))# max(x) = 1, min(x) = 0_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})print("Epoch:", '%04d' % (i+1), "cost=", "{:.9f}".format(c))#if epoch % display_step == 0:# print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c))saver.save(sess,'ckpt/data1.ckpt',global_step=i+1) print("Optimization Finished!")#encoder_result = sess.run(encoder_op, feed_dict={X: mnist.test.images})#encoder_result[:, 0],encoder_result[:, 1]分別代表x,y的數(shù)據(jù)點(diǎn)# plt.scatter(encoder_result[:, 0], encoder_result[:, 1], c=mnist.test.labels)#設(shè)置顏色漸變條# plt.colorbar()#顯示圖像# plt.show()#sess.close()else:model_file=tf.train.latest_checkpoint('ckpt/')saver.restore(sess,model_file)#for epoch in range(training_epochs):#start = 0for i in range(60000):batch_xs = batch_xs = np.ones((1,18)) #j = 0 #while j<batch_size:batch_xs[0][:] = test_data[i][:]#epoch*total_batch+# j = j+1#start+=1#batch_ys = np.ones((batch_size,2))# max(x) = 1, min(x) = 0c = sess.run(cost, feed_dict={X: batch_xs})print("Epoch:", '%04d' % (i+1), "cost=", "{:.9f}".format(c))#encoder_result = sess.run(encoder_op, feed_dict={X: mnist.test.images})#encoder_result[:, 0],encoder_result[:, 1]分別代表x,y的數(shù)據(jù)點(diǎn)#plt.scatter(encoder_result[:, 0], encoder_result[:, 1], c=mnist.test.labels)#設(shè)置顏色漸變條# plt.colorbar()#顯示圖像#plt.show()#sess.close()剛剛?cè)胧?#xff0c;真的很難理解,以后有時(shí)間一定要仔細(xì)學(xué)習(xí)一下tensorflow,不過收獲還是很多的~~~
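The listing above only prints a per-sample cost, so the comparison between the true data and the false data still has to be done by eye. Below is a minimal sketch (not part of the original code) of how that comparison could be automated. It assumes X, cost and saver are defined exactly as in the listing, that a checkpoint already exists under ckpt/, and that the anomalous samples live in a hypothetical false.txt with the same 18-column layout as true.txt.

# Sketch only: compare the average reconstruction cost of the two data files.
# "false.txt" is an assumed file name, not something from the original post.
import numpy as np
import tensorflow as tf

def mean_reconstruction_cost(sess, data):
    # Average per-sample cost over a 2-D array with 18 columns
    costs = [sess.run(cost, feed_dict={X: row.reshape(1, 18)}) for row in data]
    return float(np.mean(costs))

with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('ckpt/'))
    true_cost = mean_reconstruction_cost(sess, np.loadtxt("true.txt"))
    false_cost = mean_reconstruction_cost(sess, np.loadtxt("false.txt"))
    print("mean cost on true data :", true_cost)
    print("mean cost on false data:", false_cost)
    print("difference             :", false_cost - true_cost)

Since the model is trained only on the normal (true) data, it should reconstruct that data with a low cost, so a clearly larger mean cost on the false data is what signals the difference the post sets out to measure.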
總結(jié)
以上是生活随笔為你收集整理的Tensorflow实现自动编码器的全部內(nèi)容,希望文章能夠幫你解決所遇到的問題。