- Preprocessing
The dataset is Facebook's bAbI dataset.
Each file is parsed into a trainable dataset consisting of stories, questions, and answers.
def get_data(infile):
    stories,questions,answers = [],[],[]
    story_text = []
    fin = open(infile,'rb')
    for line in fin:
        line = line.decode('utf-8').strip()
        lno,text = line.split(' ',1)
        if '\t' in text:
            question,answer,_ = text.split('\t')
            stories.append(story_text)
            questions.append(question)
            answers.append(answer)
            story_text = []
        else:
            story_text.append(text)
    fin.close()
    return stories,questions,answers

data_train = get_data('qa1_single-supporting-fact_train.txt')
data_test = get_data('qa1_single-supporting-fact_test.txt')
print('\nTrain observations:',len(data_train[0]),'Test observations:',len(data_test[0]),'\n')
Output:
Train observations: 10000 Test observations: 1000
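For reference, each line in a bAbI task file starts with a sentence number, and question lines additionally carry a tab-separated answer and a supporting-fact id; this is why get_data splits off the leading number and uses the tab character to tell questions apart from story sentences. A representative excerpt in the qa1 format:

1 Mary moved to the bathroom.
2 John went to the hallway.
3 Where is Mary?	bathroom	1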
- How it works
1. Preprocessing: build a dictionary, map the stories, questions, and answers onto the vocabulary, and convert them further into vector form.
2. Model building and validation: train the model and test it on the validation dataset.
3. Prediction: produce results for the test data.
- Code
from __future__ import division,print_function
import collections
import itertools
import nltk
import numpy as np
import matplotlib.pyplot as plt
import os
import random

def get_data(infile):
    stories,questions,answers = [],[],[]
    story_text = []
    fin = open(infile,'rb')
    for line in fin:
        line = line.decode('utf-8').strip()
        lno,text = line.split(' ',1) # strip the leading sentence number
        if '\t' in text: # tab-separated lines hold the question and answer
            question,answer,_ = text.split('\t')
            stories.append(story_text)
            questions.append(question)
            answers.append(answer)
            story_text = []
        else: # all other lines are story sentences
            story_text.append(text)
    fin.close()
    return stories,questions,answers

data_train = get_data('qa1_single-supporting-fact_train.txt')
data_test = get_data('qa1_single-supporting-fact_test.txt')
print('\nTrain observations:',len(data_train[0]),'Test observations:',len(data_test[0]),'\n')
print(data_train[0][1],data_train[1][1],data_train[2][1])
# ['Daniel went back to the hallway.', 'Sandra moved to the garden.'] Where is Daniel? hallway
print(np.array(data_train).shape)
# (3, 10000)

dictnry = collections.Counter() # a dict-like counter of element frequencies; with no argument it starts empty
for stories,questions,answers in [data_train,data_test]:
    for story in stories:
        for sent in story:
            for word in nltk.word_tokenize(sent):
                dictnry[word.lower()] += 1
    for question in questions:
        for word in nltk.word_tokenize(question):
            dictnry[word.lower()] += 1
    for answer in answers:
        for word in nltk.word_tokenize(answer):
            dictnry[word.lower()] += 1

word2indx = {w:(i+1) for i,(w,_) in enumerate(dictnry.most_common())} # indices ordered by word frequency
word2indx['PAD'] = 0
indx2word = {v:k for k,v in word2indx.items()}

vocab_size = len(word2indx) # 22 unique tokens in total
print('vocabulary size:',len(word2indx))

story_maxlen = 0
question_maxlen = 0
for stories,questions,answers in [data_train,data_test]:
    for story in stories:
        story_len = 0
        for sent in story:
            swords = nltk.word_tokenize(sent)
            story_len += len(swords)
        if story_len > story_maxlen:
            story_maxlen = story_len
    for question in questions:
        question_len = len(nltk.word_tokenize(question))
        if question_len > question_maxlen:
            question_maxlen = question_len
print('Story maximum length:',story_maxlen,'Question maximum length:',question_maxlen)
# The longest story is 14 words and the longest question 4; shorter sequences are zero-padded so all inputs share the same dimensions for parallel computation

from keras.layers import Input
from keras.layers.core import Activation,Dense,Dropout,Permute
from keras.layers.embeddings import Embedding
from keras.layers.merge import add,concatenate,dot
from keras.layers.recurrent import LSTM
from keras.models import Model
from keras.preprocessing.sequence import pad_sequences
from keras.utils import np_utils

def data_vectorization(data,word2indx,story_maxlen,question_maxlen): # words => index sequences
    Xs,Xq,Y = [],[],[]
    stories,questions,answers = data
    for story,question,answer in zip(stories,questions,answers):
        xs = [[word2indx[w.lower()] for w in nltk.word_tokenize(s)] for s in story]
        xs = list(itertools.chain.from_iterable(xs)) # flatten per-sentence index lists into one sequence
        xq = [word2indx[w.lower()] for w in nltk.word_tokenize(question)]
        Xs.append(xs)
        Xq.append(xq)
        Y.append(word2indx[answer.lower()])
    return pad_sequences(Xs,maxlen=story_maxlen),pad_sequences(Xq,maxlen=question_maxlen),\
        np_utils.to_categorical(Y,num_classes=len(word2indx))

Xstrain,Xqtrain,Ytrain = data_vectorization(data_train,word2indx,story_maxlen,question_maxlen)
Xstest,Xqtest,Ytest = data_vectorization(data_test,word2indx,story_maxlen,question_maxlen)
print('Train story',Xstrain.shape,'Train question',Xqtrain.shape,'Train answer',Ytrain.shape)
print('Test story',Xstest.shape,'Test question',Xqtest.shape,'Test answer',Ytest.shape)
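Given the figures established earlier (10000 training and 1000 test observations, story_maxlen of 14, question_maxlen of 4, vocabulary size of 22), these prints should report (10000, 14), (10000, 4), (10000, 22) for the training tensors and (1000, 14), (1000, 4), (1000, 22) for the test tensors.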
# Hyperparameters
EMBEDDING_SIZE = 128
LATENT_SIZE = 64
BATCH_SIZE = 64
NUM_EPOCHS = 40

# Input layers
story_input = Input(shape=(story_maxlen,))
question_input = Input(shape=(question_maxlen,))

# Story encoder embedding
# Embedding turns positive integers (indices) into dense vectors of fixed size,
# e.g. [[4],[20]] -> [[0.25,0.1],[0.6,-0.2]]; this layer can only be used as the first layer of a model.
story_encoder = Embedding(input_dim=vocab_size,output_dim=EMBEDDING_SIZE,input_length=story_maxlen)(story_input)
story_encoder = Dropout(0.2)(story_encoder)

# Question encoder embedding
question_encoder = Embedding(input_dim=vocab_size,output_dim=EMBEDDING_SIZE,input_length=question_maxlen)(question_input)
question_encoder = Dropout(0.3)(question_encoder)

# Dot product of the two tensors along their embedding axes
match = dot([story_encoder,question_encoder],axes=[2,2])

# Encode the story into the question's vector space
story_encoder_c = Embedding(input_dim=vocab_size,output_dim=question_maxlen,input_length=story_maxlen)(story_input)
story_encoder_c = Dropout(0.3)(story_encoder_c)

# Combine the two tensors match and story_encoder_c
response = add([match,story_encoder_c])
response = Permute((2,1))(response)

# Combine the two tensors response and question_encoder
answer = concatenate([response, question_encoder], axis=-1)
answer = LSTM(LATENT_SIZE)(answer)
answer = Dropout(0.2)(answer)
answer = Dense(vocab_size)(answer)
output = Activation("softmax")(answer)

model = Model(inputs=[story_input, question_input], outputs=output)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])

print(model.summary())
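As a sanity check against model.summary(), here is how the tensor shapes work out through the graph (batch dimension omitted; 14 = story_maxlen, 4 = question_maxlen, 128 = EMBEDDING_SIZE, 22 = vocab_size):

story_encoder: (14, 128) and question_encoder: (4, 128)
match, the dot product over the embedding axes: (14, 4), a word-by-word story/question affinity matrix
story_encoder_c: (14, 4), so response = add then Permute gives (4, 14)
concatenate(response, question_encoder): (4, 142), then LSTM: (64,), then Dense + softmax: (22,)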
# Train the model
history = model.fit([Xstrain,Xqtrain],[Ytrain],batch_size=BATCH_SIZE,epochs=NUM_EPOCHS,validation_data=([Xstest,Xqtest],[Ytest]))
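Note: the history keys 'acc' and 'val_acc' used in the plotting code below match older Keras releases (up to roughly 2.2); newer Keras and tf.keras versions record them as 'accuracy' and 'val_accuracy', so adjust the keys if the plot raises a KeyError.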
# Plot training and validation accuracy
plt.title('Episodic Memory Q&A Accuracy')
plt.plot(history.history['acc'],color='g',label='train')
plt.plot(history.history['val_acc'],color='r',label='validation')
plt.legend(loc='best')
plt.show()

# Recover true and predicted label indices
ytest = np.argmax(Ytest, axis=1)
Ytest_ = model.predict([Xstest, Xqtest])
ytest_ = np.argmax(Ytest_, axis=1)
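Since ytest and ytest_ are now plain label-index arrays, an overall test accuracy (not computed in the original post) follows in one line:

test_acc = np.mean(ytest == ytest_) # fraction of test questions answered correctly
print('Test accuracy:', test_acc)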
# Randomly sample a few test questions for inspection
NUM_DISPLAY = 10
for i in random.sample(range(Xstest.shape[0]),NUM_DISPLAY):
    story = " ".join([indx2word[x] for x in Xstest[i].tolist() if x != 0])
    question = " ".join([indx2word[x] for x in Xqtest[i].tolist()])
    label = indx2word[ytest[i]]
    prediction = indx2word[ytest_[i]]
    print(story, question, label, prediction)
Output:
Reposted from: https://www.cnblogs.com/peng8098/p/nlp_22.html