NLP学习笔记
NLP學習筆記
gensim-word2vec
訓(xùn)練
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
import multiprocessing


def create_wordVectors(sentences, embedding_size=128, window=5, min_count=5,
                       word2vec_path=None):
    """Train a Word2Vec model on *sentences* and save it to disk.

    sentences: iterable of tokenised sentences (e.g. a LineSentence).
    embedding_size: dimensionality of the learned word vectors.
    window: maximum distance between the current and predicted word.
    min_count: words with total frequency below this are ignored.
    word2vec_path: file path the trained model is saved to.
    """
    # NOTE(review): `size=` is the gensim 3.x keyword argument; gensim 4.x
    # renamed it to `vector_size` -- confirm the installed gensim version.
    w2vModel = Word2Vec(sentences,
                        size=embedding_size,
                        window=window,
                        min_count=min_count,
                        workers=multiprocessing.cpu_count())
    w2vModel.save(word2vec_path)

載入
def load_wordVectors(word2vec_path):
    """Load and return a Word2Vec model previously saved at *word2vec_path*."""
    w2vModel = Word2Vec.load(word2vec_path)
    return w2vModel

映射
def embedding_lookup(w2vModel, sentences):all_vectors = []embeddingDim = w2vModel.vector_sizeembeddingUnknown = [0 for i in range(embeddingDim)]for sentence in sentences:this_vector = []for word in sentence:if word in w2vModel.wv.vocab:v=w2vModel[word]this_vector.append(v)else:this_vector.append(embeddingUnknown)all_vectors.append(this_vector)return all_vectors獲得單詞下標(biāo)和詞向量
# Demo: convert between a word, its vocabulary index, and its vector.
# (Assumes `word2vec_path` points at a saved model; gensim 3.x API.)
w2vModel = Word2Vec.load(word2vec_path)
word = '你'
index = w2vModel.wv.vocab[word].index   # vocabulary index of `word`
word2 = w2vModel.wv.index2word[index]   # word recovered from its index
vector1 = w2vModel.wv.vectors[index]    # vector looked up by index
vector2 = w2vModel[word]                # vector looked up directly by word
總結(jié)
- 上一篇: TensorFlow Lite学习笔记
- 下一篇: 解决Bazel:Error: Linka