LSTM Network Implementation in Python
For a high-level introduction to LSTMs, this post is recommended; its animated diagrams make the mechanism especially easy to follow: https://blog.csdn.net/dQCFKyQDXYm3F8rB0/article/details/82922386
For how the input, output, and forget gates are computed and controlled, this post is recommended; the code in this article is based on it:
https://zybuluo.com/hanbingtao/note/581764
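For reference, the forward pass implemented below follows the standard LSTM equations (equations 1-6 in the linked post), restated here in the same notation the code uses, where $\sigma$ is the sigmoid function and $\circ$ denotes element-wise multiplication:

$$
\begin{aligned}
f_t &= \sigma(W_{fh}\,h_{t-1} + W_{fx}\,x_t + b_f) \\
i_t &= \sigma(W_{ih}\,h_{t-1} + W_{ix}\,x_t + b_i) \\
o_t &= \sigma(W_{oh}\,h_{t-1} + W_{ox}\,x_t + b_o) \\
\tilde{c}_t &= \tanh(W_{ch}\,h_{t-1} + W_{cx}\,x_t + b_c) \\
c_t &= f_t \circ c_{t-1} + i_t \circ \tilde{c}_t \\
h_t &= o_t \circ \tanh(c_t)
\end{aligned}
$$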
The complete implementation:

```python
import numpy as np


class ReluActivator(object):
    def forward(self, weighted_input):
        return max(0, weighted_input)

    def backward(self, output):
        return 1 if output > 0 else 0


class IdentityActivator(object):
    def forward(self, weighted_input):
        return weighted_input

    def backward(self, output):
        return 1


class SigmoidActivator(object):
    def forward(self, weighted_input):
        return 1.0 / (1.0 + np.exp(-weighted_input))

    def backward(self, output):
        return output * (1 - output)


class TanhActivator(object):
    def forward(self, weighted_input):
        return 2.0 / (1.0 + np.exp(-2 * weighted_input)) - 1.0

    def backward(self, output):
        return 1 - output * output


def element_wise_op(array, op):
    # Apply op to every element of array, in place
    for i in np.nditer(array, op_flags=['readwrite']):
        i[...] = op(i)


class LstmLayer(object):
    def __init__(self, input_width, state_width, learning_rate):
        self.input_width = input_width
        self.state_width = state_width
        self.learning_rate = learning_rate
        # Activation function of the gates
        self.gate_activator = SigmoidActivator()
        # Activation function of the output
        self.output_activator = TanhActivator()
        # Current time step, initialized to t0
        self.times = 0
        # Cell state vectors c at each time step
        self.c_list = self.init_state_vec()
        # Output vectors h at each time step
        self.h_list = self.init_state_vec()
        # Forget gate f at each time step
        self.f_list = self.init_state_vec()
        # Input gate i at each time step
        self.i_list = self.init_state_vec()
        # Output gate o at each time step
        self.o_list = self.init_state_vec()
        # Candidate state c~ at each time step
        self.ct_list = self.init_state_vec()
        # Forget gate weight matrices Wfh, Wfx and bias bf
        self.Wfh, self.Wfx, self.bf = self.init_weight_mat()
        # Input gate weight matrices Wih, Wix and bias bi
        self.Wih, self.Wix, self.bi = self.init_weight_mat()
        # Output gate weight matrices Woh, Wox and bias bo
        self.Woh, self.Wox, self.bo = self.init_weight_mat()
        # Cell state weight matrices Wch, Wcx and bias bc
        self.Wch, self.Wcx, self.bc = self.init_weight_mat()

    def init_state_vec(self):
        '''Initialize the list that stores a state vector per time step'''
        state_vec_list = []
        state_vec_list.append(np.zeros((self.state_width, 1)))
        return state_vec_list

    def init_weight_mat(self):
        '''Initialize weight matrices and bias'''
        Wh = np.random.uniform(-1e-4, 1e-4,
                               (self.state_width, self.state_width))
        Wx = np.random.uniform(-1e-4, 1e-4,
                               (self.state_width, self.input_width))
        b = np.zeros((self.state_width, 1))
        return Wh, Wx, b

    def forward(self, x):
        '''Forward pass, following equations 1-6'''
        self.times += 1
        # Forget gate
        fg = self.calc_gate(x, self.Wfx, self.Wfh,
                            self.bf, self.gate_activator)
        self.f_list.append(fg)
        # Input gate
        ig = self.calc_gate(x, self.Wix, self.Wih,
                            self.bi, self.gate_activator)
        self.i_list.append(ig)
        # Output gate
        og = self.calc_gate(x, self.Wox, self.Woh,
                            self.bo, self.gate_activator)
        self.o_list.append(og)
        # Candidate state c~
        ct = self.calc_gate(x, self.Wcx, self.Wch,
                            self.bc, self.output_activator)
        self.ct_list.append(ct)
        # Cell state
        c = fg * self.c_list[self.times - 1] + ig * ct
        self.c_list.append(c)
        # Output
        h = og * self.output_activator.forward(c)
        self.h_list.append(h)

    def calc_gate(self, x, Wx, Wh, b, activator):
        '''Compute one gate'''
        h = self.h_list[self.times - 1]  # previous LSTM output
        net = np.dot(Wh, h) + np.dot(Wx, x) + b
        gate = activator.forward(net)
        return gate

    def backward(self, x, delta_h, activator):
        '''LSTM training: backpropagation through time'''
        self.calc_delta(delta_h, activator)
        self.calc_gradient(x)

    def update(self):
        '''Update the weights by gradient descent'''
        self.Wfh -= self.learning_rate * self.Wfh_grad
        self.Wfx -= self.learning_rate * self.Wfx_grad
        self.bf -= self.learning_rate * self.bf_grad
        self.Wih -= self.learning_rate * self.Wih_grad
        self.Wix -= self.learning_rate * self.Wix_grad
        self.bi -= self.learning_rate * self.bi_grad
        self.Woh -= self.learning_rate * self.Woh_grad
        self.Wox -= self.learning_rate * self.Wox_grad
        self.bo -= self.learning_rate * self.bo_grad
        self.Wch -= self.learning_rate * self.Wch_grad
        self.Wcx -= self.learning_rate * self.Wcx_grad
        self.bc -= self.learning_rate * self.bc_grad

    def calc_delta(self, delta_h, activator):
        # Initialize the error terms at each time step
        self.delta_h_list = self.init_delta()   # output error terms
        self.delta_o_list = self.init_delta()   # output gate error terms
        self.delta_i_list = self.init_delta()   # input gate error terms
        self.delta_f_list = self.init_delta()   # forget gate error terms
        self.delta_ct_list = self.init_delta()  # candidate state error terms
        # Store the error term for the current time step, passed down
        # from the layer above
        self.delta_h_list[-1] = delta_h
        # Iterate backwards over time to compute each error term
        for k in range(self.times, 0, -1):
            self.calc_delta_k(k)

    def init_delta(self):
        '''Initialize error terms'''
        delta_list = []
        for i in range(self.times + 1):
            delta_list.append(np.zeros((self.state_width, 1)))
        return delta_list

    def calc_delta_k(self, k):
        '''
        Given delta_h at time k, compute delta_f, delta_i, delta_o and
        delta_ct at time k, as well as delta_h at time k-1
        '''
        # Values computed during the forward pass at time k
        ig = self.i_list[k]
        og = self.o_list[k]
        fg = self.f_list[k]
        ct = self.ct_list[k]
        c = self.c_list[k]
        c_prev = self.c_list[k - 1]
        tanh_c = self.output_activator.forward(c)
        delta_k = self.delta_h_list[k]
        # delta_o, following equation 9
        delta_o = (delta_k * tanh_c *
                   self.gate_activator.backward(og))
        delta_f = (delta_k * og * (1 - tanh_c * tanh_c) * c_prev *
                   self.gate_activator.backward(fg))
        delta_i = (delta_k * og * (1 - tanh_c * tanh_c) * ct *
                   self.gate_activator.backward(ig))
        delta_ct = (delta_k * og * (1 - tanh_c * tanh_c) * ig *
                    self.output_activator.backward(ct))
        delta_h_prev = (
            np.dot(delta_o.transpose(), self.Woh) +
            np.dot(delta_i.transpose(), self.Wih) +
            np.dot(delta_f.transpose(), self.Wfh) +
            np.dot(delta_ct.transpose(), self.Wch)
        ).transpose()
        # Store all delta values
        self.delta_h_list[k - 1] = delta_h_prev
        self.delta_f_list[k] = delta_f
        self.delta_i_list[k] = delta_i
        self.delta_o_list[k] = delta_o
        self.delta_ct_list[k] = delta_ct

    def calc_gradient(self, x):
        # Initialize the forget gate weight gradients and bias gradient
        self.Wfh_grad, self.Wfx_grad, self.bf_grad = (
            self.init_weight_gradient_mat())
        # Initialize the input gate weight gradients and bias gradient
        self.Wih_grad, self.Wix_grad, self.bi_grad = (
            self.init_weight_gradient_mat())
        # Initialize the output gate weight gradients and bias gradient
        self.Woh_grad, self.Wox_grad, self.bo_grad = (
            self.init_weight_gradient_mat())
        # Initialize the cell state weight gradients and bias gradient
        self.Wch_grad, self.Wcx_grad, self.bc_grad = (
            self.init_weight_gradient_mat())
        # Gradients of the weights applied to the previous output h
        for t in range(self.times, 0, -1):
            # Gradients at each time step
            (Wfh_grad, bf_grad,
             Wih_grad, bi_grad,
             Woh_grad, bo_grad,
             Wch_grad, bc_grad) = (
                self.calc_gradient_t(t))
            # The actual gradient is the sum over all time steps
            self.Wfh_grad += Wfh_grad
            self.bf_grad += bf_grad
            self.Wih_grad += Wih_grad
            self.bi_grad += bi_grad
            self.Woh_grad += Woh_grad
            self.bo_grad += bo_grad
            self.Wch_grad += Wch_grad
            self.bc_grad += bc_grad
        # Gradients of the weights applied to the current input x
        xt = x.transpose()
        self.Wfx_grad = np.dot(self.delta_f_list[-1], xt)
        self.Wix_grad = np.dot(self.delta_i_list[-1], xt)
        self.Wox_grad = np.dot(self.delta_o_list[-1], xt)
        self.Wcx_grad = np.dot(self.delta_ct_list[-1], xt)

    def init_weight_gradient_mat(self):
        '''Initialize gradient matrices and bias gradient with zeros'''
        Wh_grad = np.zeros((self.state_width,
                            self.state_width))
        Wx_grad = np.zeros((self.state_width,
                            self.input_width))
        b_grad = np.zeros((self.state_width, 1))
        return Wh_grad, Wx_grad, b_grad

    def calc_gradient_t(self, t):
        '''Compute the weight gradients at time step t'''
        h_prev = self.h_list[t - 1].transpose()
        Wfh_grad = np.dot(self.delta_f_list[t], h_prev)
        bf_grad = self.delta_f_list[t]
        Wih_grad = np.dot(self.delta_i_list[t], h_prev)
        bi_grad = self.delta_i_list[t]
        Woh_grad = np.dot(self.delta_o_list[t], h_prev)
        bo_grad = self.delta_o_list[t]
        Wch_grad = np.dot(self.delta_ct_list[t], h_prev)
        bc_grad = self.delta_ct_list[t]
        return Wfh_grad, bf_grad, Wih_grad, bi_grad, \
            Woh_grad, bo_grad, Wch_grad, bc_grad

    def reset_state(self):
        # Current time step, re-initialized to t0
        self.times = 0
        # Cell state vectors c at each time step
        self.c_list = self.init_state_vec()
        # Output vectors h at each time step
        self.h_list = self.init_state_vec()
        # Forget gate f at each time step
        self.f_list = self.init_state_vec()
        # Input gate i at each time step
        self.i_list = self.init_state_vec()
        # Output gate o at each time step
        self.o_list = self.init_state_vec()
        # Candidate state c~ at each time step
        self.ct_list = self.init_state_vec()


def data_set():
    x = [np.array([[1], [2], [3]]),
         np.array([[2], [3], [4]])]
    d = np.array([[1], [2]])
    return x, d


def test():
    l = LstmLayer(3, 2, 1e-3)
    x, d = data_set()
    l.forward(x[0])
    l.forward(x[1])
    l.backward(x[1], d, IdentityActivator())
    return l
```
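A minimal usage sketch (not part of the original code): it drives the layer the same way `test()` does, but adds an illustrative training loop that uses the difference between the last output and the target `d` as the error signal `delta_h`. The iteration count and this choice of error signal are assumptions made for demonstration only.

```python
# Illustrative training loop; the error signal and iteration count are
# assumptions for this sketch, not prescribed by the referenced tutorial.
l = LstmLayer(3, 2, 1e-3)
x, d = data_set()
for epoch in range(100):
    l.reset_state()                 # clear stored states between passes
    l.forward(x[0])
    l.forward(x[1])
    delta_h = l.h_list[-1] - d      # error of the final output vs. target
    l.backward(x[1], delta_h, IdentityActivator())
    l.update()                      # one gradient-descent step
print(l.h_list[-1])                 # final output after the loop
```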