Mathematical Modeling: A BP Neural Network Model in Python
This post shares Python code for a three-layer BP (back-propagation) neural network, a model commonly used in mathematical modeling, for reference.
```python
# -*- coding: utf-8 -*-
"""
Created on Mon Oct  1 22:15:54 2018
@author: Heisenberg
"""
import math
import random

#random.seed(0)
# With the same seed, the same random numbers are generated on every run; without
# a seed, different numbers are generated each time.
# See https://blog.csdn.net/jiangjiang_jian/article/details/79031788


def random_number(a, b):
    """Return a random number in the interval [a, b)."""
    return (b - a) * random.random() + a


def makematrix(m, n, fill=0.0):
    """Create an m*n matrix filled with a default value (zero)."""
    return [[fill] * n for _ in range(m)]


def sigmoid(x):
    """Activation function; tanh is used here instead of the logistic sigmoid."""
    return math.tanh(x)


def derived_sigmoid(x):
    """Derivative of tanh, expressed through the activation value x = tanh(u)."""
    return 1.0 - x**2


class BPNN:
    """Three-layer BP (back-propagation) neural network."""

    def __init__(self, num_in, num_hidden, num_out):
        # Numbers of input, hidden and output nodes
        self.num_in = num_in + 1          # add a bias node
        self.num_hidden = num_hidden + 1  # add a bias node
        self.num_out = num_out

        # Activations of all nodes (vectors)
        self.active_in = [1.0] * self.num_in
        self.active_hidden = [1.0] * self.num_hidden
        self.active_out = [1.0] * self.num_out

        # Weight matrices, initialised with small random values
        self.wight_in = makematrix(self.num_in, self.num_hidden)
        self.wight_out = makematrix(self.num_hidden, self.num_out)
        for i in range(self.num_in):
            for j in range(self.num_hidden):
                self.wight_in[i][j] = random_number(-0.2, 0.2)
        for i in range(self.num_hidden):
            for j in range(self.num_out):
                self.wight_out[i][j] = random_number(-0.2, 0.2)

        # Momentum matrices (store the last weight changes)
        self.ci = makematrix(self.num_in, self.num_hidden)
        self.co = makematrix(self.num_hidden, self.num_out)

    def update(self, inputs):
        """Forward propagation of the signal."""
        if len(inputs) != self.num_in - 1:
            raise ValueError('Input size does not match the number of input nodes')

        # Feed the data into the input layer
        for i in range(self.num_in - 1):
            #self.active_in[i] = sigmoid(inputs[i])  # optionally preprocess the inputs here
            self.active_in[i] = inputs[i]

        # Hidden layer
        for i in range(self.num_hidden - 1):
            total = 0.0
            for j in range(self.num_in):
                total += self.active_in[j] * self.wight_in[j][i]
            self.active_hidden[i] = sigmoid(total)

        # Output layer
        for i in range(self.num_out):
            total = 0.0
            for j in range(self.num_hidden):
                total += self.active_hidden[j] * self.wight_out[j][i]
            self.active_out[i] = sigmoid(total)

        return self.active_out[:]

    def errorbackpropagate(self, targets, lr, m):
        """Back-propagation of the error; lr is the learning rate, m the momentum factor."""
        if len(targets) != self.num_out:
            raise ValueError('Target size does not match the number of output nodes')

        # Output layer deltas
        out_deltas = [0.0] * self.num_out
        for i in range(self.num_out):
            error = targets[i] - self.active_out[i]
            out_deltas[i] = derived_sigmoid(self.active_out[i]) * error

        # Hidden layer deltas
        hidden_deltas = [0.0] * self.num_hidden
        for i in range(self.num_hidden):
            error = 0.0
            for j in range(self.num_out):
                error += out_deltas[j] * self.wight_out[i][j]
            hidden_deltas[i] = derived_sigmoid(self.active_hidden[i]) * error

        # Update hidden-to-output weights
        for i in range(self.num_hidden):
            for j in range(self.num_out):
                change = out_deltas[j] * self.active_hidden[i]
                self.wight_out[i][j] += lr * change + m * self.co[i][j]
                self.co[i][j] = change

        # Update input-to-hidden weights
        for i in range(self.num_in):
            for j in range(self.num_hidden):
                change = hidden_deltas[j] * self.active_in[i]
                self.wight_in[i][j] += lr * change + m * self.ci[i][j]
                self.ci[i][j] = change

        # Total (sum of squared) error
        error = 0.0
        for i in range(len(targets)):
            error += 0.5 * (targets[i] - self.active_out[i]) ** 2
        return error

    def test(self, patterns):
        """Run the network on each pattern and print its prediction."""
        for p in patterns:
            print(p[0], '->', self.update(p[0]))

    def weights(self):
        """Print the current weight matrices."""
        print("Input-to-hidden weights:")
        for i in range(self.num_in):
            print(self.wight_in[i])
        print("Hidden-to-output weights:")
        for i in range(self.num_hidden):
            print(self.wight_out[i])

    def train(self, pattern, itera=100000, lr=0.1, m=0.1):
        for i in range(itera):
            error = 0.0
            for p in pattern:
                inputs = p[0]
                targets = p[1]
                self.update(inputs)
                error += self.errorbackpropagate(targets, lr, m)
            if i % 100 == 0:
                print('error %-.5f' % error)


# Example
def demo():
    # Training patterns: [inputs, target]
    patt = [
        [[1, 2, 5], [0]],
        [[1, 3, 4], [1]],
        [[1, 6, 2], [1]],
        [[1, 5, 1], [0]],
        [[1, 8, 4], [1]],
    ]
    # Create a network with 3 input nodes, 3 hidden nodes and 1 output node
    n = BPNN(3, 3, 1)
    # Train the network
    n.train(patt)
    # Test the network
    n.test(patt)
    # Inspect the weights
    n.weights()


if __name__ == '__main__':
    demo()
```
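For comparison, below is a minimal vectorized sketch of the same idea using NumPy (which the original script imports but never uses). This is not part of the original post: the names `forward` and `backward`, the fixed seed, and the plain batch gradient step without momentum are assumptions made for illustration. It trains on the same toy patterns as `demo()`.

```python
# Minimal vectorized sketch (illustration only, not the original author's code).
import numpy as np

rng = np.random.default_rng(0)

# 3 inputs, 3 hidden units, 1 output; the bias is handled by appending a constant column.
W_in = rng.uniform(-0.2, 0.2, size=(4, 3))   # (inputs + bias) -> hidden
W_out = rng.uniform(-0.2, 0.2, size=(4, 1))  # (hidden + bias) -> output

def forward(x):
    """x has shape (n_samples, 3); returns the bias-augmented layers and the output."""
    x_b = np.hstack([x, np.ones((x.shape[0], 1))])   # append bias column
    h = np.tanh(x_b @ W_in)                          # hidden activations
    h_b = np.hstack([h, np.ones((h.shape[0], 1))])   # append bias column
    y = np.tanh(h_b @ W_out)                         # output activations
    return x_b, h_b, y

def backward(x_b, h_b, y, t, lr=0.1):
    """One plain gradient step (no momentum) on the squared error 0.5*(t - y)^2."""
    global W_in, W_out
    delta_out = (t - y) * (1.0 - y**2)               # d tanh(u)/du = 1 - tanh(u)^2
    delta_hidden = (delta_out @ W_out.T)[:, :-1] * (1.0 - h_b[:, :-1]**2)
    W_out += lr * h_b.T @ delta_out
    W_in += lr * x_b.T @ delta_hidden
    return 0.5 * np.sum((t - y) ** 2)

X = np.array([[1, 2, 5], [1, 3, 4], [1, 6, 2], [1, 5, 1], [1, 8, 4]], dtype=float)
T = np.array([[0], [1], [1], [0], [1]], dtype=float)
for epoch in range(1000):
    x_b, h_b, y = forward(X)
    err = backward(x_b, h_b, y, T)
    if epoch % 100 == 0:
        print(f"error {err:.5f}")
```

The `1.0 - y**2` factor is the tanh derivative written in terms of the activation itself, which is exactly the identity that `derived_sigmoid` in the class above relies on when it is passed an already-activated value.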