Steepest Descent, Newton's Method, and Quasi-Newton Methods: Optimizing a High-Dimensional Quadratic Objective in Python
I won't go into the theory here; it's just the code.
We minimize a quadratic objective function in ten variables.
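Concretely, the objective used throughout (written out in code below) is

    f(x) = \sum_{i=0}^{9} (x_i - c_i)^2 + \sum_{j=0}^{4} x_{2j}\, x_{2j+1},
    \qquad c = (1, 1, 2, 3, 4, 5, 6, 7, 8, 9).

Its gradient is linear, \nabla f(x) = A x - b, where A is block-diagonal with 2x2 blocks [[2, 1], [1, 2]]; each block is positive definite, so f is strictly convex and has a unique minimizer.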
1. Steepest descent
import random
import numpy as np

def goldsteinsearch(f, df, d, x, alpham, rho, t):
    # Goldstein inexact line search: pick a step size alpha along direction d.
    a = 0
    b = alpham
    phi0 = f(x)                 # phi(0) = f(x)
    dphi0 = np.dot(df(x), d)    # phi'(0) = grad(x) . d
    alpha = b * random.uniform(0, 1)
    while True:
        phi = f(x + alpha * d)
        # Goldstein condition (i): sufficient decrease.
        if phi - phi0 <= rho * alpha * dphi0:
            # Goldstein condition (ii): the step is not too short.
            if phi - phi0 >= (1 - rho) * alpha * dphi0:
                return alpha
            # Step too short: raise the lower bracket and enlarge alpha.
            a = alpha
            if b < alpham:
                alpha = (a + b) / 2
            else:
                alpha = t * alpha
        else:
            # Not enough decrease: shrink the upper bracket.
            b = alpha
            alpha = (a + b) / 2
def steepest(fun, gfun, x0):
    c = 1e-5        # tolerance on the squared gradient norm
    imax = 100      # iteration cap
    i = 1
    x = x0
    grad = gfun(x)
    delta = sum(grad ** 2)
    while i < imax and delta > c:
        p = -gfun(x)    # steepest-descent direction
        alpha = goldsteinsearch(fun, gfun, p, x, 1, 0.1, 2)
        x = x + alpha * p
        print(i, delta)
        grad = gfun(x)
        delta = sum(grad ** 2)
        i = i + 1
    return x, fun(x), i, delta
# Objective function
fun = lambda x: (x[0]-1)**2+(x[1]-1)**2+(x[2]-2)**2+(x[3]-3)**2+(x[4]-4)**2+(x[5]-5)**2+(x[6]-6)**2+(x[7]-7)**2+(x[8]-8)**2+(x[9]-9)**2+x[0]*x[1]+x[2]*x[3]+x[4]*x[5]+x[6]*x[7]+x[8]*x[9]
# Gradient (first derivatives)
gfun = lambda x: np.array([2*x[0] + x[1] - 2, x[0] + 2*x[1] - 2, 2*x[2] + x[3] - 4, x[2] + 2*x[3] - 6, 2*x[4] + x[5] - 8, x[4] + 2*x[5] - 10, 2*x[6] + x[7] - 12, x[6] + 2*x[7] - 14, 2*x[8] + x[9] - 16, x[8] + 2*x[9] - 18])
if __name__ == "__main__":
    x0 = np.array([1.0, 1.0, 1.0, 3.0, 1.0, 3.0, 1.0, 1.0, 2.0, 3.0])
    output = steepest(fun, gfun, x0)
    print("Iterations:", output[2])
    print("Approximate minimizer:", output[0])
    print("Minimum value:", output[1])
    print("Final error:", output[3])
2. Newton's method
import numpy as np

def dampnm(fun, gfun, hess, x0):
    # Newton's method: solve hess(x) * dk = -grad(x) and take the full step.
    maxk = 500      # iteration cap
    delta = 1e-5    # tolerance on the Newton step length
    k = 0
    _delta = delta
    while k < maxk:
        gk = gfun(x0)
        Gk = hess(x0)
        dk = -1.0 * np.linalg.solve(Gk, gk)   # Newton direction
        print(k, np.linalg.norm(dk))
        if np.linalg.norm(dk) < delta:
            _delta = np.linalg.norm(dk)
            break
        x0 += dk
        k += 1
    return x0, fun(x0), k, _delta
# Objective function
fun = lambda x: (x[0]-1)**2+(x[1]-1)**2+(x[2]-2)**2+(x[3]-3)**2+(x[4]-4)**2+(x[5]-5)**2+(x[6]-6)**2+(x[7]-7)**2+(x[8]-8)**2+(x[9]-9)**2+x[0]*x[1]+x[2]*x[3]+x[4]*x[5]+x[6]*x[7]+x[8]*x[9]
# Gradient (first derivatives)
gfun = lambda x: np.array([2*x[0] + x[1] - 2, x[0] + 2*x[1] - 2, 2*x[2] + x[3] - 4, x[2] + 2*x[3] - 6, 2*x[4] + x[5] - 8, x[4] + 2*x[5] - 10, 2*x[6] + x[7] - 12, x[6] + 2*x[7] - 14, 2*x[8] + x[9] - 16, x[8] + 2*x[9] - 18])
# Second derivatives, i.e. the Hessian; the objective is quadratic, so the Hessian is constant
hess = lambda x: np.array([[2, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 2, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 2, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 2, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 2, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 2, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 2, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 2]])
if __name__ == "__main__":
    x0 = np.array([1.0, 1.0, 1.0, 3.0, 1.0, 3.0, 1.0, 1.0, 2.0, 3.0])
    output = dampnm(fun, gfun, hess, x0)
    print("Iterations:", output[2])
    print("Approximate minimizer:", output[0])
    print("Minimum value:", output[1])
    print("Final error:", output[3])
3. Quasi-Newton method (BFGS)
import numpy as np

def bfgs(fun, gfun, x0):
    # Quasi-Newton method; only the BFGS update is implemented here.
    maxk = 1e5
    rho = 0.55        # backtracking factor for the Armijo line search
    sigma = 0.4       # sufficient-decrease parameter
    epsilon = 1e-5    # tolerance on the gradient norm
    k = 0
    n = np.shape(x0)[0]
    Bk = np.eye(n)    # initial Hessian approximation (could also use np.linalg.inv(hess(x0)))
    _epsilon = epsilon
    while k < maxk:
        gk = gfun(x0)
        print(k, np.linalg.norm(gk))
        if np.linalg.norm(gk) < epsilon:
            _epsilon = np.linalg.norm(gk)
            break
        dk = -1.0 * np.linalg.solve(Bk, gk)   # quasi-Newton direction
        # Armijo backtracking: find the smallest m such that rho**m gives sufficient decrease
        m = 0
        mk = 0
        while m < 20:
            if fun(x0 + rho**m * dk) < fun(x0) + sigma * rho**m * np.dot(gk, dk):
                mk = m
                break
            m += 1
        # BFGS update of the Hessian approximation
        x = x0 + rho**mk * dk
        sk = x - x0              # step
        yk = gfun(x) - gk        # change in gradient
        if np.dot(sk, yk) > 0:   # curvature condition keeps Bk positive definite
            Bs = np.dot(Bk, sk)
            ys = np.dot(yk, sk)
            sBs = np.dot(np.dot(sk, Bk), sk)
            Bk = Bk - Bs.reshape((n, 1)) * Bs / sBs + yk.reshape((n, 1)) * yk / ys
        k += 1
        x0 = x
    return x0, fun(x0), k, _epsilon
# Objective function
fun = lambda x: (x[0]-1)**2+(x[1]-1)**2+(x[2]-2)**2+(x[3]-3)**2+(x[4]-4)**2+(x[5]-5)**2+(x[6]-6)**2+(x[7]-7)**2+(x[8]-8)**2+(x[9]-9)**2+x[0]*x[1]+x[2]*x[3]+x[4]*x[5]+x[6]*x[7]+x[8]*x[9]
# Gradient (first derivatives)
gfun = lambda x: np.array([2*x[0] + x[1] - 2, x[0] + 2*x[1] - 2, 2*x[2] + x[3] - 4, x[2] + 2*x[3] - 6, 2*x[4] + x[5] - 8, x[4] + 2*x[5] - 10, 2*x[6] + x[7] - 12, x[6] + 2*x[7] - 14, 2*x[8] + x[9] - 16, x[8] + 2*x[9] - 18])
if __name__ == "__main__":
    x0 = np.array([1.0, 1.0, 1.0, 3.0, 1.0, 3.0, 1.0, 1.0, 2.0, 3.0])
    output = bfgs(fun, gfun, x0)
    print("Iterations:", output[2])
    print("Approximate minimizer:", output[0])
    print("Minimum value:", output[1])
    print("Final error:", output[3])
Results for the same objective under the three methods (approximate minimizer, minimum value, iterations, final error):

Steepest descent: minimizer ≈ [0.66665596, 0.66665596, 0.66665596, 2.66665596, 2.00003211, 4.00003211, 3.33440243, 5.33247832, 4.66724938, 6.66628733], minimum 92.66666789277412, final error 7.073341052404242e-06
Newton's method: minimizer = [0.66666667, 0.66666667, 0.66666667, 2.66666667, 2.0, 4.0, 3.33333333, 5.33333333, 4.66666667, 6.66666667], minimum 92.66666666666667, 1 iteration, final error 0
Quasi-Newton (BFGS): minimizer ≈ [0.66666652, 0.66666652, 0.66666652, 2.66666652, 2.00000043, 4.00000043, 3.33333781, 5.33333172, 4.66666955, 6.6666665], minimum 92.66666666669057, 3 iterations, final error 9.862537762278836e-06
If you don't count the cost of computing the second derivatives, Newton's method is pretty fierce: on a quadratic objective the Hessian is exact, so a single full Newton step reaches the minimizer.
That's it.
If you're interested, see this CSDN blog post: 最速下降法(梯度下降法)python实现 by Big_Pai (blog.csdn.net).
總結(jié)
以上是生活随笔為你收集整理的python拟牛顿法迭代点绘制_最速下降法、牛顿法、拟牛顿法,Python实现高维二次目标函数优化...的全部?jī)?nèi)容,希望文章能夠幫你解決所遇到的問(wèn)題。
- 上一篇: erp系统方案书_一次ERP选型实施失败
- 下一篇: unique函数_Office 365函