Getting started with torch
Creating tensors
import torch
import numpy as np

# Create a tensor
x = torch.randn((5, 3), dtype=torch.float16)
# Shape of the tensor
x.shape

# Create an uninitialized tensor
x = torch.empty((2, 3), dtype=torch.float32)
# All zeros
x = torch.zeros((2, 3), dtype=torch.long)
# All ones
x = torch.ones(2, 3)
# Ones on the diagonal
x = torch.eye(3, 4)

# Create from a list, and convert back to a list
x = torch.tensor([[2, 3, 4], [2, 3, 6]], dtype=torch.float16)
x.tolist()

# Create from a numpy array, and convert back to an array
a = np.random.random((2, 2))
x = torch.from_numpy(a)
x.numpy()
'''
Difference between from_numpy and torch.tensor:
from_numpy: the tensor shares memory with the array, so if the array changes, the tensor created from it changes as well
torch.tensor: the data is copied, so if the array changes, the tensor created from it does not change
'''

# Changing the shape; reshape is more flexible than view
x.reshape(1,-1)
x.view(1,-1)
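A quick check of the difference described above, and of why reshape is the more flexible of the two (the variable names below are just for illustration):

arr = np.zeros(3)
t_shared = torch.from_numpy(arr)   # shares memory with arr
t_copied = torch.tensor(arr)       # copies the data
arr[0] = 5.0
t_shared[0]   # tensor(5., dtype=torch.float64) -- follows the array
t_copied[0]   # tensor(0., dtype=torch.float64) -- unchanged

# reshape also handles non-contiguous tensors where view raises an error
m = torch.arange(6).reshape(2, 3).t()   # the transpose is non-contiguous
m.reshape(-1)                           # works (copies if needed)
# m.view(-1)                            # would raise a RuntimeError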
Common operations
x = torch.tensor([[2, 3, 4], [2, 3, 6]])
y = torch.tensor([[1, 2, 1], [2, 6, 0]])
x + y
x - y
x / y
x * y

# Element-wise maximum of two tensors (here broadcasting a scalar)
torch.maximum(torch.tensor(3), x)
# Square
torch.pow(x, 2)
# Maximum along an axis
torch.max(x, 1)
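torch.max along a dimension returns both the maximum values and their indices (a small illustration):

values, indices = torch.max(x, 1)
values    # tensor([4, 6]) -- row-wise maxima
indices   # tensor([2, 2]) -- positions of the maxima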
Gradient computation and gradient descent
x = np.linspace(0, 100, 10000)
noise = np.random.uniform(size=(10000,))
# Ground truth: w = 10, b = 10
y = 10 * x + 10 + noise
x = torch.from_numpy(x)
y = torch.from_numpy(y)
w = torch.randn(1, requires_grad=True)
b = torch.randn(1, requires_grad=True)
# Fit the regression
for epoch in range(500000000):
    # Compute the prediction
    y_ = x * w + b
    # Compute the loss
    loss = torch.mean((y_ - y) ** 2)
    if epoch == 0:
        # Backward pass (on the first step the gradients do not exist yet)
        loss.backward()
    else:
        # Zero the gradients
        w.grad.zero_()
        b.grad.zero_()
        # Backward pass
        loss.backward()
    # Gradient update; picking the step size takes care, otherwise training
    # diverges or becomes far too slow
    w.data = w.data - 2e-4 * w.grad.data
    b.data = b.data - 2e-4 * b.grad.data
    if loss < 0.1:
        break
# print(w, b)    # w: 10.0038, b: 10.2498
# print('epoch: {}, loss: {}'.format(epoch, loss.data))

Implementing a fully connected layer with matrix multiplication
x = torch.randn((4, 5))
w_true = torch.randint(1, 10, size=(5, 1), dtype=torch.float32)
b_true = torch.tensor(20.0)
noise = torch.randn(size=(4, 1))
# Matrix multiplication
y = x @ w_true + b_true + noise

w = torch.zeros(size=(5, 1), requires_grad=True, dtype=torch.float32)
b = torch.zeros(1, requires_grad=True)
# Training
for epoch in range(10000000):
    y_ = x @ w + b
    loss = torch.mean((y - y_) ** 2)
    if epoch == 0:
        loss.backward()
    else:
        w.grad.zero_()
        b.grad.zero_()
        loss.backward()
    w.data = w.data - 2e-4 * w.grad.data
    b.data = b.data - 2e-4 * b.grad.data
    if loss < 0.1:
        break
'''
# Weights
w: [[ 0.5081], [ 5.0037], [ 0.8767], [ 4.9839], [13.5279]]
# Bias
b: [14.1485]
# Loss
loss: 0.1000
'''

Using the nn.Linear layer
from torch import nn
from torch import optim

# Build the network
net = nn.Linear(5, 1, bias=True)
# Build the optimizer
optimizer = optim.Adam(net.parameters(), lr=2e-4)
for epoch in range(10000000):
    y_ = net(x)
    loss = torch.mean((y - y_) ** 2)
    # Zero the gradients
    optimizer.zero_grad()
    # Compute the gradients
    loss.backward()
    # Update the parameters
    optimizer.step()
    if loss < 0.1:
        break

# Weights
net.weight.data    # [ 0.6655,  4.8166, -3.5347,  7.4862, 13.4877]
# Bias
net.bias.data      # [13.6001]
# Loss: 0.0999

Activation functions
# ELU
def ELU_self(x, a=1.0):
    x = torch.tensor(x)
    x_0 = torch.tensor(0)
    return torch.maximum(x_0, x) + torch.minimum(x_0, a * (torch.exp(x) - 1))

# LeakyReLU
def LeakyReLU_self(x, a=1e-2):
    x = torch.tensor(x)
    x_0 = torch.tensor(0)
    return torch.maximum(x_0, x) + a * torch.minimum(x_0, x)

# ReLU
def ReLU_self(x):
    x = torch.tensor(x)
    x_0 = torch.tensor(0)
    return torch.maximum(x_0, x)

# ReLU6
def ReLU6_self(x):
    x = torch.tensor(x)
    x_0 = torch.tensor(0)
    x_6 = torch.tensor(6)
    return torch.minimum(torch.maximum(x_0, x), x_6)

# SELU
def SELU_self(x,
              scale=1.0507009873554804934193349852946,
              a=1.6732632423543772848170429916717):
    x = torch.tensor(x)
    x_0 = torch.tensor(0)
    return scale * (torch.maximum(x_0, x) +
                    torch.minimum(x_0, a * (torch.exp(x) - 1)))

# CELU
def CELU_self(x, a=1.0):
    x = torch.tensor(x)
    x_0 = torch.tensor(0)
    return torch.maximum(x_0, x) + torch.minimum(x_0, a * (torch.exp(x / a) - 1.0))

# Sigmoid
def Sigmoid_self(x):
    x = torch.tensor(x)
    return 1.0 / (1 + torch.exp(-x))

# LogSigmoid
def LogSigmoid_self(x):
    x = torch.tensor(x)
    return torch.log(1.0 / (1 + torch.exp(-x)))

# Tanh
def Tanh_self(x):
    x = torch.tensor(x)
    return 1 - 2.0 / (torch.exp(2 * x) + 1)

# Tanhshrink
def Tanhshrink_self(x):
    x = torch.tensor(x)
    return x + 2.0 / (torch.exp(2 * x) + 1) - 1

# Softplus
def Softplus_self(x, b=1.0):
    x = torch.tensor(x)
    return 1 / b * torch.log(1 + torch.exp(x * b))
# Softshrink: soft thresholding -- values within [-lambd, lambd] become 0,
# everything else is shrunk toward zero by lambd
def Softshrink_self(x, lambd=0.5):
    x_ = torch.tensor(x)
    x_ = torch.where(x_ > lambd, x_ - lambd, x_)
    x_ = torch.where(x_ < -lambd, x_ + lambd, x_)
    x_[x == x_] = 0
    return x_
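As an optional sanity check (not part of the original), a few of the hand-written versions can be compared against PyTorch's built-in activations:

vals = [-2.0, -0.5, 0.0, 0.5, 2.0]
t = torch.tensor(vals)
torch.allclose(ReLU_self(vals), torch.relu(t))        # True
torch.allclose(Sigmoid_self(vals), torch.sigmoid(t))  # True
torch.allclose(Tanh_self(vals), torch.tanh(t))        # True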
Convolution layers: principle and usage
import matplotlib.pyplot as plt
# Used to read the image
from PIL import Image
import torch.nn as nn
from torchvision import transforms
from torchkeras import summary

image = Image.open('tu.jpg')
# Convert the image data into a tensor
img_transform = transforms.Compose([transforms.ToTensor()])
img_tensor = img_transform(image)
# The input of a convolution is a 4-D tensor;
# methods ending in '_' modify the tensor in place
img_tensor.unsqueeze_(dim=0)

# flag switches between an ordinary and a transposed convolution;
# run both branches to get img_conv and img_conv_ts used below
flag = 0
if flag:
    # input channels, number of filters, kernel size, stride, padding
    conv_layer = nn.Conv2d(in_channels=3, out_channels=1, kernel_size=5, stride=1, padding=2)
    # Initialize the convolution weights
    nn.init.xavier_normal_(conv_layer.weight.data)
    # nn.init.xavier_uniform_(conv_layer.weight.data)
    # Calculation
    img_conv = conv_layer(img_tensor)
else:
    # Transposed convolution
    conv_layer_ts = nn.ConvTranspose2d(in_channels=3, out_channels=1, kernel_size=5, stride=1, padding=2)
    nn.init.xavier_normal_(conv_layer_ts.weight.data)
    img_conv_ts = conv_layer_ts(img_tensor)

Computing the number of parameters
parameters = number of filters * kernel size * number of input channels + number of filters (one bias per filter)
# For the conv layer above (1 filter, 5*5 kernel, 3 input channels):
76 = 1*5*5*3 + 1
# Number of parameters for 32 filters with a 5*5 kernel on 1 input channel:
32*5*5*1 + 32 = 832
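The formula can be checked directly by counting a layer's parameters (same configuration as the conv layer above):

conv = nn.Conv2d(in_channels=3, out_channels=1, kernel_size=5)
sum(p.numel() for p in conv.parameters())   # 76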
Convolution output size
# (input size - kernel size + 2 * padding) / stride + 1
# 500 = (500 - 5 + 2*2) / 1 + 1
img_conv.shape    # torch.Size([1, 1, 500, 500])
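A small helper implementing the same formula (illustrative only; assumes a square input and kernel with no dilation; Conv2d floors the result):

def conv_out_size(n, k, padding=0, stride=1):
    return (n - k + 2 * padding) // stride + 1

conv_out_size(500, 5, padding=2, stride=1)   # 500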
Plotting the results
img_tensor.squeeze_(dim=0)
img_conv.squeeze_(dim=0)
img_conv_ts.squeeze_(dim=0)

plt.subplot(131).imshow(np.transpose(img_tensor.data.numpy(), [1, 2, 0]))
plt.axis('off')
# The convolution outputs have a single channel, so drop the channel axis for imshow
plt.subplot(132).imshow(img_conv.data.numpy().squeeze())
plt.axis('off')
plt.subplot(133).imshow(img_conv_ts.data.numpy().squeeze())
plt.tight_layout()
plt.axis('off')
plt.show()

Loss functions
# The standard usage pattern
criterion = LossCriterion()    # placeholder for any loss class, e.g. nn.MSELoss
loss = criterion(y_, y)

Using common losses
# BCELoss: binary classification loss
# The prediction comes first, the target second
criterion = nn.BCELoss()
m = nn.Sigmoid()
x = torch.randn(3, requires_grad=True)
y_ = m(x)
y = torch.randint(0, 2, size=(3,), dtype=torch.float)
loss = criterion(y_, y)
loss.backward()
loss

# NLLLoss: multi-class classification loss
criterion = nn.NLLLoss()
# NLLLoss expects log-probabilities, so use LogSoftmax rather than Softmax
m = nn.LogSoftmax(dim=1)
x = torch.randn((3, 4), requires_grad=True)
y_ = m(x)
y = torch.randint(0, 4, size=(3,))
loss = criterion(y_, y)
loss.backward()
loss

# L1Loss: mean absolute error (MAE)
criterion = nn.L1Loss()
y_ = torch.randn((1, 5), requires_grad=True)
y = torch.randn((1, 5))
loss = criterion(y_, y)
loss.backward()
loss

# MSELoss: mean squared error
criterion = nn.MSELoss()
y_ = torch.randn((1, 5), requires_grad=True)
y = torch.randn((1, 5))
loss = criterion(y_, y)
loss.backward()
loss
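As a quick cross-check (not in the original), nn.MSELoss gives the same value as the hand-written mean squared error used in the regression examples above:

torch.allclose(nn.MSELoss()(y_, y), torch.mean((y_ - y) ** 2))   # True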
Using optimizers
from torch import optim

# The general pattern:
# define the optimizer
optimizer = Optim()    # placeholder for any optimizer, e.g. optim.SGD
# zero the gradients
optimizer.zero_grad()
# update the parameters
optimizer.step()

x = torch.randn((4, 5), requires_grad=False)
w_true = torch.randint(1, 10, size=(5, 1), dtype=torch.float)
b_true = torch.tensor(20.0)
noise = torch.randn(size=(4, 1))
y = x @ w_true + b_true + noise

result = {}
for lr in [0.01, 0.1, 0.5]:
    # Rebuild the network for every learning rate
    net = nn.Linear(5, 1, bias=True)
    # Define the optimizer
    optimizer = optim.SGD(net.parameters(), lr=lr)
    # Define the loss
    mseloss = nn.MSELoss()
    for epoch in range(10000000):
        # Zero the gradients
        optimizer.zero_grad()
        # Compute the loss
        loss = mseloss(net(x), y)
        # Backward pass
        loss.backward()
        # Update
        optimizer.step()
        if loss.item() < 0.1 or epoch >= 10000:
            result[lr] = {'loss': loss.item(), 'epoch': epoch}
            break
# Results
# When the learning rate is too large, the loss diverges and never converges
result = {0.01: {'loss': 0.09930270910263062, 'epoch': 766},
          0.1: {'loss': 0.0925668329000473, 'epoch': 76},
          0.5: {'loss': nan, 'epoch': 10000}}
Pooling layers
x = torch.randn(10, 3, 128, 128)

# MaxPool2d
maxp = nn.MaxPool2d(5, 3)
# 42 = (128 - 5 + 0*2) / 3 + 1, rounded down
maxp(x).shape          # torch.Size([10, 3, 42, 42])
maxp(x)[0, 0, 0, 4]    # tensor(1.9936)

# AvgPool2d: takes the mean of each window
avgp = nn.AvgPool2d(5, 3)
# 42 = (128 - 5 + 0*2) / 3 + 1, rounded down
avgp(x).shape          # torch.Size([10, 3, 42, 42])
avgp(x)[0, 0, 0, 4]    # tensor(-0.1445)
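Both pooling layers round the output size down by default; passing ceil_mode=True rounds up instead. A small illustration with a hypothetical 10x10 input chosen so the two modes differ:

t = torch.randn(1, 1, 10, 10)
nn.MaxPool2d(3, 2)(t).shape                  # torch.Size([1, 1, 4, 4])
nn.MaxPool2d(3, 2, ceil_mode=True)(t).shape  # torch.Size([1, 1, 5, 5])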
Normalization layers
- A BN layer speeds up training and helps the model converge better
- BN only has a noticeable effect when the batch size is large enough
x = torch.randint(0, 256, size=(10, 3, 128, 128)).float()

# BN
bn = nn.BatchNorm2d(3)
bn(x)[0, 0, 0, 2]    # tensor(1.1019, grad_fn=<SelectBackward>)
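A rough check of what BatchNorm2d does (assuming the default affine parameters, weight = 1 and bias = 0): each channel is normalized over the (N, H, W) dimensions to mean ≈ 0 and std ≈ 1:

out = bn(x)
out.mean(dim=(0, 2, 3))   # ≈ 0 for each of the 3 channels
out.std(dim=(0, 2, 3))    # ≈ 1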
# GN
# num_channels must be divisible by num_groups
gn = nn.GroupNorm(num_groups=3, num_channels=3)
gn(x)[0, 0, 0, 2]    # tensor(1.0831, grad_fn=<SelectBackward>)

Summary
 
                            
