网络结构可视化方法
方法一:輸出為PDF文檔(使用graphviz)
# Method 1: render the autograd graph to a PDF via graphviz.
from graphviz import Digraph
import torch
from torch.autograd import Variable


def make_dot(var, params=None):
    """Produce a Graphviz representation of a PyTorch autograd graph.

    Blue nodes are the Variables that require grad; orange nodes are the
    tensors saved for backward in torch.autograd.Function.

    Args:
        var: output Variable whose ``grad_fn`` graph is traversed.
        params: optional dict of (name, Variable) used to label parameter
            nodes with their names.

    Returns:
        graphviz.Digraph covering the traversed graph.
    """
    if params is not None:
        # Bug fix: dict.values() is a non-subscriptable view in Python 3;
        # materialize it before indexing the first element.
        assert isinstance(list(params.values())[0], Variable)
        param_map = {id(v): k for k, v in params.items()}

    node_attr = dict(style='filled',
                     shape='box',
                     align='left',
                     fontsize='12',
                     ranksep='0.1',
                     height='0.2')
    dot = Digraph(node_attr=node_attr, graph_attr=dict(size="12,12"))
    seen = set()

    def size_to_str(size):
        return '(' + (', ').join(['%d' % v for v in size]) + ')'

    def add_nodes(var):
        # Depth-first walk over the autograd graph, deduplicated via `seen`.
        if var not in seen:
            if torch.is_tensor(var):
                # Saved tensor: orange node labeled with its size.
                dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
            elif hasattr(var, 'variable'):
                # Leaf node wrapping a Variable (typically a parameter).
                u = var.variable
                name = param_map[id(u)] if params is not None else ''
                node_name = '%s\n %s' % (name, size_to_str(u.size()))
                dot.node(str(id(var)), node_name, fillcolor='lightblue')
            else:
                # Ordinary autograd Function node, labeled by class name.
                dot.node(str(id(var)), str(type(var).__name__))
            seen.add(var)
            if hasattr(var, 'next_functions'):
                for u in var.next_functions:
                    if u[0] is not None:
                        dot.edge(str(id(u[0])), str(id(var)))
                        add_nodes(u[0])
            if hasattr(var, 'saved_tensors'):
                for t in var.saved_tensors:
                    dot.edge(str(id(t)), str(id(var)))
                    add_nodes(t)

    add_nodes(var.grad_fn)
    return dot


# Demo: build a skip network, run a forward pass, and render its graph.
# NOTE(review): get_image/skip/get_noise and real_face_name, imsize,
# input_depth, pad, INPUT, dtype are project-defined — confirm they are
# in scope before running.
itLEP_pil, itLEP_np = get_image(real_face_name, imsize)
net = skip(input_depth, itLEP_np.shape[0],
           num_channels_down=[128] * 5,
           num_channels_up=[128] * 5,
           num_channels_skip=[128] * 5,
           filter_size_up=3, filter_size_down=3,
           upsample_mode='nearest', filter_skip_size=1,
           need_sigmoid=True, need_bias=True, pad=pad,
           act_fun='LeakyReLU').type(dtype)
dummy_input = get_noise(input_depth, INPUT, itLEP_np.shape[1:]).type(dtype)
# Above: network and input definitions; below: emit the structure graph.
y = net(dummy_input)
g = make_dot(y)
g.view()
?
方法二:使用tensorboardX
# Method 2: visualize the network with tensorboardX.
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter


class LeNet(nn.Module):
    """LeNet-5 style CNN for 1x28x28 inputs (e.g. MNIST), 10 output logits."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Sequential(                  # input_size = (1*28*28)
            nn.Conv2d(1, 6, 5, 1, 2),
            nn.ReLU(),                               # (6*28*28)
            nn.MaxPool2d(kernel_size=2, stride=2),   # output_size = (6*14*14)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(6, 16, 5),
            nn.ReLU(),                               # (16*10*10)
            nn.MaxPool2d(2, 2),                      # output_size = (16*5*5)
        )
        self.fc1 = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU(),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(120, 84),
            nn.ReLU(),
        )
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass: x of shape (N, 1, 28, 28) -> (N, 10) logits."""
        x = self.conv1(x)
        x = self.conv2(x)
        # nn.Linear expects flat feature vectors, so flatten every
        # non-batch dimension before the fully-connected layers.
        x = x.view(x.size()[0], -1)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x


dummy_input = torch.rand(13, 1, 28, 28)  # pretend batch: 13 images of 1x28x28
model = LeNet()
with SummaryWriter(comment='LeNet') as w:
    w.add_graph(model, (dummy_input,))
# Running this creates a `runs` folder; switch to the directory that
# contains `runs` before launching TensorBoard.
使用 tensorboard --logdir runs 命令,會得到一個瀏覽器地址;建議在不同的瀏覽器中嘗試打開(因為有些瀏覽器打開後看不到任何內容)
雙擊圖的結(jié)構(gòu),出現(xiàn)網(wǎng)絡(luò)細(xì)節(jié)圖
?
轉(zhuǎn)載于:https://www.cnblogs.com/hxjbc/p/10972092.html
總結(jié)
- 上一篇: 全局变量/常量
- 下一篇: [RN] React Native 调试