Download the Graphviz program yourself, then write a make_dot function and call it.
For downloading and installing Graphviz you can refer to: Article 1, Article 2 (the graphviz Python package is installed with pip install graphviz; the Graphviz binaries themselves come from your platform's package manager or the official installer).
A reference demo is shown below:
import torch
from torch.autograd import Variable
import torch.nn as nn
from graphviz import Digraph


class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)  # (batch, 32*7*7)
        out = self.out(x)
        return out


def make_dot(var, params=None):
    """Produce a Graphviz representation of the PyTorch autograd graph.

    Blue nodes are Variables that require grad; orange nodes are Tensors
    saved for the backward pass of a torch.autograd.Function.

    Args:
        var: output Variable
        params: dict of (name, Variable) to add names to nodes that require grad
            (TODO: make optional)
    """
    if params is not None:
        assert all(isinstance(p, Variable) for p in params.values())
        param_map = {id(v): k for k, v in params.items()}

    node_attr = dict(style='filled', shape='box', align='left',
                     fontsize='12', ranksep='0.1', height='0.2')
    dot = Digraph(node_attr=node_attr, graph_attr=dict(size="12,12"))
    seen = set()

    def size_to_str(size):
        return '(' + (', ').join(['%d' % v for v in size]) + ')'

    output_nodes = (var.grad_fn,) if not isinstance(var, tuple) else tuple(v.grad_fn for v in var)

    def add_nodes(var):
        if var not in seen:
            if torch.is_tensor(var):
                # note: this used to show .saved_tensors in pytorch0.2, but stopped
                # working as it was moved to ATen and Variable-Tensor merged
                dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
            elif hasattr(var, 'variable'):
                u = var.variable
                name = param_map[id(u)] if params is not None else ''
                node_name = '%s\n %s' % (name, size_to_str(u.size()))
                dot.node(str(id(var)), node_name, fillcolor='lightblue')
            elif var in output_nodes:
                dot.node(str(id(var)), str(type(var).__name__), fillcolor='darkolivegreen1')
            else:
                dot.node(str(id(var)), str(type(var).__name__))
            seen.add(var)
            if hasattr(var, 'next_functions'):
                for u in var.next_functions:
                    if u[0] is not None:
                        dot.edge(str(id(u[0])), str(id(var)))
                        add_nodes(u[0])
            if hasattr(var, 'saved_tensors'):
                for t in var.saved_tensors:
                    dot.edge(str(id(t)), str(id(var)))
                    add_nodes(t)

    # handle multiple outputs
    if isinstance(var, tuple):
        for v in var:
            add_nodes(v.grad_fn)
    else:
        add_nodes(var.grad_fn)
    return dot


if __name__ == '__main__':
    net = CNN()
    x = torch.randn(1, 1, 28, 28)
    y = net(x)
    g = make_dot(y)
    g.view()

    params = list(net.parameters())
    k = 0
    for i in params:
        l = 1
        print("layer structure: " + str(list(i.size())))
        for j in i.size():
            l *= j
        print("parameters in this layer: " + str(l))
        k = k + l
    print("total number of parameters: " + str(k))
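If you also pass the network's named parameters to make_dot, the blue parameter nodes in the graph are labeled with their names rather than left blank. A minimal sketch reusing the net, y and make_dot defined above (the output filename is just an example):

g = make_dot(y, params=dict(net.named_parameters()))  # blue nodes now show names such as conv1.0.weight
g.render("cnn_autograd_graph", view=True)  # writes the DOT source plus a PDF and opens it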
Use torchviz directly; just install it with pip first (pip install torchviz).
This method is essentially the same as Method 1, except that Method 1 copies torchviz.make_dot out as a standalone function.
A reference demo is shown below:
import torch
import torch.nn as nn
from torchviz import make_dot


class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)  # (batch, 32*7*7)
        out = self.out(x)
        return out


net = CNN()
print(net)
x = torch.zeros(1, 1, 28, 28, dtype=torch.float, requires_grad=True)
net_out = net(x)
net_struct = make_dot(net_out)  # plot graph of variable, not of a nn.Module
net_struct.view()
# net_struct.render("net_struct", view=True)
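If you would rather save the graph to an image than open the interactive viewer, the Digraph returned by make_dot can be rendered straight to a file. A small sketch reusing the net_struct object from the demo above (the filename is an arbitrary example):

net_struct.format = "png"  # default output format is PDF
net_struct.render("cnn_torchviz")  # writes cnn_torchviz (DOT source) and cnn_torchviz.png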
Netron is open source: https://github.com/lutzroeder/Netron
Netron is very simple to use, yet quite powerful. The author provides installers for every major platform; after installing, open it and simply drag the saved model file into the window.
import torch
from torch import nn

model = nn.Sequential()
model.add_module('W0', nn.Linear(8, 16))
model.add_module('tanh', nn.Tanh())
model.add_module('W1', nn.Linear(16, 1))

torch.save(model, 'model.pth')  # save the whole model to a file that Netron can open
Then just open the saved "model.pth" with Netron.
If you would rather not install anything, you can also use the online Netron viewer provided by the author: https://lutzroeder.github.io/netron/
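Netron also opens ONNX files, and an ONNX export of the same model typically shows the operator-level graph in more detail. A hedged sketch using PyTorch's built-in exporter and reusing the model defined above; the dummy input shape matches the first nn.Linear(8, 16) layer and the filename is just an example:

dummy_input = torch.zeros(1, 8)  # example input matching nn.Linear(8, 16)
torch.onnx.export(model, dummy_input, "model.onnx")  # then drag model.onnx into Netron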