PyTorch itself currently cannot report layer shapes directly the way TensorFlow or Caffe can; see
https://github.com/pytorch/pytorch/pull/3043
The code below is one workaround. Because modules such as CNNs and RNNs are implemented differently, supporting other module types may require changing the code.
For example, in an RNN the bias attribute is a bool, and its weights are not stored in a weight attribute either; since we only care about shapes, this is good enough.
Note that this method has to construct an input and run a forward pass (a model(x) call) before the shapes can be obtained.
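The mechanism behind the workaround is register_forward_hook: the hook is called with the module's input and output tensors during the forward pass, so their .size() can be recorded at exactly that moment. Below is a minimal sketch of just this idea; the Conv2d layer and the record_shape/shapes names are placeholders for illustration, not part of the original helper.

import torch
import torch.nn as nn

shapes = {}

def record_shape(module, inputs, output):
    # `inputs` is a tuple of the positional inputs; `output` is the module's return value
    shapes[module.__class__.__name__] = {
        'input_shape': list(inputs[0].size()),
        'output_shape': list(output.size()),
    }

conv = nn.Conv2d(1, 64, 3, padding=1)            # stand-in for a real model
handle = conv.register_forward_hook(record_shape)
conv(torch.rand(1, 1, 32, 128))                  # shapes only exist after a forward pass
handle.remove()
print(shapes)  # {'Conv2d': {'input_shape': [1, 1, 32, 128], 'output_shape': [1, 64, 32, 128]}}

The full helper used in this article applies the same kind of hook to every leaf module of the model: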
#coding:utf-8
from collections import OrderedDict
import json

import torch
import torch.nn as nn
from torch.autograd import Variable  # in newer PyTorch a plain tensor works just as well

import models.crnn as crnn


def get_output_size(summary_dict, output):
    # Modules such as LSTM return tuples, so recurse until we hit a tensor.
    if isinstance(output, tuple):
        for i in range(len(output)):
            summary_dict[i] = OrderedDict()
            summary_dict[i] = get_output_size(summary_dict[i], output[i])
    else:
        summary_dict['output_shape'] = list(output.size())
    return summary_dict


def summary(input_size, model):
    def register_hook(module):
        def hook(module, input, output):
            # e.g. "<class 'torch.nn.modules.conv.Conv2d'>" -> "Conv2d"
            class_name = str(module.__class__).split('.')[-1].split("'")[0]
            module_idx = len(summary)

            m_key = '%s-%i' % (class_name, module_idx + 1)
            summary[m_key] = OrderedDict()
            summary[m_key]['input_shape'] = list(input[0].size())
            summary[m_key] = get_output_size(summary[m_key], output)

            params = 0
            if hasattr(module, 'weight'):
                params += torch.prod(torch.LongTensor(list(module.weight.size())))
                summary[m_key]['trainable'] = module.weight.requires_grad
            # if hasattr(module, 'bias'):
            #     params += torch.prod(torch.LongTensor(list(module.bias.size())))
            summary[m_key]['nb_params'] = int(params)  # int() keeps the value JSON-serializable

        # only hook leaf modules, not containers or the model itself
        if (not isinstance(module, nn.Sequential) and
                not isinstance(module, nn.ModuleList) and
                not (module == model)):
            hooks.append(module.register_forward_hook(hook))

    # check if there are multiple inputs to the network
    if isinstance(input_size[0], (list, tuple)):
        x = [Variable(torch.rand(1, *in_size)) for in_size in input_size]
    else:
        x = Variable(torch.rand(1, *input_size))

    summary = OrderedDict()
    hooks = []
    # register the hooks
    model.apply(register_hook)
    # make a forward pass
    model(x)
    # remove the hooks again
    for h in hooks:
        h.remove()

    return summary


crnn = crnn.CRNN(32, 1, 3755, 256, 1)
x = summary([1, 32, 128], crnn)
print(json.dumps(x))
Taking the PyTorch version of CRNN as an example, the recorded shapes look like this:
{ "Conv2d-1": { "input_shape": [1, 1, 32, 128], "output_shape": [1, 64, 32, 128], "trainable": true, "nb_params": 576 }, "ReLU-2": { "input_shape": [1, 64, 32, 128], "output_shape": [1, 64, 32, 128], "nb_params": 0 }, "MaxPool2d-3": { "input_shape": [1, 64, 32, 128], "output_shape": [1, 64, 16, 64], "nb_params": 0 }, "Conv2d-4": { "input_shape": [1, 64, 16, 64], "output_shape": [1, 128, 16, 64], "trainable": true, "nb_params": 73728 }, "ReLU-5": { "input_shape": [1, 128, 16, 64], "output_shape": [1, 128, 16, 64], "nb_params": 0 }, "MaxPool2d-6": { "input_shape": [1, 128, 16, 64], "output_shape": [1, 128, 8, 32], "nb_params": 0 }, "Conv2d-7": { "input_shape": [1, 128, 8, 32], "output_shape": [1, 256, 8, 32], "trainable": true, "nb_params": 294912 }, "BatchNorm2d-8": { "input_shape": [1, 256, 8, 32], "output_shape": [1, 256, 8, 32], "trainable": true, "nb_params": 256 }, "ReLU-9": { "input_shape": [1, 256, 8, 32], "output_shape": [1, 256, 8, 32], "nb_params": 0 }, "Conv2d-10": { "input_shape": [1, 256, 8, 32], "output_shape": [1, 256, 8, 32], "trainable": true, "nb_params": 589824 }, "ReLU-11": { "input_shape": [1, 256, 8, 32], "output_shape": [1, 256, 8, 32], "nb_params": 0 }, "MaxPool2d-12": { "input_shape": [1, 256, 8, 32], "output_shape": [1, 256, 4, 33], "nb_params": 0 }, "Conv2d-13": { "input_shape": [1, 256, 4, 33], "output_shape": [1, 512, 4, 33], "trainable": true, "nb_params": 1179648 }, "BatchNorm2d-14": { "input_shape": [1, 512, 4, 33], "output_shape": [1, 512, 4, 33], "trainable": true, "nb_params": 512 }, "ReLU-15": { "input_shape": [1, 512, 4, 33], "output_shape": [1, 512, 4, 33], "nb_params": 0 }, "Conv2d-16": { "input_shape": [1, 512, 4, 33], "output_shape": [1, 512, 4, 33], "trainable": true, "nb_params": 2359296 }, "ReLU-17": { "input_shape": [1, 512, 4, 33], "output_shape": [1, 512, 4, 33], "nb_params": 0 }, "MaxPool2d-18": { "input_shape": [1, 512, 4, 33], "output_shape": [1, 512, 2, 34], "nb_params": 0 }, "Conv2d-19": { "input_shape": [1, 512, 2, 34], "output_shape": [1, 512, 1, 33], "trainable": true, "nb_params": 1048576 }, "BatchNorm2d-20": { "input_shape": [1, 512, 1, 33], "output_shape": [1, 512, 1, 33], "trainable": true, "nb_params": 512 }, "ReLU-21": { "input_shape": [1, 512, 1, 33], "output_shape": [1, 512, 1, 33], "nb_params": 0 }, "LSTM-22": { "input_shape": [33, 1, 512], "0": { "output_shape": [33, 1, 512] }, "1": { "0": { "output_shape": [2, 1, 256] }, "1": { "output_shape": [2, 1, 256] } }, "nb_params": 0 }, "Linear-23": { "input_shape": [33, 512], "output_shape": [33, 256], "trainable": true, "nb_params": 131072 }, "BidirectionalLSTM-24": { "input_shape": [33, 1, 512], "output_shape": [33, 1, 256], "nb_params": 0 }, "LSTM-25": { "input_shape": [33, 1, 256], "0": { "output_shape": [33, 1, 512] }, "1": { "0": { "output_shape": [2, 1, 256] }, "1": { "output_shape": [2, 1, 256] } }, "nb_params": 0 }, "Linear-26": { "input_shape": [33, 512], "output_shape": [33, 3755], "trainable": true, "nb_params": 1922560 }, "BidirectionalLSTM-27": { "input_shape": [33, 1, 256], "output_shape": [33, 1, 3755], "nb_params": 0 } }
That is all there is to this example of getting a model's input/output shapes in PyTorch. I hope it serves as a useful reference.