A PyTorch implementation of the DPN network


Posted in Python on January 14, 2020

Without further ado, here's the code! The model is DPN (Dual Path Networks), whose blocks combine a ResNet-style residual path with a DenseNet-style densely connected path.

import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict



class CatBnAct(nn.Module):
 def __init__(self, in_chs, activation_fn=nn.ReLU(inplace=True)):
  super(CatBnAct, self).__init__()
  self.bn = nn.BatchNorm2d(in_chs, eps=0.001)
  self.act = activation_fn

 def forward(self, x):
  x = torch.cat(x, dim=1) if isinstance(x, tuple) else x
  return self.act(self.bn(x))


class BnActConv2d(nn.Module):
 def __init__(self, in_chs, out_chs, kernel_size, stride,
     padding=0, groups=1, activation_fn=nn.ReLU(inplace=True)):
  super(BnActConv2d, self).__init__()
  self.bn = nn.BatchNorm2d(in_chs, eps=0.001)
  self.act = activation_fn
  self.conv = nn.Conv2d(in_chs, out_chs, kernel_size, stride, padding, groups=groups, bias=False)

 def forward(self, x):
  return self.conv(self.act(self.bn(x)))


class InputBlock(nn.Module):
 def __init__(self, num_init_features, kernel_size=7,
     padding=3, activation_fn=nn.ReLU(inplace=True)):
  super(InputBlock, self).__init__()
  self.conv = nn.Conv2d(
   3, num_init_features, kernel_size=kernel_size, stride=2, padding=padding, bias=False)
  self.bn = nn.BatchNorm2d(num_init_features, eps=0.001)
  self.act = activation_fn
  self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

 def forward(self, x):
  x = self.conv(x)
  x = self.bn(x)
  x = self.act(x)
  x = self.pool(x)
  return x


class DualPathBlock(nn.Module):
 def __init__(
   self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False):
  super(DualPathBlock, self).__init__()
  self.num_1x1_c = num_1x1_c
  self.inc = inc
  self.b = b
  if block_type == 'proj':
   self.key_stride = 1
   self.has_proj = True
  elif block_type == 'down':
   self.key_stride = 2
   self.has_proj = True
  else:
   assert block_type == 'normal'
   self.key_stride = 1
   self.has_proj = False

  if self.has_proj:
   # Using different member names here to allow easier parameter key matching for conversion
   if self.key_stride == 2:
    self.c1x1_w_s2 = BnActConv2d(
     in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2)
   else:
    self.c1x1_w_s1 = BnActConv2d(
     in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1)
  self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1)
  self.c3x3_b = BnActConv2d(
   in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3,
   stride=self.key_stride, padding=1, groups=groups)
  if b:
   self.c1x1_c = CatBnAct(in_chs=num_3x3_b)
   self.c1x1_c1 = nn.Conv2d(num_3x3_b, num_1x1_c, kernel_size=1, bias=False)
   self.c1x1_c2 = nn.Conv2d(num_3x3_b, inc, kernel_size=1, bias=False)
  else:
   self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1)

 def forward(self, x):
  x_in = torch.cat(x, dim=1) if isinstance(x, tuple) else x
  if self.has_proj:
   if self.key_stride == 2:
    x_s = self.c1x1_w_s2(x_in)
   else:
    x_s = self.c1x1_w_s1(x_in)
   x_s1 = x_s[:, :self.num_1x1_c, :, :]
   x_s2 = x_s[:, self.num_1x1_c:, :, :]
  else:
   x_s1 = x[0]
   x_s2 = x[1]
  x_in = self.c1x1_a(x_in)
  x_in = self.c3x3_b(x_in)
  if self.b:
   x_in = self.c1x1_c(x_in)
   out1 = self.c1x1_c1(x_in)
   out2 = self.c1x1_c2(x_in)
  else:
   x_in = self.c1x1_c(x_in)
   out1 = x_in[:, :self.num_1x1_c, :, :]
   out2 = x_in[:, self.num_1x1_c:, :, :]
  resid = x_s1 + out1
  dense = torch.cat([x_s2, out2], dim=1)
  return resid, dense
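
# Shape sketch (the channel numbers are assumptions matching the first conv2
# block of the default model below): a 'proj' block maps a (1, 64, 56, 56)
# input to a (residual, dense) tuple of shapes (1, 256, 56, 56) and
# (1, 48, 56, 56); each subsequent 'normal' block grows the dense path by
# `inc` channels.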


class DPN(nn.Module):
 def __init__(self, small=False, num_init_features=64, k_r=96, groups=32,
     b=False, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128),
     num_classes=1000, test_time_pool=False):
  super(DPN, self).__init__()
  self.test_time_pool = test_time_pool
  self.b = b
  bw_factor = 1 if small else 4

  blocks = OrderedDict()

  # conv1
  if small:
   blocks['conv1_1'] = InputBlock(num_init_features, kernel_size=3, padding=1)
  else:
   blocks['conv1_1'] = InputBlock(num_init_features, kernel_size=7, padding=3)

  # conv2
  bw = 64 * bw_factor
  inc = inc_sec[0]
  r = (k_r * bw) // (64 * bw_factor)
  blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b)
  in_chs = bw + 3 * inc
  for i in range(2, k_sec[0] + 1):
   blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
   in_chs += inc

  # conv3
  bw = 128 * bw_factor
  inc = inc_sec[1]
  r = (k_r * bw) // (64 * bw_factor)
  blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
  in_chs = bw + 3 * inc
  for i in range(2, k_sec[1] + 1):
   blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
   in_chs += inc

  # conv4
  bw = 256 * bw_factor
  inc = inc_sec[2]
  r = (k_r * bw) // (64 * bw_factor)
  blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
  in_chs = bw + 3 * inc
  for i in range(2, k_sec[2] + 1):
   blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
   in_chs += inc

  # conv5
  bw = 512 * bw_factor
  inc = inc_sec[3]
  r = (k_r * bw) // (64 * bw_factor)
  blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
  in_chs = bw + 3 * inc
  for i in range(2, k_sec[3] + 1):
   blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
   in_chs += inc
  blocks['conv5_bn_ac'] = CatBnAct(in_chs)

  self.features = nn.Sequential(blocks)

  # Using 1x1 conv for the FC layer to allow the extra pooling scheme
  self.last_linear = nn.Conv2d(in_chs, num_classes, kernel_size=1, bias=True)

 def logits(self, features):
  if not self.training and self.test_time_pool:
   x = F.avg_pool2d(features, kernel_size=7, stride=1)
   out = self.last_linear(x)
   # The extra test time pool should be pooling an img_size//32 - 6 size patch
   out = adaptive_avgmax_pool2d(out, pool_type='avgmax')
  else:
   x = adaptive_avgmax_pool2d(features, pool_type='avg')
   out = self.last_linear(x)
  return out.view(out.size(0), -1)

 def forward(self, input):
  x = self.features(input)
  x = self.logits(x)
  return x

""" PyTorch selectable adaptive pooling
Adaptive pooling with the ability to select the type of pooling from:
 * 'avg' - Average pooling
 * 'max' - Max pooling
 * 'avgmax' - Sum of average and max pooling re-scaled by 0.5
 * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim

Both a functional and a nn.Module version of the pooling is provided.

"""

def pooling_factor(pool_type='avg'):
 return 2 if pool_type == 'avgmaxc' else 1


def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
 """Selectable global pooling function with dynamic input kernel size
 """
 if pool_type == 'avgmaxc':
  x = torch.cat([
   F.avg_pool2d(
    x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),
   F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
  ], dim=1)
 elif pool_type == 'avgmax':
  x_avg = F.avg_pool2d(
    x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
  x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
  x = 0.5 * (x_avg + x_max)
 elif pool_type == 'max':
  x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
 else:
  if pool_type != 'avg':
   print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
  x = F.avg_pool2d(
   x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
 return x
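
# A quick illustration of the selectable pooling above (the shapes are
# assumptions for a 2x512x7x7 feature map):
#  feat = torch.randn(2, 512, 7, 7)
#  adaptive_avgmax_pool2d(feat, 'avg').shape     # torch.Size([2, 512, 1, 1])
#  adaptive_avgmax_pool2d(feat, 'avgmaxc').shape # torch.Size([2, 1024, 1, 1]), feature dim doubled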


class AdaptiveAvgMaxPool2d(torch.nn.Module):
 """Selectable global pooling layer with dynamic input kernel size
 """
 def __init__(self, output_size=1, pool_type='avg'):
  super(AdaptiveAvgMaxPool2d, self).__init__()
  self.output_size = output_size
  self.pool_type = pool_type
  if pool_type == 'avgmaxc' or pool_type == 'avgmax':
   self.pool = nn.ModuleList([nn.AdaptiveAvgPool2d(output_size), nn.AdaptiveMaxPool2d(output_size)])
  elif pool_type == 'max':
   self.pool = nn.AdaptiveMaxPool2d(output_size)
  else:
   if pool_type != 'avg':
    print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
   self.pool = nn.AdaptiveAvgPool2d(output_size)

 def forward(self, x):
  if self.pool_type == 'avgmaxc':
   x = torch.cat([p(x) for p in self.pool], dim=1)
  elif self.pool_type == 'avgmax':
   x = 0.5 * torch.sum(torch.stack([p(x) for p in self.pool]), 0).squeeze(dim=0)
  else:
   x = self.pool(x)
  return x

 def factor(self):
  return pooling_factor(self.pool_type)

 def __repr__(self):
  return self.__class__.__name__ + ' (' \
    + 'output_size=' + str(self.output_size) \
    + ', pool_type=' + self.pool_type + ')'
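

# To sanity-check the model end to end, here is a minimal usage sketch. The
# 224x224 input size is an assumption (a standard ImageNet-style input), and
# the default hyperparameters appear to correspond to a DPN-92-style
# configuration.
if __name__ == '__main__':
 model = DPN(num_classes=1000)
 model.eval()
 with torch.no_grad():
  out = model(torch.randn(1, 3, 224, 224))
 print(out.shape) # expected: torch.Size([1, 1000])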

That's all for this PyTorch implementation of the DPN network. I hope it serves as a useful reference, and I hope you'll continue to support 三水点靠木.
