
Source code for mmagic.models.archs.resnet

# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
from typing import List, Optional, Sequence

import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer
from mmengine import MMLogger
from mmengine.model.weight_init import constant_init, kaiming_init
from mmengine.runner import load_checkpoint
from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm
from torch import Tensor


class BasicBlock(nn.Module):
    """Basic block for ResNet.

    Args:
        inplanes (int): Number of input channels.
        planes (int): Number of output channels.
        stride (int): Stride of the first block of one stage. Default: 1.
        dilation (int): Dilation of one stage. Default: 1.
        downsample (nn.Module): Downsample module. Default: None.
        act_cfg (dict): Dictionary to construct and config activation layer.
            Default: dict(type='ReLU').
        conv_cfg (dict): Dictionary to construct and config convolution
            layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN').
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
            Default: False.
    """

    expansion = 1

    def __init__(self,
                 inplanes: int,
                 planes: int,
                 stride: int = 1,
                 dilation: int = 1,
                 downsample: Optional[nn.Module] = None,
                 act_cfg: dict = dict(type='ReLU'),
                 conv_cfg: Optional[dict] = None,
                 norm_cfg: dict = dict(type='BN'),
                 with_cp: bool = False):
        super(BasicBlock, self).__init__()
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)

        self.activate = build_activation_layer(act_cfg)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    @property
    def norm1(self) -> nn.Module:
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self) -> nn.Module:
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    def forward(self, x: Tensor) -> Tensor:
        """Forward function."""

        def _inner_forward(x: Tensor) -> Tensor:
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.activate(out)

            out = self.conv2(out)
            out = self.norm2(out)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.activate(out)

        return out
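A minimal smoke test, assuming torch is installed and the class above is importable; the batch and spatial sizes are illustrative:

import torch

block = BasicBlock(inplanes=64, planes=64)
x = torch.randn(2, 64, 56, 56)
out = block(x)
print(out.shape)  # torch.Size([2, 64, 56, 56]); the residual add preserves shape

With stride != 1 or inplanes != planes, a downsample module must be supplied so the identity branch matches the shape of `out`.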
class Bottleneck(nn.Module):
    """Bottleneck block for ResNet.

    Args:
        inplanes (int): Number of input channels.
        planes (int): Number of output channels.
        stride (int): Stride of the first block of one stage. Default: 1.
        dilation (int): Dilation of one stage. Default: 1.
        downsample (nn.Module): Downsample module. Default: None.
        act_cfg (dict): Dictionary to construct and config activation layer.
            Default: dict(type='ReLU').
        conv_cfg (dict): Dictionary to construct and config convolution
            layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN').
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
            Default: False.
    """

    expansion = 4

    def __init__(self,
                 inplanes: int,
                 planes: int,
                 stride: int = 1,
                 dilation: int = 1,
                 downsample: Optional[nn.Module] = None,
                 act_cfg: dict = dict(type='ReLU'),
                 conv_cfg: Optional[dict] = None,
                 norm_cfg: dict = dict(type='BN'),
                 with_cp: bool = False):
        super(Bottleneck, self).__init__()
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.act_cfg = act_cfg
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.conv1_stride = 1
        self.conv2_stride = stride
        self.with_cp = with_cp

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg,
            planes,
            planes,
            kernel_size=3,
            stride=self.conv2_stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.activate = build_activation_layer(act_cfg)
        self.downsample = downsample

    @property
    def norm1(self) -> nn.Module:
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self) -> nn.Module:
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self) -> nn.Module:
        """nn.Module: normalization layer after the third convolution layer"""
        return getattr(self, self.norm3_name)

    def forward(self, x: Tensor) -> Tensor:
        """Forward function."""
        identity = x

        out = self.conv1(x)
        out = self.norm1(out)
        out = self.activate(out)

        out = self.conv2(out)
        out = self.norm2(out)
        out = self.activate(out)

        out = self.conv3(out)
        out = self.norm3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.activate(out)

        return out
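Because `expansion` is 4, the identity branch must be projected to `planes * expansion` channels whenever the shapes differ. A sketch mirroring the downsample branch that `ResNet._make_layer` builds (the channel and spatial sizes are illustrative):

import torch
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer

# 1x1 conv + norm that maps the identity from 64 to 64 * 4 = 256 channels.
downsample = nn.Sequential(
    build_conv_layer(None, 64, 64 * Bottleneck.expansion, kernel_size=1,
                     bias=False),
    build_norm_layer(dict(type='BN'), 64 * Bottleneck.expansion)[1])
block = Bottleneck(inplanes=64, planes=64, downsample=downsample)
out = block(torch.randn(2, 64, 56, 56))
print(out.shape)  # torch.Size([2, 256, 56, 56])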
class ResNet(nn.Module):
    """General ResNet.

    This class is adopted from
    https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/backbones/resnet.py.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
        stem_channels (int): Number of stem channels. Default: 64.
        base_channels (int): Number of base channels of res layer.
            Default: 64.
        num_stages (int): Resnet stages, normally 4.
        strides (Sequence[int]): Strides of the first block of each stage.
            Default: (1, 2, 2, 2).
        dilations (Sequence[int]): Dilation of each stage.
            Default: (1, 1, 2, 4).
        deep_stem (bool): Replace the 7x7 conv in the input stem with three
            3x3 convs. Default: False.
        avg_down (bool): Use AvgPool instead of stride conv when downsampling
            in the bottleneck. Default: False.
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Default: -1.
        act_cfg (dict): Dictionary to construct and config activation layer.
            Default: dict(type='ReLU').
        conv_cfg (dict): Dictionary to construct and config convolution
            layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN').
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
            Default: False.
        multi_grid (Sequence[int]|None): Multi grid dilation rates of last
            stage. Default: None.
        contract_dilation (bool): Whether to contract the first dilation of
            each layer. Default: False.
        zero_init_residual (bool): Whether to use zero init for last norm
            layer in resblocks to let them behave as identity. Default: True.
    """

    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 depth: int,
                 in_channels: int = 3,
                 stem_channels: int = 64,
                 base_channels: int = 64,
                 num_stages: int = 4,
                 strides: Sequence[int] = (1, 2, 2, 2),
                 dilations: Sequence[int] = (1, 1, 2, 4),
                 deep_stem: bool = False,
                 avg_down: bool = False,
                 frozen_stages: int = -1,
                 act_cfg: dict = dict(type='ReLU'),
                 conv_cfg: Optional[dict] = None,
                 norm_cfg: dict = dict(type='BN'),
                 with_cp: bool = False,
                 multi_grid: Optional[Sequence[int]] = None,
                 contract_dilation: bool = False,
                 zero_init_residual: bool = True):
        super(ResNet, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for resnet')
        self.block, stage_blocks = self.arch_settings[depth]
        self.depth = depth
        self.inplanes = stem_channels
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.act_cfg = act_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.multi_grid = multi_grid
        self.contract_dilation = contract_dilation
        self.zero_init_residual = zero_init_residual

        self._make_stem_layer(in_channels, stem_channels)

        self.layer1 = self._make_layer(
            self.block, 64, stage_blocks[0], stride=strides[0])
        self.layer2 = self._make_layer(
            self.block, 128, stage_blocks[1], stride=strides[1])
        self.layer3 = self._make_layer(
            self.block, 256, stage_blocks[2], stride=strides[2])
        self.layer4 = self._make_layer(
            self.block, 512, stage_blocks[3], stride=strides[3])

        # Rewrite stride-2 convolutions as dilated convolutions according
        # to `dilations` (a no-op for stages whose dilation is 1).
        self.layer1.apply(partial(self._nostride_dilate, dilate=dilations[0]))
        self.layer2.apply(partial(self._nostride_dilate, dilate=dilations[1]))
        self.layer3.apply(partial(self._nostride_dilate, dilate=dilations[2]))
        self.layer4.apply(partial(self._nostride_dilate, dilate=dilations[3]))

        self._freeze_stages()
    def _make_stem_layer(self, in_channels: int, stem_channels: int) -> None:
        """Make stem layer for ResNet."""
        if self.deep_stem:
            self.stem = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    in_channels,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                build_activation_layer(self.act_cfg),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                build_activation_layer(self.act_cfg),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels)[1],
                build_activation_layer(self.act_cfg))
        else:
            self.conv1 = build_conv_layer(
                self.conv_cfg,
                in_channels,
                stem_channels,
                kernel_size=7,
                stride=2,
                padding=3,
                bias=False)
            self.norm1_name, norm1 = build_norm_layer(
                self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
            self.activate = build_activation_layer(self.act_cfg)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    @property
    def norm1(self) -> nn.Module:
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)
    def _make_layer(self,
                    block: BasicBlock,
                    planes: int,
                    blocks: int,
                    stride: int = 1,
                    dilation: int = 1) -> nn.Module:
        """Stack ``blocks`` residual blocks into one res layer."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    self.inplanes,
                    planes * block.expansion,
                    stride=stride,
                    kernel_size=1,
                    dilation=dilation,
                    bias=False),
                build_norm_layer(self.norm_cfg, planes * block.expansion)[1])
        layers = []
        layers.append(
            block(
                self.inplanes,
                planes,
                stride,
                downsample=downsample,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg,
                conv_cfg=self.conv_cfg))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg,
                    conv_cfg=self.conv_cfg))
        return nn.Sequential(*layers)
    def _nostride_dilate(self, m: nn.Module, dilate: int) -> None:
        """Convert stride-2 convolutions to dilated convolutions in place."""
        classname = m.__class__.__name__
        if classname.find('Conv') != -1 and dilate > 1:
            # the convolution with stride
            if m.stride == (2, 2):
                m.stride = (1, 1)
                if m.kernel_size == (3, 3):
                    m.dilation = (dilate // 2, dilate // 2)
                    m.padding = (dilate // 2, dilate // 2)
            # other convolutions
            else:
                if m.kernel_size == (3, 3):
                    m.dilation = (dilate, dilate)
                    m.padding = (dilate, dilate)
    def init_weights(self, pretrained: Optional[str] = None) -> None:
        """Init weights for the model.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = MMLogger.get_current_instance()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')
    def _freeze_stages(self) -> None:
        """Freeze stages param and norm stats."""
        if self.frozen_stages >= 0:
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False

        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False
    def forward(self, x: Tensor) -> List[Tensor]:
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (N, C, H, W).

        Returns:
            List[Tensor]: The input tensor, the stem output, and the outputs
                of the four res layers.
        """
        conv_out = [x]
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.activate(x)
        conv_out.append(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        conv_out.append(x)
        x = self.layer2(x)
        conv_out.append(x)
        x = self.layer3(x)
        conv_out.append(x)
        x = self.layer4(x)
        conv_out.append(x)

        return conv_out
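A minimal usage sketch, assuming torch and mmcv are installed and the module above is importable; the depth choices and the (1, 3, 224, 224) input are illustrative. It exercises construction, the in-place dilation rewrite, the multi-scale forward outputs, and stage freezing:

import torch

# Construction: `depth` selects (block, stage_blocks) from arch_settings.
model = ResNet(depth=18)
model.init_weights()  # random init; pass a checkpoint path to load weights

# _nostride_dilate rewrote layer4 (dilate=4 by default): its stride-2 conv
# became stride 1 with dilation dilate // 2, other 3x3 convs got dilation 4.
print(model.layer4[0].conv1.stride)    # (1, 1)
print(model.layer4[0].conv1.dilation)  # (2, 2)
print(model.layer4[0].conv2.dilation)  # (4, 4)

# forward() returns the input, the stem output, and the four stage outputs.
# With the default strides/dilations the last two stages keep stride 8.
model.eval()
with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))
print([tuple(f.shape) for f in feats])
# [(1, 3, 224, 224), (1, 64, 112, 112), (1, 64, 56, 56),
#  (1, 128, 28, 28), (1, 256, 28, 28), (1, 512, 28, 28)]

# frozen_stages freezes the stem and the first N res layers.
frozen = ResNet(depth=18, frozen_stages=1)
print(any(p.requires_grad for p in frozen.layer1.parameters()))  # False
print(all(p.requires_grad for p in frozen.layer2.parameters()))  # True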