5 changes: 5 additions & 0 deletions monai/networks/blocks/aspp.py
@@ -39,6 +39,7 @@ def __init__(
dilations: Sequence[int] = (1, 2, 4, 6),
norm_type: Optional[Union[Tuple, str]] = "BATCH",
acti_type: Optional[Union[Tuple, str]] = "LEAKYRELU",
bias: bool = False,
) -> None:
"""
Args:
@@ -54,6 +55,9 @@ def __init__(
Defaults to batch norm.
acti_type: final kernel-size-one convolution activation type.
Defaults to leaky ReLU.
bias: whether to have a bias term in convolution blocks. Defaults to False.
According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,
if a conv layer is directly followed by a batch norm layer, bias should be False.

Raises:
ValueError: When ``kernel_sizes`` length differs from ``dilations``.
@@ -88,6 +92,7 @@ def __init__(
kernel_size=1,
act=acti_type,
norm=norm_type,
bias=bias,
)

def forward(self, x: torch.Tensor) -> torch.Tensor:
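A minimal usage sketch for the new flag: `SimpleASPP` keeps batch norm by default, so `bias=False` just drops the per-channel offset that the norm would cancel anyway. The positional arguments (spatial dims, input channels, per-branch output channels) are assumed from the released class; only `bias` comes from this diff.

```python
import torch
from monai.networks.blocks import SimpleASPP

# batch norm follows every conv in this block, hence the bias=False default
aspp = SimpleASPP(3, 4, 2, bias=False)
x = torch.randn(1, 4, 16, 16, 16)
y = aspp(x)  # channels = per-branch output channels x number of dilated branches
print(y.shape)
```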
8 changes: 8 additions & 0 deletions monai/networks/nets/autoencoder.py
@@ -37,6 +37,7 @@ def __init__(
act: Optional[Union[Tuple, str]] = Act.PRELU,
norm: Union[Tuple, str] = Norm.INSTANCE,
dropout: Optional[Union[Tuple, str, float]] = None,
bias: bool = True,
) -> None:

super().__init__()
@@ -51,6 +52,7 @@ def __init__(
self.act = act
self.norm = norm
self.dropout = dropout
self.bias = bias
self.num_inter_units = num_inter_units
self.inter_channels = inter_channels if inter_channels is not None else []
self.inter_dilations = list(inter_dilations or [1] * len(self.inter_channels))
@@ -103,6 +105,7 @@ def _get_intermediate_module(self, in_channels: int, num_inter_units: int) -> Tu
norm=self.norm,
dropout=self.dropout,
dilation=di,
bias=self.bias,
)
else:
unit = Convolution(
@@ -115,6 +118,7 @@ def _get_intermediate_module(self, in_channels: int, num_inter_units: int) -> Tu
norm=self.norm,
dropout=self.dropout,
dilation=di,
bias=self.bias,
)

intermediate.add_module("inter_%i" % i, unit)
@@ -148,6 +152,7 @@ def _get_encode_layer(self, in_channels: int, out_channels: int, strides: int, i
act=self.act,
norm=self.norm,
dropout=self.dropout,
bias=self.bias,
last_conv_only=is_last,
)
return Convolution(
@@ -159,6 +164,7 @@ def _get_encode_layer(self, in_channels: int, out_channels: int, strides: int, i
act=self.act,
norm=self.norm,
dropout=self.dropout,
bias=self.bias,
conv_only=is_last,
)

@@ -175,6 +181,7 @@ def _get_decode_layer(self, in_channels: int, out_channels: int, strides: int, i
act=self.act,
norm=self.norm,
dropout=self.dropout,
bias=self.bias,
conv_only=is_last and self.num_res_units == 0,
is_transposed=True,
)
@@ -192,6 +199,7 @@ def _get_decode_layer(self, in_channels: int, out_channels: int, strides: int, i
act=self.act,
norm=self.norm,
dropout=self.dropout,
bias=self.bias,
last_conv_only=is_last,
)

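The flag is threaded through the encode, decode, and intermediate units of `AutoEncoder`. A sketch of the intended pairing, assuming the released constructor signature for everything except the new `bias` keyword:

```python
import torch
from monai.networks.nets import AutoEncoder

# instance norm is the default, so bias stays True; when switching to batch norm,
# bias=False removes the offset that the norm layer would cancel anyway
net = AutoEncoder(
    dimensions=2,
    in_channels=1,
    out_channels=1,
    channels=(16, 32, 64),
    strides=(2, 2, 2),
    norm="batch",
    bias=False,
)
out = net(torch.randn(1, 1, 64, 64))
print(out.shape)  # expected (1, 1, 64, 64): the decoder mirrors the encoder strides
```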
59 changes: 37 additions & 22 deletions monai/networks/nets/basic_unet.py
@@ -31,6 +31,7 @@ def __init__(
out_chns: int,
act: Union[str, tuple],
norm: Union[str, tuple],
bias: bool,
dropout: Union[float, tuple] = 0.0,
):
"""
@@ -40,12 +41,14 @@ def __init__(
out_chns: number of output channels.
act: activation type and arguments.
norm: feature normalization type and arguments.
bias: whether to have a bias term in convolution blocks.
dropout: dropout ratio. Defaults to no dropout.

"""
super().__init__()

conv_0 = Convolution(dim, in_chns, out_chns, act=act, norm=norm, dropout=dropout, padding=1)
conv_1 = Convolution(dim, out_chns, out_chns, act=act, norm=norm, dropout=dropout, padding=1)
conv_0 = Convolution(dim, in_chns, out_chns, act=act, norm=norm, dropout=dropout, bias=bias, padding=1)
conv_1 = Convolution(dim, out_chns, out_chns, act=act, norm=norm, dropout=dropout, bias=bias, padding=1)
self.add_module("conv_0", conv_0)
self.add_module("conv_1", conv_1)

@@ -60,6 +63,7 @@ def __init__(
out_chns: int,
act: Union[str, tuple],
norm: Union[str, tuple],
bias: bool,
dropout: Union[float, tuple] = 0.0,
):
"""
@@ -69,12 +73,14 @@ def __init__(
out_chns: number of output channels.
act: activation type and arguments.
norm: feature normalization type and arguments.
bias: whether to have a bias term in convolution blocks.
dropout: dropout ratio. Defaults to no dropout.

"""
super().__init__()

max_pooling = Pool["MAX", dim](kernel_size=2)
convs = TwoConv(dim, in_chns, out_chns, act, norm, dropout)
convs = TwoConv(dim, in_chns, out_chns, act, norm, bias, dropout)
self.add_module("max_pooling", max_pooling)
self.add_module("convs", convs)

@@ -90,6 +96,7 @@ def __init__(
out_chns: int,
act: Union[str, tuple],
norm: Union[str, tuple],
bias: bool,
dropout: Union[float, tuple] = 0.0,
upsample: str = "deconv",
pre_conv: Optional[Union[nn.Module, str]] = "default",
@@ -105,6 +112,7 @@ def __init__(
out_chns: number of output channels.
act: activation type and arguments.
norm: feature normalization type and arguments.
bias: whether to have a bias term in convolution blocks.
dropout: dropout ratio. Defaults to no dropout.
upsample: upsampling mode, available options are
``"deconv"``, ``"pixelshuffle"``, ``"nontrainable"``.
@@ -132,9 +140,9 @@ def __init__(
interp_mode=interp_mode,
align_corners=align_corners,
)
self.convs = TwoConv(dim, cat_chns + up_chns, out_chns, act, norm, dropout)
self.convs = TwoConv(dim, cat_chns + up_chns, out_chns, act, norm, bias, dropout)

def forward(self, x: torch.Tensor, x_e: torch.Tensor):
def forward(self, x: torch.Tensor, x_e: Optional[torch.Tensor]):
"""

Args:
@@ -143,15 +151,18 @@ def forward(self, x: torch.Tensor, x_e: torch.Tensor):
"""
x_0 = self.upsample(x)

# handling spatial shapes due to the 2x maxpooling with odd edge lengths.
dimensions = len(x.shape) - 2
sp = [0] * (dimensions * 2)
for i in range(dimensions):
if x_e.shape[-i - 1] != x_0.shape[-i - 1]:
sp[i * 2 + 1] = 1
x_0 = torch.nn.functional.pad(x_0, sp, "replicate")
if x_e is not None:
# handling spatial shapes due to the 2x maxpooling with odd edge lengths.
dimensions = len(x.shape) - 2
sp = [0] * (dimensions * 2)
for i in range(dimensions):
if x_e.shape[-i - 1] != x_0.shape[-i - 1]:
sp[i * 2 + 1] = 1
x_0 = torch.nn.functional.pad(x_0, sp, "replicate")
x = self.convs(torch.cat([x_e, x_0], dim=1)) # input channels: (cat_chns + up_chns)
else:
x = self.convs(x_0)

x = self.convs(torch.cat([x_e, x_0], dim=1)) # input channels: (cat_chns + up_chns)
return x


@@ -164,6 +175,7 @@ def __init__(
features: Sequence[int] = (32, 32, 64, 128, 256, 32),
act: Union[str, tuple] = ("LeakyReLU", {"negative_slope": 0.1, "inplace": True}),
norm: Union[str, tuple] = ("instance", {"affine": True}),
bias: bool = True,
dropout: Union[float, tuple] = 0.0,
upsample: str = "deconv",
):
@@ -188,6 +200,9 @@ def __init__(

act: activation type and arguments. Defaults to LeakyReLU.
norm: feature normalization type and arguments. Defaults to instance norm.
bias: whether to have a bias term in convolution blocks. Defaults to True.
According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,
if a conv layer is directly followed by a batch norm layer, bias should be False.
dropout: dropout ratio. Defaults to no dropout.
upsample: upsampling mode, available options are
``"deconv"``, ``"pixelshuffle"``, ``"nontrainable"``.
@@ -214,16 +229,16 @@ def __init__(
fea = ensure_tuple_rep(features, 6)
print(f"BasicUNet features: {fea}.")

self.conv_0 = TwoConv(dimensions, in_channels, features[0], act, norm, dropout)
self.down_1 = Down(dimensions, fea[0], fea[1], act, norm, dropout)
self.down_2 = Down(dimensions, fea[1], fea[2], act, norm, dropout)
self.down_3 = Down(dimensions, fea[2], fea[3], act, norm, dropout)
self.down_4 = Down(dimensions, fea[3], fea[4], act, norm, dropout)
self.conv_0 = TwoConv(dimensions, in_channels, features[0], act, norm, bias, dropout)
self.down_1 = Down(dimensions, fea[0], fea[1], act, norm, bias, dropout)
self.down_2 = Down(dimensions, fea[1], fea[2], act, norm, bias, dropout)
self.down_3 = Down(dimensions, fea[2], fea[3], act, norm, bias, dropout)
self.down_4 = Down(dimensions, fea[3], fea[4], act, norm, bias, dropout)

self.upcat_4 = UpCat(dimensions, fea[4], fea[3], fea[3], act, norm, dropout, upsample)
self.upcat_3 = UpCat(dimensions, fea[3], fea[2], fea[2], act, norm, dropout, upsample)
self.upcat_2 = UpCat(dimensions, fea[2], fea[1], fea[1], act, norm, dropout, upsample)
self.upcat_1 = UpCat(dimensions, fea[1], fea[0], fea[5], act, norm, dropout, upsample, halves=False)
self.upcat_4 = UpCat(dimensions, fea[4], fea[3], fea[3], act, norm, bias, dropout, upsample)
self.upcat_3 = UpCat(dimensions, fea[3], fea[2], fea[2], act, norm, bias, dropout, upsample)
self.upcat_2 = UpCat(dimensions, fea[2], fea[1], fea[1], act, norm, bias, dropout, upsample)
self.upcat_1 = UpCat(dimensions, fea[1], fea[0], fea[5], act, norm, bias, dropout, upsample, halves=False)

self.final_conv = Conv["conv", dimensions](fea[5], out_channels, kernel_size=1)

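With this change a `BasicUNet` built around batch norm can drop the redundant conv bias, while the instance-norm default keeps `bias=True` for backward compatibility. A sketch, with keyword names other than `bias` assumed from the signature in the hunks above:

```python
import torch
from monai.networks.nets import BasicUNet

net = BasicUNet(
    dimensions=2,
    in_channels=1,
    out_channels=2,
    norm=("batch", {"affine": True}),
    bias=False,  # conv + batch norm: the norm's shift makes the conv bias redundant
)
x = torch.randn(1, 1, 96, 96)  # spatial size divisible by 2**4 for the four poolings
print(net(x).shape)  # expected (1, 2, 96, 96)
```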
13 changes: 13 additions & 0 deletions monai/networks/nets/highresnet.py
@@ -43,6 +43,7 @@ def __init__(
dilation: Union[Sequence[int], int] = 1,
norm_type: Union[Tuple, str] = ("batch", {"affine": True}),
acti_type: Union[Tuple, str] = ("relu", {"inplace": True}),
bias: bool = False,
channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,
) -> None:
"""
@@ -56,6 +57,9 @@ def __init__(
Defaults to ``("batch", {"affine": True})``.
acti_type: {``"relu"``, ``"prelu"``, ``"relu6"``}
Non-linear activation using ReLU or PReLU. Defaults to ``"relu"``.
bias: whether to have a bias term in convolution blocks. Defaults to False.
According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,
if a conv layer is directly followed by a batch norm layer, bias should be False.
channel_matching: {``"pad"``, ``"project"``}
Specifies handling residual branch and conv branch channel mismatches. Defaults to ``"pad"``.

@@ -85,6 +89,7 @@ def __init__(
out_channels=_out_chns,
kernel_size=kernel_size,
dilation=dilation,
bias=bias,
)
)
_in_chns = _out_chns
@@ -116,6 +121,9 @@ class HighResNet(nn.Module):
Defaults to ``("relu", {"inplace": True})``.
dropout_prob: probability of the feature map to be zeroed
(only applies to the penultimate conv layer).
bias: whether to have a bias term in convolution blocks. Defaults to False.
According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,
if a conv layer is directly followed by a batch norm layer, bias should be False.
layer_params: specifying key parameters of each layer/block.
channel_matching: {``"pad"``, ``"project"``}
Specifies handling residual branch and conv branch channel mismatches. Defaults to ``"pad"``.
@@ -132,6 +140,7 @@ def __init__(
norm_type: Union[str, tuple] = ("batch", {"affine": True}),
acti_type: Union[str, tuple] = ("relu", {"inplace": True}),
dropout_prob: Optional[Union[Tuple, str, float]] = 0.0,
bias: bool = False,
layer_params: Sequence[Dict] = DEFAULT_LAYER_PARAMS_3D,
channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,
) -> None:
@@ -151,6 +160,7 @@ def __init__(
adn_ordering="NA",
act=acti_type,
norm=norm_type,
bias=bias,
)
)

@@ -168,6 +178,7 @@ def __init__(
dilation=_dilation,
norm_type=norm_type,
acti_type=acti_type,
bias=bias,
channel_matching=channel_matching,
)
)
@@ -185,6 +196,7 @@ def __init__(
adn_ordering="NAD",
act=acti_type,
norm=norm_type,
bias=bias,
dropout=dropout_prob,
)
)
@@ -200,6 +212,7 @@ def __init__(
adn_ordering="NAD",
act=acti_type,
norm=norm_type,
bias=bias,
dropout=dropout_prob,
)
)
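`HighResNet` wraps every convolution in batch norm, which is why its `bias` default is False. A sketch of an explicit call; argument names other than `bias` are assumed from the released signature, and the 2D input is an assumption that the default layer parameters also apply outside 3D:

```python
import torch
from monai.networks.nets import HighResNet

# all convs are followed by batch norm, so the bias term is left out
net = HighResNet(spatial_dims=2, in_channels=1, out_channels=3, bias=False)
x = torch.randn(1, 1, 64, 64)
print(net(x).shape)  # dilated convs preserve spatial size: expected (1, 3, 64, 64)
```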
11 changes: 10 additions & 1 deletion monai/networks/nets/unet.py
@@ -38,7 +38,8 @@ def __init__(
num_res_units: int = 0,
act: Union[Tuple, str] = Act.PRELU,
norm: Union[Tuple, str] = Norm.INSTANCE,
dropout=0.0,
dropout: float = 0.0,
bias: bool = True,
) -> None:
"""
Enhanced version of UNet which has residual units implemented with the ResidualUnit class.
@@ -60,6 +61,9 @@ def __init__(
act: activation type and arguments. Defaults to PReLU.
norm: feature normalization type and arguments. Defaults to instance norm.
dropout: dropout ratio. Defaults to no dropout.
bias: whether to have a bias term in convolution blocks. Defaults to True.
According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,
if a conv layer is directly followed by a batch norm layer, bias should be False.

Note: The acceptable spatial size of input data depends on the parameters of the network;
to set an appropriate spatial size, please check the tutorial for more details:
@@ -97,6 +101,7 @@ def __init__(
self.act = act
self.norm = norm
self.dropout = dropout
self.bias = bias

def _create_block(
inc: int, outc: int, channels: Sequence[int], strides: Sequence[int], is_top: bool
@@ -151,6 +156,7 @@ def _get_down_layer(self, in_channels: int, out_channels: int, strides: int, is_
act=self.act,
norm=self.norm,
dropout=self.dropout,
bias=self.bias,
)
return Convolution(
self.dimensions,
Expand All @@ -161,6 +167,7 @@ def _get_down_layer(self, in_channels: int, out_channels: int, strides: int, is_
act=self.act,
norm=self.norm,
dropout=self.dropout,
bias=self.bias,
)

def _get_bottom_layer(self, in_channels: int, out_channels: int) -> nn.Module:
@@ -190,6 +197,7 @@ def _get_up_layer(self, in_channels: int, out_channels: int, strides: int, is_to
act=self.act,
norm=self.norm,
dropout=self.dropout,
bias=self.bias,
conv_only=is_top and self.num_res_units == 0,
is_transposed=True,
)
@@ -205,6 +213,7 @@ def _get_up_layer(self, in_channels: int, out_channels: int, strides: int, is_to
act=self.act,
norm=self.norm,
dropout=self.dropout,
bias=self.bias,
last_conv_only=is_top,
)
conv = nn.Sequential(conv, ru)
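For `UNet` the default stays `bias=True` so existing instance-norm configurations are unchanged; the batch-norm pairing the docstring recommends looks roughly like the sketch below (names other than `bias` follow the released signature):

```python
import torch
from monai.networks.nets import UNet

net = UNet(
    dimensions=3,
    in_channels=1,
    out_channels=2,
    channels=(16, 32, 64),
    strides=(2, 2),
    num_res_units=2,
    norm="batch",
    bias=False,  # each conv is followed by batch norm, so drop the bias
)
x = torch.randn(1, 1, 32, 32, 32)
print(net(x).shape)  # expected (1, 2, 32, 32, 32)
```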
2 changes: 2 additions & 0 deletions monai/networks/nets/varautoencoder.py
@@ -43,6 +43,7 @@ def __init__(
act: Optional[Union[Tuple, str]] = Act.PRELU,
norm: Union[Tuple, str] = Norm.INSTANCE,
dropout: Optional[Union[Tuple, str, float]] = None,
bias: bool = True,
) -> None:

self.in_channels, *self.in_shape = in_shape
@@ -65,6 +66,7 @@ def __init__(
act,
norm,
dropout,
bias,
)

padding = same_padding(self.kernel_size)
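`VarAutoEncoder` simply forwards the new flag to the shared `AutoEncoder` blocks through the `super().__init__` call shown above. A sketch, with constructor arguments other than `bias` and the four-tuple return value assumed from the released class:

```python
import torch
from monai.networks.nets import VarAutoEncoder

net = VarAutoEncoder(
    dimensions=2,
    in_shape=(1, 32, 32),  # (channels, *spatial)
    out_channels=1,
    latent_size=16,
    channels=(8, 16),
    strides=(2, 2),
    norm="batch",
    bias=False,
)
recon, mu, logvar, z = net(torch.randn(1, 1, 32, 32))
print(recon.shape, z.shape)  # expected (1, 1, 32, 32) and (1, 16)
```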