Commit 2b39067

Adding padding option to autoencoder (#7068)
Fixes #7045.

### Description

Added a "padding" option to `monai/networks/nets/autoencoder.py` so that the conv and residual units are passed the padding option.

### Types of changes

- [x] Non-breaking change (fix or new feature that would not break existing functionality).
- [ ] New tests added to cover the changes.
- [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`.
- [x] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`.
- [x] In-line docstrings updated.
- [x] Documentation updated, tested with the `make html` command in the `docs/` folder.

### Notes

I haven't been able to run `./runtests.sh`; even `./runtests.sh -h` produced no output (no error or anything), so I suspect I don't have permission to run `.sh` files on this machine. However, the changes are very small and default to the previous functionality, so unless somebody passes a padding argument, this should not break existing usage of the function.

---------

Signed-off-by: Jupilogy <j.dick@lboro.ac.uk>
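For context, a minimal sketch of how the new option can be used. The constructor values below are illustrative, not taken from this commit; `padding=None` (the default) keeps the previous behaviour, where MONAI's `Convolution` derives "same" padding from the kernel size.

```python
import torch
from monai.networks.nets import AutoEncoder

# Small 2D autoencoder with illustrative channel/stride values. The new
# `padding` argument is forwarded to every Convolution/ResidualUnit block.
net = AutoEncoder(
    spatial_dims=2,
    in_channels=1,
    out_channels=1,
    channels=(4, 8, 16),
    strides=(2, 2, 2),
    padding=1,  # explicit symmetric zero-padding in each conv block
)

x = torch.randn(1, 1, 64, 64)
print(net(x).shape)  # torch.Size([1, 1, 64, 64]); padding=1 equals same-padding for the default kernel_size=3
```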
1 parent 317ef1f commit 2b39067

1 file changed

Lines changed: 10 additions & 0 deletions

File tree

monai/networks/nets/autoencoder.py

```diff
@@ -58,6 +58,8 @@ class AutoEncoder(nn.Module):
         bias: whether to have a bias term in convolution blocks. Defaults to True.
             According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,
             if a conv layer is directly followed by a batch norm layer, bias should be False.
+        padding: controls the amount of implicit zero-paddings on both sides for padding number of points
+            for each dimension in convolution blocks. Defaults to None.
 
     Examples::
 
@@ -104,6 +106,7 @@ def __init__(
         norm: tuple | str = Norm.INSTANCE,
         dropout: tuple | str | float | None = None,
         bias: bool = True,
+        padding: Sequence[int] | int | None = None,
     ) -> None:
         super().__init__()
         self.dimensions = spatial_dims
@@ -118,6 +121,7 @@ def __init__(
         self.norm = norm
         self.dropout = dropout
         self.bias = bias
+        self.padding = padding
         self.num_inter_units = num_inter_units
         self.inter_channels = inter_channels if inter_channels is not None else []
         self.inter_dilations = list(inter_dilations or [1] * len(self.inter_channels))
@@ -178,6 +182,7 @@ def _get_intermediate_module(self, in_channels: int, num_inter_units: int) -> tu
                         dropout=self.dropout,
                         dilation=di,
                         bias=self.bias,
+                        padding=self.padding,
                     )
                 else:
                     unit = Convolution(
@@ -191,6 +196,7 @@ def _get_intermediate_module(self, in_channels: int, num_inter_units: int) -> tu
                         dropout=self.dropout,
                         dilation=di,
                         bias=self.bias,
+                        padding=self.padding,
                     )
 
                 intermediate.add_module("inter_%i" % i, unit)
@@ -231,6 +237,7 @@ def _get_encode_layer(self, in_channels: int, out_channels: int, strides: int, i
                 norm=self.norm,
                 dropout=self.dropout,
                 bias=self.bias,
+                padding=self.padding,
                 last_conv_only=is_last,
             )
             return mod
@@ -244,6 +251,7 @@ def _get_encode_layer(self, in_channels: int, out_channels: int, strides: int, i
             norm=self.norm,
             dropout=self.dropout,
             bias=self.bias,
+            padding=self.padding,
             conv_only=is_last,
         )
         return mod
@@ -264,6 +272,7 @@ def _get_decode_layer(self, in_channels: int, out_channels: int, strides: int, i
             norm=self.norm,
             dropout=self.dropout,
             bias=self.bias,
+            padding=self.padding,
             conv_only=is_last and self.num_res_units == 0,
             is_transposed=True,
         )
@@ -282,6 +291,7 @@ def _get_decode_layer(self, in_channels: int, out_channels: int, strides: int, i
                 norm=self.norm,
                 dropout=self.dropout,
                 bias=self.bias,
+                padding=self.padding,
                 last_conv_only=is_last,
             )
             decode.add_module("resunit", ru)
```
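As a quick check of the backward-compatibility claim above, a hedged sketch (illustrative values, not part of the commit): with the default `kernel_size=3`, an explicit `padding=1` should match the same-padding value MONAI derives on its own when `padding=None`, so both networks below should produce identically shaped outputs.

```python
import torch
from monai.networks.nets import AutoEncoder

# Two identical autoencoders, differing only in whether padding is left to
# MONAI (None -> same-padding) or passed explicitly.
common = dict(spatial_dims=2, in_channels=1, out_channels=1, channels=(4, 8), strides=(2, 2))

net_default = AutoEncoder(**common)              # padding=None: previous behaviour
net_explicit = AutoEncoder(**common, padding=1)  # same value, passed explicitly

x = torch.randn(1, 1, 32, 32)
assert net_default(x).shape == net_explicit(x).shape == x.shape
```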
