Coverage for /home/runner/work/torchcvnn/torchcvnn/src/torchcvnn/nn/modules/conv.py: 100% (10 statements)
# MIT License
#
# Copyright (c) 2023 Jérémie Levi, Victor Dhédin, Jeremy Fix
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# External imports
import torch
import torch.nn as nn
from torch.nn.common_types import _size_2_t
class ConvTranspose2d(nn.Module):
    """
    Implementation of torch.nn.ConvTranspose2d for complex numbers.
    Two real-valued ConvTranspose2d modules are applied to the real and
    imaginary parts of the input and recombined according to the complex
    product.

    The parameters are the same as for the upstream PyTorch layer:

    Arguments:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both sides of each dimension in the input. Default: 0
        output_padding (int or tuple, optional): Additional size added to one side of each dimension in the output shape. Default: 0
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        padding_mode (str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        output_padding: _size_2_t = 0,
        groups: int = 1,
        bias: bool = True,
        dilation: _size_2_t = 1,
        padding_mode: str = "zeros",
        device=None,
        dtype=None,
    ) -> None:
        super().__init__()

        self.m_real = torch.nn.ConvTranspose2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            output_padding,
            groups,
            bias,
            dilation,
            padding_mode,
            device,
            dtype,
        )

        self.m_imag = torch.nn.ConvTranspose2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            output_padding,
            groups,
            bias,
            dilation,
            padding_mode,
            device,
            dtype,
        )
    def forward(self, z: torch.Tensor) -> torch.Tensor:
        """
        Performs the forward pass, applying the real-valued ConvTranspose2d
        modules to the real and imaginary parts of the input and recombining
        the results into a complex-valued output.
        """
        return torch.view_as_complex(
            torch.cat(
                (
                    torch.unsqueeze(self.m_real(z.real) - self.m_imag(z.imag), -1),
                    torch.unsqueeze(self.m_real(z.imag) + self.m_imag(z.real), -1),
                ),
                dim=-1,
            )
        )
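

# A minimal usage sketch (not part of the original module): it assumes only
# the ConvTranspose2d class defined above and checks the output shape and
# dtype on random complex-valued data.
if __name__ == "__main__":
    layer = ConvTranspose2d(in_channels=3, out_channels=8, kernel_size=3, stride=2)
    z = torch.randn(2, 3, 16, 16, dtype=torch.complex64)
    out = layer(z)
    # With stride=2, kernel_size=3 and no padding:
    # H_out = (16 - 1) * 2 + (3 - 1) + 1 = 33
    print(out.shape, out.dtype)  # torch.Size([2, 8, 33, 33]) torch.complex64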