Coverage for /home/runner/work/torchcvnn/torchcvnn/src/torchcvnn/nn/modules/upsampling.py: 100%

13 statements  

« prev     ^ index     » next       coverage.py v7.8.0, created at 2025-04-13 08:53 +0000

# MIT License

# Copyright (c) 2023 Jérémie Levi, Victor Dhédin, Jeremy Fix

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

22 

23 

24# Standard imports 

25from typing import Optional 

26 

27# External imports 

28import torch 

29import torch.nn as nn 

30from torch.nn.common_types import _size_any_t, _ratio_any_t 

31 

32 

class Upsample(nn.Module):
    """
    Complex-valued upsampling layer.

    Works by applying independently the same upsampling to both the real and
    imaginary parts of the input and recombining them into a complex tensor.

    Note:
        With pytorch 2.1, applying nn.Upsample to a complex valued tensor raises
        an exception "compute_indices_weights_nearest" not implemented for
        'ComplexFloat'. So this module splits the input tensor into its real and
        imaginary parts, applies nn.Upsample on both components and rebuilds a
        complex tensor from the results.

    Arguments:
        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional):
            output spatial sizes
        scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional):
            multiplier for spatial size. Has to match input size if it is a tuple.
        mode (str, optional): the upsampling algorithm: one of ``'nearest'``,
            ``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``.
            Default: ``'nearest'``
        align_corners (bool, optional): if ``True``, the corner pixels of the input
            and output tensors are aligned, and thus preserving the values at
            those pixels. This only has effect when :attr:`mode` is
            ``'linear'``, ``'bilinear'``, ``'bicubic'``, or ``'trilinear'``.
            Default: ``False``
        recompute_scale_factor (bool, optional): recompute the scale_factor for use in the
            interpolation calculation. If `recompute_scale_factor` is ``True``, then
            `scale_factor` must be passed in and `scale_factor` is used to compute the
            output `size`. The computed output `size` will be used to infer new scales for
            the interpolation. Note that when `scale_factor` is floating-point, it may differ
            from the recomputed `scale_factor` due to rounding and precision issues.
            If `recompute_scale_factor` is ``False``, then `size` or `scale_factor` will
            be used directly for interpolation.
    """

    def __init__(
        self,
        size: Optional[_size_any_t] = None,
        scale_factor: Optional[_ratio_any_t] = None,
        mode: str = "nearest",
        align_corners: Optional[bool] = None,
        recompute_scale_factor: Optional[bool] = None,
    ) -> None:
        super().__init__()
        # Keyword arguments guard against any reordering of nn.Upsample's
        # parameters across torch versions.
        self.up_module = nn.Upsample(
            size=size,
            scale_factor=scale_factor,
            mode=mode,
            align_corners=align_corners,
            recompute_scale_factor=recompute_scale_factor,
        )

    def forward(self, z: torch.Tensor) -> torch.Tensor:
        """
        Applies the forward pass.

        Arguments:
            z (torch.Tensor): complex-valued input tensor.

        Returns:
            torch.Tensor: complex-valued tensor with upsampled spatial
            dimensions; real and imaginary parts are upsampled independently.
        """
        # torch.complex replaces the previous unsqueeze/cat/view_as_complex
        # roundtrip: it produces the same values directly and does not depend
        # on the last dimension being contiguous as view_as_complex does.
        return torch.complex(self.up_module(z.real), self.up_module(z.imag))