CBAM.PyTorch

I think there are some mistakes

Open · William20234 opened this issue on Dec 18, 2023 · 1 comment

After applying the Channel Attention Module, it might be better to apply a convolution layer that maps the channels back to the original value (`inplanes` here), instead of applying the Spatial Attention Module immediately. Otherwise, the Spatial Attention Module may not make sense.
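For context, here is a minimal sketch of the two attention modules, roughly as defined in the CBAM paper (Woo et al., 2018); the exact definitions live in this repo's model file and may differ in detail:

import torch
import torch.nn as nn

class ChannelAttention(nn.Module):
    """Channel attention: shared MLP over global avg- and max-pooled features."""
    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        # Shared MLP implemented with 1x1 convolutions.
        self.fc = nn.Sequential(
            nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False),
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # (B, C, H, W) -> (B, C, 1, 1) attention map over channels.
        return self.sigmoid(self.fc(self.avg_pool(x)) + self.fc(self.max_pool(x)))

class SpatialAttention(nn.Module):
    """Spatial attention: 7x7 conv over channel-wise avg and max maps."""
    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)    # (B, 1, H, W)
        max_out, _ = torch.max(x, dim=1, keepdim=True)  # (B, 1, H, W)
        # (B, C, H, W) -> (B, 1, H, W) attention map over spatial positions.
        return self.sigmoid(self.conv(torch.cat([avg_out, max_out], dim=1)))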

William20234 · Dec 18 '23, 06:12

My advice:

import torch.nn as nn

# ChannelAttention and SpatialAttention are defined in this repo
# (see the sketch above).

class Bottleneck(nn.Module):

    def __init__(self, inplanes, planes, stride=1, downsample=None, expansion=4):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * expansion)
        self.relu = nn.ReLU(inplace=True)

        # New: 1x1 conv that maps the expanded channels back to inplanes
        # before spatial attention is applied.
        self.conv4 = nn.Conv2d(planes * expansion, inplanes, kernel_size=1, bias=False)

        self.ca = ChannelAttention(planes * expansion)
        self.sa = SpatialAttention()

        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        # Channel attention on the expanded features, then reduce back to
        # inplanes channels before applying spatial attention.
        out = self.ca(out) * out
        out = self.conv4(out)
        out = self.sa(out) * out

        # Note: since conv4 reduces out to inplanes channels, a non-None
        # downsample must also produce inplanes channels (and match the
        # stride) for the residual addition below to work.
        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual

        out = self.relu(out)

        return out
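A quick shape check of the modified block (hypothetical usage, assuming the sketched ChannelAttention/SpatialAttention above, or the repo's own, are in scope):

import torch

block = Bottleneck(inplanes=64, planes=64)  # expansion defaults to 4
x = torch.randn(1, 64, 56, 56)
y = block(x)
print(y.shape)  # torch.Size([1, 64, 56, 56]): conv4 maps 256 channels back to 64

Note that, unlike the standard ResNet Bottleneck (which outputs planes * expansion channels), this variant outputs inplanes channels, so the surrounding layer definitions would need to account for that.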

William20234 · Dec 18 '23, 06:12