import torch
from torch import nn
from torch.nn import init


class ECAAttention(nn.Module):
    """Efficient Channel Attention (ECA): channel attention computed by a 1D
    convolution over the per-channel descriptor obtained from global average pooling."""

    def __init__(self, kernel_size=3):
        super().__init__()
        self.gap = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=(kernel_size - 1) // 2)
        self.sigmoid = nn.Sigmoid()

    def init_weights(self):
        for m in self.modules():
            # include nn.Conv1d so the module's own conv is actually initialized
            if isinstance(m, (nn.Conv1d, nn.Conv2d)):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)

    def forward(self, x):
        y = self.gap(x)                        # bs, c, 1, 1
        y = y.squeeze(-1).permute(0, 2, 1)     # bs, 1, c
        y = self.conv(y)                       # bs, 1, c
        y = self.sigmoid(y)                    # bs, 1, c
        y = y.permute(0, 2, 1).unsqueeze(-1)   # bs, c, 1, 1
        return x * y.expand_as(x)              # input N C H W, output N C H W


if __name__ == '__main__':
    block = ECAAttention(kernel_size=3)
    input = torch.rand(1, 64, 64, 64)
    output = block(input)
    print(input.size(), output.size())
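
# --- Optional: adaptive kernel size (a sketch, not part of the module above) ---
# The ECA paper derives the 1D kernel size from the channel count instead of
# hard-coding kernel_size=3: k is |log2(C)/gamma + b/gamma| rounded to the nearest
# odd number, with gamma=2 and b=1 as defaults. The helper below is an assumed
# implementation of that rule; `eca_kernel_size` is not defined in the snippet above.
import math


def eca_kernel_size(channels, gamma=2, b=1):
    # map channel count C to an odd kernel size k
    t = int(abs((math.log2(channels) + b) / gamma))
    return t if t % 2 else t + 1


# usage sketch: pick k from the channel dimension, then build the block
# block = ECAAttention(kernel_size=eca_kernel_size(64))  # 64 channels -> k = 3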