import torch
import torch.nn as nn
from spikingjelly.activation_based import neuron
__all__ = ['Spikformer']
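# Spikformer-style spiking vision transformer built on SpikingJelly's multi-step
# ('m') LIF neurons. All feature tensors carry an explicit time dimension T in
# front of the usual (B, C, H, W). Note: backend='cupy' in the LIFNode layers
# requires CuPy and a CUDA device; SpikingJelly's 'torch' backend can be used
# instead when running on CPU.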
class SPS(nn.Module): # Spiking Patch Splitting
    def __init__(
        self, in_channels, embed_dims
    ):
        super().__init__()
        # PSM (Patch Splitting Module)
        self.proj_conv1 = nn.Conv2d(in_channels, embed_dims//8, kernel_size=3, stride=1, padding=1, bias=False)
        self.proj_bn1 = nn.BatchNorm2d(embed_dims//8)
        self.proj_lif1 = neuron.LIFNode(tau=2.0, detach_reset=True, step_mode='m', backend='cupy')
        self.proj_conv2 = nn.Conv2d(embed_dims//8, embed_dims//4, kernel_size=3, stride=1, padding=1, bias=False)
        self.proj_bn2 = nn.BatchNorm2d(embed_dims//4)
        self.proj_lif2 = neuron.LIFNode(tau=2.0, detach_reset=True, step_mode='m', backend='cupy')
        self.proj_conv3 = nn.Conv2d(embed_dims//4, embed_dims//2, kernel_size=3, stride=1, padding=1, bias=False)
        self.proj_bn3 = nn.BatchNorm2d(embed_dims//2)
        self.proj_lif3 = neuron.LIFNode(tau=2.0, detach_reset=True, step_mode='m', backend='cupy')
        self.proj_maxpool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.proj_conv4 = nn.Conv2d(embed_dims//2, embed_dims, kernel_size=3, stride=1, padding=1, bias=False)
        self.proj_bn4 = nn.BatchNorm2d(embed_dims)
        self.proj_lif4 = neuron.LIFNode(tau=2.0, detach_reset=True, step_mode='m', backend='cupy')
        self.proj_maxpool4 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # RPE (Relative Position Embedding)
        self.rpe_lif = neuron.LIFNode(tau=2.0, detach_reset=True, step_mode='m', backend='cupy')
        self.rpe_conv = nn.Conv2d(embed_dims, embed_dims, kernel_size=3, stride=1, padding=1, bias=False, groups=embed_dims)
        self.rpe_bn = nn.BatchNorm2d(embed_dims)
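    # forward: (T, B, in_channels, H, W) -> (T, B, embed_dims, H/4, W/4).
    # The two stride-2 max-pools reduce H and W by a factor of 4 overall, and the
    # depthwise rpe_conv adds a convolutional relative position embedding back
    # onto the feature map as a residual.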
    def forward(self, x):
        T, B, C, H, W = x.shape # T, B, C, H, W
        x = self.proj_conv1(x.flatten(0, 1)) # T*B, C, H, W
        x = self.proj_bn1(x).reshape(T, B, -1, H, W).contiguous() # T, B, C, H, W
        x = self.proj_lif1(x).flatten(0, 1).contiguous() # T*B, C, H, W
        x = self.proj_conv2(x) # T*B, C, H, W
        x = self.proj_bn2(x).reshape(T, B, -1, H, W).contiguous() # T, B, C, H, W
        x = self.proj_lif2(x).flatten(0, 1).contiguous() # T*B, C, H, W
        x = self.proj_conv3(x) # T*B, C, H, W
        x = self.proj_bn3(x).reshape(T, B, -1, H, W).contiguous() # T, B, C, H, W
        x = self.proj_lif3(x).flatten(0, 1).contiguous() # T*B, C, H, W
        x = self.proj_maxpool3(x) # T*B, C, H/2, W/2
        x = self.proj_conv4(x) # T*B, C, H/2, W/2
        x = self.proj_bn4(x).reshape(T, B, -1, H//2, W//2).contiguous() # T, B, C, H/2, W/2
        x = self.proj_lif4(x).flatten(0, 1).contiguous() # T*B, C, H/2, W/2
        x = self.proj_maxpool4(x) # T*B, C, H/4, W/4
        x = x.reshape(T, B, -1, H//4, W//4).contiguous() # T, B, C, H/4, W/4
        x_feat = x # T, B, C, H/4, W/4
        x = self.rpe_lif(x).flatten(0, 1) # T*B, C, H/4, W/4
        x = self.rpe_conv(x) # T*B, C, H/4, W/4
        x = self.rpe_bn(x).reshape(T, B, -1, H//4, W//4).contiguous() # T, B, C, H/4, W/4
        x = x + x_feat # T, B, C, H/4, W/4
        return x
class SDSA(nn.Module): # Spike-Driven Self-Attention
    def __init__(
        self, embed_dims, num_heads
    ):
        super().__init__()
        self.num_heads = num_heads
        self.lif = neuron.LIFNode(tau=2.0, detach_reset=True, step_mode='m', backend='cupy')
        self.q_conv = nn.Conv2d(embed_dims, embed_dims, kernel_size=1, stride=1, bias=False)
        self.q_bn = nn.BatchNorm2d(embed_dims)
        self.q_lif = neuron.LIFNode(tau=2.0, detach_reset=True, step_mode='m', backend='cupy')
        self.k_conv = nn.Conv2d(embed_dims, embed_dims, kernel_size=1, stride=1, bias=False)
        self.k_bn = nn.BatchNorm2d(embed_dims)
        self.k_lif = neuron.LIFNode(tau=2.0, detach_reset=True, step_mode='m', backend='cupy')
        self.v_conv = nn.Conv2d(embed_dims, embed_dims, kernel_size=1, stride=1, bias=False)
        self.v_bn = nn.BatchNorm2d(embed_dims)
        self.v_lif = neuron.LIFNode(tau=2.0, detach_reset=True, step_mode='m', backend='cupy')
        self.kv_lif = neuron.LIFNode(tau=2.0, v_threshold=0.5, detach_reset=True, step_mode='m', backend='cupy')
        self.proj_conv = nn.Conv2d(embed_dims, embed_dims, kernel_size=1, stride=1)
        self.proj_bn = nn.BatchNorm2d(embed_dims)
    def forward(self, x):
        T, B, C, H, W = x.shape # T, B, C, H, W
        N = H*W
        x = self.lif(x) # T, B, C, H, W
        x = x.flatten(0, 1) # T*B, C, H, W
        q = self.q_conv(x) # T*B, C, H, W
        q = self.q_bn(q).reshape(T, B, C, H, W).contiguous() # T, B, C, H, W
        q = self.q_lif(q) # T, B, C, H, W
        q = (
            q.flatten(3) # T, B, C, N
            .transpose(-1, -2) # T, B, N, C
            .reshape(T, B, N, self.num_heads, C//self.num_heads) # T, B, N, h, C/h
            .permute(0, 1, 3, 2, 4) # T, B, h, N, C/h
            .contiguous()
        )
        k = self.k_conv(x) # T*B, C, H, W
        k = self.k_bn(k).reshape(T, B, C, H, W).contiguous() # T, B, C, H, W
        k = self.k_lif(k) # T, B, C, H, W
        k = (
            k.flatten(3) # T, B, C, N
            .transpose(-1, -2) # T, B, N, C
            .reshape(T, B, N, self.num_heads, C//self.num_heads) # T, B, N, h, C/h
            .permute(0, 1, 3, 2, 4) # T, B, h, N, C/h
            .contiguous()
        )
        v = self.v_conv(x) # T*B, C, H, W
        v = self.v_bn(v).reshape(T, B, C, H, W).contiguous() # T, B, C, H, W
        v = self.v_lif(v) # T, B, C, H, W
        v = (
            v.flatten(3) # T, B, C, N
            .transpose(-1, -2) # T, B, N, C
            .reshape(T, B, N, self.num_heads, C//self.num_heads) # T, B, N, h, C/h
            .permute(0, 1, 3, 2, 4) # T, B, h, N, C/h
            .contiguous()
        )
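        # Spike-driven attention: instead of softmax(Q K^T) V, the binary K and V
        # spike maps are fused elementwise and summed over the N tokens, giving a
        # single (1, C/h) descriptor per head that gates Q. This keeps the cost
        # linear in N and uses only masking and addition on spike tensors.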
        kv = k.mul(v) # T, B, h, N, C/h
        kv = kv.sum(dim=-2, keepdim=True) # T, B, h, 1, C/h
        kv = self.kv_lif(kv) # T, B, h, 1, C/h
        x = q.mul(kv) # T, B, h, N, C/h
        x = x.transpose(3, 4) # T, B, h, C/h, N
        x = x.reshape(T, B, C, H, W).contiguous() # T, B, C, H, W
        x = x.flatten(0, 1) # T*B, C, H, W
        x = self.proj_conv(x) # T*B, C, H, W
        x = self.proj_bn(x) # T*B, C, H, W
        x = x.reshape(T, B, C, H, W).contiguous() # T, B, C, H, W
        return x
class MLP(nn.Module): # MultiLayer Perceptron
    def __init__(
        self, embed_dims, expansion=4
    ):
        super().__init__()
        self.fc1_lif = neuron.LIFNode(tau=2.0, detach_reset=True, step_mode='m', backend='cupy')
        self.fc1_conv = nn.Conv2d(embed_dims, embed_dims*expansion, kernel_size=1, stride=1)
        self.fc1_bn = nn.BatchNorm2d(embed_dims*expansion)
        self.fc2_lif = neuron.LIFNode(tau=2.0, detach_reset=True, step_mode='m', backend='cupy')
        self.fc2_conv = nn.Conv2d(embed_dims*expansion, embed_dims, kernel_size=1, stride=1)
        self.fc2_bn = nn.BatchNorm2d(embed_dims)
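    # forward: token-wise MLP implemented with 1x1 convolutions; channels expand
    # to embed_dims*expansion and back, with a LIF spike layer before each conv.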
    def forward(self, x):
        T, B, C, H, W = x.shape # T, B, C, H, W
        x = self.fc1_lif(x) # T, B, C, H, W
        x = x.flatten(0, 1) # T*B, C, H, W
        x = self.fc1_conv(x) # T*B, C*exp, H, W
        x = self.fc1_bn(x) # T*B, C*exp, H, W
        x = x.reshape(T, B, -1, H, W).contiguous() # T, B, C*exp, H, W
        x = self.fc2_lif(x) # T, B, C*exp, H, W
        x = x.flatten(0, 1) # T*B, C*exp, H, W
        x = self.fc2_conv(x) # T*B, C, H, W
        x = self.fc2_bn(x) # T*B, C, H, W
        x = x.reshape(T, B, -1, H, W).contiguous() # T, B, C, H, W
        return x
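# The LIF activations and BatchNorm layers live inside SDSA and MLP, so an
# encoder block reduces to two residual additions over the (T, B, C, H, W) map.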
class EncoderBlock(nn.Module):
    def __init__(
        self, embed_dims, num_heads, mlp_expansion
    ):
        super().__init__()
        self.attn = SDSA(embed_dims=embed_dims, num_heads=num_heads)
        self.mlp = MLP(embed_dims=embed_dims, expansion=mlp_expansion)
    def forward(self, x):
        x = x + self.attn(x) # T, B, C, H, W
        x = x + self.mlp(x) # T, B, C, H, W
        return x
class ClassificationHead(nn.Module):
    def __init__(
        self, embed_dims, num_classes
    ):
        super().__init__()
        self.head = nn.Linear(embed_dims, num_classes)
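    # Spike features are averaged over the spatial tokens and then over the T time
    # steps (rate decoding) before the final linear classifier.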
    def forward(self, x):
        x = x.flatten(3).transpose(-1, -2) # T, B, N, C
        x = x.mean(2) # T, B, C
        x = x.mean(0) # B, C
        x = self.head(x) # B, num_classes
        return x
class Spikformer(nn.Module):
    def __init__(
        self, in_channels, embed_dims, num_heads, num_classes, num_layers, mlp_expansion
    ):
        super().__init__()
        self.sps = SPS(in_channels=in_channels, embed_dims=embed_dims)
        self.layers = nn.ModuleList([
            EncoderBlock(embed_dims=embed_dims, num_heads=num_heads, mlp_expansion=mlp_expansion) for _ in range(num_layers)
        ])
        self.classification = ClassificationHead(embed_dims=embed_dims, num_classes=num_classes)
    def forward(self, x):
        x = self.sps(x) # T, B, C, H, W
        for layer in self.layers:
            x = layer(x) # T, B, C, H, W
        x = self.classification(x) # B, num_classes
        return x
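# Minimal usage sketch (illustrative hyperparameters only, not a tuned or published
# configuration). A static image batch is repeated along the time dimension T, and
# SpikingJelly's functional.reset_net clears the LIF membrane states between forward
# passes. A CUDA device is assumed because the neurons above use backend='cupy'.
if __name__ == '__main__':
    from spikingjelly.activation_based import functional
    device = 'cuda'
    model = Spikformer(
        in_channels=3, embed_dims=256, num_heads=8,
        num_classes=10, num_layers=2, mlp_expansion=4,
    ).to(device)
    T = 4 # number of simulation time steps (example value)
    images = torch.randn(2, 3, 32, 32, device=device) # B, C, H, W static images
    x = images.unsqueeze(0).repeat(T, 1, 1, 1, 1) # T, B, C, H, W
    logits = model(x) # B, num_classes
    print(logits.shape) # torch.Size([2, 10])
    functional.reset_net(model) # reset neuron states before the next batch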