MLP-Mixer: An all-MLP Architecture for Vision, adapted for 1D signals
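The main 1D adaptation is the patch embedding: a Conv1d whose kernel size and stride both equal the patch size slices the signal into non-overlapping segments, and each segment becomes one token. The sketch below is illustrative only (the tensor sizes match the demo at the bottom of the file) and walks through the resulting shapes:

import torch
from torch import nn

x = torch.ones(1, 3, 224)                        # (batch, in_channels, signal length)
patch_embed = nn.Conv1d(3, 512, kernel_size=16, stride=16)
patches = patch_embed(x)                         # (1, 512, 224 // 16) = (1, 512, 14)
tokens = patches.permute(0, 2, 1)                # (1, 14, 512): 14 patch tokens, each of dimension 512
print(tokens.shape)                              # torch.Size([1, 14, 512])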
import torch
import numpy as np
from torch import nn
from einops.layers.torch import Rearrange

# Adapted for 1D signals by Amir Shamaei.
# The original (2D) code is from:
# https://github.com/rishikksh20/MLP-Mixer-pytorch/tree/8badd62ba03c5c1f478f6380cfa71ef8ed528f4c


class FeedForward(nn.Module):
    """Two-layer MLP with GELU activation and dropout, applied along the last dimension."""
    def __init__(self, dim, hidden_dim, dropout=0.):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        return self.net(x)


class MixerBlock(nn.Module):
    """One Mixer layer: a token-mixing MLP across patches, then a channel-mixing MLP across features."""
    def __init__(self, dim, num_patch, token_dim, channel_dim, dropout=0.):
        super().__init__()
        # Token mixing: transpose to (batch, dim, num_patch) so the MLP mixes information across patches.
        self.token_mix = nn.Sequential(
            nn.LayerNorm(dim),
            Rearrange('b n d -> b d n'),
            FeedForward(num_patch, token_dim, dropout),
            Rearrange('b d n -> b n d')
        )
        # Channel mixing: MLP applied independently to each patch embedding.
        self.channel_mix = nn.Sequential(
            nn.LayerNorm(dim),
            FeedForward(dim, channel_dim, dropout),
        )

    def forward(self, x):
        x = x + self.token_mix(x)    # residual connection around token mixing
        x = x + self.channel_mix(x)  # residual connection around channel mixing
        return x


class MLPMixer(nn.Module):
    """MLP-Mixer for 1D signals: `image_size` is the signal length and patches are 1D segments."""
    def __init__(self, in_channels, dim, num_classes, patch_size, image_size, depth, token_dim, channel_dim):
        super().__init__()
        assert image_size % patch_size == 0, 'Signal length must be divisible by the patch size.'
        self.num_patch = image_size // patch_size
        # Non-overlapping patch embedding: Conv1d with kernel_size == stride == patch_size,
        # then rearrange to (batch, num_patch, dim).
        self.to_patch_embedding = nn.Sequential(
            nn.Conv1d(in_channels, dim, patch_size, patch_size),
            Rearrange('b c h -> b h c'),
        )
        self.mixer_blocks = nn.ModuleList([])
        for _ in range(depth):
            self.mixer_blocks.append(MixerBlock(dim, self.num_patch, token_dim, channel_dim))
        self.layer_norm = nn.LayerNorm(dim)
        self.mlp_head = nn.Sequential(
            nn.Linear(dim, num_classes)
        )

    def forward(self, x):
        x = self.to_patch_embedding(x)      # (batch, num_patch, dim)
        for mixer_block in self.mixer_blocks:
            x = mixer_block(x)
        x = self.layer_norm(x)
        x = x.mean(dim=1)                   # global average pooling over patches
        return self.mlp_head(x)             # (batch, num_classes)


if __name__ == "__main__":
    # Dummy input: a batch of one 3-channel signal of length 224.
    img = torch.ones([1, 3, 224])
    model = MLPMixer(in_channels=3, image_size=224, patch_size=16, num_classes=1000,
                     dim=512, depth=8, token_dim=256, channel_dim=2048)

    parameters = filter(lambda p: p.requires_grad, model.parameters())
    parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
    print('Trainable Parameters: %.3fM' % parameters)

    out_img = model(img)
    print("Shape of out :", out_img.shape)  # [B, num_classes] == [1, 1000]