import torch
from torch import nn


class ResidualAdd(nn.Module):
    '''Skip connection: adds the wrapped module's input back to its output.'''
    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        res = x
        x = self.fn(x, **kwargs)
        x += res
        return x

class FeedForwardBlock(nn.Sequential):
    def __init__(self, emb_size: int, expansion: int = 3, drop_p: float = 0.):
        super().__init__(
            nn.Linear(emb_size, expansion * emb_size),
            nn.GELU(),
            nn.Dropout(drop_p),
            nn.Linear(expansion * emb_size, emb_size),
        )

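# A quick shape sketch: with emb_size=256 and expansion=3, the hidden layer is
# 768-wide, matching the MLP size quoted in the docstrings below.
#   ffn = FeedForwardBlock(emb_size=256, expansion=3)
#   tokens = torch.randn(2, 50, 256)   # (batch, num_tokens, emb_size)
#   out = ffn(tokens)                  # shape preserved: (2, 50, 256)
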
class TransformerEncoderBlock(nn.Sequential):
    '''
    The forward expansion is kept at 3, since the hidden and MLP sizes
    must be 768 (= 3 * 256) in the transformer encoder.
    Ref. 3.6. Transformer Block
    '''
    def __init__(self,
                 emb_size: int = 256,
                 drop_p: float = 0.,
                 forward_expansion: int = 3,
                 forward_drop_p: float = 0.,
                 **kwargs):
        super().__init__(
            # Z'_k = MSA(LN(Z_k)) + Z_k       Eq. (7)
            ResidualAdd(nn.Sequential(
                # Layer Normalization (LN)
                nn.LayerNorm(emb_size),
                # Multi-Head Self-Attention (MSA)
                MultiHeadAttention(emb_size, **kwargs),
                nn.Dropout(drop_p)
            )),
            # Z_{k+1} = MLP(LN(Z'_k)) + Z'_k  Eq. (8)
            ResidualAdd(nn.Sequential(
                # MLP block
                nn.LayerNorm(emb_size),
                FeedForwardBlock(
                    emb_size, expansion=forward_expansion, drop_p=forward_drop_p),
                nn.Dropout(drop_p)
            ))
        )

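# A sketch of one encoder block, assuming MultiHeadAttention is defined
# earlier in this file; each block is shape-preserving, so blocks stack freely.
#   block = TransformerEncoderBlock(emb_size=256)
#   tokens = torch.randn(2, 50, 256)
#   out = block(tokens)                # -> (2, 50, 256)
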
class TransformerEncoder(nn.Sequential):
    '''
    The number 256 matches the projection (attention) dimension d used in
    the transformer. The number of transformer layers is 8, the hidden and
    MLP sizes are 768, and the number of heads is 8.
    '''
    def __init__(self, depth: int = 8, **kwargs):
        super().__init__(*[TransformerEncoderBlock(**kwargs) for _ in range(depth)])

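# A sketch of the stacked encoder, under the same assumption as above:
#   encoder = TransformerEncoder(depth=8, emb_size=256)
#   out = encoder(torch.randn(2, 50, 256))   # -> (2, 50, 256)
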
class ClassificationHead(nn.Module):
    '''
    A linear classifier classifies the encoded input based on the MLP head:
    z_K^cls ∈ R^d. There are two final classification classes: NC and AD.
    The first token (cls_token) of the sequence is used for classification.
    '''
    def __init__(self, emb_size: int = 256, n_classes: int = 2):
        super().__init__()
        self.linear = nn.Linear(emb_size, n_classes)

    def forward(self, x):
        # x has shape [batch_size, num_tokens, emb_size] and the
        # cls_token is the first token in the sequence
        cls_token = x[:, 0]
        return self.linear(cls_token)

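# A sketch: only the cls token (position 0) reaches the linear classifier.
#   head = ClassificationHead(emb_size=256, n_classes=2)
#   logits = head(torch.randn(2, 50, 256))   # -> (2, 2)
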
class M3T(nn.Sequential):
    def __init__(self,
                 in_channels: int = 1,
                 out_channels: int = 32,
                 emb_size: int = 256,
                 depth: int = 8,
                 n_classes: int = 2,
                 **kwargs):
        super().__init__(
            CNN3DBlock(in_channels, out_channels),
            MultiPlane_MultiSlice_Extract_Project(out_channels),
            EmbeddingLayer(emb_size=emb_size),
            TransformerEncoder(depth, emb_size=emb_size, **kwargs),
            ClassificationHead(emb_size, n_classes)
        )

# from torchsummary import summary
# model = M3T()
# # Print the layer summary and the total number of trainable parameters
# summary(model.to('cuda'), (1, 128, 128, 128))
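
# An end-to-end sketch, assuming CNN3DBlock, MultiPlane_MultiSlice_Extract_Project,
# EmbeddingLayer and MultiHeadAttention are defined earlier in this file:
#   model = M3T()
#   volume = torch.randn(2, 1, 128, 128, 128)   # (batch, channel, D, H, W)
#   logits = model(volume)                      # -> (2, n_classes)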
"""This module contains simple helper functions """ |
from __future__ import print_function |
import torch |
import numpy as np |
import os |
import imageio |