[PyTorch] Transformer w/o self-attention implementation compatible with TensorRT
class TransFormer(nn.Module):
    def __init__(self, dim, heads, dim_head, drop=0.1, qkv_bias=True):
        super(TransFormer, self).__init__()
        self.dim_head = dim_head
        self.scale = dim_head ** -0.5
        self.heads = heads
        self.to_q = nn.Linear(dim, heads * dim_head, bias=qkv_bias)
        self.to_k = nn.Linear(dim, heads * dim_head, bias=qkv_bias)
        self.to_v = nn.Linear(dim, heads * dim_head, bias=qkv_bias)
        ...
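The preview cuts off before the forward pass. Below is a minimal sketch of how such a block could be completed, assuming the goal is an attention layer whose queries come from one input and keys/values from another, built only from Linear, matmul, softmax, and view/permute ops so it exports cleanly through ONNX to TensorRT. The tensor shapes, the output projection, and the dropout placement are assumptions, not the original author's code.

import torch
import torch.nn as nn

class TransFormerSketch(nn.Module):
    # Hedged sketch: shapes, output projection, and dropout placement are assumed.
    def __init__(self, dim, heads, dim_head, drop=0.1, qkv_bias=True):
        super().__init__()
        self.heads = heads
        self.dim_head = dim_head
        self.scale = dim_head ** -0.5
        self.to_q = nn.Linear(dim, heads * dim_head, bias=qkv_bias)
        self.to_k = nn.Linear(dim, heads * dim_head, bias=qkv_bias)
        self.to_v = nn.Linear(dim, heads * dim_head, bias=qkv_bias)
        self.proj = nn.Linear(heads * dim_head, dim)
        self.drop = nn.Dropout(drop)

    def forward(self, x, context):
        # x: (B, N, dim) query tokens; context: (B, M, dim) key/value tokens
        B, N, _ = x.shape
        M = context.shape[1]
        # Only view/permute/matmul/softmax are used, so the graph exports
        # through ONNX to TensorRT without custom plugins.
        q = self.to_q(x).view(B, N, self.heads, self.dim_head).permute(0, 2, 1, 3)
        k = self.to_k(context).view(B, M, self.heads, self.dim_head).permute(0, 2, 1, 3)
        v = self.to_v(context).view(B, M, self.heads, self.dim_head).permute(0, 2, 1, 3)
        attn = torch.softmax(torch.matmul(q, k.transpose(-2, -1)) * self.scale, dim=-1)
        out = torch.matmul(attn, v)                                  # (B, heads, N, dim_head)
        out = out.permute(0, 2, 1, 3).reshape(B, N, self.heads * self.dim_head)
        return self.drop(self.proj(out))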
Feature Pyramid Network (FPN) PyTorch implementation
class FPN(nn.Module):
    def __init__(self, dim, sizes, channels):
        '''
        dim : target dimension
        sizes = [57, 113, 225, 450]
        channels = [1024, 512, 256, 64]
        '''
        super(FPN, self).__init__()
        self.sizes = sizes
        self.channels = channels
        self.dim_reduce, self.merge = nn.ModuleDict(), nn.ModuleDict()
        for idx, size in enumerate(sizes):
            self.dim_reduce[str(size)] = nn.Conv2d(channels[idx], dim, kernel_size=1, ...
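This preview also stops mid-constructor. Below is a minimal sketch of the usual FPN top-down pathway under the same interface, assuming the `merge` dict holds 3x3 convolutions, that coarser maps are upsampled with bilinear interpolation to each target size before being added to the lateral 1x1 outputs, and that the input list is ordered coarse to fine as in the docstring. The merge kernel size and interpolation mode are assumptions.

import torch
import torch.nn as nn
import torch.nn.functional as F

class FPNSketch(nn.Module):
    # Hedged sketch of a standard top-down FPN matching the truncated constructor.
    def __init__(self, dim, sizes, channels):
        super().__init__()
        self.sizes = sizes
        self.dim_reduce, self.merge = nn.ModuleDict(), nn.ModuleDict()
        for idx, size in enumerate(sizes):
            # Lateral 1x1 conv: project each backbone stage to `dim` channels.
            self.dim_reduce[str(size)] = nn.Conv2d(channels[idx], dim, kernel_size=1)
            # 3x3 conv applied after adding the upsampled coarser map (assumed).
            self.merge[str(size)] = nn.Conv2d(dim, dim, kernel_size=3, padding=1)

    def forward(self, feats):
        # feats: backbone maps ordered coarse -> fine;
        # feats[i] has channels[i] channels and spatial size sizes[i].
        prev, outs = None, []
        for size, feat in zip(self.sizes, feats):
            lateral = self.dim_reduce[str(size)](feat)
            if prev is not None:
                # Upsample the previously merged (coarser) map and add it in.
                lateral = lateral + F.interpolate(
                    prev, size=(size, size), mode='bilinear', align_corners=False)
            prev = self.merge[str(size)](lateral)
            outs.append(prev)
        return outs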