[PyTorch] Transformer w/o self-attention implementation compatible with TensorRT
import torch
import torch.nn as nn

class TransFormer(nn.Module):
    def __init__(self, dim, heads, dim_head, drop=0.1, qkv_bias=True):
        super(TransFormer, self).__init__()
        self.dim_head = dim_head
        self.scale = dim_head ** -0.5   # 1/sqrt(d_k) attention scaling
        self.heads = heads
        # separate q / k / v projections built from plain nn.Linear layers
        self.to_q = nn.Linear(dim, heads * dim_head, bias=qkv_bias)
        self.to_k = nn.Linear(dim, heads * dim_head, bias=qkv_bias)
        self.to_v = nn.Linear(dim, heads * dim_head, bias=qkv_bias)
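The preview cuts off after the value projection. Below is a minimal sketch of how the forward pass might continue, using only matmul / softmax / view / permute so every op maps to standard ONNX ops that TensorRT parses without plugins. The TransFormerSketch name, the optional context argument, and the absence of an output projection or dropout (the full post likely adds both, given drop=0.1) are assumptions, not taken from the post.

# Sketch only: plain matmul + softmax attention over view/permute reshapes,
# avoiding nn.MultiheadAttention for clean ONNX -> TensorRT export.
class TransFormerSketch(TransFormer):
    def forward(self, x, context=None):
        # queries from x; keys/values from context (defaults to self-attention)
        context = x if context is None else context
        b, n = x.shape[0], x.shape[1]
        m = context.shape[1]
        q = self.to_q(x).view(b, n, self.heads, self.dim_head).permute(0, 2, 1, 3)
        k = self.to_k(context).view(b, m, self.heads, self.dim_head).permute(0, 2, 1, 3)
        v = self.to_v(context).view(b, m, self.heads, self.dim_head).permute(0, 2, 1, 3)
        attn = torch.softmax(torch.matmul(q, k.transpose(-2, -1)) * self.scale, dim=-1)
        out = torch.matmul(attn, v).permute(0, 2, 1, 3).reshape(b, n, -1)
        return out   # (b, n, heads * dim_head)

layer = TransFormerSketch(dim=256, heads=8, dim_head=32)
out = layer(torch.randn(2, 100, 256))   # -> torch.Size([2, 100, 256])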
Feature Pyramid Network (FPN) PyTorch implementation
import torch
import torch.nn as nn

class FPN(nn.Module):
    def __init__(self, dim, sizes, channels):
        '''
        dim : target dimension
        sizes = [57, 113, 225, 450]      # spatial sizes, coarse to fine
        channels = [1024, 512, 256, 64]  # backbone channels per level
        '''
        super(FPN, self).__init__()
        self.sizes = sizes
        self.channels = channels
        self.dim_reduce, self.merge = nn.ModuleDict(), nn.ModuleDict()
        for idx, size in enumerate(sizes):
            # 1x1 lateral conv projecting each level to the target dimension
            # (the preview truncates the argument list; stride/padding assumed)
            self.dim_reduce[str(size)] = nn.Conv2d(channels[idx], dim, kernel_size=1,
                                                   stride=1, padding=0)
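The preview stops before self.merge is populated or a forward pass appears. A plausible continuation under common FPN conventions is sketched below: a 3x3 smoothing conv per merged level and a top-down upsample-and-add pass. The FPNSketch name and the coarse-to-fine feats ordering are assumptions, not from the post.

import torch
import torch.nn as nn
import torch.nn.functional as F

# Sketch only: hypothetical merge convs and top-down pathway for the FPN above.
class FPNSketch(FPN):
    def __init__(self, dim, sizes, channels):
        super().__init__(dim, sizes, channels)
        # one 3x3 smoothing conv per merged (non-coarsest) level
        for size in sizes[1:]:
            self.merge[str(size)] = nn.Conv2d(dim, dim, kernel_size=3, padding=1)

    def forward(self, feats):
        # feats: backbone maps ordered coarse to fine, matching sizes/channels
        out = self.dim_reduce[str(self.sizes[0])](feats[0])
        for idx in range(1, len(self.sizes)):
            size = self.sizes[idx]
            lateral = self.dim_reduce[str(size)](feats[idx])
            up = F.interpolate(out, size=(size, size), mode='nearest')
            out = self.merge[str(size)](up + lateral)
        return out

fpn = FPNSketch(dim=128, sizes=[57, 113, 225, 450], channels=[1024, 512, 256, 64])
feats = [torch.randn(1, c, s, s) for s, c in zip(fpn.sizes, fpn.channels)]
out = fpn(feats)   # -> torch.Size([1, 128, 450, 450])

Interpolating to an explicit target size, rather than scale_factor=2, is what makes the odd 57 -> 113 -> 225 -> 450 progression work.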
Image Frustum to Global 3D
# generate camera frustum
h, w = self.cfg['image']['h'], self.cfg['image']['w']
n_cam, dim, downsampled_h, downsampled_w = feat.size()

# Depth grid: 64 depth hypotheses (d = 1 .. 64), broadcast over the
# downsampled feature map
depth_grid = torch.arange(1, 65, 1, dtype=torch.float)
depth_grid = depth_grid.view(-1, 1, 1).expand(-1, downsampled_h, downsampled_w)
n_depth_slices = depth_grid.shape[0]

# x and y grids: full-resolution pixel coordinates sampled at the
# downsampled feature resolution
x_grid = torch.linspace(0, w - 1, downsampled_w, dtype=torch.float)
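The preview ends mid x_grid. A sketch of how such a frustum is typically completed and lifted to a global frame (Lift-Splat-Shoot style): expand the x/y grids, stack (x, y, depth) per cell, back-project with the inverse intrinsics, then apply the camera-to-global pose. The intrinsics / rotation / translation values below are placeholder assumptions, not from the post.

# Sketch only: continues the snippet above, reusing its grids and dimensions.
x_grid = x_grid.view(1, 1, downsampled_w).expand(n_depth_slices, downsampled_h, downsampled_w)
y_grid = torch.linspace(0, h - 1, downsampled_h, dtype=torch.float)
y_grid = y_grid.view(1, downsampled_h, 1).expand(n_depth_slices, downsampled_h, downsampled_w)

# frustum: (D, H_down, W_down, 3) holding (u, v, d) for every cell
frustum = torch.stack((x_grid, y_grid, depth_grid), dim=-1)

intrinsics = torch.eye(3)                              # placeholder 3x3 K
rotation, translation = torch.eye(3), torch.zeros(3)   # placeholder cam-to-global pose

# pinhole back-projection: X_cam = K^-1 (u*d, v*d, d); then X_global = R X_cam + t
points = torch.cat((frustum[..., :2] * frustum[..., 2:3], frustum[..., 2:3]), dim=-1)
points = points @ intrinsics.inverse().T
points = points @ rotation.T + translation   # (D, H_down, W_down, 3), global frame

In practice K and the pose differ per camera, so this runs once per camera (or with batched (n_cam, 3, 3) matrices via broadcasting).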