* Make import from __init__ files
* add in init only classes or utils functions
* refactor packages
* fix pylint cyclic import
* add task error with 63% confidence intervals and mad
* fix pixel_error
* update setup
* update installation instructions
* update instructions
* update instructions
* update package installation
117 lines · 3.1 KiB · Python
import torch.nn as nn


class TriLinear(nn.Module):
    """Same architecture as Bilinear, but without the skip connection."""

    def __init__(self, input_size, output_size, p_dropout, linear_size=1024):
        super(TriLinear, self).__init__()

        self.input_size = input_size
        self.output_size = output_size
        self.l_size = linear_size

        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(p_dropout)

        # first linear block: input -> hidden
        self.w1 = nn.Linear(self.input_size, self.l_size)
        self.batch_norm1 = nn.BatchNorm1d(self.l_size)

        # second linear block: hidden -> hidden
        self.w2 = nn.Linear(self.l_size, self.l_size)
        self.batch_norm2 = nn.BatchNorm1d(self.l_size)

        # output projection: hidden -> output
        self.w3 = nn.Linear(self.l_size, self.output_size)

    def forward(self, x):
        y = self.w1(x)
        y = self.batch_norm1(y)
        y = self.relu(y)
        y = self.dropout(y)

        y = self.w2(y)
        y = self.batch_norm2(y)
        y = self.relu(y)
        y = self.dropout(y)

        y = self.w3(y)

        return y

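# Shape note (added, illustrative): TriLinear maps a batch of shape (N, input_size)
# to (N, output_size); a runnable example with assumed sizes is in the __main__ block below.

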
def weight_init(batch):
    """Initialize nn.Linear weights with Kaiming (He) normal initialization."""
    if isinstance(batch, nn.Linear):
        nn.init.kaiming_normal_(batch.weight)

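# Usage note (added): nn.Module.apply() visits every submodule recursively, so calling
# `model.apply(weight_init)` re-initializes the weights of all nn.Linear layers in a model.

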
class Linear(nn.Module):
    """Residual block: two linear layers with batch norm, ReLU and dropout, plus a skip connection."""

    def __init__(self, linear_size, p_dropout=0.5):
        super(Linear, self).__init__()
        self.l_size = linear_size

        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(p_dropout)

        self.w1 = nn.Linear(self.l_size, self.l_size)
        self.batch_norm1 = nn.BatchNorm1d(self.l_size)

        self.w2 = nn.Linear(self.l_size, self.l_size)
        self.batch_norm2 = nn.BatchNorm1d(self.l_size)

    def forward(self, x):
        y = self.w1(x)
        y = self.batch_norm1(y)
        y = self.relu(y)
        y = self.dropout(y)

        y = self.w2(y)
        y = self.batch_norm2(y)
        y = self.relu(y)
        y = self.dropout(y)

        # skip connection
        out = x + y

        return out


class LinearModel(nn.Module):
    """Model from the "simple yet effective baseline" architecture: a pre-processing
    linear layer followed by num_stage residual Linear blocks and an output projection."""

    def __init__(self, input_size, output_size, linear_size=256, p_dropout=0.2, num_stage=3):
        super(LinearModel, self).__init__()

        self.input_size = input_size
        self.output_size = output_size
        self.linear_size = linear_size
        self.p_dropout = p_dropout
        self.num_stage = num_stage

        # process input to linear size
        self.w1 = nn.Linear(self.input_size, self.linear_size)
        self.batch_norm1 = nn.BatchNorm1d(self.linear_size)

        self.linear_stages = []
        for _ in range(num_stage):
            self.linear_stages.append(Linear(self.linear_size, self.p_dropout))
        self.linear_stages = nn.ModuleList(self.linear_stages)

        # post processing
        self.w2 = nn.Linear(self.linear_size, self.output_size)

        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(self.p_dropout)

    def forward(self, x):
        # pre-processing
        y = self.w1(x)
        y = self.batch_norm1(y)
        y = self.relu(y)
        y = self.dropout(y)

        # residual linear stages
        for i in range(self.num_stage):
            y = self.linear_stages[i](y)

        # output projection
        y = self.w2(y)

        return y
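
# Minimal smoke test (added sketch, not part of the original module). The sizes below are
# arbitrary assumptions chosen only to exercise the forward passes and weight_init.
if __name__ == "__main__":
    import torch

    batch = torch.randn(4, 32)  # batch of 4 feature vectors of size 32 (assumed sizes)

    tri = TriLinear(input_size=32, output_size=48, p_dropout=0.2)
    tri.apply(weight_init)      # Kaiming-initialize every nn.Linear layer
    print(tri(batch).shape)     # torch.Size([4, 48])

    model = LinearModel(input_size=32, output_size=48)
    model.apply(weight_init)
    print(model(batch).shape)   # torch.Size([4, 48])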