remove numpy dependency
This commit is contained in:
parent e3cc03a545
commit e89176bf3f
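The diff below replaces every numpy round trip (np.array followed by torch.from_numpy and a .float() cast) with a direct torch.tensor call. As a minimal sketch of the equivalence this relies on, with a hypothetical toy list ys standing in for the JSON annotations:

import numpy as np
import torch

ys = [2.5, 7.1, 14.3]  # hypothetical stand-in for dic_jo[phase]['Y']

# Old pattern: build a numpy array, wrap it, cast, reshape.
old = torch.from_numpy(np.array(ys)).float().reshape(-1, 1)

# New pattern: build the tensor directly from the Python list.
new = torch.tensor(ys).view(-1, 1)

assert old.dtype == new.dtype == torch.float32
assert torch.equal(old, new)

For a list of Python floats, torch.tensor picks the default float32 dtype, so no explicit cast is needed; the cluster outputs below keep a .float() call, presumably to cover integer-valued annotations.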
@@ -1,14 +1,13 @@
 import json
-import numpy as np
 import torch
 
 from torch.utils.data import Dataset
 
 
-class NuScenesDataset(Dataset):
+class KeypointsDataset(Dataset):
     """
-    Get mask joints or ground truth joints and transform into tensors
+    Dataloader for nuscenes or kitti datasets
     """
 
     def __init__(self, joints, phase):
@@ -21,10 +20,10 @@ class NuScenesDataset(Dataset):
             dic_jo = json.load(f)
 
         # Define input and output for normal training and inference
-        self.inputs = np.array(dic_jo[phase]['X'])
-        self.outputs = np.array(dic_jo[phase]['Y']).reshape(-1, 1)
-        self.names = dic_jo[phase]['names']
-        self.kps = np.array(dic_jo[phase]['kps'])
+        self.inputs_all = torch.tensor(dic_jo[phase]['X'])
+        self.outputs_all = torch.tensor(dic_jo[phase]['Y']).view(-1, 1)
+        self.names_all = dic_jo[phase]['names']
+        self.kps_all = torch.tensor(dic_jo[phase]['kps'])
 
         # Extract annotations divided in clusters
         self.dic_clst = dic_jo[phase]['clst']
@@ -33,25 +32,25 @@ class NuScenesDataset(Dataset):
         """
         :return: number of samples (m)
         """
-        return self.inputs.shape[0]
+        return self.inputs_all.shape[0]
 
     def __getitem__(self, idx):
        """
         Reading the tensors when required. E.g. Retrieving one element or one batch at a time
         :param idx: corresponding to m
         """
-        inputs = torch.from_numpy(self.inputs[idx, :]).float()
-        outputs = torch.from_numpy(np.array(self.outputs[idx])).float()
-        names = self.names[idx]
-        kps = self.kps[idx, :]
+        inputs = self.inputs_all[idx, :]
+        outputs = self.outputs_all[idx]
+        names = self.names_all[idx]
+        kps = self.kps_all[idx, :]
 
         return inputs, outputs, names, kps
 
     def get_cluster_annotations(self, clst):
         """Return normalized annotations corresponding to a certain cluster
         """
-        inputs = torch.from_numpy(np.array(self.dic_clst[clst]['X'])).float()
-        outputs = torch.from_numpy(np.array(self.dic_clst[clst]['Y'])).float()
+        inputs = torch.tensor(self.dic_clst[clst]['X'])
+        outputs = torch.tensor(self.dic_clst[clst]['Y']).float()
         count = len(self.dic_clst[clst]['Y'])
 
         return inputs, outputs, count
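For reference, a hedged usage sketch of the renamed KeypointsDataset, mirroring how the Trainer below builds its loaders; the joints path and batch size are hypothetical placeholders:

from torch.utils.data import DataLoader

from models.datasets import KeypointsDataset

joints = 'data/joints-train.json'  # hypothetical path to the preprocessed annotations
dataset = KeypointsDataset(joints, phase='train')
loader = DataLoader(dataset, batch_size=256, shuffle=True)

for inputs, outputs, names, kps in loader:
    # inputs, outputs and kps are slices of the tensors built once in __init__;
    # names is a list of strings collated by the default DataLoader collate_fn.
    break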
@@ -13,7 +13,7 @@ import torch.nn as nn
 from torch.utils.data import DataLoader
 from torch.optim import lr_scheduler
 
-from models.datasets import NuScenesDataset
+from models.datasets import KeypointsDataset
 from models.architectures import LinearModel
 from models.losses import LaplacianLoss
 from utils.logs import set_logger
@@ -80,8 +80,9 @@ class Trainer:
             self.logger.info("Training arguments: \nepochs: {} \nbatch_size: {} \ndropout: {}"
                              "\nbaseline: {} \nlearning rate: {} \nscheduler step: {} \nscheduler gamma: {} "
                              "\ninput_size: {} \nhidden_size: {} \nn_stages: {} \nr_seed: {}"
-                             "\ninput_file: {}", epochs, bs, dropout, baseline, lr, sched_step, sched_gamma, input_size,
-                             hidden_size, n_stage, r_seed, self.joints)
+                             "\ninput_file: {}"
+                             .format(epochs, bs, dropout, baseline, lr, sched_step, sched_gamma, input_size,
+                                     hidden_size, n_stage, r_seed, self.joints))
         else:
             logging.basicConfig(level=logging.INFO)
             self.logger = logging.getLogger(__name__)
@@ -97,14 +98,14 @@ class Trainer:
         torch.cuda.manual_seed(r_seed)
 
         # Dataloader
-        self.dataloaders = {phase: DataLoader(NuScenesDataset(self.joints, phase=phase),
+        self.dataloaders = {phase: DataLoader(KeypointsDataset(self.joints, phase=phase),
                                               batch_size=bs, shuffle=True) for phase in ['train', 'val']}
 
-        self.dataset_sizes = {phase: len(NuScenesDataset(self.joints, phase=phase))
+        self.dataset_sizes = {phase: len(KeypointsDataset(self.joints, phase=phase))
                               for phase in ['train', 'val', 'test']}
 
         # Define the model
-        self.logger.info('Sizes of the dataset: {}',self.dataset_sizes)
+        self.logger.info('Sizes of the dataset: {}'.format(self.dataset_sizes))
         print(">>> creating model")
         self.model = LinearModel(input_size=input_size, output_size=self.output_size, linear_size=hidden_size,
                                  p_dropout=dropout, num_stage=self.n_stage)
@@ -221,10 +222,10 @@ class Trainer:
         self.model.eval()
         dic_err = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0)))  # initialized to zero
         phase = 'val'
-        dataloader_eval = DataLoader(NuScenesDataset(self.joints, phase=phase),
+        dataloader_eval = DataLoader(KeypointsDataset(self.joints, phase=phase),
                                      batch_size=5000, shuffle=True)
 
-        size_eval = len(NuScenesDataset(self.joints, phase=phase))
+        size_eval = len(KeypointsDataset(self.joints, phase=phase))
 
         with torch.no_grad():
             for inputs, labels, _, _ in dataloader_eval:
@@ -285,7 +286,7 @@ class Trainer:
 
         # Evaluate performances on different clusters and save statistics
 
-        nuscenes = NuScenesDataset(self.joints, phase=phase)
+        nuscenes = KeypointsDataset(self.joints, phase=phase)
         for clst in self.clusters:
             inputs, labels, size_eval = nuscenes.get_cluster_annotations(clst)
             inputs, labels = inputs.to(self.device), labels.to(self.device)
@@ -306,7 +307,7 @@ class Trainer:
         if self.save and not load:
             torch.save(self.model.state_dict(), self.path_model)
             print('-'*120)
-            self.logger.info("model saved: {} \n", self.path_model)
+            self.logger.info("model saved: {} \n".format(self.path_model))
         else:
             self.logger.info("model not saved\n")
 
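The logger calls above were switched to str.format because the standard-library logging module formats messages with the % operator, so a {}-placeholder message followed by positional arguments does not interpolate as intended. A small sketch of the two working alternatives; path_model is a hypothetical value used only for illustration:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

path_model = 'checkpoints/model.pt'  # hypothetical path, for illustration only

logger.info("model saved: %s", path_model)          # lazy %-style, the stdlib's native form
logger.info("model saved: {}".format(path_model))   # eager str.format, as used in this commit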