major review
This commit is contained in:
parent bee58a107b
commit cea055bb7d
@@ -23,12 +23,15 @@ class ActivityEvaluator:

     def __init__(self, args):

+        self.dir_ann = args.dir_ann
+        assert os.listdir(self.dir_ann), "Annotation directory is empty"
+
         # COLLECTIVE ACTIVITY DATASET (talking)
         # -------------------------------------------------------------------------------------------------------------
         if args.dataset == 'collective':
-            self.folders_collective = ['seq02', 'seq14', 'seq12', 'seq13', 'seq11', 'seq36']
+            self.sequences = ['seq02', 'seq14', 'seq12', 'seq13', 'seq11', 'seq36']
             # folders_collective = ['seq02']
-            self.path_collective = ['data/activity/' + fold for fold in self.folders_collective]
+            self.dir_data = 'data/activity/dataset'
             self.THRESHOLD_PROB = 0.25  # Concordance for samples
             self.THRESHOLD_DIST = 2  # Threshold to check distance of people
             self.RADII = (0.3, 0.5)  # expected radii of the o-space
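A side note on the guard added here (and mirrored in GenerateKitti below): os.listdir raises FileNotFoundError when the path does not exist, so the assert message only covers the existing-but-empty case. A minimal sketch with an illustrative path:

    import os

    dir_ann = 'data/annotations'  # illustrative path, not from the repo
    # FileNotFoundError if dir_ann is missing; the AssertionError
    # "Annotation directory is empty" fires only when it exists but is empty.
    assert os.listdir(dir_ann), "Annotation directory is empty"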
@@ -39,8 +42,7 @@ class ActivityEvaluator:

         # KITTI DATASET (social distancing)
         # ------------------------------------------------------------------------------------------------------------
         else:
-            self.dir_ann_kitti = '/data/lorenzo-data/annotations/kitti/scale_2_july'
-            self.dir_gt_kitti = 'data/kitti/gt_activity'
+            self.dir_data = 'data/kitti/gt_activity'
             self.dir_kk = os.path.join('data', 'kitti', 'calib')
             self.THRESHOLD_PROB = 0.25  # Concordance for samples
             self.THRESHOLD_DIST = 2  # Threshold to check distance of people
@@ -62,24 +64,25 @@ class ActivityEvaluator:

     def eval_collective(self):
         """Parse Collective Activity Dataset and predict if people are talking or not"""

-        for fold in self.path_collective:
-            images = glob.glob(fold + '/*.jpg')
-            initial_path = os.path.join(fold, 'frame0001.jpg')
-            with open(initial_path, 'rb') as f:
+        for seq in self.sequences:
+            images = glob.glob(os.path.join(self.dir_data, 'images', seq + '*.jpg'))
+            initial_im = os.path.join(self.dir_data, 'images', seq + '_frame0001.jpg')
+            with open(initial_im, 'rb') as f:
                 image = Image.open(f).convert('RGB')
                 im_size = image.size
             assert len(im_size) > 1, "image with frame0001 not available"

             for idx, im_path in enumerate(images):

                 # Collect PifPaf files and calibration
                 basename = os.path.basename(im_path)
                 extension = '.pifpaf.json'
-                path_pif = os.path.join(fold, basename + extension)
+                path_pif = os.path.join(self.dir_ann, basename + extension)
                 annotations = open_annotations(path_pif)
                 kk, _ = factory_for_gt(im_size, verbose=False)

                 # Collect corresponding gt files (ys_gt: 1 or 0)
-                boxes_gt, ys_gt = parse_gt_collective(fold, path_pif)
+                boxes_gt, ys_gt = parse_gt_collective(self.dir_data, seq, path_pif)

                 # Run Monoloco
                 dic_out, boxes = self.run_monoloco(annotations, kk, im_size=im_size)
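The rewritten loop assumes one consolidated dataset folder with per-sequence file prefixes, instead of one folder per sequence. A small sketch of the access pattern, with the layout inferred from the paths in this diff (the tree itself is an assumption, not verified against the repo):

    import glob
    import os

    # Assumed layout:
    #   data/activity/dataset/images/seq02_frame0001.jpg, ...
    #   data/activity/dataset/annotations/seq02_annotations.txt, ...
    dir_data = 'data/activity/dataset'
    seq = 'seq02'
    images = sorted(glob.glob(os.path.join(dir_data, 'images', seq + '*.jpg')))
    path_ann = os.path.join(dir_data, 'annotations', seq + '_annotations.txt')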
@@ -88,17 +91,17 @@ class ActivityEvaluator:

             matches = get_iou_matches(boxes, boxes_gt, iou_min=0.3)

             # Estimate activity
-            categories = [os.path.basename(fold)] * len(boxes_gt)
+            categories = [seq] * len(boxes_gt)  # for compatibility with KITTI evaluation
             self.estimate_activity(dic_out, matches, ys_gt, categories=categories)

         # Print Results
-        cout_results(self.cnt, self.all_gt, self.all_pred, categories=self.folders_collective)
+        cout_results(self.cnt, self.all_gt, self.all_pred, categories=self.sequences)

     def eval_kitti(self):
         """Parse KITTI Dataset and predict if people are talking or not"""

         from ..utils import factory_file
-        files = glob.glob(self.dir_gt_kitti + '/*.txt')
+        files = glob.glob(self.dir_data + '/*.txt')
         # files = [self.dir_gt_kitti + '/001782.txt']
         assert files, "Empty directory"
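get_iou_matches itself is outside this diff; a plausible reading of the call is a greedy IoU assignment from predicted boxes to ground-truth boxes, keeping pairs above iou_min. A sketch of that idea (only iou_min=0.3 comes from the call above; the helper names are hypothetical):

    def get_iou(box_a, box_b):
        """IoU of two [x1, y1, x2, y2] boxes."""
        xx1, yy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
        xx2, yy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
        inter = max(0.0, xx2 - xx1) * max(0.0, yy2 - yy1)
        union = ((box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
                 + (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]) - inter)
        return inter / union if union > 0 else 0.0

    def iou_matches_sketch(boxes, boxes_gt, iou_min=0.3):
        """Greedily pair each predicted box with the best unused gt box."""
        matches, used = [], set()
        for idx, box in enumerate(boxes):
            ious = [get_iou(box, bgt) if ii not in used else 0.0
                    for ii, bgt in enumerate(boxes_gt)]
            if ious and max(ious) >= iou_min:
                idx_gt = ious.index(max(ious))
                matches.append((idx, idx_gt))
                used.add(idx_gt)
        return matches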
@@ -107,10 +110,10 @@ class ActivityEvaluator:

             # Collect PifPaf files and calibration
             basename, _ = os.path.splitext(os.path.basename(file))
             path_calib = os.path.join(self.dir_kk, basename + '.txt')
-            annotations, kk, tt = factory_file(path_calib, self.dir_ann_kitti, basename)
+            annotations, kk, tt = factory_file(path_calib, self.dir_ann, basename)

             # Collect corresponding gt files (ys_gt: 1 or 0)
-            path_gt = os.path.join(self.dir_gt_kitti, basename + '.txt')
+            path_gt = os.path.join(self.dir_data, basename + '.txt')
             boxes_gt, ys_gt, difficulties = parse_gt_kitti(path_gt)

             # Run Monoloco
@@ -131,7 +134,6 @@ class ActivityEvaluator:

         angles = dic_out['angles']
         dds = dic_out['dds_pred']
-        stds = dic_out['stds_ale']
         confs = dic_out['confs']
         xz_centers = [[xx[0], xx[2]] for xx in dic_out['xyz_pred']]

         # Count gt statistics
@@ -141,7 +143,7 @@ class ActivityEvaluator:

         for i_m, (idx, idx_gt) in enumerate(matches):

-            # Select keys to update resultd for Collective or KITTI
+            # Select keys to update results for Collective or KITTI
             keys = ('all', categories[idx_gt])

             # Run social interactions rule
@@ -166,10 +168,12 @@ class ActivityEvaluator:

         return dic_out, boxes


-def parse_gt_collective(fold, path_pif):
+def parse_gt_collective(dir_data, seq, path_pif):
     """Parse both gt and binary label (1/0) for talking or not"""

-    with open(os.path.join(fold, "annotations.txt"), "r") as ff:
+    path = os.path.join(dir_data, 'annotations', seq + '_annotations.txt')
+
+    with open(path, "r") as ff:
         reader = csv.reader(ff, delimiter='\t')
         dic_frames = defaultdict(lambda: defaultdict(list))
         for idx, line in enumerate(reader):
@@ -244,8 +248,8 @@ def convert_category(cat):

 def extract_frame_number(path):
     """extract frame number from path"""
     name = os.path.basename(path)
-    if name[5] == '0':
-        frame = name[6:9]
+    if name[11] == '0':
+        frame = name[12:15]
     else:
-        frame = name[5:9]
+        frame = name[11:15]
     return frame
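The index shift tracks the new file naming: frames were previously named frame0001.jpg, with the digits at positions 5-8, and are now sequence-prefixed, e.g. seq02_frame0001.jpg, with the digits at positions 11-14. A quick check of the slicing, assuming that seqXX_frameNNNN.jpg pattern:

    name = 'seq02_frame0001.jpg'   # assumed naming, inferred from this diff
    assert name[11] == '0'         # first frame digit
    assert name[12:15] == '001'    # one leading zero dropped
    name = 'seq02_frame1234.jpg'
    assert name[11:15] == '1234'   # all four digits kept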
@@ -26,6 +26,9 @@ class GenerateKitti:

     def __init__(self, model, dir_ann, p_dropout=0.2, n_dropout=0, hidden_size=1024):

+        self.dir_ann = dir_ann
+        assert os.listdir(self.dir_ann), "Annotation directory is empty"
+
         # Load monoloco
         use_cuda = torch.cuda.is_available()
         device = torch.device("cuda" if use_cuda else "cpu")
@@ -47,7 +50,6 @@ class GenerateKitti:

         # model_mono = 'data/models/monoloco-190719-0923.pkl'  # NuScenes
         self.monoloco = Loco(model=model_mono, net='monoloco', device=device, n_dropout=n_dropout,
                              p_dropout=p_dropout, linear_size=256)
-        self.dir_ann = dir_ann

         # Extract list of pifpaf files in validation images
         self.dir_gt = os.path.join('data', 'kitti', 'gt')
@@ -36,7 +36,7 @@ class Trainer:

     lambdas = (1, 1, 1, 1, 1, 1, 1, 1)

     def __init__(self, joints, epochs=100, bs=256, dropout=0.2, lr=0.002,
-                 sched_step=20, sched_gamma=1, hidden_size=256, n_stage=3, r_seed=1, n_samples=100,
+                 sched_step=20, sched_gamma=1, hidden_size=256, n_stage=3, r_seed=0, n_samples=100,
                  monocular=False, save=False, print_loss=True):
         """
         Initialize directories, load the data and parameters for the training
@@ -131,6 +131,7 @@ class Trainer:

         # Optimizer and scheduler
         all_params = chain(self.model.parameters(), self.mt_loss.parameters())
         self.optimizer = torch.optim.Adam(params=all_params, lr=lr)
-        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min')
+        self.scheduler = lr_scheduler.StepLR(self.optimizer, step_size=self.sched_step, gamma=self.sched_gamma)

     def train(self):
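This trades the plateau-based schedule for a fixed-step decay: StepLR multiplies the learning rate by gamma every step_size steps, instead of waiting for the monitored loss to stall. With the constructor defaults above (sched_step=20, sched_gamma=1) the rate stays constant until a gamma below 1 is passed in. A minimal, self-contained sketch of the behavior (the model and values are illustrative):

    import torch
    from torch.optim import lr_scheduler

    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.002)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
    for epoch in range(60):
        # ... one epoch of training ...
        scheduler.step()  # lr: 0.002, then 2e-4 after 20 steps, 2e-5 after 40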
@@ -155,11 +156,11 @@ class Trainer:

                 labels = labels.to(self.device)
                 with torch.set_grad_enabled(phase == 'train'):
                     if phase == 'train':
-                        self.optimizer.zero_grad()
                         outputs = self.model(inputs)
                         loss, loss_values = self.mt_loss(outputs, labels, phase=phase)
+                        self.optimizer.zero_grad()
                         loss.backward()
-                        torch.nn.utils.clip_grad_norm_(self.model.parameters(), 2)
+                        torch.nn.utils.clip_grad_norm_(self.model.parameters(), 3)
                         self.optimizer.step()
                         self.scheduler.step()
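Moving zero_grad() from before the forward pass to just after the loss computation is behavior-preserving, since gradients only need to be cleared before backward(); clip_grad_norm_ then caps the global gradient norm (now at 3) before the update. The canonical per-batch order, as a skeleton with illustrative names:

    optimizer.zero_grad()              # clear stale grads, any time before backward
    outputs = model(inputs)            # forward pass
    loss = criterion(outputs, labels)  # compute loss
    loss.backward()                    # accumulate gradients
    torch.nn.utils.clip_grad_norm_(model.parameters(), 3)  # cap grad norm at 3
    optimizer.step()                   # apply the update

Note that scheduler.step() sits inside the batch loop in this hunk; with StepLR that makes sched_step count batches rather than epochs, assuming no call outside this excerpt.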