update readme and compatibility

This commit is contained in:
Lorenzo 2020-11-30 14:22:49 +01:00
parent 4992d4c34e
commit 5abd31839c
4 changed files with 18 additions and 14 deletions

View File

@ -149,13 +149,20 @@ data/kitti/images`
### Annotations to preprocess
MonStereo is trained using 2D human pose joints. To create them run pifpaf over KITTI training images.
You can create them running the predict script and using `--mode pifpaf`.
MonStereo is trained using 2D human pose joints. To obtain the joints, the first step is to run
pifpaf over the KITTI training images, either by running the predict script with `--mode pifpaf`,
or by using the pifpaf code directly.
The MonStereo preprocess script expects annotations from left and right images in 2 different folders
with the same path, apart from the suffix `_right` for the "right" folder.
For example `data/annotations` and `data/annotations_right`.
Do not change name of json files created by pifpaf. For each left annotation,
the code will look for the corresponding right annotation.
### Inputs joints for training
MonStereo is trained using 2D human pose joints matched with the ground truth location provided by
KITTI Dataset. To create the joints run: `python3 -m monstereo.run prep` specifying:
1. `--dir_ann` annotation directory containing Pifpaf joints of KITTI.
`--dir_ann` annotation directory containing Pifpaf joints of KITTI for the left images.
### Ground truth file for evaluation

View File

@ -24,10 +24,9 @@ from .transforms import flip_inputs, flip_labels, height_augmentation
class PreprocessKitti:
"""Prepare arrays with same format as nuScenes preprocessing but using ground truth txt files"""
# AV_W = 0.68
# AV_L = 0.75
# AV_H = 1.72
# WLH_STD = 0.1
dir_gt = os.path.join('data', 'kitti', 'gt')
dir_images = '/data/lorenzo-data/kitti/original_images/training/image_2'
dir_byc_l = '/data/lorenzo-data/kitti/object_detection/left'
# SOCIAL DISTANCING PARAMETERS
THRESHOLD_DIST = 2 # Threshold to check distance of people
@ -51,9 +50,6 @@ class PreprocessKitti:
self.dir_ann = dir_ann
self.iou_min = iou_min
self.monocular = monocular
self.dir_gt = os.path.join('data', 'kitti', 'gt')
self.dir_images = '/data/lorenzo-data/kitti/original_images/training/image_2'
self.dir_byc_l = '/data/lorenzo-data/kitti/object_detection/left'
self.names_gt = tuple(os.listdir(self.dir_gt))
self.dir_kk = os.path.join('data', 'kitti', 'calib')
self.list_gt = glob.glob(self.dir_gt + '/*.txt')

View File

@ -2,6 +2,7 @@
import argparse
from openpifpaf.network.factory import cli as openpifpaf_cli
from openpifpaf.network import nets
from openpifpaf import decoder
@ -41,8 +42,8 @@ def cli():
predict_parser.add_argument('--dpi', help='image resolution', type=int, default=100)
# Pifpaf
nets.cli(predict_parser)
decoder.cli(predict_parser, force_complete_pose=True, instance_threshold=0.15)
openpifpaf_cli(predict_parser)
decoder.cli(predict_parser)
predict_parser.add_argument('--scale', default=1.0, type=float, help='change the scale of the image to preprocess')
# Monoloco

View File

@ -199,11 +199,11 @@ def factory_file(path_calib, dir_ann, basename, mode='left'):
if mode == 'left':
kk, tt = p_left[:]
path_ann = os.path.join(dir_ann, basename + '.png.pifpaf.json')
path_ann = os.path.join(dir_ann, basename + '.png.predictions.json')
else:
kk, tt = p_right[:]
path_ann = os.path.join(dir_ann + '_right', basename + '.png.pifpaf.json')
path_ann = os.path.join(dir_ann + '_right', basename + '.png.predictions.json')
from ..utils import open_annotations
annotations = open_annotations(path_ann)