Update readme and fix data names
This commit is contained in:
parent
28faf348c5
commit
cb344c4664
36
README.md
36
README.md
@ -36,7 +36,8 @@ pip install nuscenes-devkit openpifpaf
|
|||||||
Data
|
Data
|
||||||
├── arrays
|
├── arrays
|
||||||
├── models
|
├── models
|
||||||
├── baselines
|
├── kitti
|
||||||
|
├── nuscenes
|
||||||
├── logs
|
├── logs
|
||||||
|
|
||||||
|
|
||||||
@ -44,7 +45,7 @@ Run the following to create the folders:
|
|||||||
```
|
```
|
||||||
mkdir data
|
mkdir data
|
||||||
cd data
|
cd data
|
||||||
mkdir arrays models baselines logs
|
mkdir arrays models data-kitti data-nuscenes logs
|
||||||
```
|
```
|
||||||
|
|
||||||
### Pre-trained Models
|
### Pre-trained Models
|
||||||
@ -52,11 +53,6 @@ mkdir arrays models baselines logs
|
|||||||
* Download a Pifpaf pre-trained model from [openpifpaf](https://github.com/vita-epfl/openpifpaf) project
|
* Download a Pifpaf pre-trained model from [openpifpaf](https://github.com/vita-epfl/openpifpaf) project
|
||||||
and save it into `data/models`
|
and save it into `data/models`
|
||||||
|
|
||||||
### Baselines
|
|
||||||
Download the KITTI ground truth txt files from Google Drive (link to be added) and unzip them in `data/baselines`
|
|
||||||
The zip file also contains nuScenes and KITTI splits for training and validation, as well as detections
|
|
||||||
from other baselines (more details in Eval section)
|
|
||||||
|
|
||||||
|
|
||||||
# Interfaces
|
# Interfaces
|
||||||
All the commands are run through a main file called `main.py` using subparsers.
|
All the commands are run through a main file called `main.py` using subparsers.
|
||||||
@ -102,6 +98,22 @@ Without ground_truth matching (all the detected people):
|
|||||||

|

|
||||||
|
|
||||||
# Preprocess
|
# Preprocess
|
||||||
|
|
||||||
|
### Datasets
|
||||||
|
|
||||||
|
#### 1) KITTI dataset
|
||||||
|
Download KITTI ground truth files and camera calibration matrices for training
|
||||||
|
from [here](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d) and
|
||||||
|
save them respectively into `data/kitti/gt` and `data/kitti/calib`.
|
||||||
|
To extract pifpaf joints, you also need to download the training images, put them in any folder and soft-link them in `
|
||||||
|
data/kitti/images`
|
||||||
|
|
||||||
|
#### 2) nuScenes dataset
|
||||||
|
Download nuScenes dataset (any version: Mini, Teaser or TrainVal) from [nuScenes](https://www.nuscenes.org/download),
|
||||||
|
save it anywhere and soft link it in `data/nuscenes`
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
### Input joints for training
|
### Input joints for training
|
||||||
MonoLoco is trained using 2D human pose joints detected by pifpaf and matched with the ground truth location provided by
|
MonoLoco is trained using 2D human pose joints detected by pifpaf and matched with the ground truth location provided by
|
||||||
nuScenes or KITTI Dataset. To create the joints run: `python src/main.py prep` specifying:
|
nuScenes or KITTI Dataset. To create the joints run: `python src/main.py prep` specifying:
|
||||||
@ -112,7 +124,12 @@ dataset are supported: nuscenes_mini, nuscenes, nuscenes_teaser.
|
|||||||
|
|
||||||
### Ground truth file for evaluation
|
### Ground truth file for evaluation
|
||||||
The preprocessing script also outputs a second json file called **names.json** which provides a dictionary indexed
|
The preprocessing script also outputs a second json file called **names.json** which provides a dictionary indexed
|
||||||
by the image name to easily access ground truth files for evaluation purposes.
|
by the image name to easily access ground truth files for evaluation and prediction purposes.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
### Train
|
### Train
|
||||||
@ -134,6 +151,9 @@ and stereo Baselines:
|
|||||||
[MonoDepth](https://arxiv.org/abs/1609.03677) and our
|
[MonoDepth](https://arxiv.org/abs/1609.03677) and our
|
||||||
[Geometrical Baseline](src/eval/geom_baseline.py).
|
[Geometrical Baseline](src/eval/geom_baseline.py).
|
||||||
|
|
||||||
|
Alternatively we provide the links to download them.
|
||||||
|
|
||||||
|
|
||||||
The following graph is obtained running:
|
The following graph is obtained running:
|
||||||
`python3 src/main.py eval --dataset kitti --model data/models/base_model.pickle`
|
`python3 src/main.py eval --dataset kitti --model data/models/base_model.pickle`
|
||||||

|

|
||||||
|
|||||||
@ -30,13 +30,13 @@ class KittiEval:
|
|||||||
from visuals.results import print_results
|
from visuals.results import print_results
|
||||||
self.print_results = print_results
|
self.print_results = print_results
|
||||||
|
|
||||||
self.dir_gt = os.path.join('data', 'baselines', 'gt')
|
self.dir_gt = os.path.join('data', 'kitti', 'gt')
|
||||||
self.dir_m3d = os.path.join('data', 'baselines', 'm3d')
|
self.dir_m3d = os.path.join('data', 'kitti', 'm3d')
|
||||||
self.dir_3dop = os.path.join('data', 'baselines', '3dop')
|
self.dir_3dop = os.path.join('data', 'kitti', '3dop')
|
||||||
self.dir_md = os.path.join('data', 'baselines', 'monodepth')
|
self.dir_md = os.path.join('data', 'kitti', 'monodepth')
|
||||||
self.dir_psm = os.path.join('data', 'baselines', 'psm')
|
self.dir_psm = os.path.join('data', 'kitti', 'psm')
|
||||||
self.dir_our = os.path.join('data', 'baselines', 'monoloco')
|
self.dir_our = os.path.join('data', 'kitti', 'monoloco')
|
||||||
path_val = os.path.join('data', 'baselines', 'val.txt')
|
path_val = os.path.join('data', 'kitti', 'val.txt')
|
||||||
dir_logs = os.path.join('data', 'logs')
|
dir_logs = os.path.join('data', 'logs')
|
||||||
assert dir_logs, "No directory to save final statistics"
|
assert dir_logs, "No directory to save final statistics"
|
||||||
|
|
||||||
|
|||||||
@ -15,7 +15,7 @@ class PreprocessKitti:
|
|||||||
|
|
||||||
self.dir_ann = dir_ann
|
self.dir_ann = dir_ann
|
||||||
self.iou_thresh = iou_thresh
|
self.iou_thresh = iou_thresh
|
||||||
self.dir_gt = os.path.join('data', 'baselines', 'gt')
|
self.dir_gt = os.path.join('data', 'kitti', 'gt')
|
||||||
self.names_gt = os.listdir(self.dir_gt)
|
self.names_gt = os.listdir(self.dir_gt)
|
||||||
self.dir_kk = os.path.join('data', 'baselines', 'calib')
|
self.dir_kk = os.path.join('data', 'baselines', 'calib')
|
||||||
self.list_gt = glob.glob(self.dir_gt + '/*.txt')
|
self.list_gt = glob.glob(self.dir_gt + '/*.txt')
|
||||||
|
|||||||
@ -10,7 +10,6 @@ from openpifpaf import decoder
|
|||||||
from features.preprocess_nu import PreprocessNuscenes
|
from features.preprocess_nu import PreprocessNuscenes
|
||||||
from features.preprocess_ki import PreprocessKitti
|
from features.preprocess_ki import PreprocessKitti
|
||||||
from predict.predict_2d_3d import predict
|
from predict.predict_2d_3d import predict
|
||||||
from features.trial import trials
|
|
||||||
from models.trainer import Trainer
|
from models.trainer import Trainer
|
||||||
from eval.run_kitti import RunKitti
|
from eval.run_kitti import RunKitti
|
||||||
from eval.geom_baseline import GeomBaseline
|
from eval.geom_baseline import GeomBaseline
|
||||||
@ -62,7 +61,7 @@ def cli():
|
|||||||
default="data/models/best_model__seed_2_.pickle")
|
default="data/models/best_model__seed_2_.pickle")
|
||||||
predict_parser.add_argument('--path_gt', help='path of json file with gt 3d localization',
|
predict_parser.add_argument('--path_gt', help='path of json file with gt 3d localization',
|
||||||
default='data/arrays/names-kitti-000.json')
|
default='data/arrays/names-kitti-000.json')
|
||||||
predict_parser.add_argument('--dir_calib', default='data/baselines/calib/', help='directory of calib_files')
|
predict_parser.add_argument('--dir_calib', default='data/kitti/calib/', help='directory of calib_files')
|
||||||
predict_parser.add_argument('--transform', help='transformation for the pose', default='None')
|
predict_parser.add_argument('--transform', help='transformation for the pose', default='None')
|
||||||
predict_parser.add_argument('--draw_kps', help='to draw kps in the images', action='store_true')
|
predict_parser.add_argument('--draw_kps', help='to draw kps in the images', action='store_true')
|
||||||
predict_parser.add_argument('--predict', help='whether to make prediction', action='store_true')
|
predict_parser.add_argument('--predict', help='whether to make prediction', action='store_true')
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user