fix in case of no dir

This commit is contained in:
Lorenzo 2021-03-22 16:10:56 +01:00
parent 9e0877e150
commit 4ed80aef19
10 changed files with 19 additions and 14 deletions

View File

@ -3,14 +3,14 @@
<img src="docs/monoloco.gif" alt="gif" />
This library is based on three research projects for monocular/stereo 3D human localization, orientation and social distancing.
This library is based on three research projects for monocular/stereo 3D human localization (detection), body orientation, and social distancing.
> __MonStereo: When Monocular and Stereo Meet at the Tail of 3D Human Localization__<br />
> _[L. Bertoni](https://scholar.google.com/citations?user=f-4YHeMAAAAJ&hl=en), [S. Kreiss](https://www.svenkreiss.com),
[T. Mordan](https://people.epfl.ch/taylor.mordan/?lang=en), [A. Alahi](https://scholar.google.com/citations?user=UIhXQ64AAAAJ&hl=en)_, ICRA 2021 <br />
__[Article](https://arxiv.org/abs/2008.10913)__ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; __[Citation](#Citation)__ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; __[Video](#Todo)__
<img src="docs/000840_multi.jpg" width="700"/>
<img src="docs/out_000840_multi.jpg" width="700"/>
---
@ -128,7 +128,7 @@ To show all the instances estimated by MonoLoco add the argument `show_all` to t
It is also possible to run [openpifpaf](https://github.com/vita-epfl/openpifpaf) directly
by using `--mode keypoints`. All the other pifpaf arguments are also supported
and can be checked with `python -m monstereo.run predict --help`.
and can be checked with `python -m monoloco.run predict --help`.
![predict](docs/out_002282_pifpaf.jpg)
@ -149,16 +149,17 @@ python3 -m monoloco.run predict --mode stereo \
-o data/output -long_edge 2500
```
![Crowded scene](docs/out_000840.jpg)
![Crowded scene](docs/out_000840_multi.jpg)
```
python3 -m monoloco.run predict --glob docs/005523*.png \ --output_types multi \
--model data/models/ms-200710-1511.pkl \
--path_gt <to match results with ground-truths> \
-o data/output --long_edge 2500
-o data/output --long_edge 2500 \
--instance-threshold 0.05 --seed-threshold 0.05
```
![Occluded hard example](docs/out_005523.jpg)
![Occluded hard example](docs/out_005523.png.multi.jpg)
## B) Social Distancing (and Talking activity)
To visualize social distancing compliance, simply add the argument `--social-distance` to the predict command. This visualization is not supported with a stereo camera.
@ -180,17 +181,16 @@ python -m monoloco.run predict docs/frame0038.jpg \
<img src="docs/out_frame0038.jpg.front_bird.jpg" width="700"/>
## C) Orientation and Bounding Box dimensions
MonoLoco++ estimates orientation and box dimensions as well. Results are saved in a json file when using the command
The network estimates orientation and box dimensions as well. Results are saved in a json file when using the command
`--output_types json`. At the moment, the only visualization including orientation is the social distancing one.
<br>
<br />
## Training
We train on the KITTI dataset (MonoLoco/Monoloco++/MonStereo) or the nuScenes dataset (MonoLoco) specifying the path of the json file containing the input joints. Please download them [here](https://drive.google.com/file/d/1e-wXTO460ip_Je2NdXojxrOrJ-Oirlgh/view?usp=sharing) or follow [preprocessing instructions](#Preprocessing).
Our results for MonoLoco++ are obtained with:
Results for MonoLoco++ are obtained with:
```
python -m monoloco.run train --joints data/arrays/joints-kitti-201202-1743.json --save --monocular
@ -207,7 +207,7 @@ Finally, for a more extensive list of available parameters, run:
`python -m monstereo.run train --help`
<br>
<br />
## Preprocessing
Preprocessing and training step are already fully supported by the code provided,

BIN
docs/000840.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 736 KiB

BIN
docs/000840_right.png Executable file

Binary file not shown.

After

Width:  |  Height:  |  Size: 732 KiB

View File

Before

Width:  |  Height:  |  Size: 633 KiB

After

Width:  |  Height:  |  Size: 633 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 398 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 255 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 688 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 149 KiB

View File

@ -108,6 +108,7 @@ def factory_from_args(args):
else:
args.batch_size = 1
# Patch for stereo images with batch_size = 2
if args.batch_size == 2 and not args.long_edge:
args.long_edge = 1238
LOG.info("Long-edge set to %i", args.long_edge)
@ -177,7 +178,9 @@ def predict(args):
else:
file_name = os.path.basename(meta['file_name'])
output_path = os.path.join(args.output_directory, 'out_' + file_name)
print('image', batch_i, meta['file_name'], output_path)
im_name = os.path.basename(meta['file_name'])
print(f'{batch_i} image {im_name} saved as {output_path}')
# Only for MonStereo
else:
@ -186,7 +189,7 @@ def predict(args):
# 3D Predictions
if args.mode != 'keypoints':
im_size = (cpu_image.size[0], cpu_image.size[1]) # Original
kk, dic_gt = factory_for_gt(im_size, focal_length=args.focal, name=file_name, path_gt=args.path_gt)
kk, dic_gt = factory_for_gt(im_size, focal_length=args.focal, name=im_name, path_gt=args.path_gt)
# Preprocess pifpaf outputs and run monoloco
boxes, keypoints = preprocess_pifpaf(pifpaf_outs['left'], im_size, enlarge_boxes=False)

View File

@ -45,6 +45,8 @@ def cli():
predict_parser.add_argument('--font-size', default=0, type=int, help='annotation font size')
predict_parser.add_argument('--monocolor-connections', default=False, action='store_true',
help='use a single color per instance')
predict_parser.add_argument('--instance-threshold', type=float, default=None, help='threshold for entire instance')
predict_parser.add_argument('--seed-threshold', type=float, default=None, help='threshold for single seed')
predict_parser.add_argument('--disable-cuda', action='store_true', help='disable CUDA')
predict_parser.add_argument('--focal',
help='focal length in mm for a sensor size of 7.2x5.4 mm. Default nuScenes sensor',