merge from master
.gitignore (vendored, 9 changes)
@@ -2,10 +2,13 @@
data/
.DS_store
__pycache__
Monoloco/*.pyc
monstereo/*.pyc
.pytest*
dist/
build/
dist/
*.egg-info
tests/*.png
kitti-eval/*
kitti-eval/build
kitti-eval/cmake-build-debug
figures/
visual_tests/

@@ -9,7 +9,7 @@ Good-names=xx,dd,zz,hh,ww,pp,kk,lr,w1,w2,w3,mm,im,uv,ax,COV_MIN,CONF_MIN

[TYPECHECK]

-disable=E1102,missing-docstring,useless-object-inheritance,duplicate-code,too-many-arguments,too-many-instance-attributes,too-many-locals,too-few-public-methods,arguments-differ,logging-format-interpolation
+disable=import-error,invalid-name,unused-variable,E1102,missing-docstring,useless-object-inheritance,duplicate-code,too-many-arguments,too-many-instance-attributes,too-many-locals,too-few-public-methods,arguments-differ,logging-format-interpolation,import-outside-toplevel

# List of members which are set dynamically and missed by pylint inference

LICENSE (4 changes)
@@ -1,4 +1,4 @@
-Copyright 2019 by EPFL/VITA. All rights reserved.
+Copyright 2020-2021 by EPFL/VITA. All rights reserved.

This project and all its files are licensed under
GNU AGPLv3 or later version.
@@ -6,4 +6,4 @@ GNU AGPLv3 or later version.
If this license is not suitable for your business or project
please contact EPFL-TTO (https://tto.epfl.ch/) for a full commercial license.

-This software may not be used to harm any person deliberately.
+This software may not be used to harm any person deliberately or for any military application.

README.md (237 changes)
@@ -1,51 +1,59 @@
# Monoloco
# Perceiving Humans in 3D

> We tackle the fundamentally ill-posed problem of 3D human localization from monocular RGB images. Driven by the limitation of neural networks outputting point estimates, we address the ambiguity in the task by predicting confidence intervals through a loss function based on the Laplace distribution. Our architecture is a light-weight feed-forward neural network that predicts 3D locations and corresponding confidence intervals given 2D human poses. The design is particularly well suited for small training data, cross-dataset generalization, and real-time applications. Our experiments show that we (i) outperform state-of-the-art results on KITTI and nuScenes datasets, (ii) even outperform a stereo-based method for far-away pedestrians, and (iii) estimate meaningful confidence intervals. We further share insights on our model of uncertainty in cases of limited observations and out-of-distribution samples.

This repository contains the code for two research projects:

1. **Perceiving Humans: from Monocular 3D Localization to Social Distancing (MonoLoco++)**
   [README](https://github.com/vita-epfl/monstereo/blob/master/docs/MonoLoco%2B%2B.md) & [Article](https://arxiv.org/abs/2009.00984)

   

   

2. **MonStereo: When Monocular and Stereo Meet at the Tail of 3D Human Localization**
   [README](https://github.com/vita-epfl/monstereo/blob/master/docs/MonStereo.md) & [Article](https://arxiv.org/abs/2008.10913)

   

```
@InProceedings{Bertoni_2019_ICCV,
    author = {Bertoni, Lorenzo and Kreiss, Sven and Alahi, Alexandre},
    title = {MonoLoco: Monocular 3D Pedestrian Localization and Uncertainty Estimation},
    booktitle = {The IEEE International Conference on Computer Vision (ICCV)},
    month = {October},
    year = {2019}
}
```

**2021**
Both projects have been built upon the CVPR'19 project [Openpifpaf](https://github.com/vita-epfl/openpifpaf)
for 2D pose estimation and the ICCV'19 project [MonoLoco](https://github.com/vita-epfl/monoloco) for monocular 3D localization.
All projects share the AGPL License.

**NEW! MonoLoco++ is [available](https://github.com/vita-epfl/monstereo):**
* It estimates 3D localization, orientation, and bounding box dimensions
* It verifies social distance requirements. More info: [video](https://www.youtube.com/watch?v=r32UxHFAJ2M) and [project page](http://vita.epfl.ch/monoloco)
* It works with [OpenPifPaf](https://github.com/vita-epfl/openpifpaf) 0.12 and PyTorch 1.7

**2020**
* Paper on the [ICCV'19](http://openaccess.thecvf.com/content_ICCV_2019/html/Bertoni_MonoLoco_Monocular_3D_Pedestrian_Localization_and_Uncertainty_Estimation_ICCV_2019_paper.html) website or [ArXiv](https://arxiv.org/abs/1906.06059)
* Check our video with method description and qualitative results on [YouTube](https://www.youtube.com/watch?v=ii0fqerQrec)
* Live demo available! (more info in the webcam section)
* Continuously tested with Travis CI: [](https://travis-ci.org/vita-epfl/monoloco)<br />

<img src="docs/pull.png" height="600">

# Setup
Installation steps are the same for both projects.

### Install
Python 3 is required. Python 2 is not supported.
Do not clone this repository and make sure there is no folder named monoloco in your current directory.
The installation has been tested on OSX and Linux operating systems, with Python 3.6 or Python 3.7.
Packages have been installed with pip and virtual environments.
For quick installation, do not clone this repository,
and make sure there is no folder named monstereo in your current directory.
A GPU is not required, yet it is highly recommended for real-time performance.
MonoLoco++ and MonStereo can be installed as a single package with:

```
pip3 install monoloco
pip3 install monstereo
```

For development of the monoloco source code itself, you need to clone this repository and then:
For development of the monstereo source code itself, you need to clone this repository and then:
```
pip3 install -e '.[test, prep]'
pip3 install sdist
cd monstereo
python3 setup.py sdist bdist_wheel
pip3 install -e .
```
Python 3.6 or 3.7 is required for the nuScenes development kit.
All details for the Pifpaf pose detector are at [openpifpaf](https://github.com/vita-epfl/openpifpaf).

### Interfaces
All the commands are run through a main file called `run.py` using subparsers.
To check all the commands for the parser and the subparsers (including the openpifpaf ones) run:

* `python3 -m monstereo.run --help`
* `python3 -m monstereo.run predict --help`
* `python3 -m monstereo.run train --help`
* `python3 -m monstereo.run eval --help`
* `python3 -m monstereo.run prep --help`

or check the file `monstereo/run.py`. A minimal sketch of this subparser layout is shown below.
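
As a rough illustration only (a hedged sketch of the argparse pattern, not the repository's actual `monstereo/run.py`; flag names here are assumptions), the subparser wiring can be pictured like this:

```python
import argparse

def cli():
    parser = argparse.ArgumentParser(prog='monstereo.run')
    subparsers = parser.add_subparsers(dest='command')

    # one subparser per task; each adds its own flags
    predict_parser = subparsers.add_parser('predict', help='3D localization on images')
    predict_parser.add_argument('--model', help='path to a MonoLoco++/MonStereo model')
    predict_parser.add_argument('--output_types', nargs='+', default=['multi'])

    for name in ('prep', 'train', 'eval'):
        subparsers.add_parser(name)

    return parser.parse_args()

if __name__ == '__main__':
    args = cli()
    print(args.command)
```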

### Data structure

@@ -53,7 +61,7 @@ All details for Pifpaf pose detector at [openpifpaf](https://github.com/vita-epf
    ├── arrays
    ├── models
    ├── kitti
    ├── nuscenes
    ├── figures
    ├── logs

@@ -61,163 +69,10 @@ Run the following to create the folders:
```
mkdir data
cd data
mkdir arrays models kitti nuscenes logs
mkdir arrays models kitti figures logs
```

### Pre-trained Models
* Download a MonoLoco pre-trained model from
  [Google Drive](https://drive.google.com/open?id=1F7UG1HPXGlDD_qL-AN5cv2Eg-mhdQkwv) and save it in `data/models`
  (default) or in any folder and call it through the command line option `--model <model path>`
* The Pifpaf pre-trained model will be automatically downloaded at the first run.
  Three standard, pretrained models are available with the command line options
  `--checkpoint resnet50`, `--checkpoint resnet101` and `--checkpoint resnet152`.
  Alternatively, you can download a Pifpaf pre-trained model from [openpifpaf](https://github.com/vita-epfl/openpifpaf)
  and call it with `--checkpoint <pifpaf model path>`

# Interfaces
All the commands are run through a main file called `run.py` using subparsers.
To check all the commands for the parser and the subparsers (including the openpifpaf ones) run:

* `python3 -m monoloco.run --help`
* `python3 -m monoloco.run predict --help`
* `python3 -m monoloco.run train --help`
* `python3 -m monoloco.run eval --help`
* `python3 -m monoloco.run prep --help`

or check the file `monoloco/run.py`

# Prediction
The predict script receives an image (or an entire folder using glob expressions),
calls PifPaf for 2D human pose detection over the image
and runs MonoLoco for the 3D localization of the detected poses.
The command `--networks` defines whether to save pifpaf outputs, MonoLoco outputs, or both.
You can check all commands for Pifpaf at [openpifpaf](https://github.com/vita-epfl/openpifpaf).

Output options include json files and/or visualization of the predictions on the image in *frontal mode*,
*birds-eye-view mode* or *combined mode* and can be specified with `--output_types`

### Ground truth matching
* In case you provide a ground-truth json file to compare the predictions of MonoLoco with,
  the script will match every detection using the Intersection over Union metric.
  The ground truth file can be generated using the subparser `prep` and called with the command `--path_gt`.
  Check the preprocessing section for more details or download the file from
  [here](https://drive.google.com/open?id=1F7UG1HPXGlDD_qL-AN5cv2Eg-mhdQkwv).

* In case you don't provide a ground-truth file, the script will look for a predefined path.
  If it does not find the file, it will generate images
  with all the predictions without ground-truth matching.

Below is an example with and without ground-truth matching. They have been created (adding or removing `--path_gt`) with:
`python3 -m monoloco.run predict --glob docs/002282.png --output_types combined --scale 2
--model data/models/monoloco-190513-1437.pkl --n_dropout 50 --z_max 30`

With ground-truth matching (only matching people):


Without ground-truth matching (all the detected people):


### Images without calibration matrix
To accurately estimate distance, the focal length is necessary.
However, it is still possible to test MonoLoco on images where the calibration matrix is not available.
Absolute distances are not meaningful, but relative distances still are.
Below is an example on a generic image from the web, created with:
`python3 -m monoloco.run predict --glob docs/surf.jpg --output_types combined --model data/models/monoloco-190513-1437.pkl --n_dropout 50 --z_max 25`



# Webcam
<img src="docs/webcam_short.gif" height=350 alt="example image" />

MonoLoco can run on personal computers with only a CPU and low-resolution images (e.g. 256x144) at ~2 fps.
It supports 3 types of visualizations: `front`, `bird` and `combined`.
Multiple visualizations can be combined in different windows.

The above gif has been obtained by running the following commands on a MacBook:

```
pip3 install opencv-python
python3 -m monoloco.run predict --webcam --scale 0.2 --output_types combined --z_max 10 --checkpoint resnet50 --model data/models/monoloco-190513-1437.pkl
```

# Preprocessing

### Datasets

#### 1) KITTI dataset
Download KITTI ground truth files and camera calibration matrices for training
from [here](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d) and
save them respectively into `data/kitti/gt` and `data/kitti/calib`.
To extract pifpaf joints, you also need to download the training images and soft-link the folder in
`data/kitti/images`

#### 2) nuScenes dataset
Download the nuScenes dataset from [nuScenes](https://www.nuscenes.org/download) (either Mini or TrainVal),
save it anywhere and soft-link it in `data/nuscenes`

nuScenes preprocessing requires `pip3 install nuscenes-devkit`

### Annotations to preprocess
MonoLoco is trained using 2D human pose joints. To create them, run pifpaf over the KITTI or nuScenes training images.
You can create them by running the predict script with `--networks pifpaf`.

### Input joints for training
MonoLoco is trained using 2D human pose joints matched with the ground truth location provided by
the nuScenes or KITTI dataset. To create the joints, run `python3 -m monoloco.run prep` specifying:
1. `--dir_ann` annotation directory containing Pifpaf joints of KITTI or nuScenes.

2. `--dataset` which dataset to preprocess. For nuScenes, all three versions of the
   dataset are supported: nuscenes_mini, nuscenes, nuscenes_teaser.

### Ground truth file for evaluation
The preprocessing script also outputs a second json file called **names-<date-time>.json** which provides a dictionary indexed
by the image name to easily access ground truth files for evaluation and prediction purposes.

# Training
Provide the json file containing the preprocessed joints as argument.

As simple as `python3 -m monoloco.run train --joints <json file path>`

All the hyperparameter options can be checked at `python3 -m monoloco.run train --help`.

### Hyperparameters tuning
Random search in log space is provided. An example: `python3 -m monoloco.run train --hyp --multiplier 10 --r_seed 1`.
One iteration of the multiplier includes 6 runs.

# Evaluation (KITTI Dataset)
We provide evaluation on KITTI for models trained on nuScenes or KITTI. We compare them with other monocular
and stereo baselines:

[Mono3D](https://www.cs.toronto.edu/~urtasun/publications/chen_etal_cvpr16.pdf),
[3DOP](https://xiaozhichen.github.io/papers/nips15chen.pdf),
[MonoDepth](https://arxiv.org/abs/1609.03677) and our
[Geometrical Baseline](monoloco/eval/geom_baseline.py).

* **Mono3D**: download validation files from [here](http://3dimage.ee.tsinghua.edu.cn/cxz/mono3d)
  and save them into `data/kitti/m3d`
* **3DOP**: download validation files from [here](https://xiaozhichen.github.io/)
  and save them into `data/kitti/3dop`
* **MonoDepth**: compute an average depth for every instance using the script
  [here](https://github.com/Parrotlife/pedestrianDepth-baseline/tree/master/MonoDepth-PyTorch)
  and save the results into `data/kitti/monodepth`
* **GeometricalBaseline**: a geometrical baseline comparison is provided.
  The average geometrical value for comparison can be obtained by running:
  `python3 -m monoloco.run eval --geometric
  --model data/models/monoloco-190719-0923.pkl --joints data/arrays/joints-nuscenes_teaser-190717-1424.json`

The following results are obtained by running:
`python3 -m monoloco.run eval --model data/models/monoloco-190719-0923.pkl --generate
--dir_ann <folder containing pifpaf annotations of KITTI images>`




Further instructions for prediction, preprocessing, training and evaluation can be found here:

* [MonoLoco++ README](https://github.com/vita-epfl/monstereo/blob/master/docs/MonoLoco%2B%2B.md)
* [MonStereo README](https://github.com/vita-epfl/monstereo/blob/master/docs/MonStereo.md)

Binary files added:
* docs/000840.png (executable, 736 KiB)
* docs/000840_multi.jpg (633 KiB)
* docs/000840_right.png (executable, 732 KiB)
* docs/005523.png (executable, 872 KiB)
* docs/005523_right.png (executable, 826 KiB)

docs/MonStereo.md (new file, 151 lines)

# MonStereo

> Monocular and stereo vision are cost-effective solutions for 3D human localization
in the context of self-driving cars or social robots. However, they are usually developed independently
and have their respective strengths and limitations. We propose a novel unified learning framework that
leverages the strengths of both monocular and stereo cues for 3D human localization.
Our method jointly (i) associates humans in left-right images,
(ii) deals with occluded and distant cases in stereo settings by relying on the robustness of monocular cues,
and (iii) tackles the intrinsic ambiguity of monocular perspective projection by exploiting prior knowledge
of human height distribution.
We achieve state-of-the-art quantitative results for the 3D localization task on the KITTI dataset
and estimate confidence intervals that account for challenging instances.
We show qualitative examples for long tail challenges such as occluded,
far-away, and children instances.

```
@InProceedings{bertoni_monstereo,
    author = {Bertoni, Lorenzo and Kreiss, Sven and Mordan, Taylor and Alahi, Alexandre},
    title = {MonStereo: When Monocular and Stereo Meet at the Tail of 3D Human Localization},
    booktitle = {arXiv:2008.10913},
    month = {August},
    year = {2020}
}
```

# Prediction
The predict script receives an image (or an entire folder using glob expressions),
calls PifPaf for 2D human pose detection over the image
and runs MonStereo for the 3D localization of the detected poses.

Output options include json files and/or visualization of the predictions on the image in *frontal mode*,
*birds-eye-view mode* or *multi mode* and can be specified with `--output_types`

### Pre-trained Models
* Download the MonStereo pre-trained model from
  [Google Drive](https://drive.google.com/drive/folders/1jZToVMBEZQMdLB5BAIq2CdCLP5kzNo9t?usp=sharing)
  and save it in `data/models`
  (default) or in any folder and call it through the command line option `--model <model path>`
* The Pifpaf pre-trained model will be automatically downloaded at the first run.
  Three standard, pretrained models are available with the command line options
  `--checkpoint resnet50`, `--checkpoint resnet101` and `--checkpoint resnet152`.
  Alternatively, you can download a Pifpaf pre-trained model from [openpifpaf](https://github.com/vita-epfl/openpifpaf)
  and call it with `--checkpoint <pifpaf model path>`. All experiments have been run with v0.8 of pifpaf.
  If you'd like to use an updated version, we suggest re-training the MonStereo model as well.
* The model for the experiments is provided in *data/models/ms-200710-1511.pkl*

### Ground truth matching
* In case you provide a ground-truth json file to compare the predictions of MonStereo with,
  the script will match every detection using the Intersection over Union metric
  (a simplified sketch of this matching is shown after this list).
  The ground truth file can be generated using the subparser `prep` and called with the command `--path_gt`.
  As this step requires running the pose detector over all the training images and saving the annotations, we
  provide the resulting json file for the category *pedestrians* from
  [Google Drive](https://drive.google.com/file/d/1e-wXTO460ip_Je2NdXojxrOrJ-Oirlgh/view?usp=sharing);
  save it into `data/arrays`.

* In case the ground-truth json file is not available, with the command `--show_all` it is possible to
  show all the predictions for the image
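
For illustration, a minimal sketch of IoU-based matching between predicted and ground-truth boxes (a simplified stand-in, not the repository's exact implementation, which lives in its utils module):

```python
def iou(box_a, box_b):
    """Intersection over Union of two [x1, y1, x2, y2] boxes."""
    x1, y1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    x2, y2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter)

def match_detections(boxes, boxes_gt, iou_min=0.3):
    """Greedily match each detection to the best unmatched ground-truth box."""
    matches, used = [], set()
    for idx, box in enumerate(boxes):
        ious = [iou(box, gt) if i not in used else 0.0 for i, gt in enumerate(boxes_gt)]
        if ious and max(ious) >= iou_min:
            idx_gt = ious.index(max(ious))
            matches.append((idx, idx_gt))
            used.add(idx_gt)
    return matches
```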
After downloading the model and the ground-truth file, a demo can be tested with the following commands:

`python3 -m monstereo.run predict --glob docs/000840*.png --output_types multi --scale 2
--model data/models/ms-200710-1511.pkl --z_max 30 --checkpoint resnet152 --path_gt data/arrays/names-kitti-200615-1022.json
-o data/output`



`python3 -m monstereo.run predict --glob docs/005523*.png --output_types multi --scale 2
--model data/models/ms-200710-1511.pkl --z_max 30 --checkpoint resnet152 --path_gt data/arrays/names-kitti-200615-1022.json
-o data/output`



# Preprocessing
Preprocessing and training steps are already fully supported by the code provided,
but they first require running a pose detector over
all the training images and collecting the annotations.
The code supports this option (by running the predict script with `--mode pifpaf`).

### Data structure

    Data
    ├── arrays
    ├── models
    ├── kitti
    ├── logs
    ├── output

Run the following to create the folders:
```
mkdir data
cd data
mkdir arrays models kitti logs output
```

### Datasets
Download KITTI ground truth files and camera calibration matrices for training
from [here](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d) and
save them respectively into `data/kitti/gt` and `data/kitti/calib`.
To extract pifpaf joints, you also need to download the training images and soft-link the folder in
`data/kitti/images`

### Annotations to preprocess
MonStereo is trained using 2D human pose joints. To obtain the joints, the first step is to run
pifpaf over the KITTI training images, either by running the predict script with `--mode pifpaf`,
or by using the pifpaf code directly.
The MonStereo preprocessing script expects annotations from left and right images in 2 different folders
with the same path apart from the suffix `_right` for the "right" folder,
for example `data/annotations` and `data/annotations_right`.
Do not change the names of the json files created by pifpaf. For each left annotation,
the code will look for the corresponding right annotation (see the sketch below).
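
A minimal sketch of this left/right pairing convention (the folder names are the example ones above, not fixed by the code):

```python
import os

def right_annotation_path(left_path):
    """Map a left-image pifpaf annotation to its right-image counterpart.

    Assumes the example layout above: data/annotations/xxx.predictions.json
    pairs with data/annotations_right/xxx.predictions.json.
    """
    folder, name = os.path.split(left_path)
    return os.path.join(folder + '_right', name)

left = 'data/annotations/000840.png.predictions.json'
right = right_annotation_path(left)
assert os.path.basename(left) == os.path.basename(right)
```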

### Input joints for training
MonStereo is trained using 2D human pose joints matched with the ground truth location provided by
the KITTI dataset. To create the joints, run `python3 -m monstereo.run prep` specifying:

`--dir_ann` annotation directory containing Pifpaf joints of KITTI for the left images.

### Ground truth file for evaluation
The preprocessing script also outputs a second json file called **names-<date-time>.json** which provides a dictionary indexed
by the image name to easily access ground truth files for evaluation and prediction purposes (a loading example is sketched below).
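
As an illustration of how such a file can be used (the file path and the exact key format are assumptions based on the naming convention above):

```python
import json

# hypothetical path following the names-<date-time>.json convention
with open('data/arrays/names-kitti-200615-1022.json', 'r') as f:
    dic_names = json.load(f)

# look up the ground truth associated with one image by its name
gt_entry = dic_names.get('000840.png')
print(gt_entry is not None)
```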

# Training
Provide the json file containing the preprocessed joints as argument.
As simple as `python3 -m monstereo.run train --joints <json file path>`
All the hyperparameter options can be checked at `python3 -m monstereo.run train --help`.

# Evaluation (KITTI Dataset)
### Average Localization Error (ALE)
We provide evaluation on KITTI in the eval section. Txt files for MonStereo are generated with the command:

`python -m monstereo.run eval --dir_ann <directory of pifpaf annotations> --model data/models/ms-200710-1511.pkl --generate`

<img src="quantitative_mono.png" width="600"/>

### Relative Average Precision Localization (RALP-5%)
We modified the original C++ evaluation of KITTI to make it relative to distance. We use **cmake**.
To run the evaluation, first generate the txt files with:

`python -m monstereo.run eval --dir_ann <directory of pifpaf annotations> --model data/models/ms-200710-1511.pkl --generate`

Then follow the instructions of this [repository](https://github.com/cguindel/eval_kitti)
to prepare the folders accordingly (or follow the KITTI guidelines) and run the evaluation.
The modified file is called *evaluate_object.cpp* and runs exactly as the original KITTI evaluation.

docs/MonoLoco++.md (new file, 230 lines)

# Perceiving Humans: from Monocular 3D Localization to Social Distancing

> Perceiving humans in the context of Intelligent Transportation Systems (ITS)
often relies on multiple cameras or expensive LiDAR sensors.
In this work, we present a new cost-effective vision-based method that perceives humans' locations in 3D
and their body orientation from a single image.
We address the challenges related to the ill-posed monocular 3D tasks by proposing a deep learning method
that predicts confidence intervals in contrast to point estimates. Our neural network architecture estimates
humans' 3D body locations and their orientation with a measure of uncertainty.
Our vision-based system (i) is privacy-safe, (ii) works with any fixed or moving cameras,
and (iii) does not rely on ground plane estimation.
We demonstrate the performance of our method with respect to three applications:
locating humans in 3D, detecting social interactions,
and verifying the compliance of recent safety measures due to the COVID-19 outbreak.
Indeed, we show that we can rethink the concept of "social distancing" as a form of social interaction
in contrast to a simple location-based rule. We publicly share the source code towards an open science mission.

```
@InProceedings{bertoni_social,
    author = {Bertoni, Lorenzo and Kreiss, Sven and Alahi, Alexandre},
    title = {Perceiving Humans: from Monocular 3D Localization to Social Distancing},
    booktitle = {arXiv:2009.00984},
    month = {September},
    year = {2020}
}
```


## Predictions
For a quick setup, download a pifpaf and a MonoLoco++ model from
[here](https://drive.google.com/drive/folders/1jZToVMBEZQMdLB5BAIq2CdCLP5kzNo9t?usp=sharing) and save them into `data/models`.

### 3D Localization
The predict script receives an image (or an entire folder using glob expressions),
calls PifPaf for 2D human pose detection over the image
and runs MonoLoco++ for the 3D localization of the detected poses.
The command `--net` defines whether to save pifpaf outputs, MonoLoco++ outputs, or MonStereo ones.
You can check all commands for Pifpaf at [openpifpaf](https://github.com/vita-epfl/openpifpaf).

Output options include json files and/or visualization of the predictions on the image in *frontal mode*,
*birds-eye-view mode* or *combined mode* and can be specified with `--output_types`

Ground-truth KITTI files for comparing results can be downloaded from
[here](https://drive.google.com/drive/folders/1jZToVMBEZQMdLB5BAIq2CdCLP5kzNo9t?usp=sharing)
(file called *names-kitti*) and should be saved into `data/arrays`.
Ground-truth files can also be generated; more info in the preprocessing section.

For an example image, run the following command:

```
python -m monstereo.run predict \
docs/002282.png \
--net monoloco_pp \
--output_types multi \
--model data/models/monoloco_pp-201203-1424.pkl \
--path_gt data/arrays/names-kitti-200615-1022.json \
-o <output directory> \
--long-edge <rescale the image by providing dimension of long side. If None original resolution> \
--n_dropout <50 to include epistemic uncertainty, 0 otherwise>
```



To show all the instances estimated by MonoLoco, add the argument `--show_all` to the above command.



It is also possible to run [openpifpaf](https://github.com/vita-epfl/openpifpaf) directly
by specifying the network with the argument `--net pifpaf`. All the other pifpaf arguments are also supported
and can be checked with `python -m monstereo.run predict --help`.



### Focal Length and Camera Parameters
Absolute distances are affected by the camera intrinsic parameters.
When processing KITTI images, the network uses the provided intrinsic matrix of the dataset.
In all the other cases, we use the parameters of the nuScenes cameras, with 1/1.8" CMOS sensors of size 7.2 x 5.4 mm.
The default focal length is 5.7 mm, and this parameter can be modified using the argument `--focal` (see the sketch below).
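
For intuition, here is a hedged sketch of how such parameters translate into a pinhole intrinsic matrix (standard pinhole geometry, not code from this repository; the 1600x900 resolution is an assumption matching nuScenes images):

```python
import numpy as np

def intrinsic_matrix(focal_mm=5.7, sensor_w_mm=7.2, sensor_h_mm=5.4,
                     width_px=1600, height_px=900):
    """Build a pinhole intrinsic matrix from focal length and sensor size."""
    fx = focal_mm * width_px / sensor_w_mm   # focal length in pixels (x)
    fy = focal_mm * height_px / sensor_h_mm  # focal length in pixels (y)
    cx, cy = width_px / 2, height_px / 2     # principal point at image center
    return np.array([[fx, 0., cx],
                     [0., fy, cy],
                     [0., 0., 1.]])

print(intrinsic_matrix())
```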

### Social Distancing
To visualize social distancing compliance, simply add the argument `--social_distance` to the predict command.

An example from the Collective Activity Dataset is provided below.

<img src="frame0038.jpg" width="500"/>

To visualize social distancing, run the command below:
```
python -m monstereo.run predict \
docs/frame0038.jpg \
--net monoloco_pp \
--social_distance \
--output_types front bird --show_all \
--model data/models/monoloco_pp-201203-1424.pkl -o <output directory>
```
<img src="out_frame0038.jpg.front.png" width="400"/>

<img src="out_frame0038.jpg.bird.png" width="400"/>

Threshold distance and radii (for F-formations) can be set using `--threshold-dist` and `--radii`, respectively.

For more info, run:

`python -m monstereo.run predict --help`

### Orientation and Bounding Box dimensions
MonoLoco++ estimates orientation and box dimensions as well. Results are saved in a json file when using the command
`--output_types json`. At the moment, the only visualization including orientation is the social distancing one.

## Preprocessing

### KITTI
Annotations from a pose detector need to be stored in a folder,
for example by using [openpifpaf](https://github.com/vita-epfl/openpifpaf):
```
python -m openpifpaf.predict \
--glob "<kitti images directory>/*.png" \
--json-output <directory to contain predictions> \
--checkpoint=shufflenetv2k30 \
--instance-threshold=0.05 --seed-threshold 0.05 --force-complete-pose
```
Once the step is complete:
`python -m monstereo.run prep --dir_ann <directory that contains predictions> --monocular`

### Collective Activity Dataset
To evaluate on the [collective activity dataset](http://vhosts.eecs.umich.edu/vision//activity-dataset.html)
(without any training), we selected 6 scenes that contain people talking to each other.
This allows for a balanced dataset, but any other configuration will work.

The expected structure for the dataset is the following:

    collective_activity
    ├── images
    ├── annotations

where images and annotations inside follow the naming convention:

    IMAGES: seq<sequence_name>_frame<frame_name>.jpg
    ANNOTATIONS: seq<sequence_name>_annotations.txt

With respect to the original dataset, the images and annotations are moved to a single folder
and the sequence is added to their name. One command to do this is:

`rename -v -n 's/frame/seq14_frame/' f*.jpg`

which, for example, changes the names of all the jpg images in that folder by adding the sequence number
(remove `-n` after checking it works). A Python alternative is sketched below.
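
A minimal Python sketch of the same renaming (the `seq14` prefix is the example above; adjust it per sequence):

```python
import glob
import os

SEQ = 'seq14'  # example sequence from above

for path in sorted(glob.glob('frame*.jpg')):
    folder, name = os.path.split(path)
    new_path = os.path.join(folder, f'{SEQ}_{name}')
    print(path, '->', new_path)  # dry run, like rename -n
    # os.rename(path, new_path)  # uncomment after checking the output
```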

Pifpaf annotations should also be saved in a single folder and can be created with:

```
python -m openpifpaf.predict \
--glob "data/collective_activity/images/*.jpg" \
--checkpoint=shufflenetv2k30 \
--instance-threshold=0.05 --seed-threshold 0.05 --force-complete-pose \
--json-output /data/lorenzo-data/annotations/collective_activity/v012
```

Finally, to evaluate activity using a MonoLoco++ pre-trained model trained either on nuScenes or KITTI:
```
python -m monstereo.run eval --activity \
--net monoloco_pp --dataset collective \
--model <MonoLoco++ model path> --dir_ann <pifpaf annotations directory>
```

## Training
We train on the KITTI or nuScenes dataset specifying the path of the input joints.

Our results are obtained with:

`python -m monstereo.run train --lr 0.001 --joints data/arrays/joints-kitti-201202-1743.json --save --monocular`

For a more extensive list of available parameters, run:

`python -m monstereo.run train --help`

## Evaluation

### 3D Localization
We provide evaluation on KITTI for models trained on nuScenes or KITTI. We compare them with other monocular
and stereo baselines:

[MonoLoco](https://github.com/vita-epfl/monoloco),
[Mono3D](https://www.cs.toronto.edu/~urtasun/publications/chen_etal_cvpr16.pdf),
[3DOP](https://xiaozhichen.github.io/papers/nips15chen.pdf),
[MonoDepth](https://arxiv.org/abs/1609.03677),
[MonoPSR](https://github.com/kujason/monopsr),
[MonoDIS](https://research.mapillary.com/img/publications/MonoDIS.pdf) and our
[Geometrical Baseline](monoloco/eval/geom_baseline.py).

* **Mono3D**: download validation files from [here](http://3dimage.ee.tsinghua.edu.cn/cxz/mono3d)
  and save them into `data/kitti/m3d`
* **3DOP**: download validation files from [here](https://xiaozhichen.github.io/)
  and save them into `data/kitti/3dop`
* **MonoDepth**: compute an average depth for every instance using the script
  [here](https://github.com/Parrotlife/pedestrianDepth-baseline/tree/master/MonoDepth-PyTorch)
  and save the results into `data/kitti/monodepth`
* **GeometricalBaseline**: a geometrical baseline comparison is provided.

The average geometrical value for comparison can be obtained by running:
```
python -m monstereo.run eval
--dir_ann <annotation directory>
--model <model path>
--net monoloco_pp
--generate
```

To also include the geometric baselines and MonoLoco, add the flag `--baselines`

<img src="quantitative_mono.png" width="550"/>

Adding the argument `--save`, a few plots will be generated, including the 3D localization error as a function of distance:
<img src="results.png" width="600"/>

### Activity Estimation (Talking)
Please follow the preprocessing steps for the Collective Activity Dataset and run pifpaf over the dataset images.
Evaluation on this dataset is done with models trained on either KITTI or nuScenes.
For optimal performance, we suggest the model trained on the nuScenes teaser (TODO add link)
```
python -m monstereo.run eval
--activity
--dataset collective
--net monoloco_pp
--model <path to the model>
--dir_ann <annotation directory>
```

Binary files added:
* docs/frame0038.jpg (42 KiB)
* docs/out_000840.jpg (197 KiB)
* docs/out_002282.png.multi.jpg (398 KiB)
* docs/out_002282.png.multi_all.jpg (411 KiB)
* docs/out_002282_pifpaf.jpg (193 KiB)
* docs/out_005523.jpg (255 KiB)
* docs/out_frame0038.jpg.bird.png (27 KiB)
* docs/out_frame0038.jpg.front.png (184 KiB)
* docs/quantitative.png (246 KiB)
* docs/quantitative_mono.png (295 KiB)
* docs/social_distancing.jpg (289 KiB)
* docs/truck.jpg (149 KiB)

kitti-eval/CMakeLists.txt (new file, 8 lines)
@@ -0,0 +1,8 @@
cmake_minimum_required (VERSION 2.6)
project(devkit_object)

find_package(PNG REQUIRED)

add_executable(evaluate_object evaluate_object.cpp)
include_directories(${PNG_INCLUDE_DIR})
target_link_libraries(evaluate_object ${PNG_LIBRARY})

kitti-eval/README.md (new file, 34 lines)
@@ -0,0 +1,34 @@
# eval_kitti #

[](https://travis-ci.org/cguindel/eval_kitti)
[](https://creativecommons.org/licenses/by-nc-sa/3.0/)

The *eval_kitti* software contains tools to evaluate object detection results using the KITTI dataset. The code is based on the [KITTI object development kit](http://www.cvlibs.net/datasets/kitti/eval_object.php).

### Tools ###

* *evaluate_object* is an improved version of the official KITTI evaluation that enables multi-class evaluation and splits of the training set for validation. It's updated according to the modifications introduced in 2017 by the KITTI authors.
* *parser* is meant to provide mAP and mAOS stats from the precision-recall curves obtained with the evaluation script.
* *create_link* is a helper that can be used to create a link to the results obtained with [lsi-faster-rcnn](https://github.com/cguindel/lsi-faster-rcnn).

### Usage ###
Build *evaluate_object* with CMake:
```
mkdir build
cd build
cmake ..
make
```

The `evaluate_object` executable will then be created inside `build`. The following folders are also required to be placed there in order to perform the evaluation:

* `data/object/label_2`, with the KITTI dataset labels.
* `lists`, containing the `.txt` files with the train/validation splits. These files are expected to contain a list of the used image indices, one per row.
* `results`, in which a subfolder should be created for every test, including a second-level `data` folder with the resulting `.txt` files to be evaluated.

`evaluate_object` should be called with the name of the results folder and the validation split; e.g.: ```./evaluate_object leaderboard valsplit```

`parser` needs the results folder; e.g.: ```./parser.py leaderboard```. **Note**: *parser* will only provide results for *Car*, *Pedestrian* and *Cyclist*; modify it (line 8) if you need to evaluate the rest of the classes.

### Copyright ###
This work is a derivative of [The KITTI Vision Benchmark Suite](http://www.cvlibs.net/datasets/kitti/eval_object.php) by A. Geiger, P. Lenz, C. Stiller and R. Urtasun, used under [CC BY-NC-SA](https://creativecommons.org/licenses/by-nc-sa/3.0/). Consequently, code in this repository is published under the same [Creative Commons Attribution-NonCommercial-ShareAlike 3.0 License](https://creativecommons.org/licenses/by-nc-sa/3.0/). This means that you must attribute the work in the manner specified by the authors, you may not use this work for commercial purposes and if you alter, transform, or build upon this work, you may distribute the resulting work only under the same license.
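
As a hedged convenience sketch (assuming the `build/` layout described above; the two CLI calls are exactly the ones shown in Usage), both steps can also be chained from Python:

```python
import subprocess

# run the evaluation from inside build/, then the parser from the repo root
subprocess.run(['./evaluate_object', 'leaderboard', 'valsplit'], cwd='build', check=True)
subprocess.run(['./parser.py', 'leaderboard'], check=True)
```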
kitti-eval/evaluate_object.cpp (new file, 1447 lines)
kitti-eval/original.cpp (new file, 1003 lines)
kitti-eval/parser.py (new executable file, 59 lines)
@@ -0,0 +1,59 @@
#!/usr/bin/env python

import sys
import os
import numpy as np

# CLASSES = ['car', 'pedestrian', 'cyclist', 'van', 'truck', 'person_sitting', 'tram']
CLASSES = ['pedestrian']

# PARAMS = ['detection', 'orientation', 'iour', 'mppe']
PARAMS = ['detection', 'detection_1%', 'detection_5%', 'detection_10%', 'detection_3d', 'detection_ground', 'orientation']

DIFFICULTIES = ['easy', 'moderate', 'hard', 'all']

eval_type = ''

if len(sys.argv) < 2:
    print('Usage: parser.py results_folder [evaluation_type]')
    sys.exit(1)

if len(sys.argv) == 3:
    eval_type = sys.argv[2]

result_sha = sys.argv[1]
txt_dir = os.path.join('build', 'results', result_sha)

for class_name in CLASSES:
    for param in PARAMS:
        print("--{:s} {:s}--".format(class_name, param))
        # 'is' compares identity, not equality: strings need == / !=
        if eval_type == '':
            txt_name = os.path.join(txt_dir, 'stats_' + class_name + '_' + param + '.txt')
        else:
            txt_name = os.path.join(txt_dir, 'stats_' + class_name + '_' + param + '_' + eval_type + '.txt')

        if not os.path.isfile(txt_name):
            print(txt_name, ' not found')
            continue

        cont = np.loadtxt(txt_name)

        averages = []
        for idx, difficulty in enumerate(DIFFICULTIES):
            # average over the 40 recall points (skipping the first entry)
            total = 0
            for i in range(1, 41):
                total += cont[idx][i]
            average = total / 40.0
            averages.append(average)

        print("Easy\tMod.\tHard\tAll")
        print("{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}".format(100*averages[0], 100*averages[1], 100*averages[2], 100*averages[3]))
        print("---------------------------------------------------------------------------------")
        if eval_type != '' and param == 'detection':
            break  # No orientation for 3D or bird's-eye evaluation

@@ -1,4 +1,4 @@
-"""Open implementation of MonoLoco."""
+"""Open implementation of MonoLoco++ / MonStereo."""

-__version__ = '0.4.9'
+__version__ = '0.2.3'

monoloco/activity.py (new file, 235 lines)
@@ -0,0 +1,235 @@

# pylint: disable=too-many-statements

import math
import copy
from contextlib import contextmanager

import numpy as np
import torch
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, FancyArrow

from .network.process import laplace_sampling
from .visuals.pifpaf_show import KeypointPainter, image_canvas


def social_interactions(idx, centers, angles, dds, stds=None, social_distance=False,
                        n_samples=100, threshold_prob=0.25, threshold_dist=2, radii=(0.3, 0.5)):
    """
    Return a flag of alert if social distancing is violated
    """

    # A) Check whether people are close together
    xx = centers[idx][0]
    zz = centers[idx][1]
    distances = [math.sqrt((xx - centers[i][0]) ** 2 + (zz - centers[i][1]) ** 2) for i, _ in enumerate(centers)]
    sorted_idxs = np.argsort(distances)
    indices = [idx_t for idx_t in sorted_idxs[1:] if distances[idx_t] <= threshold_dist]

    # B) Check whether people are looking inwards and whether there are no intrusions
    # Deterministic
    if n_samples < 2:
        for idx_t in indices:
            if check_f_formations(idx, idx_t, centers, angles,
                                  radii=radii,  # binary value
                                  social_distance=social_distance):
                return True

    # Probabilistic
    else:
        # Sample distances
        dds = torch.tensor(dds).view(-1, 1)
        stds = torch.tensor(stds).view(-1, 1)
        # stds_te = get_task_error(dds)  # similar results to MonoLoco but lower true positive
        laplace_d = torch.cat((dds, stds), dim=1)
        samples_d = laplace_sampling(laplace_d, n_samples=n_samples)

        # Iterate over close people
        for idx_t in indices:
            f_forms = []
            for s_d in range(n_samples):
                new_centers = copy.deepcopy(centers)
                for el in (idx, idx_t):
                    delta_d = dds[el] - float(samples_d[s_d, el])
                    theta = math.atan2(new_centers[el][1], new_centers[el][0])
                    delta_x = delta_d * math.cos(theta)
                    delta_z = delta_d * math.sin(theta)
                    new_centers[el][0] += delta_x
                    new_centers[el][1] += delta_z
                f_forms.append(check_f_formations(idx, idx_t, new_centers, angles,
                                                  radii=radii,
                                                  social_distance=social_distance))
            if (sum(f_forms) / n_samples) >= threshold_prob:
                return True
    return False


def check_f_formations(idx, idx_t, centers, angles, radii, social_distance=False):
    """
    Check F-formations for people close together (this function does not expect far away people):
    1) Empty space of a certain radius (no other people or themselves inside)
    2) People looking inwards
    """

    # Extract centers and angles
    other_centers = np.array([cent for l, cent in enumerate(centers) if l not in (idx, idx_t)])
    theta0 = angles[idx]
    theta1 = angles[idx_t]

    # Find the center of the o-space as the average of two candidates (based on their orientation)
    for radius in radii:
        x_0 = np.array([float(centers[idx][0]), float(centers[idx][1])])
        x_1 = np.array([float(centers[idx_t][0]), float(centers[idx_t][1])])

        mu_0 = np.array([
            float(centers[idx][0]) + radius * math.cos(theta0),
            float(centers[idx][1]) - radius * math.sin(theta0)])
        mu_1 = np.array([
            float(centers[idx_t][0]) + radius * math.cos(theta1),
            float(centers[idx_t][1]) - radius * math.sin(theta1)])
        o_c = (mu_0 + mu_1) / 2

        # 1) Verify they are looking inwards.
        # The distance between the mus and the center should be less than between the original positions and the center
        d_new = np.linalg.norm(mu_0 - mu_1) / 2 if social_distance else np.linalg.norm(mu_0 - mu_1)
        d_0 = np.linalg.norm(x_0 - o_c)
        d_1 = np.linalg.norm(x_1 - o_c)

        # 2) Verify no intrusion for third parties
        if other_centers.size:
            other_distances = np.linalg.norm(other_centers - o_c.reshape(1, -1), axis=1)
        else:
            other_distances = 100 * np.ones((1, 1))  # condition verified if no other people

        # Binary classification
        # if np.min(other_distances) > radius:  # ablation without orientation
        if d_new <= min(d_0, d_1) and np.min(other_distances) > radius:
            return True
    return False


def show_social(args, image_t, output_path, annotations, dic_out):
    """Output frontal image with poses or combined with bird-eye view"""

    assert 'front' in args.output_types or 'bird' in args.output_types, "outputs allowed: front and/or bird"

    angles = dic_out['angles']
    stds = dic_out['stds_ale']
    xz_centers = [[xx[0], xx[2]] for xx in dic_out['xyz_pred']]

    # Prepare colors for social distancing
    colors = ['r' if flag else 'deepskyblue' for flag in dic_out['social_distance']]

    # Draw keypoints and orientation
    if 'front' in args.output_types:
        keypoint_sets, scores = get_pifpaf_outputs(annotations)
        uv_centers = dic_out['uv_heads']
        sizes = [abs(dic_out['uv_heads'][idx][1] - uv_s[1]) / 1.5 for idx, uv_s in
                 enumerate(dic_out['uv_shoulders'])]
        keypoint_painter = KeypointPainter(show_box=False)

        with image_canvas(image_t,
                          output_path + '.front.png',
                          show=args.show,
                          fig_width=10,
                          dpi_factor=1.0) as ax:
            keypoint_painter.keypoints(ax, keypoint_sets, colors=colors)
            draw_orientation(ax, uv_centers, sizes, angles, colors, mode='front')

    if 'bird' in args.output_types:
        z_max = min(args.z_max, 4 + max([el[1] for el in xz_centers]))
        with bird_canvas(output_path, z_max) as ax1:
            draw_orientation(ax1, xz_centers, [], angles, colors, mode='bird')
            draw_uncertainty(ax1, xz_centers, stds)


def get_pifpaf_outputs(annotations):
    # TODO extract directly from predictions with pifpaf 0.11+
    """Extract keypoint sets and scores from the output dictionary"""
    if not annotations:
        return [], []
    keypoints_sets = np.array([dic['keypoints'] for dic in annotations]).reshape((-1, 17, 3))
    score_weights = np.ones((keypoints_sets.shape[0], 17))
    score_weights[:, 3] = 3.0
    score_weights /= np.sum(score_weights[0, :])
    kps_scores = keypoints_sets[:, :, 2]
    ordered_kps_scores = np.sort(kps_scores, axis=1)[:, ::-1]
    scores = np.sum(score_weights * ordered_kps_scores, axis=1)
    return keypoints_sets, scores


@contextmanager
def bird_canvas(output_path, z_max):
    fig, ax = plt.subplots(1, 1)
    fig.set_tight_layout(True)
    output_path = output_path + '.bird.png'
    x_max = z_max / 1.5
    ax.plot([0, x_max], [0, z_max], 'k--')
    ax.plot([0, -x_max], [0, z_max], 'k--')
    ax.set_ylim(0, z_max + 1)
    yield ax
    fig.savefig(output_path)
    plt.close(fig)
    print('Bird-eye-view image saved')


def draw_orientation(ax, centers, sizes, angles, colors, mode):

    if mode == 'front':
        length = 5
        fill = False
        alpha = 0.6
        zorder_circle = 0.5
        zorder_arrow = 5
        linewidth = 1.5
        edgecolor = 'k'
        radiuses = [s / 1.2 for s in sizes]
    else:
        length = 1.3
        head_width = 0.3
        linewidth = 2
        # length = 1.6
        # head_width = 0.4
        # linewidth = 2.7
        radiuses = [0.2] * len(centers)
        fill = True
        alpha = 1
        zorder_circle = 2
        zorder_arrow = 1

    for idx, theta in enumerate(angles):
        color = colors[idx]
        radius = radiuses[idx]

        if mode == 'front':
            x_arr = centers[idx][0] + (length + radius) * math.cos(theta)
            z_arr = length + centers[idx][1] + (length + radius) * math.sin(theta)
            delta_x = math.cos(theta)
            delta_z = math.sin(theta)
            head_width = max(10, radiuses[idx] / 1.5)

        else:
            edgecolor = color
            x_arr = centers[idx][0]
            z_arr = centers[idx][1]
            delta_x = length * math.cos(theta)
            delta_z = - length * math.sin(theta)  # take into account the kitti convention

        circle = Circle(centers[idx], radius=radius, color=color, fill=fill, alpha=alpha, zorder=zorder_circle)
        arrow = FancyArrow(x_arr, z_arr, delta_x, delta_z, head_width=head_width, edgecolor=edgecolor,
                           facecolor=color, linewidth=linewidth, zorder=zorder_arrow)
        ax.add_patch(circle)
        ax.add_patch(arrow)


def draw_uncertainty(ax, centers, stds):
    for idx, std in enumerate(stds):
        theta = math.atan2(centers[idx][1], centers[idx][0])
        delta_x = std * math.cos(theta)
        delta_z = std * math.sin(theta)
        x = (centers[idx][0] - delta_x, centers[idx][0] + delta_x)
        z = (centers[idx][1] - delta_z, centers[idx][1] + delta_z)
        ax.plot(x, z, color='g', linewidth=2.5)

@@ -1,4 +1,2 @@

from .eval_kitti import EvalKitti
from .generate_kitti import GenerateKitti
from .geom_baseline import geometric_baseline

262
monoloco/eval/eval_activity.py
Normal file
@ -0,0 +1,262 @@
|
||||
|
||||
import os
|
||||
import glob
|
||||
import csv
|
||||
from collections import defaultdict
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from PIL import Image
|
||||
from sklearn.metrics import accuracy_score
|
||||
|
||||
from monstereo.network import Loco
|
||||
from monstereo.network.process import factory_for_gt, preprocess_pifpaf
|
||||
from monstereo.activity import social_interactions
|
||||
from monstereo.utils import open_annotations, get_iou_matches, get_difficulty
|
||||
|
||||
|
||||
class ActivityEvaluator:
|
||||
"""Evaluate talking activity for Collective Activity Dataset & KITTI"""
|
||||
|
||||
dic_cnt = dict(fp=0, fn=0, det=0)
|
||||
cnt = {'pred': defaultdict(int), 'gt': defaultdict(int)} # pred is for matched instances
|
||||
|
||||
def __init__(self, args):
|
||||
|
||||
self.dir_ann = args.dir_ann
|
||||
assert self.dir_ann is not None and os.path.exists(self.dir_ann), \
|
||||
"Annotation directory not provided / does not exist"
|
||||
assert os.listdir(self.dir_ann), "Annotation directory is empty"
|
||||
|
||||
# COLLECTIVE ACTIVITY DATASET (talking)
|
||||
# -------------------------------------------------------------------------------------------------------------
|
||||
if args.dataset == 'collective':
|
||||
self.sequences = ['seq02', 'seq14', 'seq12', 'seq13', 'seq11', 'seq36']
|
||||
# folders_collective = ['seq02']
|
||||
self.dir_data = 'data/activity/dataset'
|
||||
self.THRESHOLD_PROB = 0.25 # Concordance for samples
|
||||
self.THRESHOLD_DIST = 2 # Threshold to check distance of people
|
||||
self.RADII = (0.3, 0.5) # expected radii of the o-space
|
||||
self.PIFPAF_CONF = 0.3
|
||||
self.SOCIAL_DISTANCE = False
|
||||
# -------------------------------------------------------------------------------------------------------------
|
||||
|
||||
# KITTI DATASET (social distancing)
|
||||
# ------------------------------------------------------------------------------------------------------------
|
||||
else:
|
||||
self.dir_data = 'data/kitti/gt_activity'
|
||||
self.dir_kk = os.path.join('data', 'kitti', 'calib')
|
||||
self.THRESHOLD_PROB = 0.25 # Concordance for samples
|
||||
self.THRESHOLD_DIST = 2 # Threshold to check distance of people
|
||||
self.RADII = (0.3, 0.5, 1) # expected radii of the o-space
|
||||
self.PIFPAF_CONF = 0.3
|
||||
self.SOCIAL_DISTANCE = True
|
||||
# ---------------------------------------------------------------------------------------------------------
|
||||
|
||||
# Load model
|
||||
device = torch.device('cpu')
|
||||
if torch.cuda.is_available():
|
||||
device = torch.device('cuda')
|
||||
self.monoloco = Loco(model=args.model, net=args.net,
|
||||
device=device, n_dropout=args.n_dropout, p_dropout=args.dropout)
|
||||
self.all_pred = defaultdict(list)
|
||||
self.all_gt = defaultdict(list)
|
||||
assert args.dataset in ('collective', 'kitti')
|
||||
|
||||
def eval_collective(self):
|
||||
"""Parse Collective Activity Dataset and predict if people are talking or not"""
|
||||
|
||||
for seq in self.sequences:
|
||||
images = glob.glob(os.path.join(self.dir_data, 'images', seq + '*.jpg'))
|
||||
initial_im = os.path.join(self.dir_data, 'images', seq + '_frame0001.jpg')
|
||||
with open(initial_im, 'rb') as f:
|
||||
image = Image.open(f).convert('RGB')
|
||||
im_size = image.size
|
||||
assert len(im_size) > 1, "image with frame0001 not available"
|
||||
|
||||
for idx, im_path in enumerate(images):
|
||||
|
||||
# Collect PifPaf files and calibration
|
||||
basename = os.path.basename(im_path)
|
||||
extension = '.predictions.json'
|
||||
path_pif = os.path.join(self.dir_ann, basename + extension)
|
||||
annotations = open_annotations(path_pif)
|
||||
kk, _ = factory_for_gt(im_size, verbose=False)
|
||||
|
||||
# Collect corresponding gt files (ys_gt: 1 or 0)
|
||||
boxes_gt, ys_gt = parse_gt_collective(self.dir_data, seq, path_pif)
|
||||
# Run Monoloco
|
||||
dic_out, boxes = self.run_monoloco(annotations, kk, im_size=im_size)
|
||||
|
||||
# Match and update stats
|
||||
matches = get_iou_matches(boxes, boxes_gt, iou_min=0.3)
|
||||
|
||||
# Estimate activity
|
||||
categories = [seq] * len(boxes_gt) # for compatibility with KITTI evaluation
|
||||
self.estimate_activity(dic_out, matches, ys_gt, categories=categories)
|
||||
|
||||
# Print Results
|
||||
acc = accuracy_score(self.all_gt[seq], self.all_pred[seq])
|
||||
print(f"Accuracy of category {seq}: {100*acc:.2f}%")
|
||||
cout_results(self.cnt, self.all_gt, self.all_pred, categories=self.sequences)
|
||||
|
||||
def eval_kitti(self):
"""Parse KITTI Dataset and predict if people are talking or not"""

from ..utils import factory_file
files = glob.glob(self.dir_data + '/*.txt')
# files = [self.dir_gt_kitti + '/001782.txt']
assert files, "Empty directory"

for file in files:

# Collect PifPaf files and calibration
basename, _ = os.path.splitext(os.path.basename(file))
path_calib = os.path.join(self.dir_kk, basename + '.txt')
annotations, kk, tt = factory_file(path_calib, self.dir_ann, basename)

# Collect corresponding gt files (ys_gt: 1 or 0)
path_gt = os.path.join(self.dir_data, basename + '.txt')
boxes_gt, ys_gt, difficulties = parse_gt_kitti(path_gt)

# Run Monoloco
dic_out, boxes = self.run_monoloco(annotations, kk, im_size=(1242, 374))

# Match and update stats
matches = get_iou_matches(boxes, boxes_gt, iou_min=0.3)

# Estimate activity
self.estimate_activity(dic_out, matches, ys_gt, categories=difficulties)

# Print Results
cout_results(self.cnt, self.all_gt, self.all_pred, categories=('easy', 'moderate', 'hard'))

def estimate_activity(self, dic_out, matches, ys_gt, categories):

# Calculate social interaction
angles = dic_out['angles']
dds = dic_out['dds_pred']
stds = dic_out['stds_ale']
xz_centers = [[xx[0], xx[2]] for xx in dic_out['xyz_pred']]

# Count gt statistics (one element per ground-truth instance)
for key in categories:
self.cnt['gt'][key] += 1
self.cnt['gt']['all'] += 1

for i_m, (idx, idx_gt) in enumerate(matches):

# Select keys to update results for Collective or KITTI
keys = ('all', categories[idx_gt])

# Run social interactions rule
flag = social_interactions(idx, xz_centers, angles, dds,
stds=stds,
threshold_prob=self.THRESHOLD_PROB,
threshold_dist=self.THRESHOLD_DIST,
radii=self.RADII,
social_distance=self.SOCIAL_DISTANCE)
# Accumulate results
for key in keys:
self.all_pred[key].append(flag)
self.all_gt[key].append(ys_gt[idx_gt])
self.cnt['pred'][key] += 1
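
# A minimal sketch (not part of the original file) of how the accumulators
# above turn into the reported numbers, assuming two matched detections in
# category 'easy' with labels [1, 0] and predictions [1, 1]:
#
#   from sklearn.metrics import accuracy_score
#   accuracy_score([1, 0], [1, 1])  # -> 0.5
#
# Recall is then computed in cout_results() as matched predictions over
# ground-truth instances, cnt['pred'][key] / cnt['gt'][key].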

def run_monoloco(self, annotations, kk, im_size=None):

boxes, keypoints = preprocess_pifpaf(annotations, im_size, enlarge_boxes=True, min_conf=self.PIFPAF_CONF)
dic_out = self.monoloco.forward(keypoints, kk)
dic_out = self.monoloco.post_process(dic_out, boxes, keypoints, kk, dic_gt=None, reorder=False, verbose=False)

return dic_out, boxes
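
# Hedged usage sketch of the three-step pipeline wrapped above (the pifpaf
# json layout and the intrinsics helper output are assumed, not verified):
#
#   annotations = open_annotations('seq02_frame0001.jpg.predictions.json')
#   kk, _ = factory_for_gt((720, 480), verbose=False)  # 3x3 intrinsics matrix
#   dic_out, boxes = self.run_monoloco(annotations, kk, im_size=(720, 480))
#   dic_out['xyz_pred'][0]  # e.g. [1.2, 0.3, 8.7], 3D location of detection 0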

def parse_gt_collective(dir_data, seq, path_pif):
"""Parse both gt and binary label (1/0) for talking or not"""

path = os.path.join(dir_data, 'annotations', seq + '_annotations.txt')

with open(path, "r") as ff:
reader = csv.reader(ff, delimiter='\t')
dic_frames = defaultdict(lambda: defaultdict(list))
for idx, line in enumerate(reader):
box = convert_box(line[1:5])
cat = convert_category(line[5])
dic_frames[line[0]]['boxes'].append(box)
dic_frames[line[0]]['y'].append(cat)

frame = extract_frame_number(path_pif)
boxes_gt = dic_frames[frame]['boxes']
ys_gt = np.array(dic_frames[frame]['y'])
return boxes_gt, ys_gt
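
# Illustrative only: a tab-separated annotation line with the layout assumed
# from the parsing above (frame id, box as x/y/w/h, category id, ...):
#
#   line = ['451', '250', '100', '60', '140', '6']
#   convert_box(line[1:5])     # -> [250.0, 100.0, 310.0, 240.0]
#   convert_category(line[5])  # -> 1 (category 6 means 'talking')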

def parse_gt_kitti(path_gt):
"""Parse both gt and binary label (1/0) for talking or not"""
boxes_gt = []
ys = []
difficulties = []
with open(path_gt, "r") as f_gt:
for line_gt in f_gt:
line = line_gt.split()
box = [float(x) for x in line[4:8]]
boxes_gt.append(box)
y = int(line[-1])
assert y in (1, 0), "Expected to be binary (1/0)"
ys.append(y)
trunc = float(line[1])
occ = int(line[2])
difficulties.append(get_difficulty(box, trunc, occ))
return boxes_gt, ys, difficulties
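
# Illustrative only: a KITTI-style label line with the binary activity label
# appended as the last field, matching the indices read above:
#
#   'Pedestrian 0.00 0 -0.20 712.4 143.0 810.7 307.9 1.89 0.48 1.20 1.84 1.47 8.41 0.01 1'
#   line[4:8] -> box, line[1] -> truncation, line[2] -> occlusion, line[-1] -> label 1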

def cout_results(cnt, all_gt, all_pred, categories=()):

categories = list(categories)
categories.append('all')
print('-' * 80)

# Split by folders for collective activity
for key in categories:
acc = accuracy_score(all_gt[key], all_pred[key])
print("Accuracy of category {}: {:.2f}% , Recall: {:.2f}%, #: {}, Pred/Real positive: {:.1f}% / {:.1f}%"
.format(key,
acc * 100,
cnt['pred'][key] / cnt['gt'][key]*100,
cnt['pred'][key],
sum(all_pred[key]) / len(all_pred[key]) * 100,
sum(all_gt[key]) / len(all_gt[key]) * 100
)
)

# Final Accuracy
acc = accuracy_score(all_gt['all'], all_pred['all'])
recall = cnt['pred']['all'] / cnt['gt']['all'] * 100  # only predictions that match a ground truth are counted
print('-' * 80)
print(f"Final Accuracy: {acc * 100:.2f} Final Recall: {recall:.2f}")
print('-' * 80)

def convert_box(box_str):
"""Convert a box from [x, y, w, h] strings to standard [x1, y1, x2, y2] floats"""
box = [float(el) for el in box_str]  # x, y, w, h
box[2] += box[0]
box[3] += box[1]
return box
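
# Worked example of the conversion above, from top-left corner plus size to
# the two-corner format:
#
#   convert_box(['10', '20', '30', '40'])  # -> [10.0, 20.0, 40.0, 60.0]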

def convert_category(cat):
"""Binary label: category '6' (talking) maps to 1, everything else to 0"""
if cat == '6':
return 1
return 0

def extract_frame_number(path):
"""Extract the frame number from the file path"""
name = os.path.basename(path)
if name[11] == '0':
frame = name[12:15]
else:
frame = name[11:15]
return frame
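
# Worked example, assuming file names like 'seq02_frame0451.jpg.predictions.json'
# where the frame digits start at character 11:
#
#   extract_frame_number('seq02_frame0451.jpg.predictions.json')  # -> '451'
#   extract_frame_number('seq02_frame1451.jpg.predictions.json')  # -> '1451'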
@ -1,158 +1,199 @@
"""Evaluate Monoloco code on KITTI dataset using ALE and ALP metrics with the following baselines:
- Mono3D
- 3DOP
- MonoDepth
"""
"""
Evaluate MonStereo code on KITTI dataset using ALE metric
"""

# pylint: disable=attribute-defined-outside-init

import os
import math
import logging
import datetime
from collections import defaultdict
from itertools import chain

from tabulate import tabulate

from ..utils import get_iou_matches, get_task_error, get_pixel_error, check_conditions, get_category, split_training, \
parse_ground_truth
from ..visuals import show_results, show_spread, show_task_error
from ..utils import get_iou_matches, get_task_error, get_pixel_error, check_conditions, \
get_difficulty, split_training, parse_ground_truth, get_iou_matches_matrix
from ..visuals import show_results, show_spread, show_task_error, show_box_plot

class EvalKitti:

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
CLUSTERS = ('easy', 'moderate', 'hard', 'all', '6', '10', '15', '20', '25', '30', '40', '50', '>50')
CLUSTERS = ('easy', 'moderate', 'hard', 'all', '3', '5', '7', '9', '11', '13', '15', '17', '19', '21', '23', '25',
'27', '29', '31', '49')
ALP_THRESHOLDS = ('<0.5m', '<1m', '<2m')
METHODS_MONO = ['m3d', 'monodepth', '3dop', 'monoloco']
METHODS_STEREO = ['ml_stereo', 'pose', 'reid']
BASELINES = ['geometric', 'task_error', 'pixel_error']
OUR_METHODS = ['geometric', 'monoloco', 'monoloco_pp', 'pose', 'reid', 'monstereo']
METHODS_MONO = ['m3d', 'monopsr', 'smoke', 'monodis']
METHODS_STEREO = ['3dop', 'psf', 'pseudo-lidar', 'e2e', 'oc-stereo']
BASELINES = ['task_error', 'pixel_error']
HEADERS = ('method', '<0.5', '<1m', '<2m', 'easy', 'moderate', 'hard', 'all')
CATEGORIES = ('pedestrian',)
methods = OUR_METHODS + METHODS_MONO + METHODS_STEREO

def __init__(self, thresh_iou_monoloco=0.3, thresh_iou_base=0.3, thresh_conf_monoloco=0.3, thresh_conf_base=0.3,
verbose=False, stereo=False):
# Set directories
main_dir = os.path.join('data', 'kitti')
dir_gt = os.path.join(main_dir, 'gt')
path_train = os.path.join('splits', 'kitti_train.txt')
path_val = os.path.join('splits', 'kitti_val.txt')
dir_logs = os.path.join('data', 'logs')
assert os.path.exists(dir_logs), "No directory to save final statistics"
dir_fig = os.path.join('data', 'figures')
assert os.path.exists(dir_fig), "No directory to save figures"

self.main_dir = os.path.join('data', 'kitti')
self.dir_gt = os.path.join(self.main_dir, 'gt')
self.methods = self.METHODS_MONO
self.stereo = stereo
if self.stereo:
self.methods.extend(self.METHODS_STEREO)
path_train = os.path.join('splits', 'kitti_train.txt')
path_val = os.path.join('splits', 'kitti_val.txt')
dir_logs = os.path.join('data', 'logs')
assert dir_logs, "No directory to save final statistics"
# Set thresholds to obtain comparable recalls
thresh_iou_monoloco = 0.3
thresh_iou_base = 0.3
thresh_conf_monoloco = 0.2
thresh_conf_base = 0.5

def __init__(self, args):

self.verbose = args.verbose
self.net = args.net
self.save = args.save
self.show = args.show

now = datetime.datetime.now()
now_time = now.strftime("%Y%m%d-%H%M")[2:]
self.path_results = os.path.join(dir_logs, 'eval-' + now_time + '.json')
self.verbose = verbose
self.path_results = os.path.join(self.dir_logs, 'eval-' + now_time + '.json')

self.dic_thresh_iou = {method: (thresh_iou_monoloco if method[:8] == 'monoloco' else thresh_iou_base)
# Set thresholds for comparable recalls
self.dic_thresh_iou = {method: (self.thresh_iou_monoloco if method in self.OUR_METHODS
else self.thresh_iou_base)
for method in self.methods}
self.dic_thresh_conf = {method: (thresh_conf_monoloco if method[:8] == 'monoloco' else thresh_conf_base)
self.dic_thresh_conf = {method: (self.thresh_conf_monoloco if method in self.OUR_METHODS
else self.thresh_conf_base)
for method in self.methods}

# Set thresholds to obtain comparable recall
self.dic_thresh_conf['monopsr'] += 0.4
self.dic_thresh_conf['e2e-pl'] = -100
self.dic_thresh_conf['oc-stereo'] = -100
self.dic_thresh_conf['smoke'] = -100
self.dic_thresh_conf['monodis'] = -100

# Extract validation images for evaluation
names_gt = tuple(os.listdir(self.dir_gt))
_, self.set_val = split_training(names_gt, path_train, path_val)
_, self.set_val = split_training(names_gt, self.path_train, self.path_val)

# self.set_val = ('002282.txt', )

# Define variables to save statistics
self.dic_methods = None
self.errors = None
self.dic_stds = None
self.dic_stats = None
self.dic_cnt = None
self.cnt_gt = 0
self.dic_methods = self.errors = self.dic_stds = self.dic_stats = self.dic_cnt = self.cnt_gt = self.category \
= None
self.cnt = 0

# Filter methods with empty or non-existent directory
filter_directories(self.main_dir, self.methods)

def run(self):
"""Evaluate Monoloco performance on ALP and ALE metrics"""

for category in self.CATEGORIES:

for self.category in self.CATEGORIES:
# Initialize variables
self.errors = defaultdict(lambda: defaultdict(list))
self.dic_stds = defaultdict(lambda: defaultdict(list))
self.dic_stds = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
self.dic_stats = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float))))
self.dic_cnt = defaultdict(int)
self.cnt_gt = 0
self.cnt_gt = defaultdict(int)

# Iterate over each ground truth file in the validation set
# self.set_val = ('000063.txt',)
for name in self.set_val:
path_gt = os.path.join(self.dir_gt, name)
self.name = name

# Iterate over each line of the gt file and save box location and distances
out_gt = parse_ground_truth(path_gt, category)
out_gt = parse_ground_truth(path_gt, self.category)
methods_out = defaultdict(tuple)  # Save all methods for comparison
self.cnt_gt += len(out_gt[0])

# Count ground_truth:
boxes_gt, ys, truncs_gt, occs_gt = out_gt  # pylint: disable=unbalanced-tuple-unpacking
for idx, box in enumerate(boxes_gt):
mode = get_difficulty(box, truncs_gt[idx], occs_gt[idx])
self.cnt_gt[mode] += 1
self.cnt_gt['all'] += 1

if out_gt[0]:
for method in self.methods:
# Extract annotations
dir_method = os.path.join(self.main_dir, method)
assert os.path.exists(dir_method), "directory of the method %s does not exist" % method
path_method = os.path.join(dir_method, name)
methods_out[method] = self._parse_txts(path_method, category, method=method)
methods_out[method] = self._parse_txts(path_method, method=method)

# Compute the error with ground truth
self._estimate_error(out_gt, methods_out[method], method=method)

# Iterate over all the files together to find a pool of common annotations
self._compare_error(out_gt, methods_out)

# Update statistics of errors and uncertainty
for key in self.errors:
add_true_negatives(self.errors[key], self.cnt_gt)
for clst in self.CLUSTERS[:-2]:  # M3D and PifPaf do not have annotations above 40 meters
get_statistics(self.dic_stats['test'][key][clst], self.errors[key][clst], self.dic_stds[clst], key)
add_true_negatives(self.errors[key], self.cnt_gt['all'])
for clst in self.CLUSTERS[:-1]:

try:
get_statistics(self.dic_stats['test'][key][clst],
self.errors[key][clst],
self.dic_stds[key][clst], key)
except ZeroDivisionError:
print('\n'+'-'*100 + '\n'+f'ERROR: method {key} at cluster {clst} is empty' + '\n'+'-'*100+'\n')
raise

# Show statistics
print('\n' + category.upper() + ':')
print('\n' + self.category.upper() + ':')
self.show_statistics()

def printer(self, show, save):
if save or show:
show_results(self.dic_stats, show, save, stereo=self.stereo)
show_spread(self.dic_stats, show, save)
show_task_error(show, save)
def printer(self):
if self.save or self.show:
show_results(self.dic_stats, self.CLUSTERS, self.net, self.dir_fig, show=self.show, save=self.save)
show_spread(self.dic_stats, self.CLUSTERS, self.net, self.dir_fig, show=self.show, save=self.save)
if self.net == 'monstereo':
show_box_plot(self.errors, self.CLUSTERS, self.dir_fig, show=self.show, save=self.save)
else:
show_task_error(self.dir_fig, show=self.show, save=self.save)

def _parse_txts(self, path, category, method):
def _parse_txts(self, path, method):

boxes = []
dds = []
stds_ale = []
stds_epi = []
dds_geometric = []
output = (boxes, dds) if method != 'monoloco' else (boxes, dds, stds_ale, stds_epi, dds_geometric)
cat = []

if method == 'psf':
path = os.path.splitext(path)[0] + '.png.txt'
if method in self.OUR_METHODS:
bis, epis = [], []
output = (boxes, dds, cat, bis, epis)
else:
output = (boxes, dds, cat)
try:
with open(path, "r") as ff:
for line_str in ff:
line = line_str.split()
if check_conditions(line, category, method=method, thresh=self.dic_thresh_conf[method]):
if method == 'monodepth':
box = [float(x[:-1]) for x in line[0:4]]
delta_h = (box[3] - box[1]) / 7
delta_w = (box[2] - box[0]) / 3.5
assert delta_h > 0 and delta_w > 0, "Bounding box <=0"
box[0] -= delta_w
box[1] -= delta_h
box[2] += delta_w
box[3] += delta_h
dd = float(line[5][:-1])
else:
if method == 'psf':
line = line_str.split(", ")
box = [float(x) for x in line[4:8]]
boxes.append(box)
loc = ([float(x) for x in line[11:14]])
dd = math.sqrt(loc[0] ** 2 + loc[1] ** 2 + loc[2] ** 2)
dds.append(dd)
cat.append('Pedestrian')
else:
line = line_str.split()
if check_conditions(line,
category='pedestrian',
method=method,
thresh=self.dic_thresh_conf[method]):
box = [float(x) for x in line[4:8]]
box.append(float(line[15]))  # Add confidence
loc = ([float(x) for x in line[11:14]])
dd = math.sqrt(loc[0] ** 2 + loc[1] ** 2 + loc[2] ** 2)
boxes.append(box)
dds.append(dd)
self.dic_cnt[method] += 1
if method == 'monoloco':
stds_ale.append(float(line[16]))
stds_epi.append(float(line[17]))
dds_geometric.append(float(line[18]))
self.dic_cnt['geometric'] += 1
cat.append(line[0])
boxes.append(box)
dds.append(dd)
if method in self.OUR_METHODS:
bis.append(float(line[16]))
epis.append(float(line[17]))
self.dic_cnt[method] += 1

return output
except FileNotFoundError:
return output
@ -160,67 +201,39 @@ class EvalKitti:
def _estimate_error(self, out_gt, out, method):
"""Estimate localization error"""

boxes_gt, _, dds_gt, zzs_gt, truncs_gt, occs_gt = out_gt
if method == 'monoloco':
boxes, dds, stds_ale, stds_epi, dds_geometric = out
else:
boxes, dds = out
boxes_gt, ys, truncs_gt, occs_gt = out_gt

matches = get_iou_matches(boxes, boxes_gt, self.dic_thresh_iou[method])
if method in self.OUR_METHODS:
boxes, dds, cat, bis, epis = out
else:
boxes, dds, cat = out

if method == 'psf':
matches = get_iou_matches_matrix(boxes, boxes_gt, self.dic_thresh_iou[method])
else:
matches = get_iou_matches(boxes, boxes_gt, self.dic_thresh_iou[method])

for (idx, idx_gt) in matches:
# Update error if match is found
cat = get_category(boxes_gt[idx_gt], truncs_gt[idx_gt], occs_gt[idx_gt])
self.update_errors(dds[idx], dds_gt[idx_gt], cat, self.errors[method])
dd_gt = ys[idx_gt][3]
zz_gt = ys[idx_gt][2]
mode = get_difficulty(boxes_gt[idx_gt], truncs_gt[idx_gt], occs_gt[idx_gt])

if method == 'monoloco':
self.update_errors(dds_geometric[idx], dds_gt[idx_gt], cat, self.errors['geometric'])
self.update_uncertainty(stds_ale[idx], stds_epi[idx], dds[idx], dds_gt[idx_gt], cat)
dd_task_error = dds_gt[idx_gt] + (get_task_error(dds_gt[idx_gt]))**2
self.update_errors(dd_task_error, dds_gt[idx_gt], cat, self.errors['task_error'])
dd_pixel_error = dds_gt[idx_gt] + get_pixel_error(zzs_gt[idx_gt])
self.update_errors(dd_pixel_error, dds_gt[idx_gt], cat, self.errors['pixel_error'])

def _compare_error(self, out_gt, methods_out):
"""Compare the error for a pool of instances commonly matched by all methods"""
boxes_gt, _, dds_gt, zzs_gt, truncs_gt, occs_gt = out_gt

# Find IoU matches
matches = []
boxes_monoloco = methods_out['monoloco'][0]
matches_monoloco = get_iou_matches(boxes_monoloco, boxes_gt, self.dic_thresh_iou['monoloco'])

base_methods = [method for method in self.methods if method != 'monoloco']
for method in base_methods:
boxes = methods_out[method][0]
matches.append(get_iou_matches(boxes, boxes_gt, self.dic_thresh_iou[method]))

# Update error of commonly matched instances
for (idx, idx_gt) in matches_monoloco:
check, indices = extract_indices(idx_gt, *matches)
if check:
cat = get_category(boxes_gt[idx_gt], truncs_gt[idx_gt], occs_gt[idx_gt])
dd_gt = dds_gt[idx_gt]

for idx_indices, method in enumerate(base_methods):
dd = methods_out[method][1][indices[idx_indices]]
self.update_errors(dd, dd_gt, cat, self.errors[method + '_merged'])

dd_monoloco = methods_out['monoloco'][1][idx]
dd_geometric = methods_out['monoloco'][4][idx]
self.update_errors(dd_monoloco, dd_gt, cat, self.errors['monoloco_merged'])
self.update_errors(dd_geometric, dd_gt, cat, self.errors['geometric_merged'])
self.update_errors(dd_gt + get_task_error(dd_gt), dd_gt, cat, self.errors['task_error_merged'])
dd_pixel = dd_gt + get_pixel_error(zzs_gt[idx_gt])
self.update_errors(dd_pixel, dd_gt, cat, self.errors['pixel_error_merged'])

for key in self.methods:
self.dic_cnt[key + '_merged'] += 1
if cat[idx].lower() in (self.category, 'pedestrian'):
self.update_errors(dds[idx], dd_gt, mode, self.errors[method])
if method == 'monoloco':
dd_task_error = dd_gt + (get_task_error(zz_gt))**2
dd_pixel_error = dd_gt + get_pixel_error(zz_gt)
self.update_errors(dd_task_error, dd_gt, mode, self.errors['task_error'])
self.update_errors(dd_pixel_error, dd_gt, mode, self.errors['pixel_error'])
if method in self.OUR_METHODS:
epi = max(epis[idx], bis[idx])
self.update_uncertainty(bis[idx], epi, dds[idx], dd_gt, mode, self.dic_stds[method])

def update_errors(self, dd, dd_gt, cat, errors):
"""Compute and save the error between a box and its matching gt box"""
diff = abs(dd - dd_gt)
clst = find_cluster(dd_gt, self.CLUSTERS)
clst = find_cluster(dd_gt, self.CLUSTERS[4:])
errors['all'].append(diff)
errors[cat].append(diff)
errors[clst].append(diff)
@ -241,46 +254,49 @@
else:
errors['<2m'].append(0)

def update_uncertainty(self, std_ale, std_epi, dd, dd_gt, cat):
def update_uncertainty(self, std_ale, std_epi, dd, dd_gt, mode, dic_stds):

clst = find_cluster(dd_gt, self.CLUSTERS)
self.dic_stds['all']['ale'].append(std_ale)
self.dic_stds[clst]['ale'].append(std_ale)
self.dic_stds[cat]['ale'].append(std_ale)
self.dic_stds['all']['epi'].append(std_epi)
self.dic_stds[clst]['epi'].append(std_epi)
self.dic_stds[cat]['epi'].append(std_epi)
clst = find_cluster(dd_gt, self.CLUSTERS[4:])
dic_stds['all']['ale'].append(std_ale)
dic_stds[clst]['ale'].append(std_ale)
dic_stds[mode]['ale'].append(std_ale)
dic_stds['all']['epi'].append(std_epi)
dic_stds[clst]['epi'].append(std_epi)
dic_stds[mode]['epi'].append(std_epi)
dic_stds['all']['epi_rel'].append(std_epi / dd)
dic_stds[clst]['epi_rel'].append(std_epi / dd)
dic_stds[mode]['epi_rel'].append(std_epi / dd)

# Number of annotations inside the confidence interval
std = std_epi if std_epi > 0 else std_ale  # consider aleatoric uncertainty if epistemic is not calculated
if abs(dd - dd_gt) <= std:
self.dic_stds['all']['interval'].append(1)
self.dic_stds[clst]['interval'].append(1)
self.dic_stds[cat]['interval'].append(1)
dic_stds['all']['interval'].append(1)
dic_stds[clst]['interval'].append(1)
dic_stds[mode]['interval'].append(1)
else:
self.dic_stds['all']['interval'].append(0)
self.dic_stds[clst]['interval'].append(0)
self.dic_stds[cat]['interval'].append(0)
dic_stds['all']['interval'].append(0)
dic_stds[clst]['interval'].append(0)
dic_stds[mode]['interval'].append(0)

# Annotations at risk inside the confidence interval
if dd_gt <= dd:
self.dic_stds['all']['at_risk'].append(1)
self.dic_stds[clst]['at_risk'].append(1)
self.dic_stds[cat]['at_risk'].append(1)
dic_stds['all']['at_risk'].append(1)
dic_stds[clst]['at_risk'].append(1)
dic_stds[mode]['at_risk'].append(1)

if abs(dd - dd_gt) <= std_epi:
self.dic_stds['all']['at_risk-interval'].append(1)
self.dic_stds[clst]['at_risk-interval'].append(1)
self.dic_stds[cat]['at_risk-interval'].append(1)
dic_stds['all']['at_risk-interval'].append(1)
dic_stds[clst]['at_risk-interval'].append(1)
dic_stds[mode]['at_risk-interval'].append(1)
else:
self.dic_stds['all']['at_risk-interval'].append(0)
self.dic_stds[clst]['at_risk-interval'].append(0)
self.dic_stds[cat]['at_risk-interval'].append(0)
dic_stds['all']['at_risk-interval'].append(0)
dic_stds[clst]['at_risk-interval'].append(0)
dic_stds[mode]['at_risk-interval'].append(0)

else:
self.dic_stds['all']['at_risk'].append(0)
self.dic_stds[clst]['at_risk'].append(0)
self.dic_stds[cat]['at_risk'].append(0)
dic_stds['all']['at_risk'].append(0)
dic_stds[clst]['at_risk'].append(0)
dic_stds[mode]['at_risk'].append(0)

# Precision of uncertainty
eps = 1e-4
@ -288,12 +304,12 @@ class EvalKitti:
prec_1 = abs(dd - dd_gt) / (std_epi + eps)

prec_2 = abs(std_epi - task_error)
self.dic_stds['all']['prec_1'].append(prec_1)
self.dic_stds[clst]['prec_1'].append(prec_1)
self.dic_stds[cat]['prec_1'].append(prec_1)
self.dic_stds['all']['prec_2'].append(prec_2)
self.dic_stds[clst]['prec_2'].append(prec_2)
self.dic_stds[cat]['prec_2'].append(prec_2)
dic_stds['all']['prec_1'].append(prec_1)
dic_stds[clst]['prec_1'].append(prec_1)
dic_stds[mode]['prec_1'].append(prec_1)
dic_stds['all']['prec_2'].append(prec_2)
dic_stds[clst]['prec_2'].append(prec_2)
dic_stds[mode]['prec_2'].append(prec_2)

def show_statistics(self):

@ -301,9 +317,21 @@ class EvalKitti:
print('-'*90)
self.summary_table(all_methods)

# Uncertainty
for net in ('monoloco_pp', 'monstereo'):
print(('-'*100))
print(net.upper())
for clst in ('easy', 'moderate', 'hard', 'all'):
print(" Annotations in clst {}: {:.0f}, Recall: {:.1f}. Precision: {:.2f}, Relative size is {:.1f} %"
.format(clst,
self.dic_stats['test'][net][clst]['cnt'],
self.dic_stats['test'][net][clst]['interval']*100,
self.dic_stats['test'][net][clst]['prec_1'],
self.dic_stats['test'][net][clst]['epi_rel']*100))

if self.verbose:
all_methods_merged = list(chain.from_iterable((method, method + '_merged') for method in all_methods))
for key in all_methods_merged:
for key in all_methods:
print(key.upper())
for clst in self.CLUSTERS[:4]:
print(" {} Average error in cluster {}: {:.2f} with a max error of {:.1f}, "
"for {} annotations"
@ -311,22 +339,14 @@ class EvalKitti:
self.dic_stats['test'][key][clst]['max'],
self.dic_stats['test'][key][clst]['cnt']))

if key == 'monoloco':
print("% of annotation inside the confidence interval: {:.1f} %, "
"of which {:.1f} % at higher risk"
.format(self.dic_stats['test'][key][clst]['interval']*100,
self.dic_stats['test'][key][clst]['at_risk']*100))

for perc in self.ALP_THRESHOLDS:
print("{} Instances with error {}: {:.2f} %"
.format(key, perc, 100 * average(self.errors[key][perc])))

print("\nMatched annotations: {:.1f} %".format(self.errors[key]['matched']))
print(" Detected annotations : {}/{} ".format(self.dic_cnt[key], self.cnt_gt))
print(" Detected annotations : {}/{} ".format(self.dic_cnt[key], self.cnt_gt['all']))
print("-" * 100)

print("\n Annotations inside the confidence interval: {:.1f} %"
.format(self.dic_stats['test']['monoloco']['all']['interval']))
print("precision 1: {:.2f}".format(self.dic_stats['test']['monoloco']['all']['prec_1']))
print("precision 2: {:.2f}".format(self.dic_stats['test']['monoloco']['all']['prec_2']))

@ -337,8 +357,8 @@ class EvalKitti:
for perc in ['<0.5m', '<1m', '<2m']]
for key in all_methods]

ale = [[str(self.dic_stats['test'][key + '_merged'][clst]['mean'])[:4] + ' (' +
str(self.dic_stats['test'][key][clst]['mean'])[:4] + ')'
ale = [[str(round(self.dic_stats['test'][key][clst]['mean'], 2))[:4] + ' [' +
str(round(self.dic_stats['test'][key][clst]['cnt'] / self.cnt_gt[clst] * 100))[:2] + '%]'
for clst in self.CLUSTERS[:4]]
for key in all_methods]

@ -346,17 +366,39 @@
print(tabulate(results, headers=self.HEADERS))
print('-' * 90 + '\n')

def stats_height(self):
heights = []
for name in self.set_val:
path_gt = os.path.join(self.dir_gt, name)
self.name = name
# Iterate over each line of the gt file and save box location and distances
out_gt = parse_ground_truth(path_gt, 'pedestrian')
boxes_gt, ys, truncs_gt, occs_gt = out_gt  # pylint: disable=unbalanced-tuple-unpacking
for label in ys:
heights.append(label[4])
import numpy as np
tail1, tail2 = np.nanpercentile(np.array(heights), [5, 95])
print(average(heights))
print(len(heights))
print(tail1, tail2)

def get_statistics(dic_stats, errors, dic_stds, key):
"""Update statistics of a cluster"""

dic_stats['mean'] = average(errors)
dic_stats['max'] = max(errors)
dic_stats['cnt'] = len(errors)
try:
dic_stats['mean'] = average(errors)
dic_stats['max'] = max(errors)
dic_stats['cnt'] = len(errors)
except ValueError:
dic_stats['mean'] = -1
dic_stats['max'] = -1
dic_stats['cnt'] = -1

if key == 'monoloco':
if key in ('monoloco', 'monoloco_pp', 'monstereo'):
dic_stats['std_ale'] = average(dic_stds['ale'])
dic_stats['std_epi'] = average(dic_stds['epi'])
dic_stats['epi_rel'] = average(dic_stds['epi_rel'])
dic_stats['interval'] = average(dic_stds['interval'])
dic_stats['at_risk'] = average(dic_stds['at_risk'])
dic_stats['prec_1'] = average(dic_stds['prec_1'])
@ -376,13 +418,12 @@ def add_true_negatives(err, cnt_gt):

def find_cluster(dd, clusters):
"""Find the correct cluster. The first and the last one are not numeric"""
"""Find the correct cluster. Distances beyond the last cluster fall into 'excluded' (together with the non-numeric KITTI categories)"""

for clst in clusters[4: -1]:
if dd <= int(clst):
for idx, clst in enumerate(clusters[:-1]):
if int(clst) < dd <= int(clusters[idx+1]):
return clst

return clusters[-1]
return 'excluded'
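
# Worked example of the binning above, using the numeric clusters
# CLUSTERS[4:] = ('3', '5', ..., '31', '49'):
#
#   find_cluster(12.3, CLUSTERS[4:])  # -> '11'       (11 < 12.3 <= 13)
#   find_cluster(60.0, CLUSTERS[4:])  # -> 'excluded' (beyond the last edge)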

def extract_indices(idx_to_check, *args):
@ -410,3 +451,14 @@ def extract_indices(idx_to_check, *args):
def average(my_list):
"""Calculate the mean of a list"""
return sum(my_list) / len(my_list)

def filter_directories(main_dir, methods):
# Iterate over a copy: removing from the list while iterating it would skip elements
for method in list(methods):
dir_method = os.path.join(main_dir, method)
if not os.path.exists(dir_method):
methods.remove(method)
print(f"\nMethod {method}. No directory found. Skipping it...")
elif not os.listdir(dir_method):
methods.remove(method)
print(f"\nMethod {method}. Directory is empty. Skipping it...")

237
monoloco/eval/eval_variance.py
Normal file
@ -0,0 +1,237 @@
# pylint: disable=too-many-statements,too-many-branches,cyclic-import

"""Joints Analysis: Supplementary material of MonStereo"""

import json
import os
from collections import defaultdict

import numpy as np
import matplotlib.pyplot as plt

from .eval_kitti import find_cluster, average
from ..visuals.figures import get_distances

COCO_KEYPOINTS = [
'nose',  # 0
'left_eye',  # 1
'right_eye',  # 2
'left_ear',  # 3
'right_ear',  # 4
'left_shoulder',  # 5
'right_shoulder',  # 6
'left_elbow',  # 7
'right_elbow',  # 8
'left_wrist',  # 9
'right_wrist',  # 10
'left_hip',  # 11
'right_hip',  # 12
'left_knee',  # 13
'right_knee',  # 14
'left_ankle',  # 15
'right_ankle',  # 16
]


def joints_variance(joints, clusters, dic_ms):
# CLUSTERS = ('3', '5', '7', '9', '11', '13', '15', '17', '19', '21', '23', '25', '27', '29', '31', '49')
BF = 0.54 * 721
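# BF is the stereo baseline times the focal length, following the standard
# KITTI setup (0.54 m baseline, ~721 px focal): depth and disparity are
# related by z = BF / disparity, e.g. 18 px -> z = 0.54 * 721 / 18 ≈ 21.6 m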
phase = 'train'
methods = ('pifpaf', 'mask')
dic_fin = {}

for method in methods:
dic_var = defaultdict(lambda: defaultdict(list))
dic_joints = defaultdict(list)
dic_avg = defaultdict(lambda: defaultdict(float))
path_joints = joints + '_' + method + '.json'

with open(path_joints, 'r') as f:
dic_jo = json.load(f)

for idx, keypoint in enumerate(dic_jo[phase]['kps']):
# if dic_jo[phase]['names'][idx] == '005856.txt' and dic_jo[phase]['Y'][idx][2] > 14:
#     aa = 4
assert len(keypoint) < 2
kps = np.array(keypoint[0])[:, :17]
kps_r = np.array(keypoint[0])[:, 17:]
disps = kps[0] - kps_r[0]
zz = dic_jo[phase]['Y'][idx][2]
disps_3 = get_variance(kps, kps_r, zz)
disps_8 = get_variance_conf(kps, kps_r, num=8)
disps_4 = get_variance_conf(kps, kps_r, num=4)
disp_gt = BF / zz
clst = find_cluster(zz, clusters)  # 4 = '3' 35 = '31' 42 = 2 = 'excl'
dic_var['std_d'][clst].append(disps.std())
errors = np.minimum(30, np.abs(zz - BF / disps))
dic_var['mean_dev'][clst].append(min(30, abs(zz - BF / np.median(disps))))
dic_var['mean_3'][clst].append(min(30, abs(zz - BF / disps_3.mean())))
dic_var['mean_8'][clst].append(min(30, abs(zz - BF / np.median(disps_8))))
dic_var['mean_4'][clst].append(min(30, abs(zz - BF / np.median(disps_4))))
arg_best = np.argmin(errors)
conf = np.mean((kps[2][arg_best], kps_r[2][arg_best]))
dic_var['mean_best'][clst].append(np.min(errors))
dic_var['conf_best'][clst].append(conf)
dic_var['conf'][clst].append(np.mean((np.mean(kps[2]), np.mean(kps_r[2]))))
# dic_var['std_z'][clst].append(zzs.std())
for ii, el in enumerate(disps):
if abs(el-disp_gt) < 1:
dic_var['rep'][clst].append(1)
dic_joints[str(ii)].append(1)
else:
dic_var['rep'][clst].append(0)
dic_joints[str(ii)].append(0)

for key in dic_var:
for clst in clusters[:-1]:  # 41 needs to be excluded (36 = '31')
dic_avg[key][clst] = average(dic_var[key][clst])
dic_fin[method] = dic_avg
for key in dic_joints:
dic_fin[method]['joints'][key] = average(dic_joints[key])
dic_fin['monstereo'] = {clst: dic_ms[clst]['mean'] for clst in clusters[:-1]}
variance_figures(dic_fin, clusters)

def get_variance(kps, kps_r, zz):

thresh = 0.5 - zz / 100
disps_2 = []
disps = kps[0] - kps_r[0]
arg_disp = np.argsort(disps)[::-1]

for idx in arg_disp[1:]:
if kps[2][idx] > thresh and kps_r[2][idx] > thresh:
disps_2.append(disps[idx])
if len(disps_2) >= 3:
return np.array(disps_2)
return disps

def get_variance_conf(kps, kps_r, num=8):

disps_conf = []
confs = (kps[2, :] + kps_r[2, :]) / 2
disps = kps[0] - kps_r[0]
arg_disp = np.argsort(confs)[::-1]

for idx in arg_disp[:num]:
disps_conf.append(disps[idx])
return np.array(disps_conf)
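
# Minimal sketch of the selector above with toy arrays, assuming the rows of
# kps are [u, v, confidence] (3 joints here instead of 17 for brevity):
#
#   kps   = np.array([[110., 100., 95.], [50., 40., 30.], [0.9, 0.4, 0.8]])
#   kps_r = np.array([[100., 92., 85.], [50., 40., 30.], [0.8, 0.3, 0.7]])
#   get_variance_conf(kps, kps_r, num=2)  # -> array([10., 10.]), disparities
#                                         #    of the two most confident joints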

def variance_figures(dic_fin, clusters):
"""Predicted confidence intervals and task error as a function of ground-truth distance"""

dir_out = 'docs'
x_min = 3
x_max = 43
y_min = 0
y_max = 1

plt.figure(0)
plt.xlabel("Ground-truth distance [m]")
plt.title("Repeatability by distance")
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.grid(linewidth=0.2)

xxs = get_distances(clusters)
yys_p = [el for _, el in dic_fin['pifpaf']['rep'].items()]
yys_m = [el for _, el in dic_fin['mask']['rep'].items()]
plt.plot(xxs, yys_p, marker='s', label="PifPaf")
plt.plot(xxs, yys_m, marker='o', label="Mask R-CNN")
plt.tight_layout()
plt.legend()
path_fig = os.path.join(dir_out, 'repeatability.png')
plt.savefig(path_fig)
print("Figure of repeatability saved in {}".format(path_fig))

plt.figure(1)
plt.xlabel("Ground-truth distance [m]")
plt.ylabel("[m]")
plt.title("Depth error")
plt.grid(linewidth=0.2)
y_min = 0
y_max = 2.7
plt.ylim(y_min, y_max)
yys_p = [el for _, el in dic_fin['pifpaf']['mean_dev'].items()]
# yys_m = [el for _, el in dic_fin['mask']['mean_dev'].items()]
yys_p_3 = [el for _, el in dic_fin['pifpaf']['mean_3'].items()]
yys_p_8 = [el for _, el in dic_fin['pifpaf']['mean_8'].items()]
yys_p_4 = [el for _, el in dic_fin['pifpaf']['mean_4'].items()]
# yys_m_3 = [el for _, el in dic_fin['mask']['mean_3'].items()]
yys_ms = [el for _, el in dic_fin['monstereo'].items()]
yys_p_best = [el for _, el in dic_fin['pifpaf']['mean_best'].items()]
plt.plot(xxs, yys_p_4, marker='o', linestyle=':', label="PifPaf (highest 4)")
plt.plot(xxs, yys_p, marker='+', label="PifPaf (median)")
# plt.plot(xxs, yys_m, marker='o', label="Mask R-CNN (median)")
plt.plot(xxs, yys_p_3, marker='s', linestyle='--', label="PifPaf (closest 3)")
plt.plot(xxs, yys_p_8, marker='*', linestyle=':', label="PifPaf (highest 8)")
plt.plot(xxs, yys_ms, marker='^', label="MonStereo")
plt.plot(xxs, yys_p_best, marker='o', label="PifPaf (best)")
# plt.plot(xxs, yys_m_3, marker='o', color='r', label="Mask R-CNN (closest 3)")
# plt.plot(xxs, yys_mon, marker='o', color='b', label="Our MonStereo")

plt.legend()
plt.tight_layout()
path_fig = os.path.join(dir_out, 'mean_deviation.png')
plt.savefig(path_fig)
print("Figure of mean deviation saved in {}".format(path_fig))

plt.figure(2)
plt.xlabel("Ground-truth distance [m]")
plt.ylabel("Pixels")
plt.title("Standard deviation of joints disparity")
yys_p = [el for _, el in dic_fin['pifpaf']['std_d'].items()]
yys_m = [el for _, el in dic_fin['mask']['std_d'].items()]
yys_p_z = [el for _, el in dic_fin['pifpaf']['std_z'].items()]
yys_m_z = [el for _, el in dic_fin['mask']['std_z'].items()]
plt.plot(xxs, yys_p, marker='s', label="PifPaf")
plt.plot(xxs, yys_m, marker='o', label="Mask R-CNN")
# plt.plot(xxs, yys_p_z, marker='s', color='b', label="PifPaf (meters)")
# plt.plot(xxs, yys_m_z, marker='o', color='r', label="Mask R-CNN (meters)")

plt.grid(linewidth=0.2)
plt.legend()
path_fig = os.path.join(dir_out, 'std_joints.png')
plt.savefig(path_fig)
print("Figure of standard deviation of joints by distance saved in {}".format(path_fig))

plt.figure(3)
# plt.style.use('ggplot')
width = 0.35
xxs = np.arange(len(COCO_KEYPOINTS))
yys_p = [el for _, el in dic_fin['pifpaf']['joints'].items()]
yys_m = [el for _, el in dic_fin['mask']['joints'].items()]
plt.bar(xxs, yys_p, width, color='C0', label='Pifpaf')
plt.bar(xxs + width, yys_m, width, color='C1', label='Mask R-CNN')
plt.ylim(0, 1)

plt.xlabel("Keypoints")
plt.title("Repeatability by keypoint type")

plt.xticks(xxs + width / 2, xxs)
plt.legend(loc='best')
path_fig = os.path.join(dir_out, 'repeatability_2.png')
plt.savefig(path_fig)
plt.close('all')
print("Figure of repeatability by keypoint saved in {}".format(path_fig))

plt.figure(4)
plt.xlabel("Ground-truth distance [m]")
plt.ylabel("Confidence")
plt.grid(linewidth=0.2)
xxs = get_distances(clusters)
yys_p_conf = [el for _, el in dic_fin['pifpaf']['conf'].items()]
yys_p_conf_best = [el for _, el in dic_fin['pifpaf']['conf_best'].items()]
yys_m_conf = [el for _, el in dic_fin['mask']['conf'].items()]
yys_m_conf_best = [el for _, el in dic_fin['mask']['conf_best'].items()]
plt.plot(xxs, yys_p_conf_best, marker='s', color='lightblue', label="PifPaf (best)")
plt.plot(xxs, yys_p_conf, marker='s', color='b', label="PifPaf (mean)")
plt.plot(xxs, yys_m_conf_best, marker='^', color='darkorange', label="Mask (best)")
plt.plot(xxs, yys_m_conf, marker='o', color='r', label="Mask R-CNN (mean)")
plt.legend()
plt.tight_layout()
path_fig = os.path.join(dir_out, 'confidence.png')
plt.savefig(path_fig)
print("Figure of confidence saved in {}".format(path_fig))
@ -1,221 +1,282 @@

"""Run monoloco over all the pifpaf joints of KITTI images
|
||||
and extract and save the annotations in txt files"""
|
||||
# pylint: disable=too-many-branches
|
||||
|
||||
"""
|
||||
Run MonoLoco/MonStereo and converts annotations into KITTI format
|
||||
"""
|
||||
|
||||
import os
|
||||
import glob
|
||||
import shutil
|
||||
import math
|
||||
from collections import defaultdict
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
from ..network import MonoLoco
|
||||
from ..network import Loco
|
||||
from ..network.process import preprocess_pifpaf
|
||||
from ..eval.geom_baseline import compute_distance
|
||||
from ..utils import get_keypoints, pixel_to_camera, xyz_from_distance, get_calibration, open_annotations, split_training
|
||||
from ..network.geom_baseline import geometric_coordinates
|
||||
from ..utils import get_keypoints, pixel_to_camera, factory_file, factory_basename, make_new_directory, get_category, \
|
||||
xyz_from_distance, read_and_rewrite
|
||||
from .stereo_baselines import baselines_association
|
||||
from .reid_baseline import ReID, get_reid_features
|
||||
from .reid_baseline import get_reid_features, ReID
|
||||
|
||||
|
||||
class GenerateKitti:

def __init__(self, model, dir_ann, p_dropout=0.2, n_dropout=0, stereo=True):
dir_gt = os.path.join('data', 'kitti', 'gt')
dir_gt_new = os.path.join('data', 'kitti', 'gt_new')
dir_kk = os.path.join('data', 'kitti', 'calib')
dir_byc = '/data/lorenzo-data/kitti/object_detection/left'
monoloco_checkpoint = 'data/models/monoloco-190717-0952.pkl'
baselines = {'mono': [], 'stereo': []}

def __init__(self, args):

# Load Network
self.net = args.net
assert args.net in ('monstereo', 'monoloco_pp'), "net not recognized"

# Load monoloco
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
self.monoloco = MonoLoco(model=model, device=device, n_dropout=n_dropout, p_dropout=p_dropout)
self.dir_ann = dir_ann
self.model = Loco(
model=args.model,
net=args.net,
device=device,
n_dropout=args.n_dropout,
p_dropout=args.dropout,
linear_size=args.hidden_size
)

# Extract list of pifpaf files in validation images
dir_gt = os.path.join('data', 'kitti', 'gt')
self.set_basename = factory_basename(dir_ann, dir_gt)
self.dir_kk = os.path.join('data', 'kitti', 'calib')
self.dir_ann = args.dir_ann
self.generate_official = args.generate_official
assert os.listdir(self.dir_ann), "Annotation directory is empty"
self.set_basename = factory_basename(args.dir_ann, self.dir_gt)

# Calculate stereo baselines
self.stereo = stereo
if stereo:
self.baselines = ['ml_stereo', 'pose', 'reid']
self.cnt_disparity = defaultdict(int)
self.cnt_no_stereo = 0
# For quick testing
# ------------------------------------------------------------------------------------------------------------
# self.set_basename = ('001782',)
# self.set_basename = ('002282',)
# ------------------------------------------------------------------------------------------------------------

# ReID Baseline
weights_path = 'data/models/reid_model_market.pkl'
self.reid_net = ReID(weights_path=weights_path, device=device, num_classes=751, height=256, width=128)
self.dir_images = os.path.join('data', 'kitti', 'images')
self.dir_images_r = os.path.join('data', 'kitti', 'images_r')
# Add monocular and stereo baselines (they require monoloco as backbone)
if args.baselines:

# Load MonoLoco
self.baselines['mono'] = ['monoloco', 'geometric']
self.monoloco = Loco(
model=self.monoloco_checkpoint,
net='monoloco',
device=device,
n_dropout=args.n_dropout,
p_dropout=args.dropout,
linear_size=256
)
# Stereo baselines
if args.net == 'monstereo':
self.baselines['stereo'] = ['pose', 'reid']
self.cnt_disparity = defaultdict(int)
self.cnt_no_stereo = 0
self.dir_images = os.path.join('data', 'kitti', 'images')
self.dir_images_r = os.path.join('data', 'kitti', 'images_r')

# ReID Baseline
weights_path = 'data/models/reid_model_market.pkl'
self.reid_net = ReID(weights_path=weights_path, device=device, num_classes=751, height=256, width=128)

def run(self):
"""Run Monoloco and save txt files for KITTI evaluation"""

cnt_ann = cnt_file = cnt_no_file = 0
dir_out = {"monoloco": os.path.join('data', 'kitti', 'monoloco')}
make_new_directory(dir_out["monoloco"])
print("\nCreated empty output directory for txt files")

if self.stereo:
for key in self.baselines:
dir_out[key] = os.path.join('data', 'kitti', key)
make_new_directory(dir_out[key])
print("Created empty output directory for {}".format(key))
print("\n")
# Prepare empty folder
di = os.path.join('data', 'kitti', self.net)
make_new_directory(di)
dir_out = {self.net: di}

# Run monoloco over the list of images
for mode, names in self.baselines.items():
for name in names:
di = os.path.join('data', 'kitti', name)
make_new_directory(di)
dir_out[name] = di

# Run the model
for basename in self.set_basename:
path_calib = os.path.join(self.dir_kk, basename + '.txt')
annotations, kk, tt = factory_file(path_calib, self.dir_ann, basename)
boxes, keypoints = preprocess_pifpaf(annotations, im_size=(1242, 374))
assert keypoints, "all pifpaf files should have at least one annotation"
cnt_ann += len(boxes)
cnt_file += 1
cat = get_category(keypoints, os.path.join(self.dir_byc, basename + '.json'))
if keypoints:
annotations_r, _, _ = factory_file(path_calib, self.dir_ann, basename, mode='right')
_, keypoints_r = preprocess_pifpaf(annotations_r, im_size=(1242, 374))

# Run the network and the geometric baseline
outputs, varss = self.monoloco.forward(keypoints, kk)
dds_geom = eval_geometric(keypoints, kk, average_y=0.48)
if self.net == 'monstereo':
dic_out = self.model.forward(keypoints, kk, keypoints_r=keypoints_r)
elif self.net == 'monoloco_pp':
dic_out = self.model.forward(keypoints, kk)

# Save the file
uv_centers = get_keypoints(keypoints, mode='bottom')  # KITTI uses the bottom center to calculate depth
xy_centers = pixel_to_camera(uv_centers, kk, 1)
outputs = outputs.detach().cpu()
zzs = xyz_from_distance(outputs[:, 0:1], xy_centers)[:, 2].tolist()
all_outputs = {self.net: [dic_out['xyzd'], dic_out['bi'], dic_out['epi'],
dic_out['yaw'], dic_out['h'], dic_out['w'], dic_out['l']]}
zzs = [float(el[2]) for el in dic_out['xyzd']]

all_outputs = [outputs.detach().cpu(), varss.detach().cpu(), dds_geom, zzs]
all_inputs = [boxes, xy_centers]
all_params = [kk, tt]
path_txt = {'monoloco': os.path.join(dir_out['monoloco'], basename + '.txt')}
save_txts(path_txt['monoloco'], all_inputs, all_outputs, all_params)
# Save txt files
params = [kk, tt]
path_txt = os.path.join(dir_out[self.net], basename + '.txt')
save_txts(path_txt, boxes, all_outputs[self.net], params, mode=self.net, cat=cat)
cnt_ann += len(boxes)
cnt_file += 1

# Correct using stereo disparity and save in different folder
if self.stereo:
zzs = self._run_stereo_baselines(basename, boxes, keypoints, zzs, path_calib)
for key in zzs:
path_txt[key] = os.path.join(dir_out[key], basename + '.txt')
save_txts(path_txt[key], all_inputs, zzs[key], all_params, mode='baseline')
# MONO (+ STEREO BASELINES)
if self.baselines['mono']:
# MONOLOCO
dic_out = self.monoloco.forward(keypoints, kk)
zzs_geom, xy_centers = geometric_coordinates(keypoints, kk, average_y=0.48)
all_outputs['monoloco'] = [dic_out['d'], dic_out['bi'], dic_out['epi']] + [zzs_geom, xy_centers]
all_outputs['geometric'] = all_outputs['monoloco']

# monocular baselines
for key in self.baselines['mono']:
path_txt = {key: os.path.join(dir_out[key], basename + '.txt')}
save_txts(path_txt[key], boxes, all_outputs[key], params, mode=key, cat=cat)

# stereo baselines
if self.baselines['stereo']:
all_inputs = {}
dic_xyz = self._run_stereo_baselines(basename, boxes, keypoints, zzs, path_calib)
for key in dic_xyz:
all_outputs[key] = all_outputs['monoloco'].copy()
all_outputs[key][0] = dic_xyz[key]
all_inputs[key] = boxes

path_txt[key] = os.path.join(dir_out[key], basename + '.txt')
save_txts(path_txt[key], all_inputs[key], all_outputs[key], params,
mode='baseline',
cat=cat)

print("\nSaved {} annotations in {} txt files. Not found {} images".format(cnt_ann, cnt_file, cnt_no_file))

if self.stereo:
if self.net == 'monstereo':
print("STEREO:")
for key in self.baselines:
for key in self.baselines['stereo']:
print("Annotations corrected using {} baseline: {:.1f}%".format(
key, self.cnt_disparity[key] / cnt_ann * 100))
print("Maximum possible stereo associations: {:.1f}%".format(self.cnt_disparity['max'] / cnt_ann * 100))
print("Not found {}/{} stereo files".format(self.cnt_no_stereo, cnt_file))

if self.generate_official:
create_empty_files(dir_out, self.net)  # Create empty files for official evaluation

def _run_stereo_baselines(self, basename, boxes, keypoints, zzs, path_calib):

annotations_r, _, _ = factory_file(path_calib, self.dir_ann, basename, mode='right')
boxes_r, keypoints_r = preprocess_pifpaf(annotations_r, im_size=(1242, 374))
_, kk, tt = factory_file(path_calib, self.dir_ann, basename)

uv_centers = get_keypoints(keypoints, mode='bottom')  # KITTI uses the bottom center to calculate depth
xy_centers = pixel_to_camera(uv_centers, kk, 1)

# Stereo baselines
if keypoints_r:
path_image = os.path.join(self.dir_images, basename + '.png')
path_image_r = os.path.join(self.dir_images_r, basename + '.png')
reid_features = get_reid_features(self.reid_net, boxes, boxes_r, path_image, path_image_r)
zzs, cnt = baselines_association(self.baselines, zzs, keypoints, keypoints_r, reid_features)
dic_zzs, cnt = baselines_association(self.baselines['stereo'], zzs, keypoints, keypoints_r, reid_features)

for key in cnt:
self.cnt_disparity[key] += cnt[key]

else:
self.cnt_no_stereo += 1
zzs = {key: zzs for key in self.baselines}
return zzs
dic_zzs = {key: zzs for key in self.baselines['stereo']}

# Combine the stereo zz with x, y from 2D detection (no MonoLoco involved)
dic_xyz = defaultdict(list)
for key in dic_zzs:
for idx, zz_base in enumerate(dic_zzs[key]):
xx = float(xy_centers[idx][0]) * zz_base
yy = float(xy_centers[idx][1]) * zz_base
dic_xyz[key].append([xx, yy, zz_base])

return dic_xyz

def save_txts(path_txt, all_inputs, all_outputs, all_params, mode='monoloco'):
def save_txts(path_txt, all_inputs, all_outputs, all_params, mode='monoloco', cat=None):

assert mode in ('monoloco', 'baseline')
if mode == 'monoloco':
outputs, varss, dds_geom, zzs = all_outputs[:]
assert mode in ('monoloco', 'monstereo', 'geometric', 'baseline', 'monoloco_pp')

if mode in ('monstereo', 'monoloco_pp'):
xyzd, bis, epis, yaws, hs, ws, ls = all_outputs[:]
xyz = xyzd[:, 0:3]
tt = [0, 0, 0]
elif mode in ('monoloco', 'geometric'):
tt = [0, 0, 0]
dds, bis, epis, zzs_geom, xy_centers = all_outputs[:]
xyz = xyz_from_distance(dds, xy_centers)
else:
zzs = all_outputs
uv_boxes, xy_centers = all_inputs[:]
kk, tt = all_params[:]
_, tt = all_params[:]
xyz, bis, epis, zzs_geom, xy_centers = all_outputs[:]
uv_boxes = all_inputs[:]
assert len(uv_boxes) == len(list(xyz)), "Number of inputs different from number of outputs"

with open(path_txt, "w+") as ff:
for idx, zz_base in enumerate(zzs):
for idx, uv_box in enumerate(uv_boxes):

xx = float(xyz[idx][0]) - tt[0]
yy = float(xyz[idx][1]) - tt[1]
zz = float(xyz[idx][2]) - tt[2]

if mode == 'geometric':
zz = zzs_geom[idx]

xx = float(xy_centers[idx][0]) * zzs[idx] + tt[0]
yy = float(xy_centers[idx][1]) * zzs[idx] + tt[1]
zz = zz_base + tt[2]
cam_0 = [xx, yy, zz]
output_list = [0.]*3 + uv_boxes[idx][:-1] + [0.]*3 + cam_0 + [0.] + uv_boxes[idx][-1:]  # kitti format
ff.write("%s " % 'pedestrian')
bi = float(bis[idx])
epi = float(epis[idx])
if mode in ('monstereo', 'monoloco_pp'):
alpha, ry = float(yaws[0][idx]), float(yaws[1][idx])
hwl = [float(hs[idx]), float(ws[idx]), float(ls[idx])]
else:
alpha, ry, hwl = -10., -10., [0, 0, 0]

# Set the scale to obtain (approximately) same recall at evaluation
if mode == 'monstereo':
conf_scale = 0.03
elif mode == 'monoloco_pp':
conf_scale = 0.033
# conf_scale = 0.035  # nuScenes for having same recall
else:
conf_scale = 0.05
conf = conf_scale * (uv_box[-1]) / (bi / math.sqrt(xx ** 2 + yy ** 2 + zz ** 2))

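# Illustration of the rescaling above (not in the original file): the 2D
# detection confidence uv_box[-1] is divided by the relative uncertainty
# bi / distance, so a far-away but tightly-bounded detection keeps a high
# score. With conf_scale=0.033, uv_box[-1]=0.9, bi=0.8 and distance 20 m:
#   conf = 0.033 * 0.9 / (0.8 / 20) ≈ 0.74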
output_list = [alpha] + uv_box[:-1] + hwl + cam_0 + [ry, conf, bi, epi]
|
||||
category = cat[idx]
|
||||
if category < 0.1:
|
||||
ff.write("%s " % 'Pedestrian')
|
||||
else:
|
||||
ff.write("%s " % 'Cyclist')
|
||||
|
||||
ff.write("%i %i " % (-1, -1))
|
||||
for el in output_list:
|
||||
ff.write("%f " % el)
|
||||
|
||||
# add additional uncertainty information
|
||||
if mode == 'monoloco':
|
||||
ff.write("%f " % float(outputs[idx][1]))
|
||||
ff.write("%f " % float(varss[idx]))
|
||||
ff.write("%f " % dds_geom[idx])
|
||||
ff.write("\n")
|
||||
|
||||
|
||||
def factory_file(path_calib, dir_ann, basename, mode='left'):
    """Choose the annotation and the calibration files. Stereo option with ite = 1"""
def create_empty_files(dir_out, net):
    """Create empty txt files to run official kitti metrics on MonStereo and all other methods"""

    assert mode in ('left', 'right')
    p_left, p_right = get_calibration(path_calib)
    methods = ['pseudo-lidar', 'monopsr', '3dop', 'm3d', 'oc-stereo', 'e2e', 'monodis', 'smoke']
    dirs = [os.path.join('data', 'kitti', method) for method in methods]
    dirs_orig = [os.path.join('data', 'kitti', method + '-orig') for method in methods]

    if mode == 'left':
        kk, tt = p_left[:]
        path_ann = os.path.join(dir_ann, basename + '.png.pifpaf.json')
    for di, di_orig in zip(dirs, dirs_orig):
        make_new_directory(di)

    else:
        kk, tt = p_right[:]
        path_ann = os.path.join(dir_ann + '_right', basename + '.png.pifpaf.json')
        for i in range(7481):
            name = "0" * (6 - len(str(i))) + str(i) + '.txt'
            path_orig = os.path.join(di_orig, name)
            path = os.path.join(di, name)

    annotations = open_annotations(path_ann)
    # If the file exists, rewrite it in the new folder, otherwise create an empty file
    read_and_rewrite(path_orig, path)

    return annotations, kk, tt

def eval_geometric(keypoints, kk, average_y=0.48):
    """Evaluate geometric distance"""

    dds_geom = []

    uv_centers = get_keypoints(keypoints, mode='center')
    uv_shoulders = get_keypoints(keypoints, mode='shoulder')
    uv_hips = get_keypoints(keypoints, mode='hip')

    xy_centers = pixel_to_camera(uv_centers, kk, 1)
    xy_shoulders = pixel_to_camera(uv_shoulders, kk, 1)
    xy_hips = pixel_to_camera(uv_hips, kk, 1)

    for idx, xy_center in enumerate(xy_centers):
        zz = compute_distance(xy_shoulders[idx], xy_hips[idx], average_y)
        xyz_center = np.array([xy_center[0], xy_center[1], zz])
        dd_geom = float(np.linalg.norm(xyz_center))
        dds_geom.append(dd_geom)

    return dds_geom


def make_new_directory(dir_out):
    """Remove the output directory if it already exists (avoid residual txt files)"""
    if os.path.exists(dir_out):
        shutil.rmtree(dir_out)
    os.makedirs(dir_out)

def factory_basename(dir_ann, dir_gt):
    """Return all the basenames in the annotations folder corresponding to validation images"""

    # Extract ground truth validation images
    names_gt = tuple(os.listdir(dir_gt))
    path_train = os.path.join('splits', 'kitti_train.txt')
    path_val = os.path.join('splits', 'kitti_val.txt')
    _, set_val_gt = split_training(names_gt, path_train, path_val)
    set_val_gt = {os.path.basename(x).split('.')[0] for x in set_val_gt}

    # Extract pifpaf files corresponding to validation images
    list_ann = glob.glob(os.path.join(dir_ann, '*.json'))
    set_basename = {os.path.basename(x).split('.')[0] for x in list_ann}
    set_val = set_basename.intersection(set_val_gt)
    assert set_val, "Missing json annotation files to create txt files for the KITTI dataset"
    return set_val
    for i in range(7481):
        name = "0" * (6 - len(str(i))) + str(i) + '.txt'
        ff = open(os.path.join(dir_out[net], name), "a+")
        ff.close()

@ -29,7 +29,7 @@ def get_reid_features(reid_net, boxes, boxes_r, path_image, path_image_r):

class ReID(object):
    def __init__(self, weights_path, device, num_classes=751, height=256, width=128):
        super(ReID, self).__init__()
        super().__init__()
        torch.manual_seed(1)
        self.device = device

@ -90,7 +90,7 @@ class ReID(object):

class ResNet50(nn.Module):
    def __init__(self, num_classes, loss):
        super(ResNet50, self).__init__()
        super().__init__()
        self.loss = loss
        resnet50 = torchvision.models.resnet50(pretrained=True)
        self.base = nn.Sequential(*list(resnet50.children())[:-2])

@ -1,12 +1,11 @@

"""Generate stereo baselines for KITTI evaluation"""

import warnings
from collections import defaultdict

import numpy as np

from ..utils import get_keypoints
from ..utils import get_keypoints, mask_joint_disparity, disparity_to_depth


def baselines_association(baselines, zzs, keypoints, keypoints_right, reid_features):
@ -38,13 +37,14 @@ def baselines_association(baselines, zzs, keypoints, keypoints_right, reid_featu
    best = np.nanmin(similarity)
    while not np.isnan(best):
        idx, arg_best = np.unravel_index(np.nanargmin(similarity), similarity.shape)  # pylint: disable=W0632
        zz_stereo, flag = similarity_to_depth(avg_disparities[idx, arg_best])
        zz_stereo, flag = disparity_to_depth(avg_disparities[idx, arg_best])
        zz_mono = zzs[idx]
        similarity[idx, :] = np.nan
        indices_stereo.append(idx)

        # Filter stereo depth
        if flag and verify_stereo(zz_stereo, zz_mono, disparities_x[idx, arg_best], disparities_y[idx, arg_best]):
        # if flag and verify_stereo(zz_stereo, zz_mono, disparities_x[idx, arg_best], disparities_y[idx, arg_best]):
        if flag and (1 < zz_stereo < 80):  # Do not add hand-crafted verifications to stereo baselines
            zzs_stereo[key][idx] = zz_stereo
            cnt_stereo[key] += 1
            similarity[:, arg_best] = np.nan
@ -101,77 +101,3 @@ def features_similarity(features, features_r, key, avg_disparities, zzs):

        similarity[idx] = sim_row
    return similarity


def similarity_to_depth(avg_disparity):

    try:
        zz_stereo = 0.54 * 721. / float(avg_disparity)
        flag = True
    except (ZeroDivisionError, ValueError):  # All nan-slices or zero division
        zz_stereo = np.nan
        flag = False

    return zz_stereo, flag

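The constant 0.54 * 721. above is the KITTI stereo baseline (0.54 m) multiplied by the focal length in pixels (721 px), i.e. the standard relation z = B·f/d. A quick sanity check with assumed disparities:

```python
# Sanity check for the disparity-to-depth conversion, assuming the
# KITTI baseline (0.54 m) and focal length (721 px) used above.
B_F = 0.54 * 721.                     # ~389.3 (m * px)

for disparity in (5., 10., 40.):
    depth = B_F / disparity           # z = B * f / d
    print("disparity %5.1f px -> depth %5.1f m" % (disparity, depth))
# disparity   5.0 px -> depth  77.9 m
# disparity  10.0 px -> depth  38.9 m
# disparity  40.0 px -> depth   9.7 m
```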
def mask_joint_disparity(keypoints, keypoints_r):
    """Filter joints based on confidence and interquartile range of the distribution"""

    CONF_MIN = 0.3
    with warnings.catch_warnings(), np.errstate(invalid='ignore'):
        disparity_x_mask = np.empty((keypoints.shape[0], keypoints_r.shape[0], 17))
        disparity_y_mask = np.empty((keypoints.shape[0], keypoints_r.shape[0], 17))
        avg_disparity = np.empty((keypoints.shape[0], keypoints_r.shape[0]))

        for idx, kps in enumerate(keypoints):
            disparity_x = kps[0, :] - keypoints_r[:, 0, :]
            disparity_y = kps[1, :] - keypoints_r[:, 1, :]

            # Mask for low confidence
            mask_conf_left = kps[2, :] > CONF_MIN
            mask_conf_right = keypoints_r[:, 2, :] > CONF_MIN
            mask_conf = mask_conf_left & mask_conf_right
            disparity_x_conf = np.where(mask_conf, disparity_x, np.nan)
            disparity_y_conf = np.where(mask_conf, disparity_y, np.nan)

            # Mask outliers using iqr
            mask_outlier = interquartile_mask(disparity_x_conf)
            x_mask_row = np.where(mask_outlier, disparity_x_conf, np.nan)
            y_mask_row = np.where(mask_outlier, disparity_y_conf, np.nan)
            avg_row = np.nanmedian(x_mask_row, axis=1)  # ignore the nan

            # Append
            disparity_x_mask[idx] = x_mask_row
            disparity_y_mask[idx] = y_mask_row
            avg_disparity[idx] = avg_row

    return avg_disparity, disparity_x_mask, disparity_y_mask

def verify_stereo(zz_stereo, zz_mono, disparity_x, disparity_y):
    """Verify disparities based on coefficient of variation, maximum y difference and z difference wrt monoloco"""

    COV_MIN = 0.1
    y_max_difference = (50 / zz_mono)
    z_max_difference = 0.6 * zz_mono

    cov = float(np.nanstd(disparity_x) / np.abs(np.nanmean(disparity_x)))  # Coefficient of variation
    avg_disparity_y = np.nanmedian(disparity_y)

    if abs(zz_stereo - zz_mono) < z_max_difference \
            and avg_disparity_y < y_max_difference \
            and cov < COV_MIN \
            and 4 < zz_stereo < 40:
        return True
    # if not np.isnan(zz_stereo):
    #     return True
    return False

def interquartile_mask(distribution):
    quartile_1, quartile_3 = np.nanpercentile(distribution, [25, 75], axis=1)
    iqr = quartile_3 - quartile_1
    lower_bound = quartile_1 - (iqr * 1.5)
    upper_bound = quartile_3 + (iqr * 1.5)
    return (distribution < upper_bound.reshape(-1, 1)) & (distribution > lower_bound.reshape(-1, 1))

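A standalone illustration of the 1.5-IQR rule implemented above, on assumed toy disparities:

```python
import numpy as np

# One row of per-joint disparities with a clear outlier (42.0) and a NaN.
row = np.array([[9.8, 10.1, 10.0, 42.0, 9.9, np.nan]])

quartile_1, quartile_3 = np.nanpercentile(row, [25, 75], axis=1)
iqr = quartile_3 - quartile_1
mask = (row > (quartile_1 - 1.5 * iqr).reshape(-1, 1)) \
     & (row < (quartile_3 + 1.5 * iqr).reshape(-1, 1))
print(mask)   # [[ True  True  True False  True False]]: outlier and NaN rejected
```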
@ -1,4 +1,3 @@

from .pifpaf import PifPaf, ImageList
from .losses import LaplacianLoss
from .net import MonoLoco
from .net import Loco
from .process import unnormalize_bi, extract_outputs, extract_labels, extract_labels_aux

@ -1,15 +1,115 @@

import torch
import torch.nn as nn


class LinearModel(nn.Module):
class MonStereoModel(nn.Module):

    def __init__(self, input_size, output_size=2, linear_size=512, p_dropout=0.2, num_stage=3, device='cuda'):
        super().__init__()

        self.num_stage = num_stage
        self.stereo_size = input_size
        self.mono_size = int(input_size / 2)
        self.output_size = output_size - 1
        self.linear_size = linear_size
        self.p_dropout = p_dropout
        self.num_stage = num_stage
        self.linear_stages = []
        self.device = device

        # Initialize weights

        # Preprocessing
        self.w1 = nn.Linear(self.stereo_size, self.linear_size)
        self.batch_norm1 = nn.BatchNorm1d(self.linear_size)

        # Internal loop
        for _ in range(num_stage):
            self.linear_stages.append(MyLinearSimple(self.linear_size, self.p_dropout))
        self.linear_stages = nn.ModuleList(self.linear_stages)

        # Post processing
        self.w2 = nn.Linear(self.linear_size, self.linear_size)
        self.w3 = nn.Linear(self.linear_size, self.linear_size)
        self.batch_norm3 = nn.BatchNorm1d(self.linear_size)

        # ------------------------Other----------------------------------------------
        # Auxiliary
        self.w_aux = nn.Linear(self.linear_size, 1)

        # Final
        self.w_fin = nn.Linear(self.linear_size, self.output_size)

        # NO-weight operations
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(self.p_dropout)

    def forward(self, x):

        y = self.w1(x)
        y = self.batch_norm1(y)
        y = self.relu(y)
        y = self.dropout(y)

        for i in range(self.num_stage):
            y = self.linear_stages[i](y)

        # Auxiliary task
        y = self.w2(y)
        aux = self.w_aux(y)

        # Final layers
        y = self.w3(y)
        y = self.batch_norm3(y)
        y = self.relu(y)
        y = self.dropout(y)
        y = self.w_fin(y)

        # Cat with auxiliary task
        y = torch.cat((y, aux), dim=1)
        return y


class MyLinearSimple(nn.Module):
    def __init__(self, linear_size, p_dropout=0.5):
        super().__init__()
        self.l_size = linear_size

        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(p_dropout)

        self.w1 = nn.Linear(self.l_size, self.l_size)
        self.batch_norm1 = nn.BatchNorm1d(self.l_size)

        self.w2 = nn.Linear(self.l_size, self.l_size)
        self.batch_norm2 = nn.BatchNorm1d(self.l_size)

    def forward(self, x):

        y = self.w1(x)
        y = self.batch_norm1(y)
        y = self.relu(y)
        y = self.dropout(y)

        y = self.w2(y)
        y = self.batch_norm2(y)
        y = self.relu(y)
        y = self.dropout(y)

        out = x + y

        return out

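A minimal shape check for the stereo architecture above (a sketch, assuming the sizes used elsewhere in this diff: 68 stereo inputs, 10 outputs, hidden size 1024):

```python
import torch

# Assumes MonStereoModel and MyLinearSimple from this file are importable.
model = MonStereoModel(input_size=68, output_size=10, linear_size=1024, p_dropout=0.2)
model.eval()                  # BatchNorm in eval mode

x = torch.randn(4, 68)        # batch of 4 left-right keypoint pairs
y = model(x)
print(y.shape)                # torch.Size([4, 10]): 9 regression outputs + 1 auxiliary logit
```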
class MonolocoModel(nn.Module):
    """
    Architecture inspired by https://github.com/una-dinosauria/3d-pose-baseline
    Pytorch implementation from: https://github.com/weigq/3d_pose_baseline_pytorch
    """

    def __init__(self, input_size, output_size=2, linear_size=256, p_dropout=0.2, num_stage=3):
        super(LinearModel, self).__init__()
        super().__init__()

        self.input_size = input_size
        self.output_size = output_size
@ -23,7 +123,7 @@ class LinearModel(nn.Module):

        self.linear_stages = []
        for _ in range(num_stage):
            self.linear_stages.append(Linear(self.linear_size, self.p_dropout))
            self.linear_stages.append(MyLinear(self.linear_size, self.p_dropout))
        self.linear_stages = nn.ModuleList(self.linear_stages)

        # post processing
@ -45,9 +145,9 @@ class LinearModel(nn.Module):
        return y


class Linear(nn.Module):
class MyLinear(nn.Module):
    def __init__(self, linear_size, p_dropout=0.5):
        super(Linear, self).__init__()
        super().__init__()
        self.l_size = linear_size

        self.relu = nn.ReLU(inplace=True)
@ -60,6 +160,7 @@ class Linear(nn.Module):
        self.batch_norm2 = nn.BatchNorm1d(self.l_size)

    def forward(self, x):

        y = self.w1(x)
        y = self.batch_norm1(y)
        y = self.relu(y)

213
monoloco/network/geom_baseline.py
Normal file
@ -0,0 +1,213 @@

import json
import logging
import math
from collections import defaultdict

import numpy as np

from monstereo.utils import pixel_to_camera, get_keypoints

AVERAGE_Y = 0.48
CLUSTERS = ['10', '20', '30', 'all']


def geometric_coordinates(keypoints, kk, average_y=0.48):
    """Evaluate geometric depths for a set of keypoints"""

    zzs_geom = []
    uv_shoulders = get_keypoints(keypoints, mode='shoulder')
    uv_hips = get_keypoints(keypoints, mode='hip')
    uv_centers = get_keypoints(keypoints, mode='center')

    xy_shoulders = pixel_to_camera(uv_shoulders, kk, 1)
    xy_hips = pixel_to_camera(uv_hips, kk, 1)
    xy_centers = pixel_to_camera(uv_centers, kk, 1)

    for idx, xy_shoulder in enumerate(xy_shoulders):
        zz = compute_depth(xy_shoulders[idx], xy_hips[idx], average_y)
        zzs_geom.append(zz)
    return zzs_geom, xy_centers

def geometric_baseline(joints):
    """
    List of json files --> 2 lists with mean and std for each segment and the total count of instances

    For each annotation:
    1. From gt boxes calculate the height (deltaY) for the segments head, shoulder, hip, ankle
    2. From mask boxes calculate distance of people using average height of people and real pixel height

    For left-right ambiguities we always choose the average of the joints

    The joints are mapped from 0 to 16 in the following order:
    ['nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear', 'left_shoulder', 'right_shoulder', 'left_elbow',
    'right_elbow', 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee', 'right_knee', 'left_ankle',
    'right_ankle']

    """
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    cnt_tot = 0
    dic_dist = defaultdict(lambda: defaultdict(list))

    # Access the joints file
    with open(joints, 'r') as ff:
        dic_joints = json.load(ff)

    # Calculate distances for all the instances in the joints dictionary
    for phase in ['train', 'val']:
        cnt = update_distances(dic_joints[phase], dic_dist, phase, AVERAGE_Y)
        cnt_tot += cnt

    # Calculate mean and std of each segment
    dic_h_means = calculate_heights(dic_dist['heights'], mode='mean')
    dic_h_stds = calculate_heights(dic_dist['heights'], mode='std')
    errors = calculate_error(dic_dist['error'])

    # Show results
    logger.info("Computed distance of {} annotations".format(cnt_tot))
    for key in dic_h_means:
        logger.info("Average height of segment {} is {:.2f} with a std of {:.2f}".
                    format(key, dic_h_means[key], dic_h_stds[key]))
    for clst in CLUSTERS:
        logger.info("Average error over the val set for clst {}: {:.2f}".format(clst, errors[clst]))
    logger.info("Joints used: {}".format(joints))

def update_distances(dic_fin, dic_dist, phase, average_y):

    # Loop over each annotation in the json file corresponding to the image
    cnt = 0
    for idx, kps in enumerate(dic_fin['kps']):

        # Extract pixel coordinates of head, shoulder, hip, ankle and save them
        dic_uv = {mode: get_keypoints(kps, mode) for mode in ['head', 'shoulder', 'hip', 'ankle']}

        # Convert segments from pixel coordinates to camera coordinates
        kk = dic_fin['K'][idx]
        z_met = dic_fin['boxes_3d'][idx][2]

        # Create a dict with all annotations in meters
        dic_xyz = {key: pixel_to_camera(dic_uv[key], kk, z_met) for key in dic_uv}
        dic_xyz_norm = {key: pixel_to_camera(dic_uv[key], kk, 1) for key in dic_uv}

        # Compute real height
        dy_met = abs(float((dic_xyz['hip'][0][1] - dic_xyz['shoulder'][0][1])))

        # Estimate distance for a single annotation
        z_met_real = compute_depth(dic_xyz_norm['shoulder'][0], dic_xyz_norm['hip'][0], average_y,
                                   mode='real', dy_met=dy_met)
        z_met_approx = compute_depth(dic_xyz_norm['shoulder'][0], dic_xyz_norm['hip'][0], average_y, mode='average')

        # Compute distance with respect to the center of the 3D bounding box
        d_real = math.sqrt(z_met_real ** 2 + dic_fin['boxes_3d'][idx][0] ** 2 + dic_fin['boxes_3d'][idx][1] ** 2)
        d_approx = math.sqrt(z_met_approx ** 2 +
                             dic_fin['boxes_3d'][idx][0] ** 2 + dic_fin['boxes_3d'][idx][1] ** 2)

        # Update the dictionary with distance and heights metrics
        dic_dist = update_dic_dist(dic_dist, dic_xyz, d_real, d_approx, phase)
        cnt += 1

    return cnt

def compute_depth(xyz_norm_1, xyz_norm_2, average_y, mode='average', dy_met=0):
    """
    Compute depth Z of a mask annotation (solving a linear system) for 2 possible cases:
    1. knowing the specific height of the annotation (head-ankle) dy_met
    2. using the mean height of people (average_y)
    """
    assert mode in ('average', 'real')

    x1 = float(xyz_norm_1[0])
    y1 = float(xyz_norm_1[1])
    x2 = float(xyz_norm_2[0])
    y2 = float(xyz_norm_2[1])
    xx = (x1 + x2) / 2

    # Choose if solving for the provided height or the average one.
    if mode == 'average':
        cc = - average_y  # Y axis goes down
    else:
        cc = -dy_met

    # Solving the linear system Ax = b
    matrix = np.array([[y1, 0, -xx],
                       [0, -y1, 1],
                       [y2, 0, -xx],
                       [0, -y2, 1]])

    bb = np.array([cc * xx, -cc, 0, 0]).reshape(4, 1)
    xx = np.linalg.lstsq(matrix, bb, rcond=None)
    z_met = abs(float(xx[0][1]))  # Abs takes into account specularity behind the observer

    return z_met

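Eliminating the auxiliary unknowns shows that the least-squares system above reduces to z = Δy_met / Δy_norm: the metric height of the segment divided by its extent in normalized image coordinates. A numeric sketch with assumed values:

```python
import numpy as np

# Shoulder and hip in normalized image coordinates (y = Y/Z, y-axis down).
x1 = x2 = 0.0
y1, y2 = -0.02, 0.02          # shoulder above hip, 0.04 apart
average_y = 0.48              # assumed average shoulder-hip height (m)
cc = -average_y
xx = (x1 + x2) / 2

matrix = np.array([[y1, 0, -xx],
                   [0, -y1, 1],
                   [y2, 0, -xx],
                   [0, -y2, 1]])
bb = np.array([cc * xx, -cc, 0, 0]).reshape(4, 1)
sol = np.linalg.lstsq(matrix, bb, rcond=None)
print(float(sol[0][1]))       # ~12.0 m, i.e. z = 0.48 / 0.04
```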
def update_dic_dist(dic_dist, dic_xyz, d_real, d_approx, phase):
    """For every annotation in a single image, update the final dictionary"""

    # Update the dict with heights metrics
    if phase == 'train':
        dic_dist['heights']['head'].append(float(dic_xyz['head'][0][1]))
        dic_dist['heights']['shoulder'].append(float(dic_xyz['shoulder'][0][1]))
        dic_dist['heights']['hip'].append(float(dic_xyz['hip'][0][1]))
        dic_dist['heights']['ankle'].append(float(dic_xyz['ankle'][0][1]))

    # Update the dict with distance metrics for the validation phase
    if phase == 'val':
        error = abs(d_real - d_approx)

        if d_real <= 10:
            dic_dist['error']['10'].append(error)
        elif d_real <= 20:
            dic_dist['error']['20'].append(error)
        elif d_real <= 30:
            dic_dist['error']['30'].append(error)
        else:
            dic_dist['error']['>30'].append(error)

        dic_dist['error']['all'].append(error)

    return dic_dist

def calculate_heights(heights, mode):
    """
    Compute statistics of the segment heights (converted to cm)
    """

    assert mode in ('mean', 'std', 'max')
    heights_fin = {}

    head_shoulder = np.array(heights['shoulder']) - np.array(heights['head'])
    shoulder_hip = np.array(heights['hip']) - np.array(heights['shoulder'])
    hip_ankle = np.array(heights['ankle']) - np.array(heights['hip'])

    if mode == 'mean':
        heights_fin['head_shoulder'] = float(np.mean(head_shoulder)) * 100
        heights_fin['shoulder_hip'] = float(np.mean(shoulder_hip)) * 100
        heights_fin['hip_ankle'] = float(np.mean(hip_ankle)) * 100

    elif mode == 'std':
        heights_fin['head_shoulder'] = float(np.std(head_shoulder)) * 100
        heights_fin['shoulder_hip'] = float(np.std(shoulder_hip)) * 100
        heights_fin['hip_ankle'] = float(np.std(hip_ankle)) * 100

    elif mode == 'max':
        heights_fin['head_shoulder'] = float(np.max(head_shoulder)) * 100
        heights_fin['shoulder_hip'] = float(np.max(shoulder_hip)) * 100
        heights_fin['hip_ankle'] = float(np.max(hip_ankle)) * 100

    return heights_fin

def calculate_error(dic_errors):
    """
    Compute the mean distance error for each distance cluster
    """
    errors = {}
    for clst in dic_errors:
        errors[clst] = float(np.mean(np.array(dic_errors[clst])))
    return errors
@ -1,27 +1,46 @@
# pylint: disable=too-many-statements

"""
Monoloco class. From 2D joints to real-world distances
Loco superclass for the MonStereo, MonoLoco and MonoLoco++ nets.
From 2D joints to real-world distances with monocular and/or stereo cameras
"""

import math
import logging
from collections import defaultdict

import torch

from ..utils import get_iou_matches, reorder_matches, get_keypoints, pixel_to_camera, xyz_from_distance
from .process import preprocess_monoloco, unnormalize_bi, laplace_sampling
from .architectures import LinearModel
from .process import preprocess_monstereo, preprocess_monoloco, extract_outputs, extract_outputs_mono,\
    filter_outputs, cluster_outputs, unnormalize_bi
from ..activity import social_interactions
from .architectures import MonolocoModel, MonStereoModel


class MonoLoco:

class Loco:
    """Class for both MonoLoco and MonStereo"""
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    INPUT_SIZE = 17 * 2
    LINEAR_SIZE = 256
    LINEAR_SIZE_MONO = 256
    N_SAMPLES = 100

    def __init__(self, model, device=None, n_dropout=0, p_dropout=0.2):
    def __init__(self, model, net='monstereo', device=None, n_dropout=0, p_dropout=0.2, linear_size=1024):
        self.net = net
        assert self.net in ('monstereo', 'monoloco', 'monoloco_p', 'monoloco_pp')
        if self.net == 'monstereo':
            input_size = 68
            output_size = 10
        elif self.net == 'monoloco_p':
            input_size = 34
            output_size = 9
            linear_size = 256
        elif self.net == 'monoloco_pp':
            input_size = 34
            output_size = 9
        else:
            input_size = 34
            output_size = 2

        if not device:
            self.device = torch.device('cpu')
@ -33,86 +52,225 @@ class MonoLoco:
        # if the path is provided load the model parameters
        if isinstance(model, str):
            model_path = model
            self.model = LinearModel(p_dropout=p_dropout, input_size=self.INPUT_SIZE, linear_size=self.LINEAR_SIZE)
            self.model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
            if net in ('monoloco', 'monoloco_p'):
                self.model = MonolocoModel(p_dropout=p_dropout, input_size=input_size, linear_size=linear_size,
                                           output_size=output_size)
            else:
                self.model = MonStereoModel(p_dropout=p_dropout, input_size=input_size, output_size=output_size,
                                            linear_size=linear_size, device=self.device)

        # if the model is directly provided
            self.model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
        else:
            self.model = model
        self.model.eval()  # Default is train
        self.model.to(self.device)

    def forward(self, keypoints, kk):
        """forward pass of monoloco network"""
    def forward(self, keypoints, kk, keypoints_r=None):
        """
        Forward pass of MonSter or monoloco network
        It includes preprocessing and postprocessing of data
        """
        if not keypoints:
            return None, None
            return None

        with torch.no_grad():
            inputs = preprocess_monoloco(torch.tensor(keypoints).to(self.device), torch.tensor(kk).to(self.device))
            if self.n_dropout > 0:
                self.model.dropout.training = True  # Manually reactivate dropout in eval
                total_outputs = torch.empty((0, inputs.size()[0])).to(self.device)
            keypoints = torch.tensor(keypoints).to(self.device)
            kk = torch.tensor(kk).to(self.device)

            if self.net == 'monoloco':
                inputs = preprocess_monoloco(keypoints, kk, zero_center=True)
                outputs = self.model(inputs)
                bi = unnormalize_bi(outputs)
                dic_out = {'d': outputs[:, 0:1], 'bi': bi}
                dic_out = {key: el.detach().cpu() for key, el in dic_out.items()}

            elif self.net == 'monoloco_p':
                inputs = preprocess_monoloco(keypoints, kk)
                outputs = self.model(inputs)
                dic_out = extract_outputs_mono(outputs)

            elif self.net == 'monoloco_pp':
                inputs = preprocess_monoloco(keypoints, kk)
                outputs = self.model(inputs)
                dic_out = extract_outputs(outputs)

                for _ in range(self.n_dropout):
                    outputs = self.model(inputs)
                    outputs = unnormalize_bi(outputs)
                    samples = laplace_sampling(outputs, self.N_SAMPLES)
                    total_outputs = torch.cat((total_outputs, samples), 0)
                varss = total_outputs.std(0)
                self.model.dropout.training = False
            else:
                varss = torch.zeros(inputs.size()[0])
                if keypoints_r:
                    keypoints_r = torch.tensor(keypoints_r).to(self.device)
                else:
                    keypoints_r = keypoints[0:1, :].clone()
                inputs, _ = preprocess_monstereo(keypoints, keypoints_r, kk)
                outputs = self.model(inputs)

                # Don't use dropout for the mean prediction
                outputs = cluster_outputs(outputs, keypoints_r.shape[0])
                outputs_fin, mask = filter_outputs(outputs)
                dic_out = extract_outputs(outputs_fin)

                # For Median baseline
                # dic_out = median_disparity(dic_out, keypoints, keypoints_r, mask)

        if self.n_dropout > 0 and self.net != 'monstereo':
            varss = self.epistemic_uncertainty(inputs)
            dic_out['epi'] = varss
        else:
            dic_out['epi'] = [0.] * outputs.shape[0]
        # Add in the dictionary

        return dic_out

    def epistemic_uncertainty(self, inputs):
        """
        Apply dropout at test time to obtain combined aleatoric + epistemic uncertainty
        """
        assert self.net in ('monoloco', 'monoloco_p', 'monoloco_pp'), "Not supported for MonStereo"
        from .process import laplace_sampling

        self.model.dropout.training = True  # Manually reactivate dropout in eval
        total_outputs = torch.empty((0, inputs.size()[0])).to(self.device)

        for _ in range(self.n_dropout):
            outputs = self.model(inputs)
            outputs = unnormalize_bi(outputs)
        return outputs, varss

            # Extract localization output
            if self.net == 'monoloco':
                db = outputs[:, 0:2]
            else:
                db = outputs[:, 2:4]

            # Unnormalize b and concatenate
            bi = unnormalize_bi(db)
            outputs = torch.cat((db[:, 0:1], bi), dim=1)

            samples = laplace_sampling(outputs, self.N_SAMPLES)
            total_outputs = torch.cat((total_outputs, samples), 0)
        varss = total_outputs.std(0)
        self.model.dropout.training = False
        return varss

    @staticmethod
    def post_process(outputs, varss, boxes, keypoints, kk, dic_gt=None, iou_min=0.3):
    def post_process(dic_in, boxes, keypoints, kk, dic_gt=None, iou_min=0.3, reorder=True, verbose=False):
        """Post process monoloco to output final dictionary with all information for visualizations"""

        dic_out = defaultdict(list)
        if outputs is None:
        if dic_in is None:
            return dic_out

        if dic_gt:
            boxes_gt, dds_gt = dic_gt['boxes'], dic_gt['dds']
            matches = get_iou_matches(boxes, boxes_gt, thresh=iou_min)
            print("found {} matches with ground-truth".format(len(matches)))
        else:
            matches = [(idx, idx) for idx, _ in enumerate(boxes)]  # Replicate boxes
            boxes_gt = dic_gt['boxes']
            dds_gt = [el[3] for el in dic_gt['ys']]
            matches = get_iou_matches(boxes, boxes_gt, iou_min=iou_min)
            dic_out['gt'] = [True]
            if verbose:
                print("found {} matches with ground-truth".format(len(matches)))

            # Keep track of instances non-matched
            idxs_matches = [el[0] for el in matches]
            not_matches = [idx for idx, _ in enumerate(boxes) if idx not in idxs_matches]

        else:
            matches = []
            not_matches = list(range(len(boxes)))
            if verbose:
                print("NO ground-truth associated")

        if reorder and matches:
            matches = reorder_matches(matches, boxes, mode='left_right')

        all_idxs = [idx for idx, _ in matches] + not_matches
        dic_out['gt'] = [True]*len(matches) + [False]*len(not_matches)

        matches = reorder_matches(matches, boxes, mode='left_right')
        uv_shoulders = get_keypoints(keypoints, mode='shoulder')
        uv_heads = get_keypoints(keypoints, mode='head')
        uv_centers = get_keypoints(keypoints, mode='center')
        xy_centers = pixel_to_camera(uv_centers, kk, 1)

        # Match with ground truth if available
        for idx, idx_gt in matches:
            dd_pred = float(outputs[idx][0])
            ale = float(outputs[idx][1])
            var_y = float(varss[idx])
            dd_real = dds_gt[idx_gt] if dic_gt else dd_pred

        # Add all the predicted annotations, starting with the ones that match a ground-truth
        for idx in all_idxs:
            kps = keypoints[idx]
            box = boxes[idx]
            dd_pred = float(dic_in['d'][idx])
            bi = float(dic_in['bi'][idx])
            var_y = float(dic_in['epi'][idx])
            uu_s, vv_s = uv_shoulders.tolist()[idx][0:2]
            uu_c, vv_c = uv_centers.tolist()[idx][0:2]
            uu_h, vv_h = uv_heads.tolist()[idx][0:2]
            uv_shoulder = [round(uu_s), round(vv_s)]
            uv_center = [round(uu_c), round(vv_c)]
            xyz_real = xyz_from_distance(dd_real, xy_centers[idx])
            xyz_pred = xyz_from_distance(dd_pred, xy_centers[idx])
            uv_head = [round(uu_h), round(vv_h)]
            xyz_pred = xyz_from_distance(dd_pred, xy_centers[idx])[0]
            distance = math.sqrt(float(xyz_pred[0])**2 + float(xyz_pred[1])**2 + float(xyz_pred[2])**2)
            conf = 0.035 * (box[-1]) / (bi / distance)

            dic_out['boxes'].append(box)
            dic_out['boxes_gt'].append(boxes_gt[idx_gt] if dic_gt else boxes[idx])
            dic_out['dds_real'].append(dd_real)
            dic_out['confs'].append(conf)
            dic_out['dds_pred'].append(dd_pred)
            dic_out['stds_ale'].append(ale)
            dic_out['stds_ale'].append(bi)
            dic_out['stds_epi'].append(var_y)
            dic_out['xyz_real'].append(xyz_real.squeeze().tolist())

            dic_out['xyz_pred'].append(xyz_pred.squeeze().tolist())
            dic_out['uv_kps'].append(kps)
            dic_out['uv_centers'].append(uv_center)
            dic_out['uv_shoulders'].append(uv_shoulder)
            dic_out['uv_heads'].append(uv_head)

            # For MonStereo / MonoLoco++
            try:
                dic_out['angles'].append(float(dic_in['yaw'][0][idx]))  # Predicted angle
                dic_out['angles_egocentric'].append(float(dic_in['yaw'][1][idx]))  # Egocentric angle
            except KeyError:
                continue

            # Only for MonStereo
            try:
                dic_out['aux'].append(float(dic_in['aux'][idx]))
            except KeyError:
                continue

        for idx, idx_gt in matches:
            dd_real = dds_gt[idx_gt]
            xyz_real = xyz_from_distance(dd_real, xy_centers[idx])
            dic_out['dds_real'].append(dd_real)
            dic_out['boxes_gt'].append(boxes_gt[idx_gt])
            dic_out['xyz_real'].append(xyz_real.squeeze().tolist())
        return dic_out

    @staticmethod
    def social_distance(dic_out, args):

        angles = dic_out['angles']
        dds = dic_out['dds_pred']
        stds = dic_out['stds_ale']
        xz_centers = [[xx[0], xx[2]] for xx in dic_out['xyz_pred']]

        # Prepare color for social distancing
        dic_out['social_distance'] = [bool(social_interactions(idx, xz_centers, angles, dds,
                                                               stds=stds,
                                                               threshold_prob=args.threshold_prob,
                                                               threshold_dist=args.threshold_dist,
                                                               radii=args.radii))
                                      for idx, _ in enumerate(dic_out['xyz_pred'])]
        return dic_out


def median_disparity(dic_out, keypoints, keypoints_r, mask):
    """
    Ablation study: whenever a matching is found, compute depth by median disparity instead of using MonSter
    Filters are applied to mask NaN joints and remove outlier disparities with IQR
    The mask input is used to filter the all-vs-all approach
    """
    import numpy as np
    from ..utils import mask_joint_disparity

    keypoints = keypoints.cpu().numpy()
    keypoints_r = keypoints_r.cpu().numpy()
    mask = mask.cpu().numpy()
    avg_disparities, _, _ = mask_joint_disparity(keypoints, keypoints_r)
    BF = 0.54 * 721
    for idx, aux in enumerate(dic_out['aux']):
        if aux > 0.5:
            idx_r = np.argmax(mask[idx])
            z = BF / avg_disparities[idx][idx_r]
            if 1 < z < 80:
                dic_out['xyzd'][idx][2] = z
                dic_out['xyzd'][idx][3] = torch.norm(dic_out['xyzd'][idx][0:3])
    return dic_out

@ -1,14 +1,49 @@

import json
import os
import logging

import numpy as np
import torch
import torchvision

from ..utils import get_keypoints, pixel_to_camera
from ..utils import get_keypoints, pixel_to_camera, to_cartesian, back_correct_angles

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

BF = 0.54 * 721
z_min = 4
z_max = 60
D_MIN = BF / z_max
D_MAX = BF / z_min
Sx = 7.2  # nuScenes sensor size (mm)
Sy = 5.4  # nuScenes sensor size (mm)


def preprocess_monoloco(keypoints, kk):
def preprocess_monstereo(keypoints, keypoints_r, kk):
    """
    Combine left and right keypoints in all-vs-all settings
    """
    clusters = []
    inputs_l = preprocess_monoloco(keypoints, kk)
    inputs_r = preprocess_monoloco(keypoints_r, kk)

    inputs = torch.empty((0, 68)).to(inputs_l.device)
    for idx, inp_l in enumerate(inputs_l.split(1)):
        clst = 0
        # inp_l = torch.cat((inp_l, cat[:, idx:idx+1]), dim=1)
        for idx_r, inp_r in enumerate(inputs_r.split(1)):
            # if D_MIN < avg_disparities[idx_r] < D_MAX:  # Check the range of disparities
            inp_r = inputs_r[idx_r, :]
            inp = torch.cat((inp_l, inp_l - inp_r), dim=1)  # (1,68)
            inputs = torch.cat((inputs, inp), dim=0)
            clst += 1
        clusters.append(clst)
    return inputs, clusters

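A shape sketch for the all-vs-all pairing above, with assumed toy sizes: m_l left and m_r right instances produce m_l * m_r stereo inputs of dimension 68, and clusters records m_r once per left instance.

```python
import torch

m_l, m_r = 3, 2
inputs_l = torch.randn(m_l, 34)   # stands in for preprocess_monoloco output
inputs_r = torch.randn(m_r, 34)

rows = []
for inp_l in inputs_l:
    for inp_r in inputs_r:
        rows.append(torch.cat((inp_l, inp_l - inp_r)))  # 34 + 34 = 68
inputs = torch.stack(rows)
print(inputs.shape)               # torch.Size([6, 68]); clusters would be [2, 2, 2]
```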
def preprocess_monoloco(keypoints, kk, zero_center=False):

    """ Preprocess batches of inputs
    keypoints = torch tensors of (m, 3, 17) or list [3,17]
@ -22,46 +57,51 @@ def preprocess_monoloco(keypoints, kk):
    uv_center = get_keypoints(keypoints, mode='center')
    xy1_center = pixel_to_camera(uv_center, kk, 10)
    xy1_all = pixel_to_camera(keypoints[:, 0:2, :], kk, 10)
    kps_norm = xy1_all - xy1_center.unsqueeze(1)  # (m, 17, 3) - (m, 1, 3)
    if zero_center:
        kps_norm = xy1_all - xy1_center.unsqueeze(1)  # (m, 17, 3) - (m, 1, 3)
    else:
        kps_norm = xy1_all
    kps_out = kps_norm[:, :, 0:2].reshape(kps_norm.size()[0], -1)  # no contiguous for view
    # kps_out = torch.cat((kps_out, keypoints[:, 2, :]), dim=1)
    return kps_out

def factory_for_gt(im_size, name=None, path_gt=None):
def factory_for_gt(im_size, focal_length=5.7, name=None, path_gt=None, verbose=True):
    """Look for the ground-truth annotations file and define the calibration matrix based on the image size"""

    try:
        with open(path_gt, 'r') as f:
            dic_names = json.load(f)
        print('-' * 120 + "\nGround-truth file opened")
        if verbose:
            logger.info('-' * 120 + "\nGround-truth file opened")
    except (FileNotFoundError, TypeError):
        print('-' * 120 + "\nGround-truth file not found")
        if verbose:
            logger.info('-' * 120 + "\nGround-truth file not found")
        dic_names = {}

    try:
        kk = dic_names[name]['K']
        dic_gt = dic_names[name]
        print("Matched ground-truth file!")
        if verbose:
            logger.info("Matched ground-truth file!")
    except KeyError:
        dic_gt = None
        x_factor = im_size[0] / 1600
        y_factor = im_size[1] / 900
        pixel_factor = (x_factor + y_factor) / 2  # TODO remove and check it
        if im_size[0] / im_size[1] > 2.5:
        if im_size[0] / im_size[1] > 2.5:  # KITTI default
            kk = [[718.3351, 0., 600.3891], [0., 718.3351, 181.5122], [0., 0., 1.]]  # Kitti calibration
        else:
            kk = [[1266.4 * pixel_factor, 0., 816.27 * x_factor],
                  [0, 1266.4 * pixel_factor, 491.5 * y_factor],
                  [0., 0., 1.]]  # nuScenes calibration

            print("Using a standard calibration matrix...")
        else:  # nuScenes camera parameters
            kk = [
                [im_size[0]*focal_length/Sx, 0., im_size[0]/2],
                [0., im_size[1]*focal_length/Sy, im_size[1]/2],
                [0., 0., 1.]]
        if verbose:
            logger.info("Using a standard calibration matrix...")

    return kk, dic_gt

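The nuScenes branch above uses the pinhole relation f_px = width_px * f_mm / S_mm. With the assumed defaults (5.7 mm focal length, 7.2 x 5.4 mm sensor), a 1600 x 900 image yields a horizontal focal length close to the 1266.4 px that was previously hard-coded:

```python
# Pinhole focal length in pixels from focal length and sensor size (mm).
width, height = 1600, 900
focal_length, Sx, Sy = 5.7, 7.2, 5.4   # assumed nuScenes values from above

fx = width * focal_length / Sx         # ~1266.7 px
fy = height * focal_length / Sy        # ~950.0 px
cx, cy = width / 2, height / 2         # principal point at the image center
print(fx, fy, cx, cy)
```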
def laplace_sampling(outputs, n_samples):

    # np.random.seed(1)
    torch.manual_seed(1)
    mu = outputs[:, 0]
    bi = torch.abs(outputs[:, 1])

@ -83,14 +123,38 @@ def laplace_sampling(outputs, n_samples):
    return xx

def unnormalize_bi(outputs):
    """Unnormalize relative bi of a numpy array"""
def unnormalize_bi(loc):
    """
    Unnormalize the relative bi of a tensor
    Input --> tensor of (m, 2)
    """
    assert loc.size()[1] == 2, "size of the output tensor should be (m, 2)"
    bi = torch.exp(loc[:, 1:2]) * loc[:, 0:1]

    outputs[:, 1] = torch.exp(outputs[:, 1]) * outputs[:, 0]
    return outputs
    return bi

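The network predicts the Laplace scale as a log-ratio relative to the distance, so the absolute spread grows with depth: b = exp(b_rel) * d. A small check with assumed outputs:

```python
import torch

loc = torch.tensor([[10.0, -2.3],    # d = 10 m, relative log-spread -2.3
                    [40.0, -2.3]])   # same relative spread at 40 m
bi = torch.exp(loc[:, 1:2]) * loc[:, 0:1]
print(bi.squeeze().tolist())         # ~[1.0, 4.0] m: uncertainty scales with distance
```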
def preprocess_pifpaf(annotations, im_size=None):
def preprocess_mask(dir_ann, basename, mode='left'):

    dir_ann = os.path.join(os.path.split(dir_ann)[0], 'mask')
    if mode == 'left':
        path_ann = os.path.join(dir_ann, basename + '.json')
    elif mode == 'right':
        path_ann = os.path.join(dir_ann + '_right', basename + '.json')

    from ..utils import open_annotations
    dic = open_annotations(path_ann)
    if isinstance(dic, list):
        return [], []

    keypoints = []
    for kps in dic['keypoints']:
        kps = prepare_pif_kps(np.array(kps).reshape(51,).tolist())
        keypoints.append(kps)
    return dic['boxes'], keypoints

def preprocess_pifpaf(annotations, im_size=None, enlarge_boxes=True, min_conf=0.):
    """
    Preprocess pif annotations:
    1. enlarge the box by 10%
@ -99,19 +163,32 @@ def preprocess_pifpaf(annotations, im_size=None):

    boxes = []
    keypoints = []
    enlarge = 1 if enlarge_boxes else 2  # Avoid enlarging boxes for social distancing

    for dic in annotations:
        box = dic['bbox']
        if box[3] < 0.5:  # Check for no detections (boxes 0,0,0,0)
            return [], []

        kps = prepare_pif_kps(dic['keypoints'])
        conf = float(np.sort(np.array(kps[2]))[-3])  # The confidence is the 3rd highest value for the keypoints
        box = dic['bbox']
        try:
            conf = dic['score']
            # Enlarge boxes
            delta_h = (box[3]) / (10 * enlarge)
            delta_w = (box[2]) / (5 * enlarge)
            # from width-height to corners
            box[2] += box[0]
            box[3] += box[1]

        except KeyError:
            all_confs = np.array(kps[2])
            score_weights = np.ones(17)
            score_weights[:3] = 3.0
            score_weights[5:] = 0.1
            # conf = np.sum(score_weights * np.sort(all_confs)[::-1])
            conf = float(np.mean(all_confs))
            # Add 15% for y and 20% for x
            delta_h = (box[3] - box[1]) / (7 * enlarge)
            delta_w = (box[2] - box[0]) / (3.5 * enlarge)
            assert delta_h > -5 and delta_w > -5, "Bounding box <=0"

        # Add 15% for y and 20% for x
        delta_h = (box[3] - box[1]) / 7
        delta_w = (box[2] - box[0]) / 3.5
        assert delta_h > -5 and delta_w > -5, "Bounding box <=0"
        box[0] -= delta_w
        box[1] -= delta_h
        box[2] += delta_w
@ -124,9 +201,10 @@ def preprocess_pifpaf(annotations, im_size=None):
        box[2] = min(box[2], im_size[0])
        box[3] = min(box[3], im_size[1])

        box.append(conf)
        boxes.append(box)
        keypoints.append(kps)
        if conf >= min_conf:
            box.append(conf)
            boxes.append(box)
            keypoints.append(kps)

    return boxes, keypoints

@ -150,3 +228,135 @@ def image_transform(image):
    )
    transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor(), normalize, ])
    return transforms(image)

def extract_outputs(outputs, tasks=()):
    """
    Extract the outputs for multi-task training and predictions
    Inputs:
        tensor (m, 10) or (m, 9) if monoloco
    Outputs:
         - if tasks are provided return an ordered list of raw tensors
         - else return a dictionary with processed outputs
    """
    dic_out = {'x': outputs[:, 0:1],
               'y': outputs[:, 1:2],
               'd': outputs[:, 2:4],
               'h': outputs[:, 4:5],
               'w': outputs[:, 5:6],
               'l': outputs[:, 6:7],
               'ori': outputs[:, 7:9]}

    if outputs.shape[1] == 10:
        dic_out['aux'] = outputs[:, 9:10]

    # Multi-task training
    if len(tasks) >= 1:
        assert isinstance(tasks, tuple), "tasks need to be a tuple"
        return [dic_out[task] for task in tasks]

    # Preprocess the tensor
    # AV_H, AV_W, AV_L, HWL_STD = 1.72, 0.75, 0.68, 0.1
    bi = unnormalize_bi(dic_out['d'])
    dic_out['bi'] = bi

    dic_out = {key: el.detach().cpu() for key, el in dic_out.items()}
    x = to_cartesian(outputs[:, 0:3].detach().cpu(), mode='x')
    y = to_cartesian(outputs[:, 0:3].detach().cpu(), mode='y')
    d = dic_out['d'][:, 0:1]
    z = torch.sqrt(d**2 - x**2 - y**2)
    dic_out['xyzd'] = torch.cat((x, y, z, d), dim=1)
    dic_out.pop('d')
    dic_out.pop('x')
    dic_out.pop('y')
    dic_out['d'] = d

    yaw_pred = torch.atan2(dic_out['ori'][:, 0:1], dic_out['ori'][:, 1:2])
    yaw_orig = back_correct_angles(yaw_pred, dic_out['xyzd'][:, 0:3])
    dic_out['yaw'] = (yaw_pred, yaw_orig)  # alpha, ry

    if outputs.shape[1] == 10:
        dic_out['aux'] = torch.sigmoid(dic_out['aux'])
    return dic_out

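The depth recovery above follows from d² = x² + y² + z²: the network regresses the horizontal and vertical offsets plus the full distance, and z is back-computed. A small check with assumed values:

```python
import torch

x = torch.tensor([[1.0]])
y = torch.tensor([[-0.5]])
d = torch.tensor([[10.0]])
z = torch.sqrt(d**2 - x**2 - y**2)
print(float(z))   # ~9.937 m, so xyzd = [1.0, -0.5, 9.937, 10.0]
```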
def extract_labels_aux(labels, tasks=None):

    dic_gt_out = {'aux': labels[:, 0:1]}

    if tasks is not None:
        assert isinstance(tasks, tuple), "tasks need to be a tuple"
        return [dic_gt_out[task] for task in tasks]

    dic_gt_out = {key: el.detach().cpu() for key, el in dic_gt_out.items()}
    return dic_gt_out


def extract_labels(labels, tasks=None):

    dic_gt_out = {'x': labels[:, 0:1], 'y': labels[:, 1:2], 'z': labels[:, 2:3], 'd': labels[:, 3:4],
                  'h': labels[:, 4:5], 'w': labels[:, 5:6], 'l': labels[:, 6:7],
                  'ori': labels[:, 7:9], 'aux': labels[:, 10:11]}

    if tasks is not None:
        assert isinstance(tasks, tuple), "tasks need to be a tuple"
        return [dic_gt_out[task] for task in tasks]

    dic_gt_out = {key: el.detach().cpu() for key, el in dic_gt_out.items()}
    return dic_gt_out

def cluster_outputs(outputs, clusters):
    """Cluster the outputs based on the number of right keypoints"""

    # Check for the "no right keypoints" condition
    if clusters == 0:
        clusters = max(1, round(outputs.shape[0] / 2))

    assert outputs.shape[0] % clusters == 0, "Unexpected number of inputs"
    outputs = outputs.view(-1, clusters, outputs.shape[1])
    return outputs


def filter_outputs(outputs):
    """Extract a single output for each left keypoint"""

    # Max of auxiliary task
    val = outputs[:, :, -1]
    best_val, _ = val.max(dim=1, keepdim=True)
    mask = val >= best_val
    output = outputs[mask]  # broadcasting happens only if 3rd dim not present
    return output, mask

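A shape sketch for the two helpers above, under assumed toy sizes (3 left instances, each with 2 right candidates):

```python
import torch

outputs = torch.randn(6, 10)                # 3 * 2 raw stereo outputs
clustered = outputs.view(-1, 2, 10)         # cluster_outputs result: (3, 2, 10)

val = clustered[:, :, -1]                   # auxiliary association score
best_val, _ = val.max(dim=1, keepdim=True)
mask = val >= best_val                      # one True per left instance (barring ties)
print(clustered[mask].shape)                # torch.Size([3, 10])
```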
def extract_outputs_mono(outputs, tasks=None):
    """
    Extract the outputs for monocular (single-camera) predictions
    Inputs:
        tensor (m, 10) or (m, 9) if monoloco
    Outputs:
         - if tasks are provided return an ordered list of raw tensors
         - else return a dictionary with processed outputs
    """
    dic_out = {'xyz': outputs[:, 0:3], 'zb': outputs[:, 2:4],
               'h': outputs[:, 4:5], 'w': outputs[:, 5:6], 'l': outputs[:, 6:7], 'ori': outputs[:, 7:9]}

    # Multi-task training
    if tasks is not None:
        assert isinstance(tasks, tuple), "tasks need to be a tuple"
        return [dic_out[task] for task in tasks]

    # Preprocess the tensor
    bi = unnormalize_bi(dic_out['zb'])

    dic_out = {key: el.detach().cpu() for key, el in dic_out.items()}
    dd = torch.norm(dic_out['xyz'], p=2, dim=1).view(-1, 1)
    dic_out['xyzd'] = torch.cat((dic_out['xyz'], dd), dim=1)

    dic_out['d'], dic_out['bi'] = dd, bi

    yaw_pred = torch.atan2(dic_out['ori'][:, 0:1], dic_out['ori'][:, 1:2])
    yaw_orig = back_correct_angles(yaw_pred, dic_out['xyzd'][:, 0:3])

    dic_out['yaw'] = (yaw_pred, yaw_orig)  # alpha, ry
    return dic_out

@ -1,123 +1,190 @@
# pylint: disable=too-many-statements, too-many-branches, undefined-loop-variable

"""
Adapted from https://github.com/vita-epfl/openpifpaf/blob/master/openpifpaf/predict.py
"""

import os
import glob
import json
import logging
from collections import defaultdict


import torch
from PIL import Image
from openpifpaf import show
import PIL
import openpifpaf
import openpifpaf.datasets as datasets
from openpifpaf.predict import processor_factory, preprocess_factory
from openpifpaf import decoder, network, visualizer, show, logger

from .visuals.printer import Printer
from .network import PifPaf, ImageList, MonoLoco
from .network import Loco
from .network.process import factory_for_gt, preprocess_pifpaf
from .activity import show_social

LOG = logging.getLogger(__name__)

OPENPIFPAF_PATH = 'data/models/shufflenetv2k30-201104-224654-cocokp-d75ed641.pkl'  # Default model


def factory_from_args(args):

    # Data
    if args.glob:
        args.images += glob.glob(args.glob)
    if not args.images:
        raise Exception("no image files given")

    # Model
    if not args.checkpoint:
        if os.path.exists(OPENPIFPAF_PATH):
            args.checkpoint = OPENPIFPAF_PATH
        else:
            LOG.info("Checkpoint for OpenPifPaf not specified and default model not found in 'data/models'. "
                     "Using a ShuffleNet backbone")
            args.checkpoint = 'shufflenetv2k30'

    logger.configure(args, LOG)  # logger first

    # Devices
    args.device = torch.device('cpu')
    args.pin_memory = False
    if torch.cuda.is_available():
        args.device = torch.device('cuda')
        args.pin_memory = True
    LOG.debug('neural network device: %s', args.device)

    # Add visualization defaults
    args.figure_width = 10
    args.dpi_factor = 1.0

    if args.net == 'monstereo':
        args.batch_size = 2
    else:
        args.batch_size = 1

    # Make default pifpaf argument
    args.force_complete_pose = True
    LOG.info("Force complete pose is active")

    # Configure
    decoder.configure(args)
    network.Factory.configure(args)
    show.configure(args)
    visualizer.configure(args)

    return args

def predict(args):

    cnt = 0
    args = factory_from_args(args)

    # load pifpaf and monoloco models
    pifpaf = PifPaf(args)
    monoloco = MonoLoco(model=args.model, device=args.device, n_dropout=args.n_dropout, p_dropout=args.dropout)
    # Load models
    assert args.net in ('monoloco_pp', 'monstereo', 'pifpaf')
    if args.net in ('monoloco_pp', 'monstereo'):
        net = Loco(model=args.model, net=args.net, device=args.device, n_dropout=args.n_dropout, p_dropout=args.dropout)

    # data
    data = ImageList(args.images, scale=args.scale)
    processor, model = processor_factory(args)
    preprocess = preprocess_factory(args)

    # data
    data = datasets.ImageList(args.images, preprocess=preprocess)
    if args.net == 'monstereo':
        assert len(data.image_paths) % 2 == 0, "Odd number of images in a stereo setting"

    data_loader = torch.utils.data.DataLoader(
        data, batch_size=1, shuffle=False,
        pin_memory=args.pin_memory, num_workers=args.loader_workers)
        data, batch_size=args.batch_size, shuffle=False,
        pin_memory=False, collate_fn=datasets.collate_images_anns_meta)

    for idx, (image_paths, image_tensors, processed_images_cpu) in enumerate(data_loader):
        images = image_tensors.permute(0, 2, 3, 1)
    for batch_i, (image_tensors_batch, _, meta_batch) in enumerate(data_loader):
        pred_batch = processor.batch(model, image_tensors_batch, device=args.device)

        processed_images = processed_images_cpu.to(args.device, non_blocking=True)
        fields_batch = pifpaf.fields(processed_images)

        # unbatch
        for image_path, image, processed_image_cpu, fields in zip(
                image_paths, images, processed_images_cpu, fields_batch):
        # unbatch (only for MonStereo)
        for idx, (pred, meta) in enumerate(zip(pred_batch, meta_batch)):
            LOG.info('batch %d: %s', batch_i, meta['file_name'])
            pred = [ann.inverse_transform(meta) for ann in pred]

            if args.output_directory is None:
                output_path = image_path
                splits = os.path.split(meta['file_name'])
                output_path = os.path.join(splits[0], 'out_' + splits[1])
            else:
                file_name = os.path.basename(image_path)
                output_path = os.path.join(args.output_directory, file_name)
                print('image', idx, image_path, output_path)
                file_name = os.path.basename(meta['file_name'])
                output_path = os.path.join(args.output_directory, 'out_' + file_name)
            print('image', batch_i, meta['file_name'], output_path)

            keypoint_sets, scores, pifpaf_out = pifpaf.forward(image, processed_image_cpu, fields)
            pifpaf_outputs = [keypoint_sets, scores, pifpaf_out]  # keypoints_sets and scores for pifpaf printing
            images_outputs = [image]  # List of 1 or 2 elements with pifpaf tensor (resized) and monoloco original image
            if idx == 0:
                with open(meta_batch[0]['file_name'], 'rb') as f:
                    cpu_image = PIL.Image.open(f).convert('RGB')
                pifpaf_outs = {
                    'pred': pred,
                    'left': [ann.json_data() for ann in pred],
                    'image': cpu_image}
            else:
                pifpaf_outs['right'] = [ann.json_data() for ann in pred]

        if 'monoloco' in args.networks:
            im_size = (float(image.size()[1] / args.scale),
                       float(image.size()[0] / args.scale))  # Width, Height (original)
        # 3D Predictions
        if args.net in ('monoloco_pp', 'monstereo'):

            # Extract calibration matrix and ground truth file if present
            with open(image_path, 'rb') as f:
                pil_image = Image.open(f).convert('RGB')
                images_outputs.append(pil_image)
            im_name = os.path.basename(meta['file_name'])
            im_size = (cpu_image.size[0], cpu_image.size[1])  # Original
            kk, dic_gt = factory_for_gt(im_size, focal_length=args.focal, name=im_name, path_gt=args.path_gt)

            im_name = os.path.basename(image_path)
            # Preprocess pifpaf outputs and run monoloco
            boxes, keypoints = preprocess_pifpaf(pifpaf_outs['left'], im_size, enlarge_boxes=False)

            kk, dic_gt = factory_for_gt(im_size, name=im_name, path_gt=args.path_gt)

            # Preprocess pifpaf outputs and run monoloco
            boxes, keypoints = preprocess_pifpaf(pifpaf_out, im_size)
            outputs, varss = monoloco.forward(keypoints, kk)
            dic_out = monoloco.post_process(outputs, varss, boxes, keypoints, kk, dic_gt)
            if args.net == 'monoloco_pp':
                LOG.info("Prediction with MonoLoco++")
                dic_out = net.forward(keypoints, kk)
                dic_out = net.post_process(dic_out, boxes, keypoints, kk, dic_gt)
                if args.social_distance:
                    dic_out = net.social_distance(dic_out, args)

            else:
                dic_out = None
                kk = None
                LOG.info("Prediction with MonStereo")
                boxes_r, keypoints_r = preprocess_pifpaf(pifpaf_outs['right'], im_size)
                dic_out = net.forward(keypoints, kk, keypoints_r=keypoints_r)
                dic_out = net.post_process(dic_out, boxes, keypoints, kk, dic_gt)

            factory_outputs(args, images_outputs, output_path, pifpaf_outputs, dic_out=dic_out, kk=kk)
            print('Image {}\n'.format(cnt) + '-' * 120)
            cnt += 1
        else:
            dic_out = defaultdict(list)
            kk = None

        # Outputs
        factory_outputs(args, pifpaf_outs, dic_out, output_path, kk=kk)
        LOG.info('Image {}\n'.format(cnt) + '-' * 120)
        cnt += 1

def factory_outputs(args, images_outputs, output_path, pifpaf_outputs, dic_out=None, kk=None):
def factory_outputs(args, pifpaf_outs, dic_out, output_path, kk=None):
"""Output json files or images according to the choice"""

# Save json file
if 'pifpaf' in args.networks:
keypoint_sets, scores, pifpaf_out = pifpaf_outputs[:]
# Verify conflicting options
if any((xx in args.output_types for xx in ['front', 'bird', 'multi'])):
assert args.net != 'pifpaf', "please use pifpaf original arguments"
if args.social_distance:
assert args.net == 'monoloco_pp', "Social distancing only works with MonoLoco++ network"

# Visualizer
keypoint_painter = show.KeypointPainter(show_box=False)
skeleton_painter = show.KeypointPainter(show_box=False, color_connections=True,
markersize=1, linewidth=4)
if args.net == 'pifpaf':
annotation_painter = openpifpaf.show.AnnotationPainter()
with openpifpaf.show.image_canvas(pifpaf_outs['image'], output_path) as ax:
annotation_painter.annotations(ax, pifpaf_outs['pred'])

if 'json' in args.output_types and keypoint_sets.size > 0:
with open(output_path + '.pifpaf.json', 'w') as f:
json.dump(pifpaf_out, f)
elif any((xx in args.output_types for xx in ['front', 'bird', 'multi'])):
LOG.info(output_path)
if args.social_distance:
show_social(args, pifpaf_outs['image'], output_path, pifpaf_outs['left'], dic_out)
else:
printer = Printer(pifpaf_outs['image'], output_path, kk, args)
figures, axes = printer.factory_axes(dic_out)
printer.draw(figures, axes, pifpaf_outs['image'])

if 'keypoints' in args.output_types:
with show.image_canvas(images_outputs[0],
output_path + '.keypoints.png',
show=args.show,
fig_width=args.figure_width,
dpi_factor=args.dpi_factor) as ax:
keypoint_painter.keypoints(ax, keypoint_sets)
elif 'json' in args.output_types:
with open(os.path.join(output_path + '.monoloco.json'), 'w') as ff:
json.dump(dic_out, ff)

if 'skeleton' in args.output_types:
with show.image_canvas(images_outputs[0],
output_path + '.skeleton.png',
show=args.show,
fig_width=args.figure_width,
dpi_factor=args.dpi_factor) as ax:
skeleton_painter.keypoints(ax, keypoint_sets, scores=scores)

if 'monoloco' in args.networks:
if any((xx in args.output_types for xx in ['front', 'bird', 'combined'])):
epistemic = False
if args.n_dropout > 0:
epistemic = True

if dic_out['boxes']: # Only print in case of detections
printer = Printer(images_outputs[1], output_path, kk, output_types=args.output_types
, z_max=args.z_max, epistemic=epistemic)
figures, axes = printer.factory_axes()
printer.draw(figures, axes, dic_out, images_outputs[1], draw_box=args.draw_box,
save=True, show=args.show)

if 'json' in args.output_types:
with open(os.path.join(output_path + '.monoloco.json'), 'w') as ff:
json.dump(dic_out, ff)
else:
LOG.info("No output saved, please select one among front, bird, multi, or pifpaf options")

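As a reference for the dispatch above, here is a minimal sketch of the two option checks replicated outside the repository function (the `validate` helper is hypothetical; the assertions are copied from `factory_outputs`):

```
from argparse import Namespace

def validate(args):
    # Same checks as in factory_outputs: 3D outputs need a 3D network,
    # and social distancing additionally needs MonoLoco++
    if any(xx in args.output_types for xx in ['front', 'bird', 'multi']):
        assert args.net != 'pifpaf', "please use pifpaf original arguments"
    if args.social_distance:
        assert args.net == 'monoloco_pp', "Social distancing only works with MonoLoco++ network"

validate(Namespace(net='monoloco_pp', output_types=['multi'], social_distance=True))  # passes
```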
349
monoloco/prep/prep_kitti.py
Normal file
@ -0,0 +1,349 @@
# pylint: disable=too-many-statements, too-many-branches, too-many-nested-blocks

"""Preprocess annotations with KITTI ground-truth"""

import os
import glob
import copy
import logging
from collections import defaultdict
import json
import datetime
from PIL import Image

import torch
import cv2

from ..utils import split_training, parse_ground_truth, get_iou_matches, append_cluster, factory_file, \
extract_stereo_matches, get_category, normalize_hwl, make_new_directory
from ..network.process import preprocess_pifpaf, preprocess_monoloco
from .transforms import flip_inputs, flip_labels, height_augmentation


class PreprocessKitti:
"""Prepare arrays with same format as nuScenes preprocessing but using ground truth txt files"""

dir_gt = os.path.join('data', 'kitti', 'gt')
dir_images = '/data/lorenzo-data/kitti/original_images/training/image_2'
dir_byc_l = '/data/lorenzo-data/kitti/object_detection/left'

# SOCIAL DISTANCING PARAMETERS
THRESHOLD_DIST = 2 # Threshold to check distance of people
RADII = (0.3, 0.5, 1) # expected radii of the o-space
SOCIAL_DISTANCE = True

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

dic_jo = {'train': dict(X=[], Y=[], names=[], kps=[], K=[],
clst=defaultdict(lambda: defaultdict(list))),
'val': dict(X=[], Y=[], names=[], kps=[], K=[],
clst=defaultdict(lambda: defaultdict(list))),
'test': dict(X=[], Y=[], names=[], kps=[], K=[],
clst=defaultdict(lambda: defaultdict(list)))}
dic_names = defaultdict(lambda: defaultdict(list))
dic_std = defaultdict(lambda: defaultdict(list))

def __init__(self, dir_ann, iou_min, monocular=False):

self.dir_ann = dir_ann
self.iou_min = iou_min
self.monocular = monocular
self.names_gt = tuple(os.listdir(self.dir_gt))
self.dir_kk = os.path.join('data', 'kitti', 'calib')
self.list_gt = glob.glob(self.dir_gt + '/*.txt')
assert os.path.exists(self.dir_gt), "Ground truth dir does not exist"
assert os.path.exists(self.dir_ann), "Annotation dir does not exist"

now = datetime.datetime.now()
now_time = now.strftime("%Y%m%d-%H%M")[2:]
dir_out = os.path.join('data', 'arrays')
self.path_joints = os.path.join(dir_out, 'joints-kitti-' + now_time + '.json')
self.path_names = os.path.join(dir_out, 'names-kitti-' + now_time + '.json')
path_train = os.path.join('splits', 'kitti_train.txt')
path_val = os.path.join('splits', 'kitti_val.txt')
self.set_train, self.set_val = split_training(self.names_gt, path_train, path_val)

def run(self):

cnt_match_l, cnt_match_r, cnt_pair, cnt_pair_tot, cnt_extra_pair, cnt_files, cnt_files_ped, cnt_fnf, \
cnt_tot, cnt_ambiguous, cnt_cyclist = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
cnt_mono = {'train': 0, 'val': 0, 'test': 0}
cnt_gt = cnt_mono.copy()
cnt_stereo = cnt_mono.copy()
correct_ped, correct_byc, wrong_ped, wrong_byc = 0, 0, 0, 0
cnt_30, cnt_less_30 = 0, 0

# self.names_gt = ('002282.txt',)
for name in self.names_gt:
path_gt = os.path.join(self.dir_gt, name)
basename, _ = os.path.splitext(name)
path_im = os.path.join(self.dir_images, basename + '.png')
phase, flag = self._factory_phase(name)
if flag:
cnt_fnf += 1
continue

if phase == 'train':
min_conf = 0
category = 'all'
else: # Remove for original results
min_conf = 0.1
category = 'pedestrian'

# Extract ground truth
boxes_gt, ys, _, _ = parse_ground_truth(path_gt, # pylint: disable=unbalanced-tuple-unpacking
category=category,
spherical=True)
cnt_gt[phase] += len(boxes_gt)
cnt_files += 1
cnt_files_ped += min(len(boxes_gt), 1) # if no boxes 0 else 1

# Extract keypoints
path_calib = os.path.join(self.dir_kk, basename + '.txt')
annotations, kk, tt = factory_file(path_calib, self.dir_ann, basename)

self.dic_names[basename + '.png']['boxes'] = copy.deepcopy(boxes_gt)
self.dic_names[basename + '.png']['ys'] = copy.deepcopy(ys)
self.dic_names[basename + '.png']['K'] = copy.deepcopy(kk)

# Check image size
with Image.open(path_im) as im:
width, height = im.size

boxes, keypoints = preprocess_pifpaf(annotations, im_size=(width, height), min_conf=min_conf)

if keypoints:
annotations_r, kk_r, tt_r = factory_file(path_calib, self.dir_ann, basename, mode='right')
boxes_r, keypoints_r = preprocess_pifpaf(annotations_r, im_size=(width, height), min_conf=min_conf)
cat = get_category(keypoints, os.path.join(self.dir_byc_l, basename + '.json'))

if not keypoints_r: # Case of no detection
all_boxes_gt, all_ys = [boxes_gt], [ys]
boxes_r, keypoints_r = boxes[0:1].copy(), keypoints[0:1].copy()
all_boxes, all_keypoints = [boxes], [keypoints]
all_keypoints_r = [keypoints_r]
else:

# Horizontal flipping for training
if phase == 'train':
# GT
boxes_gt_flip, ys_flip = flip_labels(boxes_gt, ys, im_w=width)
# New left
boxes_flip = flip_inputs(boxes_r, im_w=width, mode='box')
keypoints_flip = flip_inputs(keypoints_r, im_w=width)

# New right
keypoints_r_flip = flip_inputs(keypoints, im_w=width)

# Combine the 2 modes
all_boxes_gt = [boxes_gt, boxes_gt_flip]
all_ys = [ys, ys_flip]
all_boxes = [boxes, boxes_flip]
all_keypoints = [keypoints, keypoints_flip]
all_keypoints_r = [keypoints_r, keypoints_r_flip]

else:
all_boxes_gt, all_ys = [boxes_gt], [ys]
all_boxes, all_keypoints = [boxes], [keypoints]
all_keypoints_r = [keypoints_r]

# Match each set of keypoints with a ground truth
self.dic_jo[phase]['K'].append(kk)
for ii, boxes_gt in enumerate(all_boxes_gt):
keypoints, keypoints_r = torch.tensor(all_keypoints[ii]), torch.tensor(all_keypoints_r[ii])
ys = all_ys[ii]
matches = get_iou_matches(all_boxes[ii], boxes_gt, self.iou_min)
for (idx, idx_gt) in matches:
keypoint = keypoints[idx:idx + 1]
lab = ys[idx_gt][:-1]

# Preprocess MonoLoco++
if self.monocular:
inp = preprocess_monoloco(keypoint, kk).view(-1).tolist()
lab = normalize_hwl(lab)
if ys[idx_gt][10] < 0.5:
self.dic_jo[phase]['kps'].append(keypoint.tolist())
self.dic_jo[phase]['X'].append(inp)
self.dic_jo[phase]['Y'].append(lab)
self.dic_jo[phase]['names'].append(name) # One image name for each annotation
append_cluster(self.dic_jo, phase, inp, lab, keypoint.tolist())
cnt_mono[phase] += 1
cnt_tot += 1

# Preprocess MonStereo
else:
zz = ys[idx_gt][2]
stereo_matches, cnt_amb = extract_stereo_matches(keypoint, keypoints_r, zz,
phase=phase, seed=cnt_pair_tot)
cnt_match_l += 1 if ii < 0.1 else 0 # matched instances
cnt_match_r += 1 if ii > 0.9 else 0
cnt_ambiguous += cnt_amb

# Monitor precision of classes
if phase == 'val':
if ys[idx_gt][10] == cat[idx] == 1:
correct_byc += 1
elif ys[idx_gt][10] == cat[idx] == 0:
correct_ped += 1
elif ys[idx_gt][10] != cat[idx] and ys[idx_gt][10] == 1:
wrong_byc += 1
elif ys[idx_gt][10] != cat[idx] and ys[idx_gt][10] == 0:
wrong_ped += 1

cnt_cyclist += 1 if ys[idx_gt][10] == 1 else 0

for num, (idx_r, s_match) in enumerate(stereo_matches):
label = ys[idx_gt][:-1] + [s_match]
if s_match > 0.9:
cnt_pair += 1

# Remove noise of very far instances for validation
# if (phase == 'val') and (ys[idx_gt][3] >= 50):
# continue

# ---> Save only positives unless there is no positive (keep positive flip and augm)
# if num > 0 and s_match < 0.9:
# continue

# Height augmentation
cnt_pair_tot += 1
cnt_extra_pair += 1 if ii == 1 else 0
flag_aug = False
if phase == 'train' and 3 < label[2] < 30 and s_match > 0.9:
flag_aug = True
elif phase == 'train' and 3 < label[2] < 30 and cnt_pair_tot % 2 == 0:
flag_aug = True

# Remove height augmentation
# flag_aug = False

if flag_aug:
kps_aug, labels_aug = height_augmentation(
keypoints[idx:idx+1], keypoints_r[idx_r:idx_r+1], label, s_match,
seed=cnt_pair_tot)
else:
kps_aug = [(keypoints[idx:idx+1], keypoints_r[idx_r:idx_r+1])]
labels_aug = [label]

for i, lab in enumerate(labels_aug):
(kps, kps_r) = kps_aug[i]
input_l = preprocess_monoloco(kps, kk).view(-1)
input_r = preprocess_monoloco(kps_r, kk).view(-1)
keypoint = torch.cat((kps, kps_r), dim=2).tolist()
inp = torch.cat((input_l, input_l - input_r)).tolist()

# Only relative distances
# inp_x = input[::2]
# inp = torch.cat((inp_x, input - input_r)).tolist()

# lab = normalize_hwl(lab)
if ys[idx_gt][10] < 0.5:
self.dic_jo[phase]['kps'].append(keypoint)
self.dic_jo[phase]['X'].append(inp)
self.dic_jo[phase]['Y'].append(lab)
self.dic_jo[phase]['names'].append(name) # One image name for each annotation
append_cluster(self.dic_jo, phase, inp, lab, keypoint)
cnt_tot += 1
if s_match > 0.9:
cnt_stereo[phase] += 1
else:
cnt_mono[phase] += 1

with open(self.path_joints, 'w') as file:
json.dump(self.dic_jo, file)
with open(os.path.join(self.path_names), 'w') as file:
json.dump(self.dic_names, file)

# Count
print(cnt_30)
print(cnt_less_30)
print('-' * 120)

print("Number of GT files: {}. Files with at least one pedestrian: {}. Files not found: {}"
.format(cnt_files, cnt_files_ped, cnt_fnf))
print("Ground truth matches: {:.1f}% for left images (train and val) and {:.1f}% for right images (train)"
.format(100*cnt_match_l / (cnt_gt['train'] + cnt_gt['val']), 100*cnt_match_r / cnt_gt['train']))
print("Total annotations: {}".format(cnt_tot))
print("Total number of cyclists: {}\n".format(cnt_cyclist))
print("Ambiguous instances removed: {}".format(cnt_ambiguous))
print("Extra pairs created with horizontal flipping: {}\n".format(cnt_extra_pair))

if not self.monocular:
print('Instances with stereo correspondence: {:.1f}% '.format(100 * cnt_pair / cnt_pair_tot))
for phase in ['train', 'val']:
cnt = cnt_mono[phase] + cnt_stereo[phase]
print("{}: annotations: {}. Stereo pairs {:.1f}% "
.format(phase.upper(), cnt, 100 * cnt_stereo[phase] / cnt))

print("\nOutput files:\n{}\n{}".format(self.path_names, self.path_joints))
print('-' * 120)

def prep_activity(self):
"""Augment ground-truth with flag activity"""

from monstereo.activity import social_interactions
main_dir = os.path.join('data', 'kitti')
dir_gt = os.path.join(main_dir, 'gt')
dir_out = os.path.join(main_dir, 'gt_activity')
make_new_directory(dir_out)
cnt_tp, cnt_tn = 0, 0

# Extract validation images for evaluation
category = 'pedestrian'

for name in self.set_val:
# Read
path_gt = os.path.join(dir_gt, name)
boxes_gt, ys, truncs_gt, occs_gt, lines = parse_ground_truth(path_gt, category, spherical=False,
verbose=True)
angles = [y[10] for y in ys]
dds = [y[4] for y in ys]
xz_centers = [[y[0], y[2]] for y in ys]

# Write
path_out = os.path.join(dir_out, name)
with open(path_out, "w+") as ff:
for idx, line in enumerate(lines):
if social_interactions(idx, xz_centers, angles, dds,
n_samples=1,
threshold_dist=self.THRESHOLD_DIST,
radii=self.RADII,
social_distance=self.SOCIAL_DISTANCE):
activity = '1'
cnt_tp += 1
else:
activity = '0'
cnt_tn += 1

line_new = line[:-1] + ' ' + activity + line[-1]
ff.write(line_new)

print(f'Written {len(self.set_val)} new files in {dir_out}')
print(f'Saved {cnt_tp} positive and {cnt_tn} negative annotations')

def _factory_phase(self, name):
"""Choose the phase"""
phase = None
flag = False
if name in self.set_train:
phase = 'train'
elif name in self.set_val:
phase = 'val'
else:
flag = True
return phase, flag


def crop_and_draw(im, box, keypoint):

box = [round(el) for el in box[:-1]]
center = (int((keypoint[0][0])), int((keypoint[1][0])))
radius = round((box[3]-box[1]) / 20)
im = cv2.circle(im, center, radius, color=(0, 255, 0), thickness=1)
crop = im[box[1]:box[3], box[0]:box[2]]
h_crop = crop.shape[0]
w_crop = crop.shape[1]

return crop, h_crop, w_crop
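For orientation, the joints file written by `run()` follows the layout of `dic_jo` defined above; a sketch with placeholder values (the numbers and shapes are illustrative, not taken from a real file):

```
# Illustrative layout of joints-kitti-<date>.json (placeholder values)
dic_jo = {
    'train': {
        'X': [[0.12, -0.03]],      # flattened keypoint inputs, one list per instance
        'Y': [[0.1, 0.02, 14.2]],  # ground-truth localization labels
        'names': ['000123.txt'],   # one source file name per annotation
        'kps': [[[0.0]]],          # raw keypoints kept for later analysis
        'K': [[0.0]],              # camera intrinsics, one per image
        'clst': {},                # the same data grouped by distance cluster
    },
    'val': {}, 'test': {},
}
```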
@ -1,9 +1,14 @@
# pylint: disable=too-many-statements, import-error

"""Extract joints annotations and match with nuScenes ground truths
"""

import os
import sys
import time
import math
import copy
import json
import logging
from collections import defaultdict
@ -12,15 +17,21 @@ import datetime
import numpy as np
from nuscenes.nuscenes import NuScenes
from nuscenes.utils import splits
from pyquaternion import Quaternion

from ..utils import get_iou_matches, append_cluster, select_categories, project_3d
from ..utils import get_iou_matches, append_cluster, select_categories, project_3d, correct_angle, normalize_hwl, \
to_spherical
from ..network.process import preprocess_pifpaf, preprocess_monoloco


class PreprocessNuscenes:
"""
Preprocess Nuscenes dataset
"""
"""Preprocess Nuscenes dataset"""
AV_W = 0.68
AV_L = 0.75
AV_H = 1.72
WLH_STD = 0.1
social = False

CAMERAS = ('CAM_FRONT', 'CAM_FRONT_LEFT', 'CAM_FRONT_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT')
dic_jo = {'train': dict(X=[], Y=[], names=[], kps=[], boxes_3d=[], K=[],
clst=defaultdict(lambda: defaultdict(list))),
@ -76,17 +87,25 @@ class PreprocessNuscenes:
while not current_token == "":
sample_dic = self.nusc.get('sample', current_token)
cnt_samples += 1

# if (cnt_samples % 4 == 0) and (cnt_ann < 3000):
# Extract all the sample_data tokens for each sample
for cam in self.CAMERAS:
sd_token = sample_dic['data'][cam]
cnt_sd += 1

# Extract all the annotations of the person
name, boxes_gt, boxes_3d, dds, kk = self.extract_from_token(sd_token)
path_im, boxes_obj, kk = self.nusc.get_sample_data(sd_token, box_vis_level=1) # At least one corner
boxes_gt, boxes_3d, ys = extract_ground_truth(boxes_obj, kk)
kk = kk.tolist()
name = os.path.basename(path_im)
basename, _ = os.path.splitext(name)

self.dic_names[basename + '.jpg']['boxes'] = copy.deepcopy(boxes_gt)
self.dic_names[basename + '.jpg']['ys'] = copy.deepcopy(ys)
self.dic_names[basename + '.jpg']['K'] = copy.deepcopy(kk)

# Run IoU with pifpaf detections and save
path_pif = os.path.join(self.dir_ann, name + '.pifpaf.json')
path_pif = os.path.join(self.dir_ann, name + '.predictions.json')
exists = os.path.isfile(path_pif)

if exists:
@ -95,22 +114,21 @@ class PreprocessNuscenes:
boxes, keypoints = preprocess_pifpaf(annotations, im_size=(1600, 900))
else:
continue

if keypoints:
inputs = preprocess_monoloco(keypoints, kk).tolist()

matches = get_iou_matches(boxes, boxes_gt, self.iou_min)
for (idx, idx_gt) in matches:
self.dic_jo[phase]['kps'].append(keypoints[idx])
self.dic_jo[phase]['X'].append(inputs[idx])
self.dic_jo[phase]['Y'].append([dds[idx_gt]]) # Trick to make it (nn,1)
keypoint = keypoints[idx:idx + 1]
inp = preprocess_monoloco(keypoint, kk).view(-1).tolist()
lab = ys[idx_gt]
lab = normalize_hwl(lab)
self.dic_jo[phase]['kps'].append(keypoint)
self.dic_jo[phase]['X'].append(inp)
self.dic_jo[phase]['Y'].append(lab)
self.dic_jo[phase]['names'].append(name) # One image name for each annotation
self.dic_jo[phase]['boxes_3d'].append(boxes_3d[idx_gt])
self.dic_jo[phase]['K'].append(kk)
append_cluster(self.dic_jo, phase, inputs[idx], dds[idx_gt], keypoints[idx])
append_cluster(self.dic_jo, phase, inp, lab, keypoint)
cnt_ann += 1
sys.stdout.write('\r' + 'Saved annotations {}'.format(cnt_ann) + '\t')

current_token = sample_dic['next']

with open(os.path.join(self.path_joints), 'w') as f:
@ -119,35 +137,50 @@ class PreprocessNuscenes:
json.dump(self.dic_names, f)
end = time.time()

# extract_box_average(self.dic_jo['train']['boxes_3d'])
print("\nSaved {} annotations for {} samples in {} scenes. Total time: {:.1f} minutes"
.format(cnt_ann, cnt_samples, cnt_scenes, (end-start)/60))
print("\nOutput files:\n{}\n{}\n".format(self.path_names, self.path_joints))

def extract_from_token(self, sd_token):

boxes_gt = []
dds = []
boxes_3d = []
path_im, boxes_obj, kk = self.nusc.get_sample_data(sd_token, box_vis_level=1) # At least one corner
kk = kk.tolist()
name = os.path.basename(path_im)
for box_obj in boxes_obj:
if box_obj.name[:6] != 'animal':
general_name = box_obj.name.split('.')[0] + '.' + box_obj.name.split('.')[1]
def extract_ground_truth(boxes_obj, kk, spherical=True):

boxes_gt = []
boxes_3d = []
ys = []

for box_obj in boxes_obj:

# Select category
if box_obj.name[:6] != 'animal':
general_name = box_obj.name.split('.')[0] + '.' + box_obj.name.split('.')[1]
else:
general_name = 'animal'
if general_name in select_categories('all'):

# Obtain 2D & 3D box
boxes_gt.append(project_3d(box_obj, kk))
boxes_3d.append(box_obj.center.tolist() + box_obj.wlh.tolist())

# Angle
yaw = quaternion_yaw(box_obj.orientation)
assert - math.pi <= yaw <= math.pi
sin, cos, _ = correct_angle(yaw, box_obj.center)
hwl = [float(box_obj.wlh[i]) for i in (2, 0, 1)]

# Spherical coordinates
xyz = list(box_obj.center)
dd = np.linalg.norm(box_obj.center)
if spherical:
rtp = to_spherical(xyz)
loc = rtp[1:3] + xyz[2:3] + rtp[0:1] # [theta, psi, z, r]
else:
general_name = 'animal'
if general_name in select_categories('all'):
box = project_3d(box_obj, kk)
dd = np.linalg.norm(box_obj.center)
boxes_gt.append(box)
dds.append(dd)
box_3d = box_obj.center.tolist() + box_obj.wlh.tolist()
boxes_3d.append(box_3d)
self.dic_names[name]['boxes'].append(box)
self.dic_names[name]['dds'].append(dd)
self.dic_names[name]['K'] = kk
loc = xyz + [dd]

return name, boxes_gt, boxes_3d, dds, kk
output = loc + hwl + [sin, cos, yaw]
ys.append(output)

return boxes_gt, boxes_3d, ys

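The `[theta, psi, z, r]` reordering above is easier to see with numbers. The exact angle convention of `to_spherical` lives in `monstereo`'s utils and is not shown in this diff; the stand-in below assumes azimuth `atan2(x, z)` and elevation from the xz-plane, and should be read as an illustration only:

```
import math

def to_spherical_sketch(xyz):
    # Assumed convention: rtp = [r, theta, psi]
    xx, yy, zz = xyz
    rr = math.sqrt(xx * xx + yy * yy + zz * zz)
    return [rr, math.atan2(xx, zz), math.atan2(yy, math.hypot(xx, zz))]

xyz = [2.0, -0.5, 15.0]
rtp = to_spherical_sketch(xyz)
loc = rtp[1:3] + xyz[2:3] + rtp[0:1]   # [theta, psi, z, r], as in the code above
print([round(v, 3) for v in loc])
```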
def factory(dataset, dir_nuscenes):
@ -175,3 +208,75 @@ def factory(dataset, dir_nuscenes):
split_train, split_val = split_scenes['train'], split_scenes['val']

return nusc, scenes, split_train, split_val


def quaternion_yaw(q: Quaternion, in_image_frame: bool = True) -> float:
if in_image_frame:
v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
yaw = -np.arctan2(v[2], v[0])
else:
v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
yaw = np.arctan2(v[1], v[0])
return float(yaw)

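A quick numerical check of `quaternion_yaw` (assuming the `pyquaternion` package, which the imports above already require): a pure rotation about the camera's vertical axis should be recovered exactly.

```
import numpy as np
from pyquaternion import Quaternion

q = Quaternion(axis=[0, 1, 0], angle=0.3)         # 0.3 rad about the vertical axis
v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
yaw = -np.arctan2(v[2], v[0])                     # image-frame branch above
print(round(float(yaw), 3))                       # 0.3
```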
def extract_box_average(boxes_3d):
boxes_np = np.array(boxes_3d)
means = np.mean(boxes_np[:, 3:], axis=0)
stds = np.std(boxes_np[:, 3:], axis=0)
print(means)
print(stds)


def extract_social(inputs, ys, keypoints, idx, matches):
"""Output a (padded) version with all the 5 neighbours
- Take the ground feet and the output z
- make relative to the person (as social LSTM)"""
all_inputs = []

# Find the lowest relative ground foot
ground_foot = np.max(np.array(inputs)[:, [31, 33]], axis=1)
rel_ground_foot = ground_foot - ground_foot[idx]
rel_ground_foot = rel_ground_foot.tolist()

# Order the people based on their distance
base = np.array([np.mean(np.array(keypoints[idx][0])), np.mean(np.array(keypoints[idx][1]))])
# delta_input = [abs((inp[31] + inp[33]) / 2 - base) for inp in inputs]
delta_input = [np.linalg.norm(base - np.array([np.mean(np.array(kp[0])), np.mean(np.array(kp[1]))]))
for kp in keypoints]
sorted_indices = sorted(range(len(delta_input)), key=lambda k: delta_input[k]) # Return a list of sorted indices
all_inputs.extend(inputs[idx])

indices_idx = [idx for (idx, idx_gt) in matches]
if len(sorted_indices) > 2:
aa = 5
for ii in range(1, 3):
try:
index = sorted_indices[ii]

# Extract the idx_gt corresponding to the input we are attaching if it exists
try:
idx_idx_gt = indices_idx.index(index)
idx_gt = matches[idx_idx_gt][1]
all_inputs.append(rel_ground_foot[index]) # Relative lower ground foot
all_inputs.append(float(ys[idx_gt][3])) # Output Z
except ValueError:
all_inputs.extend([0.] * 2)
except IndexError:
all_inputs.extend([0.] * 2)
assert len(all_inputs) == 34 + 2 * 2
return all_inputs


# def get_jean_yaw(box_obj):
# b_corners = box_obj.bottom_corners()
# center = box_obj.center
# back_point = [(b_corners[0, 2] + b_corners[0, 3]) / 2, (b_corners[2, 2] + b_corners[2, 3]) / 2]
#
# x = b_corners[0, :] - back_point[0]
# y = b_corners[2, :] - back_point[1]
#
# angle = math.atan2((x[0] + x[1]) / 2, (y[0] + y[1]) / 2) * 180 / 3.14
# angle = (angle + 360) % 360
# correction = math.atan2(center[0], center[2]) * 180 / 3.14
# return angle, correction

@ -1,28 +1,31 @@

import math
from copy import deepcopy
import numpy as np

BASELINE = 0.54
BF = BASELINE * 721

COCO_KEYPOINTS = [
'nose', # 1
'left_eye', # 2
'right_eye', # 3
'left_ear', # 4
'right_ear', # 5
'left_shoulder', # 6
'right_shoulder', # 7
'left_elbow', # 8
'right_elbow', # 9
'left_wrist', # 10
'right_wrist', # 11
'left_hip', # 12
'right_hip', # 13
'left_knee', # 14
'right_knee', # 15
'left_ankle', # 16
'right_ankle', # 17
'nose', # 0
'left_eye', # 1
'right_eye', # 2
'left_ear', # 3
'right_ear', # 4
'left_shoulder', # 5
'right_shoulder', # 6
'left_elbow', # 7
'right_elbow', # 8
'left_wrist', # 9
'right_wrist', # 10
'left_hip', # 11
'right_hip', # 12
'left_knee', # 13
'right_knee', # 14
'left_ankle', # 15
'right_ankle', # 16
]


HFLIP = {
'nose': 'nose',
'left_eye': 'right_eye',
@ -45,10 +48,94 @@ HFLIP = {


def transform_keypoints(keypoints, mode):

"""Egocentric horizontal flip"""
assert mode == 'flip', "mode not recognized"
kps = np.array(keypoints)
dic_kps = {key: kps[:, :, idx] for idx, key in enumerate(COCO_KEYPOINTS)}
kps_hflip = np.array([dic_kps[value] for key, value in HFLIP.items()])
kps_hflip = np.transpose(kps_hflip, (1, 2, 0))
return kps_hflip.tolist()


def flip_inputs(keypoints, im_w, mode=None):
"""Horizontally flip the keypoints or the boxes in the image"""
if mode == 'box':
boxes = deepcopy(keypoints)
for box in boxes:
temp = box[2]
box[2] = im_w - box[0]
box[0] = im_w - temp
return boxes

keypoints = np.array(keypoints)
keypoints[:, 0, :] = im_w - keypoints[:, 0, :] # Shifted
kps_flip = transform_keypoints(keypoints, mode='flip')
return kps_flip


def flip_labels(boxes_gt, labels, im_w):
"""Correct x, d positions and angles after horizontal flipping"""
from ..utils import correct_angle, to_cartesian, to_spherical
boxes_flip = deepcopy(boxes_gt)
labels_flip = deepcopy(labels)

for idx, label_flip in enumerate(labels_flip):

# Flip the box and account for disparity
disp = BF / label_flip[2]
temp = boxes_flip[idx][2]
boxes_flip[idx][2] = im_w - boxes_flip[idx][0] + disp
boxes_flip[idx][0] = im_w - temp + disp

# Flip X and D
rtp = label_flip[3:4] + label_flip[0:2] # Originally t,p,z,r
xyz = to_cartesian(rtp)
xyz[0] = -xyz[0] + BASELINE # x
rtp_r = to_spherical(xyz)
label_flip[3], label_flip[0], label_flip[1] = rtp_r[0], rtp_r[1], rtp_r[2]

# Flip and correct the angle
yaw = label_flip[9]
yaw_n = math.copysign(1, yaw) * (np.pi - abs(yaw)) # Horizontal flipping change of angle

sin, cos, yaw_corr = correct_angle(yaw_n, xyz)
label_flip[7], label_flip[8], label_flip[9] = sin, cos, yaw_n

return boxes_flip, labels_flip

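The `disp` correction in `flip_labels` is the standard pinhole-stereo relation: disparity = B·f / z, with the KITTI baseline B = 0.54 m and focal length 721 px hard-coded above as `BF`. A quick check with those constants:

```
BASELINE = 0.54          # KITTI stereo baseline [m]
BF = BASELINE * 721      # baseline times focal length, as defined above

z = 10.0                 # pedestrian at 10 m
print(round(BF / z, 1))  # ~38.9 px shift between the left and right views
```

Because disparity falls off as 1/z, the box correction matters most for nearby pedestrians.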

def height_augmentation(kps, kps_r, label, s_match, seed=0):
"""
label: theta, psi, z, rho, wlh, sin, cos, yaw, cat
"""
from ..utils import to_cartesian
n_labels = 3 if s_match > 0.9 else 1
height_min = 1.2
height_max = 2
av_height = 1.71
kps_aug = [[kps.clone(), kps_r.clone()] for _ in range(n_labels+1)]
labels_aug = [label.copy() for _ in range(n_labels+1)] # Maintain the original
np.random.seed(seed)
heights = np.random.uniform(height_min, height_max, n_labels) # 3 samples
zzs = heights * label[2] / av_height
disp = BF / label[2]

rtp = label[3:4] + label[0:2] # Originally t,p,z,r
xyz = to_cartesian(rtp)

for i in range(n_labels):

if zzs[i] < 2:
continue
# Update keypoints
disp_new = BF / zzs[i]
delta_disp = disp - disp_new
kps_aug[i][1][0, 0, :] = kps_aug[i][1][0, 0, :] + delta_disp

# Update labels
labels_aug[i][2] = zzs[i]
xyz[2] = zzs[i]
rho = np.linalg.norm(xyz)
labels_aug[i][3] = rho

return kps_aug, labels_aug

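The depth re-labelling in `height_augmentation` follows from similar triangles: a person of height h projecting to the same pixel height as an average 1.71 m person must sit at depth h·z/1.71. A sketch with the sampled extremes, assuming the same constants as above:

```
BF = 0.54 * 721
av_height, z = 1.71, 12.0
for h in (1.2, 2.0):                  # extremes of the sampled height range
    z_new = h * z / av_height         # 8.4 m and 14.0 m
    delta_disp = BF / z - BF / z_new  # shift applied to the right keypoints
    print(round(z_new, 1), round(delta_disp, 1))
```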
158
monoloco/run.py
@ -1,8 +1,8 @@
# pylint: disable=too-many-branches, too-many-statements

import argparse

from openpifpaf.network import nets
from openpifpaf import decoder
from openpifpaf import decoder, network, visualizer, show, logger


def cli():
@ -19,71 +19,98 @@ def cli():
prep_parser.add_argument('--dir_ann', help='directory of annotations of 2d joints', required=True)
prep_parser.add_argument('--dataset',
help='datasets to preprocess: nuscenes, nuscenes_teaser, nuscenes_mini, kitti',
default='nuscenes')
default='kitti')
prep_parser.add_argument('--dir_nuscenes', help='directory of nuscenes devkit', default='data/nuscenes/')
prep_parser.add_argument('--iou_min', help='minimum iou to match ground truth', type=float, default=0.3)
prep_parser.add_argument('--variance', help='new', action='store_true')
prep_parser.add_argument('--activity', help='new', action='store_true')
prep_parser.add_argument('--monocular', help='new', action='store_true')

# Predict (2D pose and/or 3D location from images)
# General
predict_parser.add_argument('--networks', nargs='+', help='Run pifpaf and/or monoloco', default=['monoloco'])
predict_parser.add_argument('--mode', help='pifpaf, mono, stereo', default='stereo')
predict_parser.add_argument('images', nargs='*', help='input images')
predict_parser.add_argument('--glob', help='glob expression for input images (for many images)')
predict_parser.add_argument('-o', '--output-directory', help='Output directory')
predict_parser.add_argument('--output_types', nargs='+', default=['json'],
help='what to output: json keypoints skeleton for Pifpaf'
'json bird front combined for Monoloco')
predict_parser.add_argument('--show', help='to show images', action='store_true')
'json bird front or multi for MonStereo')
predict_parser.add_argument('--no_save', help='to show images', action='store_true')
predict_parser.add_argument('--dpi', help='image resolution', type=int, default=150)
predict_parser.add_argument('--long-edge', default=None, type=int,
help='rescale the long side of the image (aspect ratio maintained)')
predict_parser.add_argument('--disable-cuda', action='store_true', help='disable CUDA')
predict_parser.add_argument('--focal',
help='focal length in mm for a sensor size of 7.2x5.4 mm. Default nuScenes sensor',
type=float, default=5.7)

# Pifpaf
nets.cli(predict_parser)
decoder.cli(predict_parser, force_complete_pose=True, instance_threshold=0.15)
predict_parser.add_argument('--scale', default=1.0, type=float, help='change the scale of the image to preprocess')
# Pifpaf parsers
decoder.cli(parser)
logger.cli(parser)
network.Factory.cli(parser)
show.cli(parser)
visualizer.cli(parser)

# Monoloco
predict_parser.add_argument('--net', help='Choose network: monoloco, monoloco_p, monoloco_pp, monstereo')
predict_parser.add_argument('--model', help='path of MonoLoco model to load', required=True)
predict_parser.add_argument('--hidden_size', type=int, help='Number of hidden units in the model', default=512)
predict_parser.add_argument('--path_gt', help='path of json file with gt 3d localization',
default='data/arrays/names-kitti-190513-1754.json')
default='data/arrays/names-kitti-200615-1022.json')
predict_parser.add_argument('--transform', help='transformation for the pose', default='None')
predict_parser.add_argument('--draw_box', help='to draw box in the images', action='store_true')
predict_parser.add_argument('--predict', help='whether to make prediction', action='store_true')
predict_parser.add_argument('--z_max', type=int, help='maximum meters distance for predictions', default=22)
predict_parser.add_argument('--z_max', type=int, help='maximum meters distance for predictions', default=100)
predict_parser.add_argument('--n_dropout', type=int, help='Epistemic uncertainty evaluation', default=0)
predict_parser.add_argument('--dropout', type=float, help='dropout parameter', default=0.2)
predict_parser.add_argument('--webcam', help='monoloco streaming', action='store_true')
predict_parser.add_argument('--show_all', help='only predict ground-truth matches or all', action='store_true')

# Social distancing and social interactions
predict_parser.add_argument('--social_distance', help='social', action='store_true')
predict_parser.add_argument('--threshold_prob', type=float, help='concordance for samples', default=0.25)
predict_parser.add_argument('--threshold_dist', type=float, help='min distance of people', default=2.5)
predict_parser.add_argument('--radii', type=tuple, help='o-space radii', default=(0.3, 0.5, 1))

# Training
training_parser.add_argument('--joints', help='Json file with input joints',
default='data/arrays/joints-nuscenes_teaser-190513-1846.json')
training_parser.add_argument('--save', help='whether to not save model and log file', action='store_false')
training_parser.add_argument('-e', '--epochs', type=int, help='number of epochs to train for', default=150)
training_parser.add_argument('--bs', type=int, default=256, help='input batch size')
training_parser.add_argument('--baseline', help='whether to train using the baseline', action='store_true')
training_parser.add_argument('--no_save', help='to not save model and log file', action='store_true')
training_parser.add_argument('-e', '--epochs', type=int, help='number of epochs to train for', default=500)
training_parser.add_argument('--bs', type=int, default=512, help='input batch size')
training_parser.add_argument('--monocular', help='whether to train monoloco', action='store_true')
training_parser.add_argument('--dropout', type=float, help='dropout. Default no dropout', default=0.2)
training_parser.add_argument('--lr', type=float, help='learning rate', default=0.002)
training_parser.add_argument('--sched_step', type=float, help='scheduler step time (epochs)', default=20)
training_parser.add_argument('--sched_gamma', type=float, help='Scheduler multiplication every step', default=0.9)
training_parser.add_argument('--hidden_size', type=int, help='Number of hidden units in the model', default=256)
training_parser.add_argument('--lr', type=float, help='learning rate', default=0.001)
training_parser.add_argument('--sched_step', type=float, help='scheduler step time (epochs)', default=30)
training_parser.add_argument('--sched_gamma', type=float, help='Scheduler multiplication every step', default=0.98)
training_parser.add_argument('--hidden_size', type=int, help='Number of hidden units in the model', default=1024)
training_parser.add_argument('--n_stage', type=int, help='Number of stages in the model', default=3)
training_parser.add_argument('--hyp', help='run hyperparameters tuning', action='store_true')
training_parser.add_argument('--multiplier', type=int, help='Size of the grid of hyp search', default=1)
training_parser.add_argument('--r_seed', type=int, help='specify the seed for training and hyp tuning', default=1)
training_parser.add_argument('--print_loss', help='print training and validation losses', action='store_true')
training_parser.add_argument('--auto_tune_mtl', help='whether to use uncertainty to autotune losses',
action='store_true')

# Evaluation
eval_parser.add_argument('--dataset', help='datasets to evaluate, kitti or nuscenes', default='kitti')
eval_parser.add_argument('--geometric', help='to evaluate geometric distance', action='store_true')
eval_parser.add_argument('--generate', help='create txt files for KITTI evaluation', action='store_true')
eval_parser.add_argument('--dir_ann', help='directory of annotations of 2d joints (for KITTI evaluation')
eval_parser.add_argument('--model', help='path of MonoLoco model to load', required=True)
eval_parser.add_argument('--dir_ann', help='directory of annotations of 2d joints (for KITTI evaluation)')
eval_parser.add_argument('--model', help='path of MonoLoco model to load')
eval_parser.add_argument('--joints', help='Json file with input joints to evaluate (for nuScenes evaluation)')
eval_parser.add_argument('--n_dropout', type=int, help='Epistemic uncertainty evaluation', default=0)
eval_parser.add_argument('--dropout', type=float, help='dropout. Default no dropout', default=0.2)
eval_parser.add_argument('--hidden_size', type=int, help='Number of hidden units in the model', default=256)
eval_parser.add_argument('--hidden_size', type=int, help='Number of hidden units in the model', default=1024)
eval_parser.add_argument('--n_stage', type=int, help='Number of stages in the model', default=3)
eval_parser.add_argument('--show', help='whether to show statistic graphs', action='store_true')
eval_parser.add_argument('--save', help='whether to save statistic graphs', action='store_true')
eval_parser.add_argument('--verbose', help='verbosity of statistics', action='store_true')
eval_parser.add_argument('--stereo', help='include stereo baseline results', action='store_true')
eval_parser.add_argument('--monocular', help='whether to train using the baseline', action='store_true')
eval_parser.add_argument('--new', help='new', action='store_true')
eval_parser.add_argument('--variance', help='evaluate keypoints variance', action='store_true')
eval_parser.add_argument('--activity', help='evaluate activities', action='store_true')
eval_parser.add_argument('--net', help='Choose network: monoloco, monoloco_p, monoloco_pp, monstereo')
eval_parser.add_argument('--baselines', help='whether to evaluate stereo baselines', action='store_true')
eval_parser.add_argument('--generate_official', help='whether to add empty txt files for official evaluation',
action='store_true')

args = parser.parse_args()
return args
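For reference, typical invocations of the four subparsers might look as follows (the `python -m monoloco.run` entry point and the placeholder paths are assumptions; the flags are the ones defined above):

```
python -m monoloco.run prep --dir_ann <annotation-dir> --dataset kitti
python -m monoloco.run train --joints data/arrays/joints-kitti-<date>.json --monocular
python -m monoloco.run predict <image.jpg> --net monoloco_pp --model <model.pkl> --output_types multi
python -m monoloco.run eval --dataset kitti --model <model.pkl> --dir_ann <annotation-dir> --generate
```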
@ -92,62 +119,75 @@ def cli():
def main():
args = cli()
if args.command == 'predict':
if args.webcam:
from .visuals.webcam import webcam
webcam(args)
else:
from . import predict
predict.predict(args)
from .predict import predict
predict(args)

elif args.command == 'prep':
if 'nuscenes' in args.dataset:
from .prep.preprocess_nu import PreprocessNuscenes
prep = PreprocessNuscenes(args.dir_ann, args.dir_nuscenes, args.dataset, args.iou_min)
prep.run()
if 'kitti' in args.dataset:
from .prep.preprocess_ki import PreprocessKitti
prep = PreprocessKitti(args.dir_ann, args.iou_min)
prep.run()
else:
from .prep.prep_kitti import PreprocessKitti
prep = PreprocessKitti(args.dir_ann, args.iou_min, args.monocular)
if args.activity:
prep.prep_activity()
else:
prep.run()

elif args.command == 'train':
from .train import HypTuning
if args.hyp:
hyp_tuning = HypTuning(joints=args.joints, epochs=args.epochs,
baseline=args.baseline, dropout=args.dropout,
monocular=args.monocular, dropout=args.dropout,
multiplier=args.multiplier, r_seed=args.r_seed)
hyp_tuning.train()
hyp_tuning.train(args)
else:

from .train import Trainer
training = Trainer(joints=args.joints, epochs=args.epochs, bs=args.bs,
baseline=args.baseline, dropout=args.dropout, lr=args.lr, sched_step=args.sched_step,
n_stage=args.n_stage, sched_gamma=args.sched_gamma, hidden_size=args.hidden_size,
r_seed=args.r_seed, save=args.save)
training = Trainer(args)

_ = training.train()
_ = training.evaluate()

elif args.command == 'eval':
if args.geometric:

if args.activity:
from .eval.eval_activity import ActivityEvaluator
evaluator = ActivityEvaluator(args)
if 'collective' in args.dataset:
evaluator.eval_collective()
else:
evaluator.eval_kitti()

elif args.geometric:
assert args.joints, "joints argument not provided"
from .eval import geometric_baseline
from .network.geom_baseline import geometric_baseline
geometric_baseline(args.joints)

if args.generate:
from .eval import GenerateKitti
kitti_txt = GenerateKitti(args.model, args.dir_ann, p_dropout=args.dropout, n_dropout=args.n_dropout,
stereo=args.stereo)
kitti_txt.run()
elif args.variance:
from .eval.eval_variance import joints_variance
joints_variance(args.joints, clusters=None, dic_ms=None)

if args.dataset == 'kitti':
from .eval import EvalKitti
kitti_eval = EvalKitti(verbose=args.verbose, stereo=args.stereo)
kitti_eval.run()
kitti_eval.printer(show=args.show, save=args.save)
else:
if args.generate:
from .eval.generate_kitti import GenerateKitti
kitti_txt = GenerateKitti(args)
kitti_txt.run()

if 'nuscenes' in args.dataset:
from .train import Trainer
training = Trainer(joints=args.joints)
_ = training.evaluate(load=True, model=args.model, debug=False)
if args.dataset == 'kitti':
from .eval import EvalKitti
kitti_eval = EvalKitti(args)
kitti_eval.run()
kitti_eval.printer()

elif 'nuscenes' in args.dataset:
from .train import Trainer
training = Trainer(args)
_ = training.evaluate(load=True, model=args.model, debug=False)

else:
raise ValueError("Option not recognized")

else:
raise ValueError("Main subparser not recognized or not provided")

@ -5,6 +5,42 @@ import torch
from torch.utils.data import Dataset


class ActivityDataset(Dataset):
"""
Dataloader for activity dataset
"""

def __init__(self, joints, phase):
"""
Load inputs and outputs from the pickle files from gt joints, mask joints or both
"""
assert(phase in ['train', 'val', 'test'])

with open(joints, 'r') as f:
dic_jo = json.load(f)

# Define input and output for normal training and inference
self.inputs_all = torch.tensor(dic_jo[phase]['X'])
self.outputs_all = torch.tensor(dic_jo[phase]['Y']).view(-1, 1)
# self.kps_all = torch.tensor(dic_jo[phase]['kps'])

def __len__(self):
"""
:return: number of samples (m)
"""
return self.inputs_all.shape[0]

def __getitem__(self, idx):
"""
Reading the tensors when required. E.g. retrieving one element or one batch at a time
:param idx: corresponding to m
"""
inputs = self.inputs_all[idx, :]
outputs = self.outputs_all[idx]
# kps = self.kps_all[idx, :]
return inputs, outputs

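A minimal usage sketch for the dataset above (the joints path is a placeholder for a file produced by the prep step):

```
from torch.utils.data import DataLoader

dataset = ActivityDataset('data/arrays/joints-kitti-<date>.json', phase='train')
loader = DataLoader(dataset, batch_size=512, shuffle=True)
for inputs, outputs in loader:
    print(inputs.shape, outputs.shape)  # (batch, n_features) and (batch, 1)
    break
```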
class KeypointsDataset(Dataset):
"""
Dataloader for nuscenes or kitti datasets
@ -21,7 +57,7 @@ class KeypointsDataset(Dataset):

# Define input and output for normal training and inference
self.inputs_all = torch.tensor(dic_jo[phase]['X'])
self.outputs_all = torch.tensor(dic_jo[phase]['Y']).view(-1, 1)
self.outputs_all = torch.tensor(dic_jo[phase]['Y'])
self.names_all = dic_jo[phase]['names']
self.kps_all = torch.tensor(dic_jo[phase]['kps'])

@ -15,17 +15,16 @@ from .trainer import Trainer

class HypTuning:

def __init__(self, joints, epochs, baseline, dropout, multiplier=1, r_seed=1):
def __init__(self, joints, epochs, monocular, dropout, multiplier=1, r_seed=1):
"""
Initialize directories, load the data and parameters for the training
"""

# Initialize Directories
self.joints = joints
self.baseline = baseline
self.monocular = monocular
self.dropout = dropout
self.num_epochs = epochs
self.baseline = baseline
self.r_seed = r_seed
dir_out = os.path.join('data', 'models')
dir_logs = os.path.join('data', 'logs')
@ -33,7 +32,7 @@ class HypTuning:
if not os.path.exists(dir_logs):
os.makedirs(dir_logs)

name_out = 'hyp-baseline-' if baseline else 'hyp-monoloco-'
name_out = 'hyp-monoloco-' if monocular else 'hyp-ms-'

self.path_log = os.path.join(dir_logs, name_out)
self.path_model = os.path.join(dir_out, name_out)
@ -46,23 +45,23 @@ class HypTuning:
np.random.seed(r_seed)
self.sched_gamma_list = [0.8, 0.9, 1, 0.8, 0.9, 1] * multiplier
random.shuffle(self.sched_gamma_list)
self.sched_step = [10, 20, 30, 40, 50, 60] * multiplier
self.sched_step = [10, 20, 40, 60, 80, 100] * multiplier
random.shuffle(self.sched_step)
self.bs_list = [64, 128, 256, 512, 1024, 2048] * multiplier
self.bs_list = [64, 128, 256, 512, 512, 1024] * multiplier
random.shuffle(self.bs_list)
self.hidden_list = [256, 256, 256, 256, 256, 256] * multiplier
self.hidden_list = [512, 1024, 2048, 512, 1024, 2048] * multiplier
random.shuffle(self.hidden_list)
self.n_stage_list = [3, 3, 3, 3, 3, 3] * multiplier
random.shuffle(self.n_stage_list)
# Learning rate
aa = math.log(0.001, 10)
bb = math.log(0.03, 10)
aa = math.log(0.0005, 10)
bb = math.log(0.01, 10)
log_lr_list = np.random.uniform(aa, bb, int(6 * multiplier)).tolist()
self.lr_list = [10 ** xx for xx in log_lr_list]
# plt.hist(self.lr_list, bins=50)
# plt.show()

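The learning rate is the one hyperparameter searched in log space: exponents are drawn uniformly and then exponentiated, so every decade between the bounds is sampled with equal probability. A standalone sketch of the same recipe:

```
import math
import numpy as np

np.random.seed(1)
aa, bb = math.log10(0.0005), math.log10(0.01)   # same bounds as above
lr_list = [10 ** xx for xx in np.random.uniform(aa, bb, 6)]
print([round(lr, 5) for lr in lr_list])
```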
def train(self):
def train(self, args):
"""Train multiple times using log-space random search"""

best_acc_val = 20
@ -77,10 +76,7 @@ class HypTuning:
hidden_size = self.hidden_list[idx]
n_stage = self.n_stage_list[idx]

training = Trainer(joints=self.joints, epochs=self.num_epochs,
bs=bs, baseline=self.baseline, dropout=self.dropout, lr=lr, sched_step=sched_step,
sched_gamma=sched_gamma, hidden_size=hidden_size, n_stage=n_stage,
save=False, print_loss=False, r_seed=self.r_seed)
training = Trainer(args)

best_epoch = training.train()
dic_err, model = training.evaluate()
@ -92,12 +88,12 @@ class HypTuning:
dic_best['lr'] = lr
dic_best['joints'] = self.joints
dic_best['bs'] = bs
dic_best['baseline'] = self.baseline
dic_best['monocular'] = self.monocular
dic_best['sched_gamma'] = sched_gamma
dic_best['sched_step'] = sched_step
dic_best['hidden_size'] = hidden_size
dic_best['n_stage'] = n_stage
dic_best['acc_val'] = dic_err['val']['all']['mean']
dic_best['acc_val'] = dic_err['val']['all']['d']
dic_best['best_epoch'] = best_epoch
dic_best['random_seed'] = self.r_seed
# dic_best['acc_test'] = dic_err['test']['all']['mean']
@ -124,7 +120,7 @@ class HypTuning:
print()
self.logger.info("Accuracy in each cluster:")

for key in dic_err_best['val']:
self.logger.info("Val: error in cluster {} = {} ".format(key, dic_err_best['val'][key]['mean']))
for key in ('10', '20', '30', '>30', 'all'):
self.logger.info("Val: error in cluster {} = {} ".format(key, dic_err_best['val'][key]['d']))
self.logger.info("Final accuracy Val: {:.2f}".format(dic_best['acc_val']))
self.logger.info("\nSaved the model: {}".format(self.path_model))

196
monoloco/train/losses.py
Normal file
@ -0,0 +1,196 @@
"""Inspired by Openpifpaf"""

import math
import torch
import torch.nn as nn
import numpy as np

from ..network import extract_labels, extract_labels_aux, extract_outputs


class AutoTuneMultiTaskLoss(torch.nn.Module):
def __init__(self, losses_tr, losses_val, lambdas, tasks):
super().__init__()

assert all(l in (0.0, 1.0) for l in lambdas)
self.losses = torch.nn.ModuleList(losses_tr)
self.losses_val = losses_val
self.lambdas = lambdas
self.tasks = tasks
self.log_sigmas = torch.nn.Parameter(torch.zeros((len(lambdas),), dtype=torch.float32), requires_grad=True)

def forward(self, outputs, labels, phase='train'):

assert phase in ('train', 'val')
out = extract_outputs(outputs, tasks=self.tasks)
gt_out = extract_labels(labels, tasks=self.tasks)
loss_values = [lam * l(o, g) / (2.0 * (log_sigma.exp() ** 2))
for lam, log_sigma, l, o, g in zip(self.lambdas, self.log_sigmas, self.losses, out, gt_out)]

auto_reg = [log_sigma for log_sigma in self.log_sigmas] # pylint: disable=unnecessary-comprehension

loss = sum(loss_values) + sum(auto_reg)
if phase == 'val':
loss_values_val = [l(o, g) for l, o, g in zip(self.losses_val, out, gt_out)]
loss_values_val.extend([s.exp() for s in self.log_sigmas])
return loss, loss_values_val
return loss, loss_values

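The weighting above reads as homoscedastic-uncertainty multi-task learning in the spirit of Kendall et al. (an interpretation, not stated in the diff): each task loss is divided by 2σ² with σ = exp(log_sigma) learned, and the added log_sigma term keeps σ from growing without bound. A toy illustration of the trade-off:

```
import torch

task_loss = torch.tensor(4.0)
for log_sigma in (torch.tensor(0.0), torch.tensor(1.0)):
    weighted = task_loss / (2.0 * log_sigma.exp() ** 2) + log_sigma
    print(round(weighted.item(), 3))   # 2.0, then ~1.271
```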
class MultiTaskLoss(torch.nn.Module):
def __init__(self, losses_tr, losses_val, lambdas, tasks):
super().__init__()

self.losses = torch.nn.ModuleList(losses_tr)
self.losses_val = losses_val
self.lambdas = lambdas
self.tasks = tasks
if len(self.tasks) == 1 and self.tasks[0] == 'aux':
self.flag_aux = True
else:
self.flag_aux = False

def forward(self, outputs, labels, phase='train'):

assert phase in ('train', 'val')
out = extract_outputs(outputs, tasks=self.tasks)
if self.flag_aux:
gt_out = extract_labels_aux(labels, tasks=self.tasks)
else:
gt_out = extract_labels(labels, tasks=self.tasks)
loss_values = [lam * l(o, g) for lam, l, o, g in zip(self.lambdas, self.losses, out, gt_out)]
loss = sum(loss_values)

if phase == 'val':
loss_values_val = [l(o, g) for l, o, g in zip(self.losses_val, out, gt_out)]
return loss, loss_values_val
return loss, loss_values


class CompositeLoss(torch.nn.Module):

def __init__(self, tasks):
super().__init__()

self.tasks = tasks
self.multi_loss_tr = {task: (LaplacianLoss() if task == 'd'
else (nn.BCEWithLogitsLoss() if task in ('aux', )
else nn.L1Loss())) for task in tasks}

self.multi_loss_val = {}
for task in tasks:
if task == 'd':
loss = l1_loss_from_laplace
elif task == 'ori':
loss = angle_loss
elif task in ('aux', ):
loss = nn.BCEWithLogitsLoss()
else:
loss = nn.L1Loss()
self.multi_loss_val[task] = loss

def forward(self):
losses_tr = [self.multi_loss_tr[l] for l in self.tasks]
losses_val = [self.multi_loss_val[l] for l in self.tasks]
return losses_tr, losses_val


class LaplacianLoss(torch.nn.Module):
"""1D Laplace loss with a spread predicted together with the distance"""
def __init__(self, size_average=True, reduce=True, evaluate=False):
super().__init__()
self.size_average = size_average
self.reduce = reduce
self.evaluate = evaluate

def laplacian_1d(self, mu_si, xx):
"""
1D Laplace loss. f(x | mu, b). The network outputs mu and the log of the spread. xx is the ground truth distance.
This supports backward().
Inspired by
https://github.com/naba89/RNN-Handwriting-Generation-Pytorch/blob/master/loss_functions.py
"""
eps = 0.01 # To avoid 0/0 when no uncertainty
mu, si = mu_si[:, 0:1], mu_si[:, 1:2]
norm = 1 - mu / xx # Relative
const = 2

term_a = torch.abs(norm) * torch.exp(-si) + eps
term_b = si
norm_bi = (np.mean(np.abs(norm.cpu().detach().numpy())), np.mean(torch.exp(si).cpu().detach().numpy()))

if self.evaluate:
return norm_bi
return term_a + term_b + const

def forward(self, outputs, targets):

values = self.laplacian_1d(outputs, targets)

if not self.reduce or self.evaluate:
return values
if self.size_average:
mean_values = torch.mean(values)
return mean_values
return torch.sum(values)

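The training term `|1 - mu/x| * exp(-si) + si + const` is the negative log-likelihood of a Laplace distribution over the relative distance error, with `si` the predicted log-spread. A quick sanity check of the class above with a batch of two predictions:

```
import torch

loss = LaplacianLoss()
mu_si = torch.tensor([[10.0, 0.0],       # predicted distance 10 m, log-spread 0
                      [20.0, 0.0]])
xx = torch.tensor([[10.0], [25.0]])      # ground-truth distances
print(round(loss(mu_si, xx).item(), 2))  # 2.11: mean of 2.01 (exact) and 2.21 (20% off)
```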
class GaussianLoss(torch.nn.Module):
|
||||
"""1D Gaussian with std depending on the absolute distance
|
||||
"""
|
||||
def __init__(self, device, size_average=True, reduce=True, evaluate=False):
|
||||
super().__init__()
|
||||
self.size_average = size_average
|
||||
self.reduce = reduce
|
||||
self.evaluate = evaluate
|
||||
self.device = device
|
||||
|
||||
def gaussian_1d(self, mu_si, xx):
|
||||
"""
|
||||
1D Gaussian Loss. f(x | mu, sigma). The network outputs mu and sigma. X is the ground truth distance.
|
||||
This supports backward().
|
||||
Inspired by
|
||||
https://github.com/naba89/RNN-Handwriting-Generation-Pytorch/blob/master/loss_functions.py
|
||||
"""
|
||||
mu, si = mu_si[:, 0:1], mu_si[:, 1:2]
|
||||
|
||||
min_si = torch.ones(si.size()).cuda(self.device) * 0.1
|
||||
si = torch.max(min_si, si)
|
||||
norm = xx - mu
|
||||
term_a = (norm / si)**2 / 2
|
||||
term_b = torch.log(si * math.sqrt(2 * math.pi))
|
||||
|
||||
norm_si = (np.mean(np.abs(norm.cpu().detach().numpy())), np.mean(si.cpu().detach().numpy()))
|
||||
|
||||
if self.evaluate:
|
||||
return norm_si
|
||||
|
||||
return term_a + term_b
|
||||
|
||||
def forward(self, outputs, targets):
|
||||
|
||||
values = self.gaussian_1d(outputs, targets)
|
||||
|
||||
if not self.reduce or self.evaluate:
|
||||
return values
|
||||
if self.size_average:
|
||||
mean_values = torch.mean(values)
|
||||
return mean_values
|
||||
return torch.sum(values)
|
||||
|
||||
|
||||
def angle_loss(orient, gt_orient):
|
||||
"""Only for evaluation"""
|
||||
angles = torch.atan2(orient[:, 0], orient[:, 1])
|
||||
gt_angles = torch.atan2(gt_orient[:, 0], gt_orient[:, 1])
|
||||
# assert all(angles < math.pi) & all(angles > - math.pi)
|
||||
# assert all(gt_angles < math.pi) & all(gt_angles > - math.pi)
|
||||
loss = torch.mean(torch.abs(angles - gt_angles)) * 180 / 3.14
|
||||
return loss
|
||||
|
||||
|
||||
def l1_loss_from_laplace(out, gt_out):
|
||||
"""Only for evaluation"""
|
||||
loss = torch.mean(torch.abs(out[:, 0:1] - gt_out))
|
||||
return loss
|
||||
@ -1,5 +1,5 @@

# pylint: disable=too-many-statements

"""
Training and evaluation of a neural network which predicts 3D localization and confidence intervals
given 2D joints
@ -13,24 +13,30 @@ from collections import defaultdict
import sys
import time
import warnings
from itertools import chain

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler

from .datasets import KeypointsDataset
from ..network import LaplacianLoss
from ..network.process import unnormalize_bi
from ..network.architectures import LinearModel
from .losses import CompositeLoss, MultiTaskLoss, AutoTuneMultiTaskLoss
from ..network import extract_outputs, extract_labels
from ..network.architectures import MonStereoModel
from ..utils import set_logger


class Trainer:
    def __init__(self, joints, epochs=100, bs=256, dropout=0.2, lr=0.002,
                 sched_step=20, sched_gamma=1, hidden_size=256, n_stage=3, r_seed=1, n_samples=100,
                 baseline=False, save=False, print_loss=False):
    # Constants
    VAL_BS = 10000

    tasks = ('d', 'x', 'y', 'h', 'w', 'l', 'ori', 'aux')
    val_task = 'd'
    lambdas = (1, 1, 1, 1, 1, 1, 1, 1)
    clusters = ['10', '20', '30', '40']

    def __init__(self, args):
        """
        Initialize directories, load the data and parameters for the training
        """
@ -42,170 +48,159 @@ class Trainer:
        dir_logs = os.path.join('data', 'logs')
        if not os.path.exists(dir_logs):
            warnings.warn("Warning: default logs directory not found")
        assert os.path.exists(joints), "Input file not found"
        assert os.path.exists(args.joints), "Input file not found"

        self.joints = joints
        self.num_epochs = epochs
        self.save = save
        self.print_loss = print_loss
        self.baseline = baseline
        self.lr = lr
        self.sched_step = sched_step
        self.sched_gamma = sched_gamma
        n_joints = 17
        input_size = n_joints * 2
        self.output_size = 2
        self.clusters = ['10', '20', '30', '>30']
        self.hidden_size = hidden_size
        self.n_stage = n_stage
        self.joints = args.joints
        self.num_epochs = args.epochs
        self.no_save = args.no_save
        self.print_loss = args.print_loss
        self.monocular = args.monocular
        self.lr = args.lr
        self.sched_step = args.sched_step
        self.sched_gamma = args.sched_gamma
        self.hidden_size = args.hidden_size
        self.n_stage = args.n_stage
        self.dir_out = dir_out
        self.n_samples = n_samples
        self.r_seed = r_seed
        self.r_seed = args.r_seed
        self.auto_tune_mtl = args.auto_tune_mtl

        # Loss functions and output names
        # Select the device
        use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        print('Device: ', self.device)
        torch.manual_seed(self.r_seed)
        if use_cuda:
            torch.cuda.manual_seed(self.r_seed)

        # Remove auxiliary task if monocular
        if self.monocular and self.tasks[-1] == 'aux':
            self.tasks = self.tasks[:-1]
            self.lambdas = self.lambdas[:-1]

        losses_tr, losses_val = CompositeLoss(self.tasks)()

        if self.auto_tune_mtl:
            self.mt_loss = AutoTuneMultiTaskLoss(losses_tr, losses_val, self.lambdas, self.tasks)
        else:
            self.mt_loss = MultiTaskLoss(losses_tr, losses_val, self.lambdas, self.tasks)
        self.mt_loss.to(self.device)

        if not self.monocular:
            input_size = 68
            output_size = 10
        else:
            input_size = 34
            output_size = 9

        name = 'monoloco_pp' if self.monocular else 'monstereo'
        now = datetime.datetime.now()
        now_time = now.strftime("%Y%m%d-%H%M")[2:]

        if baseline:
            name_out = 'baseline-' + now_time
            self.criterion = nn.L1Loss().cuda()
            self.output_size = 1
        else:
            name_out = 'monoloco-' + now_time
            self.criterion = LaplacianLoss().cuda()
            self.output_size = 2
        self.criterion_eval = nn.L1Loss().cuda()

        if self.save:
        name_out = name + '-' + now_time
        if not self.no_save:
            self.path_model = os.path.join(dir_out, name_out + '.pkl')
            self.logger = set_logger(os.path.join(dir_logs, name_out))
            self.logger.info("Training arguments: \nepochs: {} \nbatch_size: {} \ndropout: {}"
                             "\nbaseline: {} \nlearning rate: {} \nscheduler step: {} \nscheduler gamma: {} "
                             "\ninput_size: {} \nhidden_size: {} \nn_stages: {} \nr_seed: {}"
                             "\ninput_file: {}"
                             .format(epochs, bs, dropout, baseline, lr, sched_step, sched_gamma, input_size,
                                     hidden_size, n_stage, r_seed, self.joints))
                             "\nmonocular: {} \nlearning rate: {} \nscheduler step: {} \nscheduler gamma: {} "
                             "\ninput_size: {} \noutput_size: {}\nhidden_size: {} \nn_stages: {} "
                             "\nr_seed: {} \nlambdas: {} \ninput_file: {}"
                             .format(args.epochs, args.bs, args.dropout, self.monocular,
                                     args.lr, args.sched_step, args.sched_gamma, input_size,
                                     output_size, args.hidden_size, args.n_stage, args.r_seed,
                                     self.lambdas, self.joints))
        else:
            logging.basicConfig(level=logging.INFO)
            self.logger = logging.getLogger(__name__)

        # Select the device and load the data
        use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        print('Device: ', self.device)

        # Set the seed for random initialization
        torch.manual_seed(r_seed)
        if use_cuda:
            torch.cuda.manual_seed(r_seed)

        # Dataloader
        self.dataloaders = {phase: DataLoader(KeypointsDataset(self.joints, phase=phase),
                                              batch_size=bs, shuffle=True) for phase in ['train', 'val']}
                                              batch_size=args.bs, shuffle=True) for phase in ['train', 'val']}

        self.dataset_sizes = {phase: len(KeypointsDataset(self.joints, phase=phase))
                              for phase in ['train', 'val', 'test']}
                              for phase in ['train', 'val']}

        # Define the model
        self.logger.info('Sizes of the dataset: {}'.format(self.dataset_sizes))
        print(">>> creating model")
        self.model = LinearModel(input_size=input_size, output_size=self.output_size, linear_size=hidden_size,
                                 p_dropout=dropout, num_stage=self.n_stage)

        self.model = MonStereoModel(input_size=input_size, output_size=output_size, linear_size=args.hidden_size,
                                    p_dropout=args.dropout, num_stage=self.n_stage, device=self.device)
        self.model.to(self.device)
        print(">>> total params: {:.2f}M".format(sum(p.numel() for p in self.model.parameters()) / 1000000.0))
        print(">>> model params: {:.3f}M".format(sum(p.numel() for p in self.model.parameters()) / 1000000.0))
        print(">>> loss params: {}".format(sum(p.numel() for p in self.mt_loss.parameters())))

        # Optimizer and scheduler
        self.optimizer = torch.optim.Adam(params=self.model.parameters(), lr=lr)
        all_params = chain(self.model.parameters(), self.mt_loss.parameters())
        self.optimizer = torch.optim.Adam(params=all_params, lr=args.lr)
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min')
        self.scheduler = lr_scheduler.StepLR(self.optimizer, step_size=self.sched_step, gamma=self.sched_gamma)
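As a reading aid for the scheduler change above (not part of the diff): StepLR decays the learning rate geometrically, so with the arguments read in `__init__` the rate at epoch e is

```latex
\mathrm{lr}(e) = \mathrm{lr}_0 \cdot \gamma^{\lfloor e \,/\, \mathrm{sched\_step} \rfloor}
```

in place of the validation-driven ReduceLROnPlateau it replaces.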
def train(self):
|
||||
|
||||
# Initialize the variable containing model weights
|
||||
since = time.time()
|
||||
best_model_wts = copy.deepcopy(self.model.state_dict())
|
||||
best_acc = 1e6
|
||||
best_training_acc = 1e6
|
||||
best_epoch = 0
|
||||
epoch_losses_tr, epoch_losses_val, epoch_norms, epoch_sis = [], [], [], []
|
||||
|
||||
epoch_losses = defaultdict(lambda: defaultdict(list))
|
||||
for epoch in range(self.num_epochs):
|
||||
running_loss = defaultdict(lambda: defaultdict(int))
|
||||
|
||||
# Each epoch has a training and validation phase
|
||||
for phase in ['train', 'val']:
|
||||
if phase == 'train':
|
||||
self.scheduler.step()
|
||||
self.model.train() # Set model to training mode
|
||||
else:
|
||||
self.model.eval() # Set model to evaluate mode
|
||||
|
||||
running_loss_tr = running_loss_eval = norm_tr = bi_tr = 0.0
|
||||
|
||||
# Iterate over data.
|
||||
for inputs, labels, _, _ in self.dataloaders[phase]:
|
||||
inputs = inputs.to(self.device)
|
||||
labels = labels.to(self.device)
|
||||
|
||||
# zero the parameter gradients
|
||||
self.optimizer.zero_grad()
|
||||
|
||||
# forward
|
||||
# track history if only in train
|
||||
with torch.set_grad_enabled(phase == 'train'):
|
||||
outputs = self.model(inputs)
|
||||
|
||||
outputs_eval = outputs[:, 0:1] if self.output_size == 2 else outputs
|
||||
|
||||
loss = self.criterion(outputs, labels)
|
||||
loss_eval = self.criterion_eval(outputs_eval, labels) # L1 loss to evaluation
|
||||
|
||||
# backward + optimize only if in training phase
|
||||
if phase == 'train':
|
||||
self.optimizer.zero_grad()
|
||||
outputs = self.model(inputs)
|
||||
loss, loss_values = self.mt_loss(outputs, labels, phase=phase)
|
||||
loss.backward()
|
||||
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 3)
|
||||
self.optimizer.step()
|
||||
self.scheduler.step()
|
||||
|
||||
# statistics
|
||||
running_loss_tr += loss.item() * inputs.size(0)
|
||||
running_loss_eval += loss_eval.item() * inputs.size(0)
|
||||
else:
|
||||
outputs = self.model(inputs)
|
||||
with torch.no_grad():
|
||||
loss_eval, loss_values_eval = self.mt_loss(outputs, labels, phase='val')
|
||||
self.epoch_logs(phase, loss_eval, loss_values_eval, inputs, running_loss)
|
||||
|
||||
epoch_loss = running_loss_tr / self.dataset_sizes[phase]
|
||||
epoch_acc = running_loss_eval / self.dataset_sizes[phase] # Average distance in meters
|
||||
epoch_norm = float(norm_tr / self.dataset_sizes[phase])
|
||||
epoch_si = float(bi_tr / self.dataset_sizes[phase])
|
||||
if phase == 'train':
|
||||
epoch_losses_tr.append(epoch_loss)
|
||||
epoch_norms.append(epoch_norm)
|
||||
epoch_sis.append(epoch_si)
|
||||
else:
|
||||
epoch_losses_val.append(epoch_acc)
|
||||
self.cout_values(epoch, epoch_losses, running_loss)
|
||||
|
||||
if epoch % 5 == 1:
|
||||
sys.stdout.write('\r' + 'Epoch: {:.0f} Training Loss: {:.3f} Val Loss {:.3f}'
|
||||
.format(epoch, epoch_losses_tr[-1], epoch_losses_val[-1]) + '\t')
|
||||
# deep copy the model
|
||||
|
||||
# deep copy the model
|
||||
if phase == 'val' and epoch_acc < best_acc:
|
||||
best_acc = epoch_acc
|
||||
best_epoch = epoch
|
||||
best_model_wts = copy.deepcopy(self.model.state_dict())
|
||||
if epoch_losses['val'][self.val_task][-1] < best_acc:
|
||||
best_acc = epoch_losses['val'][self.val_task][-1]
|
||||
best_training_acc = epoch_losses['train']['all'][-1]
|
||||
best_epoch = epoch
|
||||
best_model_wts = copy.deepcopy(self.model.state_dict())
|
||||
|
||||
time_elapsed = time.time() - since
|
||||
print('\n\n' + '-'*120)
|
||||
print('\n\n' + '-' * 120)
|
||||
self.logger.info('Training:\nTraining complete in {:.0f}m {:.0f}s'
|
||||
.format(time_elapsed // 60, time_elapsed % 60))
|
||||
self.logger.info('Best validation Accuracy: {:.3f}'.format(best_acc))
|
||||
self.logger.info('Best training Accuracy: {:.3f}'.format(best_training_acc))
|
||||
self.logger.info('Best validation Accuracy for {}: {:.3f}'.format(self.val_task, best_acc))
|
||||
self.logger.info('Saved weights of the model at epoch: {}'.format(best_epoch))
|
||||
|
||||
if self.print_loss:
|
||||
epoch_losses_val_scaled = [x - 4 for x in epoch_losses_val] # to compare with L1 Loss
|
||||
plt.plot(epoch_losses_tr[10:], label='Training Loss')
|
||||
plt.plot(epoch_losses_val_scaled[10:], label='Validation Loss')
|
||||
plt.legend()
|
||||
plt.show()
|
||||
print_losses(epoch_losses)
|
||||
|
||||
# load best model weights
|
||||
self.model.load_state_dict(best_model_wts)
|
||||
|
||||
return best_epoch
|
||||
|
||||
def epoch_logs(self, phase, loss, loss_values, inputs, running_loss):
|
||||
|
||||
running_loss[phase]['all'] += loss.item() * inputs.size(0)
|
||||
for i, task in enumerate(self.tasks):
|
||||
running_loss[phase][task] += loss_values[i].item() * inputs.size(0)
|
||||
|
||||
def evaluate(self, load=False, model=None, debug=False):
|
||||
|
||||
# To load a model instead of using the trained one
|
||||
@ -215,13 +210,12 @@ class Trainer:
        # Average distance on training and test set after unnormalizing
        self.model.eval()
        dic_err = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0)))  # initialized to zero
        phase = 'val'
        batch_size = 5000
        dataset = KeypointsDataset(self.joints, phase=phase)
        dic_err['val']['sigmas'] = [0.] * len(self.tasks)
        dataset = KeypointsDataset(self.joints, phase='val')
        size_eval = len(dataset)
        start = 0
        with torch.no_grad():
            for end in range(batch_size, size_eval + batch_size, batch_size):
            for end in range(self.VAL_BS, size_eval + self.VAL_BS, self.VAL_BS):
                end = end if end < size_eval else size_eval
                inputs, labels, _, _ = dataset[start:end]
                start = end
@ -235,18 +229,9 @@

                # Forward pass
                outputs = self.model(inputs)
                if not self.baseline:
                    outputs = unnormalize_bi(outputs)

                dic_err[phase]['all'] = self.compute_stats(outputs, labels, dic_err[phase]['all'], size_eval)

        print('-' * 120)
        self.logger.info("Evaluation:\nAverage distance on the {} set: {:.2f}"
                         .format(phase, dic_err[phase]['all']['mean']))
        self.logger.info("Aleatoric Uncertainty: {:.2f}, inside the interval: {:.1f}%\n"
                         .format(dic_err[phase]['all']['bi'], dic_err[phase]['all']['conf_bi'] * 100))
                self.compute_stats(outputs, labels, dic_err['val'], size_eval, clst='all')

        self.cout_stats(dic_err['val'], size_eval, clst='all')
        # Evaluate performances on different clusters and save statistics
        for clst in self.clusters:
            inputs, labels, size_eval = dataset.get_cluster_annotations(clst)
@ -254,49 +239,99 @@

            # Forward pass on each cluster
            outputs = self.model(inputs)
            if not self.baseline:
                outputs = unnormalize_bi(outputs)

            dic_err[phase][clst] = self.compute_stats(outputs, labels, dic_err[phase][clst], size_eval)

            self.logger.info("{} error in cluster {} = {:.2f} for {} instances. "
                             "Aleatoric of {:.2f} with {:.1f}% inside the interval"
                             .format(phase, clst, dic_err[phase][clst]['mean'], size_eval,
                                     dic_err[phase][clst]['bi'], dic_err[phase][clst]['conf_bi'] * 100))
            self.compute_stats(outputs, labels, dic_err['val'], size_eval, clst=clst)
            self.cout_stats(dic_err['val'], size_eval, clst=clst)

        # Save the model and the results
        if self.save and not load:
        if not (self.no_save or load):
            torch.save(self.model.state_dict(), self.path_model)
            print('-' * 120)
            self.logger.info("\nmodel saved: {} \n".format(self.path_model))
        else:
            self.logger.info("\nmodel not saved\n")

        return dic_err, self.model

    def compute_stats(self, outputs, labels_orig, dic_err, size_eval):
    def compute_stats(self, outputs, labels, dic_err, size_eval, clst):
        """Compute mean, bi and max of torch tensors"""

        labels = labels_orig.view(-1, )
        mean_mu = float(self.criterion_eval(outputs[:, 0], labels).item())
        max_mu = float(torch.max(torch.abs((outputs[:, 0] - labels))).item())
        loss, loss_values = self.mt_loss(outputs, labels, phase='val')
        rel_frac = outputs.size(0) / size_eval

        if self.baseline:
            return (mean_mu, max_mu), (0, 0, 0)
        tasks = self.tasks[:-1] if self.tasks[-1] == 'aux' else self.tasks  # Exclude auxiliary

        mean_bi = torch.mean(outputs[:, 1]).item()
        for idx, task in enumerate(tasks):
            dic_err[clst][task] += float(loss_values[idx].item()) * (outputs.size(0) / size_eval)

        low_bound_bi = labels >= (outputs[:, 0] - outputs[:, 1])
        up_bound_bi = labels <= (outputs[:, 0] + outputs[:, 1])
        bools_bi = low_bound_bi & up_bound_bi
        conf_bi = float(torch.sum(bools_bi)) / float(bools_bi.shape[0])
        # Distance
        errs = torch.abs(extract_outputs(outputs)['d'] - extract_labels(labels)['d'])
        assert rel_frac > 0.99, "Variance of errors not supported with partial evaluation"

        dic_err['mean'] += mean_mu * (outputs.size(0) / size_eval)
        dic_err['bi'] += mean_bi * (outputs.size(0) / size_eval)
        dic_err['count'] += (outputs.size(0) / size_eval)
        dic_err['conf_bi'] += conf_bi * (outputs.size(0) / size_eval)
        # Uncertainty
        bis = extract_outputs(outputs)['bi'].cpu()
        bi = float(torch.mean(bis).item())
        bi_perc = float(torch.sum(errs <= bis)) / errs.shape[0]
        dic_err[clst]['bi'] += bi * rel_frac
        dic_err[clst]['bi%'] += bi_perc * rel_frac
        dic_err[clst]['std'] = errs.std()

        return dic_err
        # (Don't) save auxiliary task results
        if self.monocular:
            dic_err[clst]['aux'] = 0
            dic_err['sigmas'].append(0)
        else:
            acc_aux = get_accuracy(extract_outputs(outputs)['aux'], extract_labels(labels)['aux'])
            dic_err[clst]['aux'] += acc_aux * rel_frac

        if self.auto_tune_mtl:
            assert len(loss_values) == 2 * len(self.tasks)
            for i, _ in enumerate(self.tasks):
                dic_err['sigmas'][i] += float(loss_values[len(tasks) + i + 1].item()) * rel_frac

    def cout_stats(self, dic_err, size_eval, clst):
        if clst == 'all':
            print('-' * 120)
            self.logger.info("Evaluation, val set: \nAv. dist D: {:.2f} m with bi {:.2f} ({:.1f}%), \n"
                             "X: {:.1f} cm, Y: {:.1f} cm \nOri: {:.1f} "
                             "\n H: {:.1f} cm, W: {:.1f} cm, L: {:.1f} cm"
                             "\nAuxiliary Task: {:.1f} %, "
                             .format(dic_err[clst]['d'], dic_err[clst]['bi'], dic_err[clst]['bi%'] * 100,
                                     dic_err[clst]['x'] * 100, dic_err[clst]['y'] * 100,
                                     dic_err[clst]['ori'], dic_err[clst]['h'] * 100, dic_err[clst]['w'] * 100,
                                     dic_err[clst]['l'] * 100, dic_err[clst]['aux'] * 100))
            if self.auto_tune_mtl:
                self.logger.info("Sigmas: Z: {:.2f}, X: {:.2f}, Y:{:.2f}, H: {:.2f}, W: {:.2f}, L: {:.2f}, ORI: {:.2f}"
                                 " AUX:{:.2f}\n"
                                 .format(*dic_err['sigmas']))
        else:
            self.logger.info("Val err clust {} --> D:{:.2f}m, bi:{:.2f} ({:.1f}%), STD:{:.1f}m X:{:.1f} Y:{:.1f} "
                             "Ori:{:.1f}d, H: {:.0f} W: {:.0f} L:{:.0f} for {} pp. "
                             .format(clst, dic_err[clst]['d'], dic_err[clst]['bi'], dic_err[clst]['bi%'] * 100,
                                     dic_err[clst]['std'], dic_err[clst]['x'] * 100, dic_err[clst]['y'] * 100,
                                     dic_err[clst]['ori'], dic_err[clst]['h'] * 100, dic_err[clst]['w'] * 100,
                                     dic_err[clst]['l'] * 100, size_eval))

    def cout_values(self, epoch, epoch_losses, running_loss):

        string = '\r' + '{:.0f} '
        format_list = [epoch]
        for phase in running_loss:
            string = string + phase[0:1].upper() + ':'
            for el in running_loss['train']:
                loss = running_loss[phase][el] / self.dataset_sizes[phase]
                epoch_losses[phase][el].append(loss)
                if el == 'all':
                    string = string + ':{:.1f} '
                    format_list.append(loss)
                elif el in ('ori', 'aux'):
                    string = string + el + ':{:.1f} '
                    format_list.append(loss)
                else:
                    string = string + el + ':{:.0f} '
                    format_list.append(loss * 100)

        if epoch % 10 == 0:
            print(string.format(*format_list))


def debug_plots(inputs, labels):
@ -310,3 +345,20 @@ def debug_plots(inputs, labels):
    plt.figure(2)
    plt.hist(labels, bins='auto')
    plt.show()


def print_losses(epoch_losses):
    for idx, phase in enumerate(epoch_losses):
        for idx_2, el in enumerate(epoch_losses['train']):
            plt.figure(idx + idx_2)
            plt.plot(epoch_losses[phase][el][10:], label='{} Loss: {}'.format(phase, el))
            plt.savefig('figures/{}_loss_{}.png'.format(phase, el))
            plt.close()


def get_accuracy(outputs, labels):
    """From binary cross-entropy outputs to accuracy"""

    mask = outputs >= 0.5
    accuracy = 1. - torch.mean(torch.abs(mask.float() - labels)).item()
    return accuracy
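A minimal sketch (not part of the diff) of how the refactored `Trainer` is driven. The attribute names mirror the `args.*` fields read in `__init__` above; the joints path is hypothetical.

```python
from types import SimpleNamespace

args = SimpleNamespace(
    joints='data/arrays/joints-kitti.json',  # hypothetical path to the keypoints file
    epochs=100, bs=256, dropout=0.2, lr=0.002,
    sched_step=20, sched_gamma=1, hidden_size=256, n_stage=3,
    r_seed=1, no_save=True, print_loss=False,
    monocular=True,        # MonoLoco++ variant (34 inputs); False selects MonStereo (68)
    auto_tune_mtl=False,   # plain MultiTaskLoss with the fixed lambdas
)

trainer = Trainer(args)
best_epoch = trainer.train()        # returns the epoch with the best val loss
dic_err, model = trainer.evaluate()  # per-cluster errors and the trained model
```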
@ -1,7 +1,11 @@

from .iou import get_iou_matches, reorder_matches, get_iou_matrix
from .misc import get_task_error, get_pixel_error, append_cluster, open_annotations
from .kitti import check_conditions, get_category, split_training, parse_ground_truth, get_calibration
from .camera import xyz_from_distance, get_keypoints, pixel_to_camera, project_3d, open_image
from .iou import get_iou_matches, reorder_matches, get_iou_matrix, get_iou_matches_matrix
from .misc import get_task_error, get_pixel_error, append_cluster, open_annotations, make_new_directory, normalize_hwl
from .kitti import check_conditions, get_difficulty, split_training, parse_ground_truth, get_calibration, \
    factory_basename, factory_file, get_category, read_and_rewrite
from .camera import xyz_from_distance, get_keypoints, pixel_to_camera, project_3d, open_image, correct_angle, \
    to_spherical, to_cartesian, back_correct_angles, project_to_pixels
from .logs import set_logger
from ..utils.nuscenes import select_categories
from ..utils.stereo import mask_joint_disparity, average_locations, extract_stereo_matches, \
    verify_stereo, disparity_to_depth
@ -1,4 +1,6 @@

import math

import numpy as np
import torch
import torch.nn.functional as F
@ -30,10 +32,9 @@ def pixel_to_camera(uv_tensor, kk, z_met):
def project_to_pixels(xyz, kk):
    """Project a single point in space into the image"""
    xx, yy, zz = np.dot(kk, xyz)
    uu = int(xx / zz)
    vv = int(yy / zz)

    return uu, vv
    uu = round(xx / zz)
    vv = round(yy / zz)
    return [uu, vv]


def project_3d(box_obj, kk):
@ -180,3 +181,70 @@ def open_image(path_image):
    with open(path_image, 'rb') as f:
        pil_image = Image.open(f).convert('RGB')
        return pil_image


def correct_angle(yaw, xyz):
    """
    Correct the angle from egocentric (global / rotation_y)
    to allocentric (camera perspective / observation angle)
    and normalize it to -pi < angle < pi
    """
    correction = math.atan2(xyz[0], xyz[2])
    yaw = yaw - correction
    if yaw > np.pi:
        yaw -= 2 * np.pi
    elif yaw < -np.pi:
        yaw += 2 * np.pi
    assert -2 * np.pi <= yaw <= 2 * np.pi
    return math.sin(yaw), math.cos(yaw), yaw


def back_correct_angles(yaws, xyz):
    corrections = torch.atan2(xyz[:, 0], xyz[:, 2])
    yaws = yaws + corrections.view(-1, 1)
    mask_up = yaws > math.pi
    yaws[mask_up] -= 2 * math.pi
    mask_down = yaws < -math.pi
    yaws[mask_down] += 2 * math.pi
    assert torch.all(yaws < math.pi) & torch.all(yaws > - math.pi)
    return yaws


def to_spherical(xyz):
    """Convert from Cartesian to spherical coordinates"""
    xyz = np.array(xyz)
    r = np.linalg.norm(xyz)
    theta = math.atan2(xyz[2], xyz[0])

    assert 0 <= theta < math.pi  # 0 when positive x and no z
    psi = math.acos(xyz[1] / r)
    assert 0 <= psi <= math.pi
    return [r, theta, psi]


def to_cartesian(rtp, mode=None):
    """Convert from spherical to Cartesian coordinates"""

    if isinstance(rtp, torch.Tensor):
        if mode in ('x', 'y'):
            r = rtp[:, 2]
            t = rtp[:, 0]
            p = rtp[:, 1]
            if mode == 'x':
                x = r * torch.sin(p) * torch.cos(t)
                return x.view(-1, 1)

            if mode == 'y':
                y = r * torch.cos(p)
                return y.view(-1, 1)

        xyz = rtp.clone()
        xyz[:, 0] = rtp[:, 0] * torch.sin(rtp[:, 2]) * torch.cos(rtp[:, 1])
        xyz[:, 1] = rtp[:, 0] * torch.cos(rtp[:, 2])
        xyz[:, 2] = rtp[:, 0] * torch.sin(rtp[:, 2]) * torch.sin(rtp[:, 1])
        return xyz

    x = rtp[0] * math.sin(rtp[2]) * math.cos(rtp[1])
    y = rtp[0] * math.cos(rtp[2])
    z = rtp[0] * math.sin(rtp[2]) * math.sin(rtp[1])
    return [x, y, z]
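A quick sanity sketch (not part of the diff): `to_spherical` and `to_cartesian` above should invert each other for a point in front of the camera.

```python
import math
import numpy as np

xyz = [1.0, 0.5, 10.0]          # x (right), y (down), z (depth) in meters
r, theta, psi = to_spherical(xyz)
assert math.isclose(r, float(np.linalg.norm(xyz)))
assert np.allclose(to_cartesian([r, theta, psi]), xyz)
```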
@ -5,6 +5,11 @@ import numpy as np
def calculate_iou(box1, box2):

    # Calculate the (x1, y1, x2, y2) coordinates of the intersection of box1 and box2, and its area
    # box1 = [-3, 8.5, 3, 11.5]
    # box2 = [-3, 9.5, 3, 12.5]
    # box1 = [1086.84, 156.24, 1181.62, 319.12]
    # box2 = [1078.333357, 159.086347, 1193.771014, 322.239107]

    xi1 = max(box1[0], box2[0])
    yi1 = max(box1[1], box2[1])
    xi2 = min(box1[2], box2[2])
@ -34,7 +39,30 @@ def get_iou_matrix(boxes, boxes_gt):
    return iou_matrix


def get_iou_matches(boxes, boxes_gt, thresh):
def get_iou_matches(boxes, boxes_gt, iou_min=0.3):
    """From 2 sets of boxes and a minimum threshold, compute the matching indices for IoU matches"""

    matches = []
    used = []
    if not boxes or not boxes_gt:
        return []
    confs = [box[4] for box in boxes]

    indices = list(np.argsort(confs))
    for idx in indices[::-1]:
        box = boxes[idx]
        ious = []
        for idx_gt, box_gt in enumerate(boxes_gt):
            iou = calculate_iou(box, box_gt)
            ious.append(iou)
        idx_gt_max = int(np.argmax(ious))
        if (ious[idx_gt_max] >= iou_min) and (idx_gt_max not in used):
            matches.append((int(idx), idx_gt_max))
            used.append(idx_gt_max)
    return matches


def get_iou_matches_matrix(boxes, boxes_gt, thresh):
    """From 2 sets of boxes and a minimum threshold, compute the matching indices for IoU matches"""

    iou_matrix = get_iou_matrix(boxes, boxes_gt)
@ -65,6 +93,6 @@ def reorder_matches(matches, boxes, mode='left_rigth'):

    # Order the boxes based on their left-right position in the image
    ordered_boxes = np.argsort([box[0] for box in boxes])  # indices of boxes ordered from left to right
    matches_left = [idx for (idx, _) in matches]
    matches_left = [int(idx) for (idx, _) in matches]

    return [matches[matches_left.index(idx_boxes)] for idx_boxes in ordered_boxes if idx_boxes in matches_left]
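A worked check (not part of the diff) of `calculate_iou` on the first commented box pair above:

```python
box1 = [-3, 8.5, 3, 11.5]   # area 6 * 3 = 18
box2 = [-3, 9.5, 3, 12.5]   # area 6 * 3 = 18
# intersection: x in [-3, 3] (width 6), y in [9.5, 11.5] (height 2) -> 12
# union: 18 + 18 - 12 = 24, so IoU = 12 / 24 = 0.5
assert abs(calculate_iou(box1, box2) - 0.5) < 1e-9
```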
@ -1,5 +1,7 @@

import math
import os
import glob

import numpy as np

@ -76,32 +78,21 @@ def check_conditions(line, category, method, thresh=0.3):
    check = False
    assert category in ['pedestrian', 'cyclist', 'all']

    if category == 'all':
        category = ['pedestrian', 'person_sitting', 'cyclist']

    if method == 'gt':
        if category == 'all':
            categories_gt = ['Pedestrian', 'Person_sitting', 'Cyclist']
        else:
            categories_gt = [category.upper()[0] + category[1:]]  # Upper-case names
        if line.split()[0] in categories_gt:
        if line.split()[0].lower() in category:
            check = True

    elif method in ('m3d', '3dop'):
        conf = float(line[15])
        if line[0] == category and conf >= thresh:
            check = True

    elif method == 'monodepth':
        check = True

    else:
        zz = float(line[13])
        conf = float(line[15])
        if conf >= thresh and 0.5 < zz < 70:
        if line[0].lower() in category and conf >= thresh:
            check = True

    return check


def get_category(box, trunc, occ):
def get_difficulty(box, trunc, occ):

    hh = box[3] - box[1]
    if hh >= 40 and trunc <= 0.15 and occ <= 0:
@ -128,31 +119,150 @@ def split_training(names_gt, path_train, path_val):
    for line in f_val:
        set_val.add(line[:-1] + '.txt')

    set_train = tuple(set_gt.intersection(set_train))
    set_train = set_gt.intersection(set_train)
    set_train.remove('000518.txt')
    set_train.remove('005692.txt')
    set_train.remove('003009.txt')
    set_train = tuple(set_train)
    set_val = tuple(set_gt.intersection(set_val))
    assert set_train and set_val, "No validation or training annotations"
    return set_train, set_val


def parse_ground_truth(path_gt, category):
def parse_ground_truth(path_gt, category, spherical=False, verbose=False):
    """Parse KITTI ground truth files"""
    from ..utils import correct_angle, to_spherical

    boxes_gt = []
    dds_gt = []
    zzs_gt = []
    ys = []
    truncs_gt = []  # Float from 0 to 1
    occs_gt = []  # Either 0, 1, 2, 3: fully visible, partly occluded, largely occluded, unknown
    boxes_3d = []
    lines = []

    with open(path_gt, "r") as f_gt:
        for line_gt in f_gt:
            line = line_gt.split()
            if check_conditions(line_gt, category, method='gt'):
                truncs_gt.append(float(line_gt.split()[1]))
                occs_gt.append(int(line_gt.split()[2]))
                boxes_gt.append([float(x) for x in line_gt.split()[4:8]])
                loc_gt = [float(x) for x in line_gt.split()[11:14]]
                wlh = [float(x) for x in line_gt.split()[8:11]]
                boxes_3d.append(loc_gt + wlh)
                zzs_gt.append(loc_gt[2])
                dds_gt.append(math.sqrt(loc_gt[0] ** 2 + loc_gt[1] ** 2 + loc_gt[2] ** 2))
                truncs_gt.append(float(line[1]))
                occs_gt.append(int(line[2]))
                boxes_gt.append([float(x) for x in line[4:8]])
                xyz = [float(x) for x in line[11:14]]
                hwl = [float(x) for x in line[8:11]]
                dd = float(math.sqrt(xyz[0] ** 2 + xyz[1] ** 2 + xyz[2] ** 2))
                yaw = float(line[14])
                assert - math.pi <= yaw <= math.pi
                alpha = float(line[3])
                sin, cos, yaw_corr = correct_angle(yaw, xyz)
                assert min(abs(-yaw_corr - alpha), (abs(yaw_corr - alpha))) < 0.15, "more than 10 degrees of error"
                if spherical:
                    rtp = to_spherical(xyz)
                    loc = rtp[1:3] + xyz[2:3] + rtp[0:1]  # [theta, psi, z, r]
                else:
                    loc = xyz + [dd]
                # cat = 0 if line[0] in ('Pedestrian', 'Person_sitting') else 1
                if line[0] in ('Pedestrian', 'Person_sitting'):
                    cat = 0
                else:
                    cat = 1
                output = loc + hwl + [sin, cos, yaw, cat]
                ys.append(output)
                if verbose:
                    lines.append(line_gt)
    if verbose:
        return boxes_gt, ys, truncs_gt, occs_gt, lines
    return boxes_gt, ys, truncs_gt, occs_gt

    return boxes_gt, boxes_3d, dds_gt, zzs_gt, truncs_gt, occs_gt


def factory_basename(dir_ann, dir_gt):
    """Return all the basenames in the annotations folder corresponding to validation images"""

    # Extract ground-truth validation images
    names_gt = tuple(os.listdir(dir_gt))
    path_train = os.path.join('splits', 'kitti_train.txt')
    path_val = os.path.join('splits', 'kitti_val.txt')
    _, set_val_gt = split_training(names_gt, path_train, path_val)
    set_val_gt = {os.path.basename(x).split('.')[0] for x in set_val_gt}

    # Extract pifpaf files corresponding to validation images
    list_ann = glob.glob(os.path.join(dir_ann, '*.json'))
    set_basename = {os.path.basename(x).split('.')[0] for x in list_ann}
    set_val = set_basename.intersection(set_val_gt)
    assert set_val, "Missing json annotation files to create txt files for the KITTI dataset"
    return set_val


def factory_file(path_calib, dir_ann, basename, mode='left'):
    """Choose the annotation and the calibration files. Stereo option with ite = 1"""

    assert mode in ('left', 'right')
    p_left, p_right = get_calibration(path_calib)

    if mode == 'left':
        kk, tt = p_left[:]
        path_ann = os.path.join(dir_ann, basename + '.png.predictions.json')

    else:
        kk, tt = p_right[:]
        path_ann = os.path.join(dir_ann + '_right', basename + '.png.predictions.json')

    from ..utils import open_annotations
    annotations = open_annotations(path_ann)

    return annotations, kk, tt


def get_category(keypoints, path_byc):
    """Find the category of each set of keypoints"""

    from ..utils import open_annotations
    dic_byc = open_annotations(path_byc)
    boxes_byc = dic_byc['boxes'] if dic_byc else []
    boxes_ped = make_lower_boxes(keypoints)

    matches = get_matches_bikes(boxes_ped, boxes_byc)
    list_byc = [match[0] for match in matches]
    categories = [1.0 if idx in list_byc else 0.0 for idx, _ in enumerate(boxes_ped)]
    return categories


def get_matches_bikes(boxes_ped, boxes_byc):
    from . import get_iou_matches_matrix
    matches = get_iou_matches_matrix(boxes_ped, boxes_byc, thresh=0.15)
    matches_b = []
    for idx, idx_byc in matches:
        box_ped = boxes_ped[idx]
        box_byc = boxes_byc[idx_byc]
        width_ped = box_ped[2] - box_ped[0]
        width_byc = box_byc[2] - box_byc[0]
        center_ped = (box_ped[2] + box_ped[0]) / 2
        center_byc = (box_byc[2] + box_byc[0]) / 2
        if abs(center_ped - center_byc) < min(width_ped, width_byc) / 4:
            matches_b.append((idx, idx_byc))
    return matches_b


def make_lower_boxes(keypoints):
    lower_boxes = []
    keypoints = np.array(keypoints)
    for kps in keypoints:
        lower_boxes.append([min(kps[0, 9:]), min(kps[1, 9:]), max(kps[0, 9:]), max(kps[1, 9:])])
    return lower_boxes


def read_and_rewrite(path_orig, path_new):
    """Read and rewrite the same txt file. If the file is not found, create an empty one"""
    try:
        with open(path_orig, "r") as f_gt:
            with open(path_new, "w+") as ff:
                for line_gt in f_gt:
                    # if check_conditions(line_gt, category='all', method='gt'):
                    line = line_gt.split()
                    hwl = [float(x) for x in line[8:11]]
                    hwl = " ".join([str(i)[0:4] for i in hwl])
                    temp_1 = " ".join([str(i) for i in line[0: 8]])
                    temp_2 = " ".join([str(i) for i in line[11:]])
                    line_new = temp_1 + ' ' + hwl + ' ' + temp_2 + '\n'
                    ff.write("%s" % line_new)
    except FileNotFoundError:
        ff = open(path_new, "a+")
        ff.close()
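A reference sketch (not part of the diff) of the KITTI label fields indexed by `parse_ground_truth` above, shown on a hypothetical example line; the field layout follows the KITTI devkit convention:

```python
# Fields, 0-indexed:
# 0 type | 1 truncation | 2 occlusion | 3 alpha | 4:8 2D bbox (x1 y1 x2 y2)
# | 8:11 h w l | 11:14 x y z (camera frame) | 14 rotation_y
line = ("Pedestrian 0.00 0 -0.20 712.40 143.00 810.73 307.92 "
        "1.89 0.48 1.20 1.84 1.47 8.41 0.01").split()
xyz = [float(x) for x in line[11:14]]   # [1.84, 1.47, 8.41]
hwl = [float(x) for x in line[8:11]]    # [1.89, 0.48, 1.20]
yaw = float(line[14])                   # 0.01
```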
@ -1,28 +1,33 @@
import json
import shutil
import os

import numpy as np


def append_cluster(dic_jo, phase, xx, dd, kps):
def append_cluster(dic_jo, phase, xx, ys, kps):
    """Append the annotation based on its distance"""

    if dd <= 10:
    if ys[3] <= 10:
        dic_jo[phase]['clst']['10']['kps'].append(kps)
        dic_jo[phase]['clst']['10']['X'].append(xx)
        dic_jo[phase]['clst']['10']['Y'].append([dd])
        dic_jo[phase]['clst']['10']['Y'].append(ys)
    elif dd <= 20:
    elif ys[3] <= 20:
        dic_jo[phase]['clst']['20']['kps'].append(kps)
        dic_jo[phase]['clst']['20']['X'].append(xx)
        dic_jo[phase]['clst']['20']['Y'].append([dd])
        dic_jo[phase]['clst']['20']['Y'].append(ys)
    elif dd <= 30:
    elif ys[3] <= 30:
        dic_jo[phase]['clst']['30']['kps'].append(kps)
        dic_jo[phase]['clst']['30']['X'].append(xx)
        dic_jo[phase]['clst']['30']['Y'].append([dd])
        dic_jo[phase]['clst']['30']['Y'].append(ys)
    elif ys[3] <= 40:
        dic_jo[phase]['clst']['40']['kps'].append(kps)
        dic_jo[phase]['clst']['40']['X'].append(xx)
        dic_jo[phase]['clst']['40']['Y'].append(ys)
    else:
        dic_jo[phase]['clst']['>30']['kps'].append(kps)
        dic_jo[phase]['clst']['>30']['X'].append(xx)
        dic_jo[phase]['clst']['>30']['Y'].append([dd])
        dic_jo[phase]['clst']['>40']['kps'].append(kps)
        dic_jo[phase]['clst']['>40']['X'].append(xx)
        dic_jo[phase]['clst']['>40']['Y'].append(ys)


def get_task_error(dd):
@ -46,3 +51,24 @@ def open_annotations(path_ann):
    except FileNotFoundError:
        annotations = []
    return annotations


def make_new_directory(dir_out):
    """Remove the output directory if it already exists (to avoid residual txt files)"""
    if os.path.exists(dir_out):
        shutil.rmtree(dir_out)
    os.makedirs(dir_out)
    print("Created empty output directory {} ".format(dir_out))


def normalize_hwl(lab):

    AV_H = 1.72
    AV_W = 0.75
    AV_L = 0.68
    HLW_STD = 0.1

    hwl = lab[4:7]
    hwl_new = list((np.array(hwl) - np.array([AV_H, AV_W, AV_L])) / HLW_STD)
    lab_new = lab[0:4] + hwl_new + lab[7:]
    return lab_new
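A worked example (not part of the diff): `normalize_hwl` standardizes the box dimensions with the average sizes and std hard-coded above.

```python
lab = [0.0, 0.0, 8.0, 8.2,       # location [x, y, z, d]
       1.80, 0.80, 0.70,         # h, w, l in meters
       0.0, 1.0, 0.0, 0]         # sin, cos, yaw, cat
out = normalize_hwl(lab)
# h: (1.80 - 1.72) / 0.1 = 0.8; w: (0.80 - 0.75) / 0.1 = 0.5; l: (0.70 - 0.68) / 0.1 = 0.2
assert [round(v, 3) for v in out[4:7]] == [0.8, 0.5, 0.2]
```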
198
monoloco/utils/stereo.py
Normal file
@ -0,0 +1,198 @@
import warnings

import numpy as np


BF = 0.54 * 721
z_min = 4
z_max = 60
D_MIN = BF / z_max
D_MAX = BF / z_min


def extract_stereo_matches(keypoint, keypoints_r, zz, phase='train', seed=0, method=None):
    """Return binaries representing the match between the pose in the left image and the ones in the right"""

    stereo_matches = []
    cnt_ambiguous = 0
    if method == 'mask':
        conf_min = 0.1
    else:
        conf_min = 0.2
    avgs_x_l, avgs_x_r, disparities_x, disparities_y = average_locations(keypoint, keypoints_r, conf_min=conf_min)
    avg_disparities = [abs(float(l) - BF / zz - float(r)) for l, r in zip(avgs_x_l, avgs_x_r)]
    idx_matches = np.argsort(avg_disparities)
    # error_max_stereo = 1 * 0.0028 * zz**2 + 0.2  # 2m at 20 meters of depth + 20 cm of offset
    error_max_stereo = 0.2 * zz + 0.2  # 2m at 20 meters of depth + 20 cm of offset
    error_min_mono = 0.25 * zz + 0.2
    error_max_mono = 1 * zz + 0.5
    used = []
    # Add positive and negative samples
    for idx, idx_match in enumerate(idx_matches):
        match = avg_disparities[idx_match]
        zz_stereo, flag = disparity_to_depth(match + BF / zz)

        # Conditions to accept a stereo match
        conditions = (idx == 0
                      and match < depth_to_pixel_error(zz, depth_error=error_max_stereo)
                      and flag
                      and verify_stereo(zz_stereo, zz, disparities_x[idx_match], disparities_y[idx_match]))

        # Positive matches
        if conditions:
            stereo_matches.append((idx_match, 1))
        # Ambiguous
        elif match < depth_to_pixel_error(zz, depth_error=error_min_mono):
            cnt_ambiguous += 1

        # Disparity-range negative
        # elif D_MIN < match + BF / zz < D_MAX:
        #     stereo_matches.append((idx_match, 0))

        elif phase == 'val' \
                and match < depth_to_pixel_error(zz, depth_error=error_max_mono) \
                and not stereo_matches \
                and zz < 40:
            stereo_matches.append((idx_match, 0))

        # Hard-negative for training
        elif phase == 'train' \
                and match < depth_to_pixel_error(zz, depth_error=error_max_mono) \
                and len(stereo_matches) < 3:
            stereo_matches.append((idx_match, 0))

        # Easy-negative
        elif phase == 'train' \
                and len(stereo_matches) < 3:
            np.random.seed(seed + idx)
            num = np.random.randint(idx, len(idx_matches))
            if idx_matches[num] not in used:
                stereo_matches.append((idx_matches[num], 0))

        # elif len(stereo_matches) < 1:
        #     stereo_matches.append((idx_match, 0))

        # Easy-negative
        # elif len(idx_matches) > len(stereo_matches):
        #     stereo_matches.append((idx_matches[-1], 0))
        #     break  # matches are ordered
        else:
            break
        used.append(idx_match)

    # Make sure each left pose has at least a negative match
    # if not stereo_matches:
    #     stereo_matches.append((idx_matches[0], 0))
    return stereo_matches, cnt_ambiguous


def depth_to_pixel_error(zz, depth_error=1):
    """
    Calculate the pixel error at a certain depth due to a depth error, according to:
    e_d = b * f * e_z / (z**2)
    """
    e_d = BF * depth_error / (zz**2)
    return e_d


def mask_joint_disparity(keypoints, keypoints_r):
    """Filter joints based on confidence and the interquartile range of the distribution"""
    # TODO: merge with average_locations
    CONF_MIN = 0.3
    with warnings.catch_warnings(), np.errstate(invalid='ignore'):
        disparity_x_mask = np.empty((keypoints.shape[0], keypoints_r.shape[0], 17))
        disparity_y_mask = np.empty((keypoints.shape[0], keypoints_r.shape[0], 17))
        avg_disparity = np.empty((keypoints.shape[0], keypoints_r.shape[0]))

        for idx, kps in enumerate(keypoints):
            disparity_x = kps[0, :] - keypoints_r[:, 0, :]
            disparity_y = kps[1, :] - keypoints_r[:, 1, :]

            # Mask for low confidence
            mask_conf_left = kps[2, :] > CONF_MIN
            mask_conf_right = keypoints_r[:, 2, :] > CONF_MIN
            mask_conf = mask_conf_left & mask_conf_right
            disparity_x_conf = np.where(mask_conf, disparity_x, np.nan)
            disparity_y_conf = np.where(mask_conf, disparity_y, np.nan)

            # Mask outliers using the interquartile range
            mask_outlier = interquartile_mask(disparity_x_conf)
            x_mask_row = np.where(mask_outlier, disparity_x_conf, np.nan)
            y_mask_row = np.where(mask_outlier, disparity_y_conf, np.nan)
            avg_row = np.nanmedian(x_mask_row, axis=1)  # ignore the nan

            # Append
            disparity_x_mask[idx] = x_mask_row
            disparity_y_mask[idx] = y_mask_row
            avg_disparity[idx] = avg_row

        return avg_disparity, disparity_x_mask, disparity_y_mask


def average_locations(keypoint, keypoints_r, conf_min=0.2):
    """
    Extract the absolute average location of keypoints.
    INPUT: arrays of (1, 3, 17) & (m, 3, 17)
    OUTPUT: 2 arrays of (m).
    The left keypoint will have different absolute positions based on the right keypoints it is paired with
    """
    keypoint, keypoints_r = np.array(keypoint), np.array(keypoints_r)
    assert keypoints_r.shape[0] > 0, "No right keypoints"
    with warnings.catch_warnings(), np.errstate(invalid='ignore'):

        # Mask by confidence
        mask_l_conf = keypoint[0, 2, :] > conf_min
        mask_r_conf = keypoints_r[:, 2, :] > conf_min
        abs_x_l = np.where(mask_l_conf, keypoint[0, 0:1, :], np.nan)
        abs_x_r = np.where(mask_r_conf, keypoints_r[:, 0, :], np.nan)

        # Mask by interquartile range
        mask_l_iqr = interquartile_mask(abs_x_l)
        mask_r_iqr = interquartile_mask(abs_x_r)

        # Combine masks
        mask = mask_l_iqr & mask_r_iqr

        # Compute absolute locations and relative disparities
        x_l = np.where(mask, abs_x_l, np.nan)
        x_r = np.where(mask, abs_x_r, np.nan)
        x_disp = x_l - x_r
        y_disp = np.where(mask, keypoint[0, 1, :] - keypoints_r[:, 1, :], np.nan)
        avgs_x_l = np.nanmedian(x_l, axis=1)
        avgs_x_r = np.nanmedian(x_r, axis=1)

        return avgs_x_l, avgs_x_r, x_disp, y_disp


def interquartile_mask(distribution):
    quartile_1, quartile_3 = np.nanpercentile(distribution, [25, 75], axis=1)
    iqr = quartile_3 - quartile_1
    lower_bound = quartile_1 - (iqr * 1.5)
    upper_bound = quartile_3 + (iqr * 1.5)
    return (distribution < upper_bound.reshape(-1, 1)) & (distribution > lower_bound.reshape(-1, 1))


def disparity_to_depth(avg_disparity):

    try:
        zz_stereo = 0.54 * 721. / float(avg_disparity)
        flag = True
    except (ZeroDivisionError, ValueError):  # all-nan slices or zero division
        zz_stereo = np.nan
        flag = False
    return zz_stereo, flag


def verify_stereo(zz_stereo, zz_mono, disparity_x, disparity_y):
    """Verify disparities based on the coefficient of variation, the maximum y difference and the z difference wrt MonoLoco"""

    # COV_MIN = 0.1
    y_max_difference = (80 / zz_mono)
    z_max_difference = 1 * zz_mono

    cov = float(np.nanstd(disparity_x) / np.abs(np.nanmean(disparity_x)))  # coefficient of variation
    avg_disparity_y = np.nanmedian(disparity_y)

    return abs(zz_stereo - zz_mono) < z_max_difference and avg_disparity_y < y_max_difference and 1 < zz_stereo < 80
    # cov < COV_MIN and \
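A quick numeric check (not part of the diff) of the stereo constants above:

```python
BF = 0.54 * 721                    # baseline (0.54 m) times focal length (721 px) for KITTI
assert round(BF / 4, 1) == 97.3    # D_MAX: disparity at z_min = 4 m
assert round(BF / 60, 1) == 6.5    # D_MIN: disparity at z_max = 60 m
# depth_to_pixel_error: a 1 m depth error at z = 20 m is worth about one pixel
assert abs(BF * 1 / 20**2 - 0.97) < 0.01
```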
@ -1,3 +1,3 @@

from .printer import Printer
from .figures import show_results, show_spread, show_task_error
from .figures import show_results, show_spread, show_task_error, show_box_plot
@ -11,109 +11,115 @@ from matplotlib.patches import Ellipse
|
||||
from ..utils import get_task_error, get_pixel_error
|
||||
|
||||
|
||||
def show_results(dic_stats, show=False, save=False, stereo=False):
|
||||
FONTSIZE = 15
|
||||
FIGSIZE = (9.6, 7.2)
|
||||
DPI = 200
|
||||
GRID_WIDTH = 0.5
|
||||
|
||||
|
||||
def show_results(dic_stats, clusters, net, dir_fig, show=False, save=False):
|
||||
"""
|
||||
Visualize error as function of the distance and compare it with target errors based on human height analyses
|
||||
"""
|
||||
|
||||
dir_out = 'docs'
|
||||
phase = 'test'
|
||||
x_min = 0
|
||||
x_max = 38
|
||||
x_min = 3
|
||||
# x_max = 42
|
||||
x_max = 31
|
||||
y_min = 0
|
||||
y_max = 4.7
|
||||
xx = np.linspace(0, 60, 100)
|
||||
excl_clusters = ['all', '50', '>50', 'easy', 'moderate', 'hard']
|
||||
clusters = tuple([clst for clst in dic_stats[phase]['monoloco'] if clst not in excl_clusters])
|
||||
yy_gender = get_task_error(xx)
|
||||
|
||||
styles = printing_styles(stereo)
|
||||
for idx_style, (key, style) in enumerate(styles.items()):
|
||||
plt.figure(idx_style)
|
||||
plt.grid(linewidth=0.2)
|
||||
# y_max = 2.2
|
||||
y_max = 3.5 if net == 'monstereo' else 2.7
|
||||
xx = np.linspace(x_min, x_max, 100)
|
||||
excl_clusters = ['all', 'easy', 'moderate', 'hard', '49']
|
||||
clusters = [clst for clst in clusters if clst not in excl_clusters]
|
||||
styles = printing_styles(net)
|
||||
for idx_style, style in enumerate(styles.items()):
|
||||
plt.figure(idx_style, figsize=FIGSIZE)
|
||||
plt.grid(linewidth=GRID_WIDTH)
|
||||
plt.xlim(x_min, x_max)
|
||||
plt.ylim(y_min, y_max)
|
||||
plt.xlabel("Ground-truth distance [m]")
|
||||
plt.ylabel("Average localization error [m]")
|
||||
for idx, method in enumerate(style['methods']):
|
||||
errs = [dic_stats[phase][method][clst]['mean'] for clst in clusters]
|
||||
plt.xlabel("Ground-truth distance [m]", fontsize=FONTSIZE)
|
||||
plt.ylabel("Average localization error (ALE) [m]", fontsize=FONTSIZE)
|
||||
for idx, method in enumerate(styles['methods']):
|
||||
errs = [dic_stats[phase][method][clst]['mean'] for clst in clusters[:-1]] # last cluster only a bound
|
||||
cnts = [dic_stats[phase][method][clst]['cnt'] for clst in clusters[:-1]] # last cluster only a bound
|
||||
assert errs, "method %s empty" % method
|
||||
xxs = get_distances(clusters)
|
||||
|
||||
plt.plot(xxs, errs, marker=style['mks'][idx], markersize=style['mksizes'][idx], linewidth=style['lws'][idx],
|
||||
label=style['labels'][idx], linestyle=style['lstyles'][idx], color=style['colors'][idx])
|
||||
plt.plot(xx, yy_gender, '--', label="Task error", color='lightgreen', linewidth=2.5)
|
||||
if key == 'stereo':
|
||||
yy_stereo = get_pixel_error(xx)
|
||||
plt.plot(xx, yy_stereo, linewidth=1.7, color='k', label='Pixel error')
|
||||
plt.plot(xxs, errs, marker=styles['mks'][idx], markersize=styles['mksizes'][idx],
|
||||
linewidth=styles['lws'][idx],
|
||||
label=styles['labels'][idx], linestyle=styles['lstyles'][idx], color=styles['colors'][idx])
|
||||
if method in ('monstereo', 'monoloco_pp', 'pseudo-lidar'):
|
||||
for i, x in enumerate(xxs):
|
||||
plt.text(x, errs[i] - 0.1, str(cnts[i]), fontsize=FONTSIZE)
|
||||
if net == 'monoloco_pp':
|
||||
plt.plot(xx, get_task_error(xx), '--', label="Task error", color='lightgreen', linewidth=2.5)
|
||||
# if stereo:
|
||||
# yy_stereo = get_pixel_error(xx)
|
||||
# plt.plot(xx, yy_stereo, linewidth=1.4, color='k', label='Pixel error')
|
||||
|
||||
plt.legend(loc='upper left')
|
||||
if save:
|
||||
path_fig = os.path.join(dir_out, 'results_' + key + '.png')
|
||||
plt.savefig(path_fig)
|
||||
print("Figure of results " + key + " saved in {}".format(path_fig))
|
||||
if show:
|
||||
plt.show()
|
||||
plt.close()
|
||||
plt.legend(loc='upper left', prop={'size': FONTSIZE})
|
||||
plt.xticks(fontsize=FONTSIZE)
|
||||
plt.yticks(fontsize=FONTSIZE)
|
||||
if save:
|
||||
plt.tight_layout()
|
||||
path_fig = os.path.join(dir_fig, 'results_' + net + '.png')
|
||||
plt.savefig(path_fig, dpi=DPI)
|
||||
print("Figure of results " + net + " saved in {}".format(path_fig))
|
||||
if show:
|
||||
plt.show()
|
||||
plt.close('all')
|
||||
|
||||
|
||||
def show_spread(dic_stats, show=False, save=False):
|
||||
def show_spread(dic_stats, clusters, net, dir_fig, show=False, save=False):
|
||||
"""Predicted confidence intervals and task error as a function of ground-truth distance"""
|
||||
|
||||
assert net in ('monoloco_pp', 'monstereo'), "network not recognized"
|
||||
phase = 'test'
|
||||
dir_out = 'docs'
|
||||
excl_clusters = ['all', '50', '>50', 'easy', 'moderate', 'hard']
|
||||
clusters = tuple([clst for clst in dic_stats[phase]['our'] if clst not in excl_clusters])
|
||||
excl_clusters = ['all', 'easy', 'moderate', 'hard', '49']
|
||||
clusters = [clst for clst in clusters if clst not in excl_clusters]
|
||||
x_min = 3
|
||||
x_max = 31
|
||||
y_min = 0
|
||||
|
||||
plt.figure(2)
|
||||
fig, ax = plt.subplots(2, sharex=True)
|
||||
plt.xlabel("Distance [m]")
|
||||
plt.ylabel("Aleatoric uncertainty [m]")
|
||||
ar = 0.5 # Change aspect ratio of ellipses
|
||||
scale = 1.5 # Factor to scale ellipses
|
||||
rec_c = 0 # Center of the rectangle
|
||||
plots_line = True
|
||||
|
||||
bbs = np.array([dic_stats[phase]['our'][key]['std_ale'] for key in clusters])
|
||||
plt.figure(2, figsize=FIGSIZE)
|
||||
xxs = get_distances(clusters)
|
||||
yys = get_task_error(np.array(xxs))
|
||||
ax[1].plot(xxs, bbs, marker='s', color='b', label="Spread b")
|
||||
ax[1].plot(xxs, yys, '--', color='lightgreen', label="Task error", linewidth=2.5)
|
||||
yys_up = [rec_c + ar / 2 * scale * yy for yy in yys]
|
||||
bbs_up = [rec_c + ar / 2 * scale * bb for bb in bbs]
|
||||
yys_down = [rec_c - ar / 2 * scale * yy for yy in yys]
|
||||
bbs_down = [rec_c - ar / 2 * scale * bb for bb in bbs]
|
||||
bbs = np.array([dic_stats[phase][net][key]['std_ale'] for key in clusters[:-1]])
|
||||
xx = np.linspace(x_min, x_max, 100)
|
||||
if net == 'monoloco_pp':
|
||||
y_max = 2.7
|
||||
color = 'deepskyblue'
|
||||
epis = np.array([dic_stats[phase][net][key]['std_epi'] for key in clusters[:-1]])
|
||||
plt.plot(xxs, epis, marker='o', color='coral', linewidth=4, markersize=8, label="Combined uncertainty (\u03C3)")
|
||||
else:
|
||||
y_max = 3.5
|
||||
color = 'b'
|
||||
plt.plot(xx, get_pixel_error(xx), linewidth=2.5, color='k', label='Pixel error')
|
||||
plt.plot(xxs, bbs, marker='s', color=color, label="Aleatoric uncertainty (b)", linewidth=4, markersize=8)
|
||||
plt.plot(xx, get_task_error(xx), '--', label="Task error (monocular bound)", color='lightgreen', linewidth=4)
|
||||
|
||||
if plots_line:
|
||||
ax[0].plot(xxs, yys_up, '--', color='lightgreen', markersize=5, linewidth=1.4)
|
||||
ax[0].plot(xxs, yys_down, '--', color='lightgreen', markersize=5, linewidth=1.4)
|
||||
ax[0].plot(xxs, bbs_up, marker='s', color='b', markersize=5, linewidth=0.7)
|
||||
ax[0].plot(xxs, bbs_down, marker='s', color='b', markersize=5, linewidth=0.7)
|
||||
plt.xlabel("Ground-truth distance [m]", fontsize=FONTSIZE)
|
||||
plt.ylabel("Uncertainty [m]", fontsize=FONTSIZE)
|
||||
plt.xlim(x_min, x_max)
|
||||
plt.ylim(y_min, y_max)
|
||||
plt.grid(linewidth=GRID_WIDTH)
|
||||
plt.legend(prop={'size': FONTSIZE})
|
||||
plt.xticks(fontsize=FONTSIZE)
|
||||
plt.yticks(fontsize=FONTSIZE)
|
||||
|
||||
for idx, xx in enumerate(xxs):
|
||||
te = Ellipse((xx, rec_c), width=yys[idx] * ar * scale, height=scale, angle=90, color='lightgreen', fill=True)
|
||||
bi = Ellipse((xx, rec_c), width=bbs[idx] * ar * scale, height=scale, angle=90, color='b', linewidth=1.8,
|
||||
fill=False)
|
||||
|
||||
ax[0].add_patch(te)
|
||||
ax[0].add_patch(bi)
|
||||
|
||||
fig.subplots_adjust(hspace=0.1)
|
||||
plt.setp([aa.get_yticklabels() for aa in fig.axes[:-1]], visible=False)
|
||||
plt.legend()
|
||||
if save:
|
||||
path_fig = os.path.join(dir_out, 'spread.png')
|
||||
plt.savefig(path_fig)
|
||||
plt.tight_layout()
|
||||
path_fig = os.path.join(dir_fig, 'spread_' + net + '.png')
|
||||
plt.savefig(path_fig, dpi=DPI)
|
||||
print("Figure of confidence intervals saved in {}".format(path_fig))
|
||||
if show:
|
||||
plt.show()
|
||||
plt.close()
|
||||
plt.close('all')
|
||||
|
||||
|
||||
def show_task_error(show, save):
|
||||
def show_task_error(dir_fig, show, save):
|
||||
"""Task error figure"""
|
||||
plt.figure(3)
|
||||
dir_out = 'docs'
|
||||
plt.figure(3, figsize=FIGSIZE)
|
||||
xx = np.linspace(0.1, 50, 100)
|
||||
mu_men = 178
|
||||
mu_women = 165
|
||||
@ -128,7 +134,7 @@ def show_task_error(show, save):
|
||||
yy_young_female = target_error(xx, mm_young_female)
|
||||
yy_gender = target_error(xx, mm_gmm)
|
||||
yy_stereo = get_pixel_error(xx)
|
||||
plt.grid(linewidth=0.3)
|
||||
plt.grid(linewidth=GRID_WIDTH)
|
||||
plt.plot(xx, yy_young_male, linestyle='dotted', linewidth=2.1, color='b', label='Adult/young male')
|
||||
plt.plot(xx, yy_young_female, linestyle='dotted', linewidth=2.1, color='darkorange', label='Adult/young female')
|
||||
    plt.plot(xx, yy_gender, '--', color='lightgreen', linewidth=2.8, label='Generic adult (task error)')
@ -139,20 +145,21 @@ def show_task_error(show, save):
    plt.xlabel("Ground-truth distance from the camera $d_{gt}$ [m]")
    plt.ylabel("Localization error $\hat{e}$ due to human height variation [m]")  # pylint: disable=W1401
    plt.legend(loc=(0.01, 0.55))  # Location from 0 to 1 from lower left
    plt.xticks(fontsize=FONTSIZE)
    plt.yticks(fontsize=FONTSIZE)
    if save:
        path_fig = os.path.join(dir_out, 'task_error.png')
        plt.savefig(path_fig)
        path_fig = os.path.join(dir_fig, 'task_error.png')
        plt.savefig(path_fig, dpi=DPI)
        print("Figure of task error saved in {}".format(path_fig))
    if show:
        plt.show()
    plt.close()
    plt.close('all')


def show_method(save):
def show_method(save, dir_out='data/figures'):
    """ method figure"""
    dir_out = 'docs'
    std_1 = 0.75
    fig = plt.figure(1)
    fig = plt.figure(4, figsize=FIGSIZE)
    ax = fig.add_subplot(1, 1, 1)
    ell_3 = Ellipse((0, 2), width=std_1 * 2, height=0.3, angle=-90, color='b', fill=False, linewidth=2.5)
    ell_4 = Ellipse((0, 2), width=std_1 * 3, height=0.3, angle=-90, color='r', fill=False,
@ -164,14 +171,48 @@ def show_method(save):
    plt.plot([0, -3], [0, 4], 'k--')
    plt.xlim(-3, 3)
    plt.ylim(0, 3.5)
    plt.xticks([])
    plt.yticks([])
    plt.xticks(fontsize=FONTSIZE)
    plt.yticks(fontsize=FONTSIZE)
    plt.xlabel('X [m]')
    plt.ylabel('Z [m]')
    if save:
        path_fig = os.path.join(dir_out, 'output_method.png')
        plt.savefig(path_fig)
        plt.savefig(path_fig, dpi=DPI)
        print("Figure of method saved in {}".format(path_fig))
    plt.close('all')


def show_box_plot(dic_errors, clusters, dir_fig, show=False, save=False):
    import pandas as pd
    excl_clusters = ['all', 'easy', 'moderate', 'hard']
    clusters = [int(clst) for clst in clusters if clst not in excl_clusters]
    methods = ('monstereo', 'pseudo-lidar', '3dop', 'monoloco')
    y_min = 0
    y_max = 16  # 18 for the other
    xxs = get_distances(clusters)
    labels = [str(xx) for xx in xxs]
    for idx, method in enumerate(methods):
        df = pd.DataFrame([dic_errors[method][str(clst)] for clst in clusters[:-1]]).T
        df.columns = labels

        plt.figure(idx, figsize=FIGSIZE)  # with 200 dpi it becomes 1920x1440
        _ = df.boxplot()
        name = 'MonStereo' if method == 'monstereo' else method
        plt.title(name, fontsize=FONTSIZE)
        plt.ylabel('Average localization error (ALE) [m]', fontsize=FONTSIZE)
        plt.xlabel('Ground-truth distance [m]', fontsize=FONTSIZE)
        plt.xticks(fontsize=FONTSIZE)
        plt.yticks(fontsize=FONTSIZE)
        plt.ylim(y_min, y_max)

        if save:
            path_fig = os.path.join(dir_fig, 'box_plot_' + name + '.png')
            plt.tight_layout()
            plt.savefig(path_fig, dpi=DPI)
            print("Figure of box plot saved in {}".format(path_fig))
        if show:
            plt.show()
        plt.close('all')


def target_error(xx, mm):
@ -203,13 +244,10 @@ def get_confidence(xx, zz, std):

def get_distances(clusters):
    """Extract distances as intermediate values between 2 clusters"""

    clusters_ext = list(clusters)
    clusters_ext.insert(0, str(0))
    distances = []
    for idx, _ in enumerate(clusters_ext[:-1]):
        clst_0 = float(clusters_ext[idx])
        clst_1 = float(clusters_ext[idx + 1])
    for idx, _ in enumerate(clusters[:-1]):
        clst_0 = float(clusters[idx])
        clst_1 = float(clusters[idx + 1])
        distances.append((clst_1 - clst_0) / 2 + clst_0)
    return tuple(distances)

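As a quick sanity check of the midpoint logic in `get_distances` above (the cluster labels below are hypothetical, not from the repository):

```python
# Each output value is the midpoint between two consecutive cluster bounds.
print(get_distances(['0', '10', '20', '30']))  # (5.0, 15.0, 25.0)
```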
@ -251,21 +289,6 @@ def expandgrid(*itrs):
    return combinations


def plot_dist(dist_gmm, dist_men, dist_women):
    try:
        import seaborn as sns  # pylint: disable=C0415
        sns.distplot(dist_men, hist=False, rug=False, label="Men")
        sns.distplot(dist_women, hist=False, rug=False, label="Women")
        sns.distplot(dist_gmm, hist=False, rug=False, label="GMM")
        plt.xlabel("X [cm]")
        plt.ylabel("Height distributions of men and women")
        plt.legend()
        plt.show()
        plt.close()
    except ImportError:
        print("Import Seaborn first")


def get_percentile(dist_gmm):
    dd_gt = 1000
    mu_gmm = np.mean(dist_gmm)
@ -278,22 +301,21 @@ def get_percentile(dist_gmm):
    # mad_d = np.mean(np.abs(dist_d - mu_d))


def printing_styles(stereo):
    style = {'mono': {"labels": ['Mono3D', 'Geometric Baseline', 'MonoDepth', 'Our MonoLoco', '3DOP (stereo)'],
                      "methods": ['m3d_merged', 'geometric_merged', 'monodepth_merged', 'monoloco_merged',
                                  '3dop_merged'],
                      "mks": ['*', '^', 'p', 's', 'o'],
                      "mksizes": [6, 6, 6, 6, 6], "lws": [1.5, 1.5, 1.5, 2.2, 1.6],
                      "colors": ['r', 'deepskyblue', 'grey', 'b', 'darkorange'],
                      "lstyles": ['solid', 'solid', 'solid', 'solid', 'dashdot']}}
    if stereo:
        style['stereo'] = {"labels": ['3DOP', 'Pose Baseline', 'ReiD Baseline', 'Our MonoLoco (monocular)',
                                      'Our Stereo Baseline'],
                           "methods": ['3dop_merged', 'pose_merged', 'reid_merged', 'monoloco_merged',
                                       'ml_stereo_merged'],
                           "mks": ['o', '^', 'p', 's', 's'],
                           "mksizes": [6, 6, 6, 4, 6], "lws": [1.5, 1.5, 1.5, 1.2, 1.5],
                           "colors": ['darkorange', 'lightblue', 'red', 'b', 'b'],
                           "lstyles": ['solid', 'solid', 'solid', 'dashed', 'solid']}
def printing_styles(net):
    if net == 'monstereo':
        style = {"labels": ['3DOP', 'PSF', 'MonoLoco', 'MonoPSR', 'Pseudo-Lidar', 'Our MonStereo'],
                 "methods": ['3dop', 'psf', 'monoloco', 'monopsr', 'pseudo-lidar', 'monstereo'],
                 "mks": ['s', 'p', 'o', 'v', '*', '^'],
                 "mksizes": [6, 6, 6, 6, 6, 6], "lws": [2, 2, 2, 2, 2, 2.2],
                 "colors": ['gold', 'skyblue', 'darkgreen', 'pink', 'darkorange', 'b'],
                 "lstyles": ['solid', 'solid', 'dashed', 'dashed', 'solid', 'solid']}
    else:
        style = {"labels": ['Geometric Baseline', 'MonoPSR', 'MonoDIS', '3DOP (stereo)',
                            'MonoLoco', 'Monoloco++'],
                 "methods": ['geometric', 'monopsr', 'monodis', '3dop', 'monoloco', 'monoloco_pp'],
                 "mks": ['*', '^', 'p', '.', 's', 'o'],
                 "mksizes": [6, 6, 6, 6, 6, 6], "lws": [1.5, 1.5, 1.5, 1.5, 1.5, 2.2],
                 "colors": ['purple', 'olive', 'r', 'darkorange', 'b', 'darkblue'],
                 "lstyles": ['solid', 'solid', 'solid', 'dashdot', 'solid', 'solid']}

    return style

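A minimal sketch of how `printing_styles` and `get_distances` might drive a results plot; the dummy `results` dictionary and cluster labels below are assumptions, not part of the diff:

```python
import matplotlib.pyplot as plt

style = printing_styles('monstereo')  # defined in the diff above
xxs = get_distances(['10', '20', '30', '40'])  # midpoints of distance clusters
results = {m: [1.0 + i for i, _ in enumerate(xxs)] for m in style['methods']}  # dummy ALE values

# One curve per method, with marker, size, linewidth, color and linestyle
# taken from the style dictionary.
for label, method, mk, mksize, lw, color, lstyle in zip(
        style['labels'], style['methods'], style['mks'],
        style['mksizes'], style['lws'], style['colors'], style['lstyles']):
    plt.plot(xxs, results[method], marker=mk, markersize=mksize,
             linewidth=lw, color=color, linestyle=lstyle, label=label)
plt.legend()
plt.show()
```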
336
monoloco/visuals/pifpaf_show.py
Normal file
@ -0,0 +1,336 @@

# File adapted from https://github.com/vita-epfl/openpifpaf

from contextlib import contextmanager

import numpy as np
from PIL import Image

try:
    import matplotlib
    import matplotlib.pyplot as plt
    import scipy.ndimage as ndimage
except ImportError:
    matplotlib = None
    plt = None
    ndimage = None


COCO_PERSON_SKELETON = [
    [16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13],
    [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3],
    [2, 4], [3, 5], [4, 6], [5, 7]]


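The skeleton pairs above use 1-based COCO keypoint indices; `KeypointPainter._draw_skeleton` below shifts them to 0-based before indexing the keypoint arrays. A small illustration:

```python
import numpy as np

# First three COCO skeleton pairs, 1-based as in the constant above.
COCO_PAIRS = [[16, 14], [14, 12], [17, 15]]
connections = np.array(COCO_PAIRS) - 1  # same shift the painter applies
print(connections[0])  # [15 13] -> left ankle to left knee (0-based)
```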
@contextmanager
def canvas(fig_file=None, show=True, **kwargs):
    if 'figsize' not in kwargs:
        # kwargs['figsize'] = (15, 8)
        kwargs['figsize'] = (10, 6)
    fig, ax = plt.subplots(**kwargs)

    yield ax

    fig.set_tight_layout(True)
    if fig_file:
        fig.savefig(fig_file, dpi=200)  # , bbox_inches='tight')
    if show:
        plt.show()
    plt.close(fig)


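A minimal usage sketch for the `canvas` context manager above (the output file name is hypothetical):

```python
# The context manager yields an axis, then saves and/or shows the figure
# on exit; saving only happens when a file name is given.
with canvas('example.png', show=False) as ax:
    ax.plot([0, 1, 2], [0, 1, 4], label='demo')
    ax.legend()
```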
@contextmanager
def image_canvas(image, fig_file=None, show=True, dpi_factor=1.0, fig_width=10.0, **kwargs):
    if 'figsize' not in kwargs:
        kwargs['figsize'] = (fig_width, fig_width * image.size[1] / image.size[0])

    fig = plt.figure(**kwargs)
    ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
    ax.set_axis_off()
    ax.set_xlim(0, image.size[0])
    ax.set_ylim(image.size[1], 0)
    fig.add_axes(ax)
    image_2 = ndimage.gaussian_filter(image, sigma=2.5)
    ax.imshow(image_2, alpha=0.4)
    yield ax

    if fig_file:
        fig.savefig(fig_file, dpi=image.size[0] / kwargs['figsize'][0] * dpi_factor)
        print('keypoints image saved')
    if show:
        plt.show()
    plt.close(fig)


def load_image(path, scale=1.0):
    with open(path, 'rb') as f:
        image = Image.open(f).convert('RGB')
        image = np.asarray(image) * scale / 255.0
        return image


class KeypointPainter(object):
    def __init__(self, *,
                 skeleton=None,
                 xy_scale=1.0, highlight=None, highlight_invisible=False,
                 show_box=True, linewidth=2, markersize=3,
                 color_connections=False,
                 solid_threshold=0.5):
        self.skeleton = skeleton or COCO_PERSON_SKELETON
        self.xy_scale = xy_scale
        self.highlight = highlight
        self.highlight_invisible = highlight_invisible
        self.show_box = show_box
        self.linewidth = linewidth
        self.markersize = markersize
        self.color_connections = color_connections
        self.solid_threshold = solid_threshold
        self.dashed_threshold = 0.1  # Patch to still allow force complete pose (set to zero to resume original)

    def _draw_skeleton(self, ax, x, y, v, *, color=None):
        if not np.any(v > 0):
            return

        if self.skeleton is not None:
            for ci, connection in enumerate(np.array(self.skeleton) - 1):
                c = color
                if self.color_connections:
                    c = matplotlib.cm.get_cmap('tab20')(ci / len(self.skeleton))
                if np.all(v[connection] > self.dashed_threshold):
                    ax.plot(x[connection], y[connection],
                            linewidth=self.linewidth, color=c,
                            linestyle='dashed', dash_capstyle='round')
                if np.all(v[connection] > self.solid_threshold):
                    ax.plot(x[connection], y[connection],
                            linewidth=self.linewidth, color=c, solid_capstyle='round')

        # highlight invisible keypoints
        inv_color = 'k' if self.highlight_invisible else color

        ax.plot(x[v > self.dashed_threshold], y[v > self.dashed_threshold],
                'o', markersize=self.markersize,
                markerfacecolor=color, markeredgecolor=inv_color, markeredgewidth=2)
        ax.plot(x[v > self.solid_threshold], y[v > self.solid_threshold],
                'o', markersize=self.markersize,
                markerfacecolor=color, markeredgecolor=color, markeredgewidth=2)

        if self.highlight is not None:
            v_highlight = v[self.highlight]
            ax.plot(x[self.highlight][v_highlight > 0],
                    y[self.highlight][v_highlight > 0],
                    'o', markersize=self.markersize*2, markeredgewidth=2,
                    markerfacecolor=color, markeredgecolor=color)

    @staticmethod
    def _draw_box(ax, x, y, v, color, score=None):
        if not np.any(v > 0):
            return

        # keypoint bounding box
        x1, x2 = np.min(x[v > 0]), np.max(x[v > 0])
        y1, y2 = np.min(y[v > 0]), np.max(y[v > 0])
        if x2 - x1 < 5.0:
            x1 -= 2.0
            x2 += 2.0
        if y2 - y1 < 5.0:
            y1 -= 2.0
            y2 += 2.0
        ax.add_patch(
            matplotlib.patches.Rectangle(
                (x1, y1), x2 - x1, y2 - y1, fill=False, color=color))

        if score:
            ax.text(x1, y1, '{:.4f}'.format(score), fontsize=8, color=color)

    @staticmethod
    def _draw_text(ax, x, y, v, text, color):
        if not np.any(v > 0):
            return

        # keypoint bounding box
        x1, x2 = np.min(x[v > 0]), np.max(x[v > 0])
        y1, y2 = np.min(y[v > 0]), np.max(y[v > 0])
        if x2 - x1 < 5.0:
            x1 -= 2.0
            x2 += 2.0
        if y2 - y1 < 5.0:
            y1 -= 2.0
            y2 += 2.0

        ax.text(x1 + 2, y1 - 2, text, fontsize=8,
                color='white', bbox={'facecolor': color, 'alpha': 0.5, 'linewidth': 0})

    @staticmethod
    def _draw_scales(ax, xs, ys, vs, color, scales):
        for x, y, v, scale in zip(xs, ys, vs, scales):
            if v == 0.0:
                continue
            ax.add_patch(
                matplotlib.patches.Rectangle(
                    (x - scale, y - scale), 2 * scale, 2 * scale, fill=False, color=color))

    def keypoints(self, ax, keypoint_sets, *, scores=None, color=None, colors=None, texts=None):
        if keypoint_sets is None:
            return

        if color is None and self.color_connections:
            color = 'white'
        if color is None and colors is None:
            colors = range(len(keypoint_sets))

        for i, kps in enumerate(np.asarray(keypoint_sets)):
            assert kps.shape[1] == 3
            x = kps[:, 0] * self.xy_scale
            y = kps[:, 1] * self.xy_scale
            v = kps[:, 2]

            if colors is not None:
                color = colors[i]

            if isinstance(color, (int, np.integer)):
                color = matplotlib.cm.get_cmap('tab20')((color % 20 + 0.05) / 20)

            self._draw_skeleton(ax, x, y, v, color=color)
            if self.show_box:
                score = scores[i] if scores is not None else None
                self._draw_box(ax, x, y, v, color, score)

            if texts is not None:
                self._draw_text(ax, x, y, v, texts[i], color)

    def annotations(self, ax, annotations, *,
                    color=None, colors=None, texts=None):
        if annotations is None:
            return

        if color is None and self.color_connections:
            color = 'white'
        if color is None and colors is None:
            colors = range(len(annotations))

        for i, ann in enumerate(annotations):
            if colors is not None:
                color = colors[i]

            text = texts[i] if texts is not None else None
            self.annotation(ax, ann, color=color, text=text)

    def annotation(self, ax, ann, *, color, text=None):
        if isinstance(color, (int, np.integer)):
            color = matplotlib.cm.get_cmap('tab20')((color % 20 + 0.05) / 20)

        kps = ann.data
        assert kps.shape[1] == 3
        x = kps[:, 0] * self.xy_scale
        y = kps[:, 1] * self.xy_scale
        v = kps[:, 2]

        self._draw_skeleton(ax, x, y, v, color=color)

        if ann.joint_scales is not None:
            self._draw_scales(ax, x, y, v, color, ann.joint_scales)

        if self.show_box:
            self._draw_box(ax, x, y, v, color, ann.score())

        if text is not None:
            self._draw_text(ax, x, y, v, text, color)


def quiver(ax, vector_field, intensity_field=None, step=1, threshold=0.5,
           xy_scale=1.0, uv_is_offset=False,
           reg_uncertainty=None, **kwargs):
    x, y, u, v, c, r = [], [], [], [], [], []
    for j in range(0, vector_field.shape[1], step):
        for i in range(0, vector_field.shape[2], step):
            if intensity_field is not None and intensity_field[j, i] < threshold:
                continue
            x.append(i * xy_scale)
            y.append(j * xy_scale)
            u.append(vector_field[0, j, i] * xy_scale)
            v.append(vector_field[1, j, i] * xy_scale)
            c.append(intensity_field[j, i] if intensity_field is not None else 1.0)
            r.append(reg_uncertainty[j, i] * xy_scale if reg_uncertainty is not None else None)
    x = np.array(x)
    y = np.array(y)
    u = np.array(u)
    v = np.array(v)
    c = np.array(c)
    r = np.array(r)
    s = np.argsort(c)
    if uv_is_offset:
        u -= x
        v -= y

    for xx, yy, uu, vv, _, rr in zip(x, y, u, v, c, r):
        if not rr:
            continue
        circle = matplotlib.patches.Circle(
            (xx + uu, yy + vv), rr / 2.0, zorder=11, linewidth=1, alpha=1.0,
            fill=False, color='orange')
        ax.add_artist(circle)

    return ax.quiver(x[s], y[s], u[s], v[s], c[s],
                     angles='xy', scale_units='xy', scale=1, zorder=10, **kwargs)


def arrows(ax, fourd, xy_scale=1.0, threshold=0.0, **kwargs):
    mask = np.min(fourd[:, 2], axis=0) >= threshold
    fourd = fourd[:, :, mask]
    (x1, y1), (x2, y2) = fourd[:, :2, :] * xy_scale
    c = np.min(fourd[:, 2], axis=0)
    s = np.argsort(c)
    return ax.quiver(x1[s], y1[s], (x2 - x1)[s], (y2 - y1)[s], c[s],
                     angles='xy', scale_units='xy', scale=1, zorder=10, **kwargs)


def boxes(ax, scalar_field, intensity_field=None, xy_scale=1.0, step=1, threshold=0.5,
          cmap='viridis_r', clim=(0.5, 1.0), **kwargs):
    x, y, s, c = [], [], [], []
    for j in range(0, scalar_field.shape[0], step):
        for i in range(0, scalar_field.shape[1], step):
            if intensity_field is not None and intensity_field[j, i] < threshold:
                continue
            x.append(i * xy_scale)
            y.append(j * xy_scale)
            s.append(scalar_field[j, i] * xy_scale)
            c.append(intensity_field[j, i] if intensity_field is not None else 1.0)

    cmap = matplotlib.cm.get_cmap(cmap)
    cnorm = matplotlib.colors.Normalize(vmin=clim[0], vmax=clim[1])
    for xx, yy, ss, cc in zip(x, y, s, c):
        color = cmap(cnorm(cc))
        rectangle = matplotlib.patches.Rectangle(
            (xx - ss, yy - ss), ss * 2.0, ss * 2.0,
            color=color, zorder=10, linewidth=1, **kwargs)
        ax.add_artist(rectangle)


def circles(ax, scalar_field, intensity_field=None, xy_scale=1.0, step=1, threshold=0.5,
            cmap='viridis_r', clim=(0.5, 1.0), **kwargs):
    x, y, s, c = [], [], [], []
    for j in range(0, scalar_field.shape[0], step):
        for i in range(0, scalar_field.shape[1], step):
            if intensity_field is not None and intensity_field[j, i] < threshold:
                continue
            x.append(i * xy_scale)
            y.append(j * xy_scale)
            s.append(scalar_field[j, i] * xy_scale)
            c.append(intensity_field[j, i] if intensity_field is not None else 1.0)

    cmap = matplotlib.cm.get_cmap(cmap)
    cnorm = matplotlib.colors.Normalize(vmin=clim[0], vmax=clim[1])
    for xx, yy, ss, cc in zip(x, y, s, c):
        color = cmap(cnorm(cc))
        circle = matplotlib.patches.Circle(
            (xx, yy), ss,
            color=color, zorder=10, linewidth=1, **kwargs)
        ax.add_artist(circle)


def white_screen(ax, alpha=0.9):
    ax.add_patch(
        plt.Rectangle((0, 0), 1, 1, transform=ax.transAxes, alpha=alpha,
                      facecolor='white')
    )
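A minimal sketch of how `KeypointPainter` and `image_canvas` might be combined; the input image path and the random keypoint array are hypothetical:

```python
import numpy as np
from PIL import Image

# One fake 17-keypoint detection: (x, y, visibility) per joint.
rng = np.random.default_rng(0)
kps = np.concatenate([rng.uniform(100, 300, (17, 2)), np.ones((17, 1))], axis=1)

painter = KeypointPainter(show_box=True, markersize=4)
with open('example.jpg', 'rb') as f:  # hypothetical input image
    pil_image = Image.open(f).convert('RGB')
with image_canvas(pil_image, 'example.keypoints.png', show=False) as ax:
    painter.keypoints(ax, [kps], colors=[0])
```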
98
monoloco/visuals/plot_3d_box.py
Normal file
@ -0,0 +1,98 @@

import numpy as np


def correct_boxes(boxes, hwls, xyzs, yaws, path_calib):

    with open(path_calib, "r") as ff:
        file = ff.readlines()
    p2_str = file[2].split()[1:]
    p2_list = [float(xx) for xx in p2_str]
    P = np.array(p2_list).reshape(3, 4)
    boxes_new = []
    for idx, box in enumerate(boxes):
        hwl = hwls[idx]
        xyz = xyzs[idx]
        yaw = yaws[idx]
        corners_2d, corners_3d = compute_box_3d(hwl, xyz, yaw, P)
        box_new = project_8p_to_4p(corners_2d).reshape(-1).tolist()
        boxes_new.append(box_new)
    return boxes_new


def compute_box_3d(hwl, xyz, ry, P):
    """ Takes an object and a projection matrix (P) and projects the 3d
    bounding box into the image plane.
    Returns:
        corners_2d: (8,2) array in left image coord.
        corners_3d: (8,3) array in rect camera coord.
    """
    # compute rotational matrix around yaw axis
    R = roty(ry)

    # 3d bounding box dimensions
    l = hwl[2]
    w = hwl[1]
    h = hwl[0]

    # 3d bounding box corners
    x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
    y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
    z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]

    # rotate and translate 3d bounding box
    corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
    corners_3d[0, :] = corners_3d[0, :] + xyz[0]
    corners_3d[1, :] = corners_3d[1, :] + xyz[1]
    corners_3d[2, :] = corners_3d[2, :] + xyz[2]
    # only draw 3d bounding box for objs in front of the camera
    if np.any(corners_3d[2, :] < 0.1):
        corners_2d = None
        return corners_2d, np.transpose(corners_3d)

    # project the 3d bounding box into the image plane
    corners_2d = project_to_image(np.transpose(corners_3d), P)
    return corners_2d, np.transpose(corners_3d)


def roty(t):
    """ Rotation about the y-axis. """
    c = np.cos(t)
    s = np.sin(t)
    return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])


def project_to_image(pts_3d, P):
    """ Project 3d points to image plane.
    Usage: pts_2d = projectToImage(pts_3d, P)
    input: pts_3d: nx3 matrix
           P: 3x4 projection matrix
    output: pts_2d: nx2 matrix
    P(3x4) dot pts_3d_extended(4xn) = projected_pts_2d(3xn)
    => normalize projected_pts_2d(2xn)
    <=> pts_3d_extended(nx4) dot P'(4x3) = projected_pts_2d(nx3)
    => normalize projected_pts_2d(nx2)
    """
    n = pts_3d.shape[0]
    pts_3d_extend = np.hstack((pts_3d, np.ones((n, 1))))
    pts_2d = np.dot(pts_3d_extend, np.transpose(P))  # nx3
    pts_2d[:, 0] /= pts_2d[:, 2]
    pts_2d[:, 1] /= pts_2d[:, 2]
    return pts_2d[:, 0:2]


def project_8p_to_4p(pts_2d):
    x0 = np.min(pts_2d[:, 0])
    x1 = np.max(pts_2d[:, 0])
    y0 = np.min(pts_2d[:, 1])
    y1 = np.max(pts_2d[:, 1])
    x0 = max(0, x0)
    y0 = max(0, y0)
    return np.array([x0, y0, x1, y1])
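A small worked example of the projection pipeline above; the KITTI-style `P2` values and the pedestrian box below are illustrative only:

```python
import numpy as np

# A plausible KITTI-like P2 projection matrix (values are illustrative).
P = np.array([[721.5, 0., 609.6, 44.9],
              [0., 721.5, 172.9, 0.2],
              [0., 0., 1., 0.003]])

hwl = [1.75, 0.6, 0.8]   # pedestrian height, width, length [m]
xyz = [2.0, 1.5, 10.0]   # bottom-center of the box in camera coordinates
corners_2d, corners_3d = compute_box_3d(hwl, xyz, ry=0.1, P=P)
print(corners_3d.shape)              # (8, 3): rotated and translated 3D corners
print(project_8p_to_4p(corners_2d))  # axis-aligned [x0, y0, x1, y1] in pixels
```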
@ -1,90 +1,133 @@

"""
Class for drawing frontal, bird-eye-view and multi figures
"""
# pylint: disable=attribute-defined-outside-init
import math
from collections import OrderedDict

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.patches import Ellipse, Circle, Rectangle
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.patches import Rectangle

from ..utils import pixel_to_camera, get_task_error
from ..utils import pixel_to_camera


def get_angle(xx, zz):
    """Obtain the points to plot the confidence of each annotation"""

    theta = math.atan2(zz, xx)
    angle = theta * (180 / math.pi)

    return angle


def image_attributes(dpi, output_types):
    c = 0.7 if 'front' in output_types else 1.0
    return dict(dpi=dpi,
                fontsize_d=round(14 * c),
                fontsize_bv=round(24 * c),
                fontsize_num=round(22 * c),
                fontsize_ax=round(16 * c),
                linewidth=round(8 * c),
                markersize=round(13 * c),
                y_box_margin=round(24 * math.sqrt(c)),
                stereo=dict(color='deepskyblue',
                            numcolor='darkorange',
                            linewidth=1 * c),
                mono=dict(color='red',
                          numcolor='firebrick',
                          linewidth=2 * c)
                )


class Printer:
    """
    Print results on images: birds eye view and computed distance
    """
    FONTSIZE_BV = 16
    FONTSIZE = 18
    TEXTCOLOR = 'darkorange'
    COLOR_KPS = 'yellow'
    FIG_WIDTH = 15
    extensions = []
    y_scale = 1
    nones = lambda n: [None for _ in range(n)]
    mpl_im0, stds_ale, stds_epi, xx_gt, zz_gt, xx_pred, zz_pred, dd_real, uv_centers, uv_shoulders, uv_kps, boxes, \
        boxes_gt, uv_camera, radius, auxs = nones(16)

    def __init__(self, image, output_path, kk, output_types, epistemic=False, z_max=30, fig_width=10):
    def __init__(self, image, output_path, kk, args):

        self.im = image
        self.kk = kk
        self.output_types = output_types
        self.epistemic = epistemic
        self.z_max = z_max  # To include ellipses in the image
        self.y_scale = 1
        self.width = self.im.size[0]
        self.height = self.im.size[1]
        self.fig_width = fig_width

        # Define the output dir
        self.output_path = output_path
        self.cmap = cm.get_cmap('jet')
        self.extensions = []
        self.kk = kk
        self.output_types = args.output_types
        self.z_max = args.z_max  # set max distance to show instances
        self.show_all = args.show_all
        self.show = args.show_all
        self.save = not args.no_save

        # Define variables of the class to change for every image
        self.mpl_im0 = self.stds_ale = self.stds_epi = self.xx_gt = self.zz_gt = self.xx_pred = self.zz_pred = \
            self.dds_real = self.uv_centers = self.uv_shoulders = self.uv_kps = self.boxes = self.boxes_gt = \
            self.uv_camera = self.radius = None
        # define image attributes
        self.attr = image_attributes(args.dpi, args.output_types)

    def _process_results(self, dic_ann):
        # Include the vectors inside the interval given by z_max
        self.stds_ale = dic_ann['stds_ale']
        self.stds_epi = dic_ann['stds_epi']
        self.gt = dic_ann['gt']  # regulate ground-truth matching
        self.xx_gt = [xx[0] for xx in dic_ann['xyz_real']]
        self.xx_pred = [xx[0] for xx in dic_ann['xyz_pred']]

        # Set maximum distance
        self.dd_pred = dic_ann['dds_pred']
        self.dd_real = dic_ann['dds_real']
        self.z_max = int(min(self.z_max, 4 + max(max(self.dd_pred), max(self.dd_real, default=0))))

        # Do not print instances outside z_max
        self.zz_gt = [xx[2] if xx[2] < self.z_max - self.stds_epi[idx] else 0
                      for idx, xx in enumerate(dic_ann['xyz_real'])]
        self.xx_pred = [xx[0] for xx in dic_ann['xyz_pred']]
        self.zz_pred = [xx[2] if xx[2] < self.z_max - self.stds_epi[idx] else 0
                       for idx, xx in enumerate(dic_ann['xyz_pred'])]
        self.dds_real = dic_ann['dds_real']

        self.uv_heads = dic_ann['uv_heads']
        self.uv_shoulders = dic_ann['uv_shoulders']
        self.boxes = dic_ann['boxes']
        self.boxes_gt = dic_ann['boxes_gt']

        self.uv_camera = (int(self.im.size[0] / 2), self.im.size[1])
        self.radius = 11 / 1600 * self.width
        self.auxs = dic_ann['aux']
        if len(self.auxs) == 0:
            self.modes = ['mono'] * len(self.dd_pred)
        else:
            self.modes = []
            for aux in self.auxs:
                if aux <= 0.3:
                    self.modes.append('mono')
                else:
                    self.modes.append('stereo')

    def factory_axes(self):
        """Create axes for figures: front bird combined"""
    def factory_axes(self, dic_out):
        """Create axes for figures: front bird multi"""
        axes = []
        figures = []

        # Initialize combined figure, resizing it for aesthetic proportions
        if 'combined' in self.output_types:
            assert 'bird' and 'front' not in self.output_types, \
                "combined figure cannot be printed together with front or bird ones"
        # Process the annotation dictionary of monoloco
        self._process_results(dic_out)

            self.y_scale = self.width / (self.height * 1.8)  # Defined proportion
        # Initialize multi figure, resizing it for aesthetic proportion
        if 'multi' in self.output_types:
            assert 'bird' not in self.output_types and 'front' not in self.output_types, \
                "multi figure cannot be printed together with front or bird ones"

            self.y_scale = self.width / (self.height * 2)  # Defined proportion
            if self.y_scale < 0.95 or self.y_scale > 1.05:  # allows more variation without resizing
                self.im = self.im.resize((self.width, round(self.height * self.y_scale)))
            self.width = self.im.size[0]
            self.height = self.im.size[1]
            fig_width = self.fig_width + 0.6 * self.fig_width
            fig_height = self.fig_width * self.height / self.width
            fig_width = self.FIG_WIDTH + 0.6 * self.FIG_WIDTH
            fig_height = self.FIG_WIDTH * self.height / self.width

            # Distinguish between KITTI images and general images
            fig_ar_1 = 1.7 if self.y_scale > 1.7 else 1.3
            fig_ar_1 = 0.8
            width_ratio = 1.9
            self.extensions.append('.combined.png')
            self.extensions.append('.multi.png')

            fig, (ax1, ax0) = plt.subplots(1, 2, sharey=False, gridspec_kw={'width_ratios': [1, width_ratio]},
            fig, (ax0, ax1) = plt.subplots(1, 2, sharey=False, gridspec_kw={'width_ratios': [width_ratio, 1]},
                                           figsize=(fig_width, fig_height))
            ax1.set_aspect(fig_ar_1)
            fig.set_tight_layout(True)
@ -92,12 +135,12 @@ class Printer:

            figures.append(fig)
            assert 'front' not in self.output_types and 'bird' not in self.output_types, \
                "--combined argument is not supported with other visualizations"
                "--multi argument is not supported with other visualizations"

        # Initialize front figure
        elif 'front' in self.output_types:
            width = self.fig_width
            height = self.fig_width * self.height / self.width
            width = self.FIG_WIDTH
            height = self.FIG_WIDTH * self.height / self.width
            self.extensions.append(".front.png")
            plt.figure(0)
            fig0, ax0 = plt.subplots(1, 1, figsize=(width, height))
@ -105,18 +148,8 @@ class Printer:
            figures.append(fig0)

        # Create front figure axis
        if any(xx in self.output_types for xx in ['front', 'combined']):
            ax0 = self.set_axes(ax0, axis=0)

            divider = make_axes_locatable(ax0)
            cax = divider.append_axes('right', size='3%', pad=0.05)
            bar_ticks = self.z_max // 5 + 1
            norm = matplotlib.colors.Normalize(vmin=0, vmax=self.z_max)
            scalar_mappable = plt.cm.ScalarMappable(cmap=self.cmap, norm=norm)
            scalar_mappable.set_array([])
            plt.colorbar(scalar_mappable, ticks=np.linspace(0, self.z_max, bar_ticks),
                         boundaries=np.arange(- 0.05, self.z_max + 0.1, .1), cax=cax, label='Z [m]')

        if any(xx in self.output_types for xx in ['front', 'multi']):
            ax0 = self._set_axes(ax0, axis=0)
            axes.append(ax0)
        if not axes:
            axes.append(None)
@ -127,99 +160,96 @@ class Printer:
        fig1, ax1 = plt.subplots(1, 1)
        fig1.set_tight_layout(True)
        figures.append(fig1)
        if any(xx in self.output_types for xx in ['bird', 'combined']):
            ax1 = self.set_axes(ax1, axis=1)  # Adding field of view
        if any(xx in self.output_types for xx in ['bird', 'multi']):
            ax1 = self._set_axes(ax1, axis=1)  # Adding field of view
            axes.append(ax1)
        return figures, axes
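A hypothetical driver for the refactored `Printer` API above: the constructor now takes an argparse-style namespace, `factory_axes` consumes the network output dictionary, and `draw` renders it. The field names mirror the diff; `dic_out` and the paths are assumptions:

```python
import argparse
from PIL import Image

args = argparse.Namespace(output_types=['multi'], z_max=30, show_all=True,
                          no_save=False, dpi=150)
kk = [[718.3351, 0., 600.3891], [0., 718.3351, 181.5122], [0., 0., 1.]]

with open('docs/002282.png', 'rb') as f:  # hypothetical image path
    pil_image = Image.open(f).convert('RGB')
printer = Printer(image=pil_image, output_path='data/output/002282', kk=kk, args=args)
figures, axes = printer.factory_axes(dic_out)  # dic_out: MonoLoco output dictionary
printer.draw(figures, axes, pil_image)
```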

    def draw(self, figures, axes, dic_out, image, draw_text=True, legend=True, draw_box=False,
             save=False, show=False):
    def draw(self, figures, axes, image):

        # Process the annotation dictionary of monoloco
        self._process_results(dic_out)
        # whether to include instances that don't match the ground-truth
        iterator = range(len(self.zz_pred)) if self.show_all else range(len(self.zz_gt))
        if not iterator:
            print("-" * 110 + '\n' + "! No instances detected, be sure to include a file with ground-truth values or "
                  "use the command --show_all" + '\n' + "-" * 110)

        # Draw the front figure
        num = 0
        self.mpl_im0.set_data(image)
        for idx, uv in enumerate(self.uv_shoulders):
            if any(xx in self.output_types for xx in ['front', 'combined']) and \
                    min(self.zz_pred[idx], self.zz_gt[idx]) > 0:

                color = self.cmap((self.zz_pred[idx] % self.z_max) / self.z_max)
                self.draw_circle(axes, uv, color)
                if draw_box:
                    self.draw_boxes(axes, idx, color)

                if draw_text:
                    self.draw_text_front(axes, uv, num)
                    num += 1
        number = dict(flag=False, num=97)
        if any(xx in self.output_types for xx in ['front', 'multi']):
            number['flag'] = True  # add numbers
            self.mpl_im0.set_data(image)
            for idx in iterator:
                if any(xx in self.output_types for xx in ['front', 'multi']) and self.zz_pred[idx] > 0:
                    self._draw_front(axes[0],
                                     self.dd_pred[idx],
                                     idx,
                                     number)
                    number['num'] += 1

        # Draw the bird figure
        num = 0
        for idx, _ in enumerate(self.xx_pred):
            if any(xx in self.output_types for xx in ['bird', 'combined']) and self.zz_gt[idx] > 0:
        number['num'] = 97
        for idx in iterator:
            if any(xx in self.output_types for xx in ['bird', 'multi']) and self.zz_pred[idx] > 0:

                # Draw ground truth and predicted ellipses
                self.draw_ellipses(axes, idx)
                # Draw ground truth and uncertainty
                self._draw_uncertainty(axes, idx)

                # Draw bird eye view text
                if draw_text:
                    self.draw_text_bird(axes, idx, num)
                    num += 1
        # Add the legend
        if legend:
            draw_legend(axes)
                if number['flag']:
                    self._draw_text_bird(axes, idx, number['num'])
                    number['num'] += 1
        self._draw_legend(axes)

        # Draw, save and/or show the figures
        for idx, fig in enumerate(figures):
            fig.canvas.draw()
            if save:
                fig.savefig(self.output_path + self.extensions[idx], bbox_inches='tight')
            if show:
            if self.save:
                fig.savefig(self.output_path + self.extensions[idx], bbox_inches='tight', dpi=self.attr['dpi'])
            if self.show:
                fig.show()
            plt.close(fig)

    def draw_ellipses(self, axes, idx):
        """draw uncertainty ellipses"""
        target = get_task_error(self.dds_real[idx])
        angle_gt = get_angle(self.xx_gt[idx], self.zz_gt[idx])
        ellipse_real = Ellipse((self.xx_gt[idx], self.zz_gt[idx]), width=target * 2, height=1,
                               angle=angle_gt, color='lightgreen', fill=True, label="Task error")
        axes[1].add_patch(ellipse_real)
        if abs(self.zz_gt[idx] - self.zz_pred[idx]) > 0.001:
            axes[1].plot(self.xx_gt[idx], self.zz_gt[idx], 'kx', label="Ground truth", markersize=3)
    def _draw_front(self, ax, z, idx, number):

        angle = get_angle(self.xx_pred[idx], self.zz_pred[idx])
        ellipse_ale = Ellipse((self.xx_pred[idx], self.zz_pred[idx]), width=self.stds_ale[idx] * 2,
                              height=1, angle=angle, color='b', fill=False, label="Aleatoric Uncertainty",
                              linewidth=1.3)
        ellipse_var = Ellipse((self.xx_pred[idx], self.zz_pred[idx]), width=self.stds_epi[idx] * 2,
                              height=1, angle=angle, color='r', fill=False, label="Uncertainty",
                              linewidth=1, linestyle='--')
        # Bbox
        w = min(self.width - 2, self.boxes[idx][2] - self.boxes[idx][0])
        h = min(self.height - 2, (self.boxes[idx][3] - self.boxes[idx][1]) * self.y_scale)
        x0 = self.boxes[idx][0]
        y0 = self.boxes[idx][1] * self.y_scale
        y1 = y0 + h
        rectangle = Rectangle((x0, y0),
                              width=w,
                              height=h,
                              fill=False,
                              color=self.attr[self.modes[idx]]['color'],
                              linewidth=self.attr[self.modes[idx]]['linewidth'])
        ax.add_patch(rectangle)
        z_str = str(z).split(sep='.')
        text = z_str[0] + '.' + z_str[1][0]
        bbox_config = {'facecolor': self.attr[self.modes[idx]]['color'], 'alpha': 0.4, 'linewidth': 0}

        axes[1].add_patch(ellipse_ale)
        if self.epistemic:
            axes[1].add_patch(ellipse_var)
        x_t = x0 - 1.5
        y_t = y1 + self.attr['y_box_margin']
        if y_t < (self.height - 10):
            ax.annotate(
                text,
                (x_t, y_t),
                fontsize=self.attr['fontsize_d'],
                weight='bold',
                xytext=(5.0, 5.0),
                textcoords='offset points',
                color='white',
                bbox=bbox_config,
            )
            if number['flag']:
                ax.text(x0 - 17,
                        y1 + 14,
                        chr(number['num']),
                        fontsize=self.attr['fontsize_num'],
                        color=self.attr[self.modes[idx]]['numcolor'],
                        weight='bold')

        axes[1].plot(self.xx_pred[idx], self.zz_pred[idx], 'ro', label="Predicted", markersize=3)

    def draw_boxes(self, axes, idx, color):
        ww_box = self.boxes[idx][2] - self.boxes[idx][0]
        hh_box = (self.boxes[idx][3] - self.boxes[idx][1]) * self.y_scale
        ww_box_gt = self.boxes_gt[idx][2] - self.boxes_gt[idx][0]
        hh_box_gt = (self.boxes_gt[idx][3] - self.boxes_gt[idx][1]) * self.y_scale

        rectangle = Rectangle((self.boxes[idx][0], self.boxes[idx][1] * self.y_scale),
                              width=ww_box, height=hh_box, fill=False, color=color, linewidth=3)
        rectangle_gt = Rectangle((self.boxes_gt[idx][0], self.boxes_gt[idx][1] * self.y_scale),
                                 width=ww_box_gt, height=hh_box_gt, fill=False, color='g', linewidth=2)
        axes[0].add_patch(rectangle_gt)
        axes[0].add_patch(rectangle)

    def draw_text_front(self, axes, uv, num):
        axes[0].text(uv[0] + self.radius, uv[1] * self.y_scale - self.radius, str(num),
                     fontsize=self.FONTSIZE, color=self.TEXTCOLOR, weight='bold')

    def draw_text_bird(self, axes, idx, num):
    def _draw_text_bird(self, axes, idx, num):
        """Plot the number in the bird eye view map"""

        std = self.stds_epi[idx] if self.stds_epi[idx] > 0 else self.stds_ale[idx]
@ -228,15 +258,103 @@ class Printer:
        delta_x = std * math.cos(theta)
        delta_z = std * math.sin(theta)

        axes[1].text(self.xx_pred[idx] + delta_x, self.zz_pred[idx] + delta_z,
                     str(num), fontsize=self.FONTSIZE_BV, color='darkorange')
        axes[1].text(self.xx_pred[idx] + delta_x + 0.2, self.zz_pred[idx] + delta_z + 0.2, chr(num),
                     fontsize=self.attr['fontsize_bv'],
                     color=self.attr[self.modes[idx]]['numcolor'])

    def draw_circle(self, axes, uv, color):
    def _draw_uncertainty(self, axes, idx):

        circle = Circle((uv[0], uv[1] * self.y_scale), radius=self.radius, color=color, fill=True)
        axes[0].add_patch(circle)
        theta = math.atan2(self.zz_pred[idx], self.xx_pred[idx])
        dic_std = {'ale': self.stds_ale[idx], 'epi': self.stds_epi[idx]}
        dic_x, dic_y = {}, {}

    def set_axes(self, ax, axis):
        # Aleatoric and epistemic
        for key, std in dic_std.items():
            delta_x = std * math.cos(theta)
            delta_z = std * math.sin(theta)
            dic_x[key] = (self.xx_pred[idx] - delta_x, self.xx_pred[idx] + delta_x)
            dic_y[key] = (self.zz_pred[idx] - delta_z, self.zz_pred[idx] + delta_z)

        # MonoLoco
        if not self.auxs:
            axes[1].plot(dic_x['epi'],
                         dic_y['epi'],
                         color='coral',
                         linewidth=round(self.attr['linewidth'] / 2),
                         label="Epistemic Uncertainty")

            axes[1].plot(dic_x['ale'],
                         dic_y['ale'],
                         color='deepskyblue',
                         linewidth=self.attr['linewidth'],
                         label="Aleatoric Uncertainty")

            axes[1].plot(self.xx_pred[idx],
                         self.zz_pred[idx],
                         color='cornflowerblue',
                         label="Prediction",
                         markersize=self.attr['markersize'],
                         marker='o')

            if self.gt[idx]:
                axes[1].plot(self.xx_gt[idx],
                             self.zz_gt[idx],
                             color='k',
                             label="Ground-truth",
                             markersize=8,
                             marker='x')

        # MonStereo (stereo case)
        elif self.auxs[idx] > 0.5:
            axes[1].plot(dic_x['ale'],
                         dic_y['ale'],
                         color='r',
                         linewidth=self.attr['linewidth'],
                         label="Prediction (mono)")

            axes[1].plot(dic_x['ale'],
                         dic_y['ale'],
                         color='deepskyblue',
                         linewidth=self.attr['linewidth'],
                         label="Prediction (stereo+mono)")

            if self.gt[idx]:
                axes[1].plot(self.xx_gt[idx],
                             self.zz_gt[idx],
                             color='k',
                             label="Ground-truth",
                             markersize=self.attr['markersize'],
                             marker='x')

        # MonStereo (monocular case)
        else:
            axes[1].plot(dic_x['ale'],
                         dic_y['ale'],
                         color='deepskyblue',
                         linewidth=self.attr['linewidth'],
                         label="Prediction (stereo+mono)")

            axes[1].plot(dic_x['ale'],
                         dic_y['ale'],
                         color='r',
                         linewidth=self.attr['linewidth'],
                         label="Prediction (mono)")
            if self.gt[idx]:
                axes[1].plot(self.xx_gt[idx],
                             self.zz_gt[idx],
                             color='k',
                             label="Ground-truth",
                             markersize=self.attr['markersize'],
                             marker='x')

    def _draw_legend(self, axes):
        # Bird eye view legend
        if any(xx in self.output_types for xx in ['bird', 'multi']):
            handles, labels = axes[1].get_legend_handles_labels()
            by_label = OrderedDict(zip(labels, handles))
            axes[1].legend(by_label.values(), by_label.keys(), loc='best', prop={'size': 15})

    def _set_axes(self, ax, axis):
        assert axis in (0, 1)

        if axis == 0:
@ -251,25 +369,12 @@ class Printer:
            uv_max = [0., float(self.height)]
            xyz_max = pixel_to_camera(uv_max, self.kk, self.z_max)
            x_max = abs(xyz_max[0])  # shortcut to avoid oval circles in case of different kk
            corr = round(float(x_max / 3))
            ax.plot([0, x_max], [0, self.z_max], 'k--')
            ax.plot([0, -x_max], [0, self.z_max], 'k--')
            ax.set_ylim(0, self.z_max + 1)
            ax.set_xlim(-x_max + corr, x_max - corr)
            ax.set_ylim(0, self.z_max + 1)
            ax.set_xlabel("X [m]")
            ax.set_ylabel("Z [m]")

        plt.xticks(fontsize=self.attr['fontsize_ax'])
        plt.yticks(fontsize=self.attr['fontsize_ax'])
        return ax


def draw_legend(axes):
    handles, labels = axes[1].get_legend_handles_labels()
    by_label = OrderedDict(zip(labels, handles))
    axes[1].legend(by_label.values(), by_label.keys())


def get_angle(xx, zz):
    """Obtain the points to plot the confidence of each annotation"""

    theta = math.atan2(zz, xx)
    angle = theta * (180 / math.pi)

    return angle

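The uncertainty segment in `_draw_uncertainty` above lies along the ray from the camera to the prediction; a short worked example with illustrative numbers:

```python
import math

# theta is the ray direction from the camera to the predicted location;
# the interval spans +/- std along that ray.
xx_pred, zz_pred, std = 1.0, 5.0, 0.9   # illustrative values
theta = math.atan2(zz_pred, xx_pred)
delta_x = std * math.cos(theta)
delta_z = std * math.sin(theta)
segment_x = (xx_pred - delta_x, xx_pred + delta_x)
segment_z = (zz_pred - delta_z, zz_pred + delta_z)
print(segment_x, segment_z)  # approx (0.82, 1.18) and (4.12, 5.88)
```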
22
setup.py
@ -1,7 +1,7 @@
from setuptools import setup

# extract version from __init__.py
with open('monoloco/__init__.py', 'r') as f:
with open('monstereo/__init__.py', 'r') as f:
    VERSION_LINE = [l for l in f if l.startswith('__version__')][0]
    VERSION = VERSION_LINE.split('=')[1].strip()[1:-1]

@ -18,7 +18,7 @@ setup(
        'monoloco.utils'
    ],
    license='GNU AGPLv3',
    description='MonoLoco: Monocular 3D Pedestrian Localization and Uncertainty Estimation',
    description='3D localization with semantic keypoints',
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    author='Lorenzo Bertoni',
@ -27,19 +27,19 @@ setup(
    zip_safe=False,

    install_requires=[
        'torch<=1.1.0',
        'Pillow<=6.3',
        'torchvision<=0.3.0',
        'openpifpaf<=0.9.0',
        'tabulate<=0.8.3',  # For evaluation
        'openpifpaf>=v0.12.1',
        'matplotlib'
    ],
    extras_require={
        'test': [
            'pylint<=2.4.2',
            'pytest<=4.6.3',
        'eval': [
            'tabulate==0.8.3',
            'sklearn',
            'pandas',
            'pylint',
            'pytest',
        ],
        'prep': [
            'nuscenes-devkit<=1.0.2',
            'nuscenes-devkit==1.0.2',
        ],
    },
)

89
tests/test_iou.ipynb
Normal file
@ -0,0 +1,89 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "def calculate_iou(box1, box2):\n",
    "\n",
    "    # Calculate the (x1, y1, x2, y2) coordinates of the intersection of box1 and box2. Calculate its area.\n",
    "    xi1 = max(box1[0], box2[0])\n",
    "    yi1 = max(box1[1], box2[1])\n",
    "    xi2 = min(box1[2], box2[2])\n",
    "    yi2 = min(box1[3], box2[3])\n",
    "    inter_area = max((xi2 - xi1), 0) * max((yi2 - yi1), 0)  # max() accounts for non-overlapping boxes\n",
    "\n",
    "    # Calculate the union area using the formula: Union(A,B) = A + B - Inter(A,B)\n",
    "    box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])\n",
    "    box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])\n",
    "    union_area = box1_area + box2_area - inter_area\n",
    "\n",
    "    # compute the IoU\n",
    "    iou = inter_area / union_area\n",
    "\n",
    "    return iou"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "15.0\n",
      "[8.450052369622647, 12.393410142113215, 88.45005236962265, 77.39341014211321]\n",
      "0.4850460596873889\n"
     ]
    }
   ],
   "source": [
    "x1 = 75\n",
    "y1 = 60\n",
    "\n",
    "box1 = [0, 0, x1, y1]\n",
    "alpha = math.atan2(110, 75)  # direction along which box2 is shifted\n",
    "diag = 15\n",
    "x_cateto = diag * math.cos(alpha)\n",
    "y_cateto = diag * math.sin(alpha)\n",
    "print(math.sqrt(x_cateto**2 + y_cateto**2))\n",
    "box2 = [x_cateto, y_cateto, x1 + x_cateto + 5, y1 + y_cateto + 5]\n",
    "print(box2)\n",
    "print(calculate_iou(box1, box2))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
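A few sanity checks for `calculate_iou` above; the expected values follow directly from the intersection-over-union formula:

```python
box = [0, 0, 10, 10]
assert calculate_iou(box, box) == 1.0                        # identical boxes
assert calculate_iou(box, [20, 20, 30, 30]) == 0.0           # disjoint boxes
assert abs(calculate_iou(box, [0, 5, 10, 15]) - 1/3) < 1e-9  # half overlap: 50/150
```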
@ -9,10 +9,10 @@ sys.path.insert(0, os.path.join('..', 'monoloco'))

from PIL import Image

from monoloco.train import Trainer
from monoloco.network import MonoLoco
from monoloco.network.process import preprocess_pifpaf, factory_for_gt
from monoloco.visuals.printer import Printer
from stereoloco.train import Trainer
from stereoloco.network import MonoLoco
from stereoloco.network.process import preprocess_pifpaf, factory_for_gt
from stereoloco.visuals.printer import Printer

JOINTS = 'tests/joints_sample.json'
PIFPAF_KEYPOINTS = 'tests/002282.png.pifpaf.json'
@ -44,7 +44,7 @@ def tst_printer(dic_out, kk, image_path):
    """Draw a fake figure"""
    with open(image_path, 'rb') as f:
        pil_image = Image.open(f).convert('RGB')
    printer = Printer(image=pil_image, output_path='tests/test_image', kk=kk, output_types=['combined'], z_max=15)
    printer = Printer(image=pil_image, output_path='tests/test_image', kk=kk, output_types=['multi'], z_max=15)
    figures, axes = printer.factory_axes()
    printer.draw(figures, axes, dic_out, pil_image, save=True)


@ -6,7 +6,7 @@ sys.path.insert(0, os.path.join('..', 'monoloco'))


def test_iou():
    from monoloco.utils import get_iou_matrix
    from stereoloco.utils import get_iou_matrix
    boxes_pred = [[1, 100, 1, 200]]
    boxes_gt = [[100., 120., 150., 160.], [12, 110, 130., 160.]]
    iou_matrix = get_iou_matrix(boxes_pred, boxes_gt)
@ -14,7 +14,7 @@ def test_iou():


def test_pixel_to_camera():
    from monoloco.utils import pixel_to_camera
    from stereoloco.utils import pixel_to_camera
    kk = [[718.3351, 0., 600.3891], [0., 718.3351, 181.5122], [0., 0., 1.]]
    zz = 10
    uv_vector = [1000., 400.]

@ -10,7 +10,7 @@ sys.path.insert(0, os.path.join('..', 'monoloco'))

def test_printer():
    """Draw a fake figure"""
    from monoloco.visuals.printer import Printer
    from stereoloco.visuals.printer import Printer
    test_list = [[718.3351, 0., 600.3891], [0., 718.3351, 181.5122], [0., 0., 1.]]
    boxes = [xx + [0] for xx in test_list]
    kk = test_list