diff --git a/docs/MonoLoco++.md b/docs/MonoLoco++.md
index be6e28e..f7c89dd 100644
--- a/docs/MonoLoco++.md
+++ b/docs/MonoLoco++.md
@@ -57,7 +57,25 @@ To show all the instances estimated by MonoLoco add the argument `show_all` to t
 ![predict_all](out_002282.png.multi_all.jpg)
 
 ### Social Distancing
-WIP
+To visualize social distancing compliance, add the argument `--social_distance` to the predict command.
+
+An example from the Collective Activity Dataset is provided below.
+
+![social distance front](out_frame0038.jpg.front.png)
+
+![social distance bird](out_frame0038.jpg.bird.png)
+
+To visualize social distancing, run the command below:
+```
+python -m monstereo.run predict \
+docs/frame0038.jpg \
+--net monoloco_pp \
+--social_distance \
+--output_types front bird --show_all \
+--model data/models/monoloco_pp-201203-1424.pkl -o
+```
+
+
 
 ### Orientation and Bounding Box dimensions
 MonoLoco++ estimates orientation and box dimensions as well. Results are saved in a json file when using the command
diff --git a/docs/frame0038.jpg b/docs/frame0038.jpg
new file mode 100644
index 0000000..7050b2c
Binary files /dev/null and b/docs/frame0038.jpg differ
diff --git a/docs/out_frame0038.jpg.bird.png b/docs/out_frame0038.jpg.bird.png
new file mode 100644
index 0000000..19cdca5
Binary files /dev/null and b/docs/out_frame0038.jpg.bird.png differ
diff --git a/docs/out_frame0038.jpg.front.png b/docs/out_frame0038.jpg.front.png
new file mode 100644
index 0000000..e64071f
Binary files /dev/null and b/docs/out_frame0038.jpg.front.png differ
diff --git a/monstereo/activity.py b/monstereo/activity.py
index a7a937a..7fe9af2 100644
--- a/monstereo/activity.py
+++ b/monstereo/activity.py
@@ -251,7 +251,8 @@ def show_social(args, image_t, output_path, annotations, dic_out):
         draw_orientation(ax, uv_centers, sizes, angles, colors, mode='front')
 
     if 'bird' in args.output_types:
-        with bird_canvas(args, output_path) as ax1:
+        z_max = min(args.z_max, 4 + max([el[1] for el in xz_centers]))
+        with bird_canvas(output_path, z_max) as ax1:
             draw_orientation(ax1, xz_centers, [], angles, colors, mode='bird')
             draw_uncertainty(ax1, xz_centers, stds)
 
@@ -273,14 +274,14 @@ def get_pifpaf_outputs(annotations):
 
 
 @contextmanager
-def bird_canvas(args, output_path):
+def bird_canvas(output_path, z_max):
     fig, ax = plt.subplots(1, 1)
     fig.set_tight_layout(True)
     output_path = output_path + '.bird.png'
-    x_max = args.z_max / 1.5
-    ax.plot([0, x_max], [0, args.z_max], 'k--')
-    ax.plot([0, -x_max], [0, args.z_max], 'k--')
-    ax.set_ylim(0, args.z_max + 1)
+    x_max = z_max / 1.5
+    ax.plot([0, x_max], [0, z_max], 'k--')
+    ax.plot([0, -x_max], [0, z_max], 'k--')
+    ax.set_ylim(0, z_max + 1)
     yield ax
     fig.savefig(output_path)
     plt.close(fig)
diff --git a/monstereo/predict.py b/monstereo/predict.py
index c46c0df..676e183 100644
--- a/monstereo/predict.py
+++ b/monstereo/predict.py
@@ -37,7 +37,7 @@ def factory_from_args(args):
     if not args.checkpoint:
         args.checkpoint = 'data/models/shufflenetv2k30-201104-224654-cocokp-d75ed641.pkl'  # Default model
 
-    # Decices
+    # Devices
    args.device = torch.device('cpu')
     args.disable_cuda = False
     args.pin_memory = False
@@ -98,8 +98,8 @@
     for batch_i, (image_tensors_batch, _, meta_batch) in enumerate(data_loader):
         pred_batch = processor.batch(model, image_tensors_batch, device=args.device)
 
-        # unbatch
-        for pred, meta in zip(pred_batch, meta_batch):
+        # unbatch (only for MonStereo)
+        for idx, (pred, meta) in enumerate(zip(pred_batch, meta_batch)):
             LOG.info('batch %d: %s', batch_i, meta['file_name'])
             pred = preprocess.annotations_inverse(pred, meta)
 
@@ -112,7 +112,7 @@
             print('image', batch_i, meta['file_name'], output_path)
 
             pifpaf_out = [ann.json_data() for ann in pred]
-            if batch_i == 0:
+            if idx == 0:
                 pifpaf_outputs = pred  # to only print left image for stereo
                 pifpaf_outs = {'left': pifpaf_out}
                 with open(meta_batch[0]['file_name'], 'rb') as f:
@@ -120,7 +120,7 @@
             else:
                 pifpaf_outs['right'] = pifpaf_out
 
-            # Load the original image
+            # 3D Predictions
             if args.net in ('monoloco_pp', 'monstereo'):
                 im_name = os.path.basename(meta['file_name'])
 
diff --git a/monstereo/run.py b/monstereo/run.py
index 29b6546..2d4601f 100644
--- a/monstereo/run.py
+++ b/monstereo/run.py
@@ -53,18 +53,16 @@
     predict_parser.add_argument('--path_gt', help='path of json file with gt 3d localization',
                                 default='data/arrays/names-kitti-200615-1022.json')
     predict_parser.add_argument('--transform', help='transformation for the pose', default='None')
-    predict_parser.add_argument('--z_max', type=int, help='maximum meters distance for predictions', default=30)
+    predict_parser.add_argument('--z_max', type=int, help='maximum meters distance for predictions', default=100)
     predict_parser.add_argument('--n_dropout', type=int, help='Epistemic uncertainty evaluation', default=0)
     predict_parser.add_argument('--dropout', type=float, help='dropout parameter', default=0.2)
     predict_parser.add_argument('--show_all', help='only predict ground-truth matches or all', action='store_true')
 
     # Social distancing and social interactions
     predict_parser.add_argument('--social_distance', help='social', action='store_true')
-    predict_parser.add_argument('--json_dir', help='for social')
     predict_parser.add_argument('--threshold_prob', type=float, help='concordance for samples', default=0.25)
-    predict_parser.add_argument('--threshold_dist', type=float, help='min distance of people', default=2)
-    predict_parser.add_argument('--margin', type=float, help='conservative for noise in orientation', default=1.5)
-    predict_parser.add_argument('--radii', type=tuple, help='o-space radii', default=(0.25, 1, 2))
+    predict_parser.add_argument('--threshold_dist', type=float, help='min distance of people', default=2.5)
+    predict_parser.add_argument('--radii', type=tuple, help='o-space radii', default=(0.3, 0.5, 1))
 
     # Training
     training_parser.add_argument('--joints', help='Json file with input joints',
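
For context on the `bird_canvas` refactor above, here is a minimal, self-contained sketch (not part of the patch) of the behaviour it introduces: the bird's-eye plot range is clamped to the farthest detected pedestrian plus a 4 m margin instead of always spanning `args.z_max`, and `bird_canvas` now receives that value directly rather than the whole `args` namespace. The `xz_centers` coordinates below are made up for illustration; only matplotlib is required.

```
from contextlib import contextmanager

import matplotlib.pyplot as plt


@contextmanager
def bird_canvas(output_path, z_max):
    # Same shape as the patched function in monstereo/activity.py
    fig, ax = plt.subplots(1, 1)
    fig.set_tight_layout(True)
    output_path = output_path + '.bird.png'
    x_max = z_max / 1.5
    ax.plot([0, x_max], [0, z_max], 'k--')   # right field-of-view boundary
    ax.plot([0, -x_max], [0, z_max], 'k--')  # left field-of-view boundary
    ax.set_ylim(0, z_max + 1)
    yield ax
    fig.savefig(output_path)
    plt.close(fig)


# Hypothetical (x, z) ground-plane centers in meters, standing in for the values
# that show_social() computes from the network output
xz_centers = [(-1.2, 6.5), (0.4, 9.8), (2.1, 14.3)]
args_z_max = 100  # the new default of --z_max

# Adaptive range: farthest person plus a 4 m margin, capped at --z_max
z_max = min(args_z_max, 4 + max(el[1] for el in xz_centers))

with bird_canvas('out_frame0038.jpg', z_max) as ax1:
    ax1.scatter([x for x, _ in xz_centers], [z for _, z in xz_centers], marker='x')
```

With these example coordinates `z_max` evaluates to 18.3 m, so a scene with only nearby pedestrians is no longer drawn on a 100 m axis (the new `--z_max` default).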