Commit c877a16c4b ("temp")
Parent: 612286457e
@@ -24,11 +24,6 @@ class ImageList(torch.utils.data.Dataset):
         with open(image_path, 'rb') as f:
             image = Image.open(f).convert('RGB')
 
-        if self.scale > 1.01 or self.scale < 0.99:
-            image = torchvision.transforms.functional.resize(image,
-                                                             (round(self.scale * image.size[1]),
-                                                              round(self.scale * image.size[0])),
-                                                             interpolation=Image.BICUBIC)
         # PIL images are not iterables
         original_image = torchvision.transforms.functional.to_tensor(image)  # 0-255 --> 0-1
         image = image_transform(image)
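For context, the block removed above applied a fixed bicubic rescaling to each PIL image before the tensor conversion. A standalone equivalent of that step (a sketch only; `rescale` is an illustrative name, and it assumes a torchvision version whose functional `resize` still accepts a PIL image together with `Image.BICUBIC`):

import torchvision
from PIL import Image


def rescale(image, scale):
    # Bicubic rescaling of a PIL image by `scale`, as the removed ImageList branch did.
    # PIL reports size as (width, height), so the target is passed as (height, width).
    if scale > 1.01 or scale < 0.99:
        image = torchvision.transforms.functional.resize(
            image,
            (round(scale * image.size[1]), round(scale * image.size[0])),
            interpolation=Image.BICUBIC)
    return image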
@@ -2,96 +2,137 @@
 # pylint: disable=too-many-statements, too-many-branches, undefined-loop-variable
 
 import os
+import glob
 import json
+import logging
 from collections import defaultdict
 
 
 import torch
+import PIL
 from PIL import Image
+import openpifpaf
+import openpifpaf.datasets as datasets
+from openpifpaf.predict import processor_factory, preprocess_factory
 
 from .visuals.printer import Printer
 from .visuals.pifpaf_show import KeypointPainter, image_canvas
 from .network import PifPaf, ImageList, Loco
 from .network.process import factory_for_gt, preprocess_pifpaf
 
+LOG = logging.getLogger(__name__)
+
+
+def factory_from_args(args):
+
+    # Merge the model_pifpaf argument
+    if not args.checkpoint:
+        args.checkpoint = 'shufflenetv2k30'  # Default model
+    # glob
+    if args.glob:
+        args.images += glob.glob(args.glob)
+    if not args.images:
+        raise Exception("no image files given")
+
+    # add args.device
+    args.device = torch.device('cpu')
+    args.disable_cuda = False
+    args.pin_memory = False
+    if torch.cuda.is_available():
+        args.device = torch.device('cuda')
+        args.pin_memory = True
+
+    # Add num_workers
+    args.loader_workers = 8
+
+    # Add visualization defaults
+    args.figure_width = 10
+    args.dpi_factor = 1.0
+
+    # TODO
+    args.long_edge = None
+    if args.net == 'monstereo':
+        args.batch_size = 2
+    else:
+        args.batch_size = 1
+
+    return args
+
+
 def predict(args):
 
     cnt = 0
+    args = factory_from_args(args)
 
     # Load Models
-    pifpaf = PifPaf(args)
-    assert args.mode in ('mono', 'stereo', 'pifpaf')
+    assert args.net in ('monoloco_pp', 'monstereo', 'pifpaf')
 
-    if 'mono' in args.mode:
-        monoloco = Loco(model=args.model, net='monoloco_pp',
-                        device=args.device, n_dropout=args.n_dropout, p_dropout=args.dropout)
+    if args.net in ('monoloco_pp', 'monstereo'):
+        net = Loco(model=args.model, net=args.net, device=args.device, n_dropout=args.n_dropout, p_dropout=args.dropout)
 
-    if 'stereo' in args.mode:
-        monstereo = Loco(model=args.model, net='monstereo',
-                         device=args.device, n_dropout=args.n_dropout, p_dropout=args.dropout)
 
     # data
-    data = ImageList(args.images, scale=args.scale)
-    if args.mode == 'stereo':
+    processor, model = processor_factory(args)
+    preprocess = preprocess_factory(args)
+
+    # data
+    data = datasets.ImageList(args.images, preprocess=preprocess)
+    if args.net == 'monstereo':
         assert len(data.image_paths) % 2 == 0, "Odd number of images in a stereo setting"
-        bs = 2
-    else:
-        bs = 1
     data_loader = torch.utils.data.DataLoader(
-        data, batch_size=bs, shuffle=False,
-        pin_memory=args.pin_memory, num_workers=args.loader_workers)
+        data, batch_size=args.batch_size, shuffle=False,
+        pin_memory=False, collate_fn=datasets.collate_images_anns_meta)
 
-    for idx, (image_paths, image_tensors, processed_images_cpu) in enumerate(data_loader):
-        images = image_tensors.permute(0, 2, 3, 1)
+    # visualizers
+    annotation_painter = openpifpaf.show.AnnotationPainter()
 
-        processed_images = processed_images_cpu.to(args.device, non_blocking=True)
-        fields_batch = pifpaf.fields(processed_images)
+    for batch_i, (image_tensors_batch, _, meta_batch) in enumerate(data_loader):
+        pred_batch = processor.batch(model, image_tensors_batch, device=args.device)
 
-        # unbatch stereo pair
-        for ii, (image_path, image, processed_image_cpu, fields) in enumerate(zip(
-                image_paths, images, processed_images_cpu, fields_batch)):
+        # unbatch
+        for pred, meta in zip(pred_batch, meta_batch):
+            LOG.info('batch %d: %s', batch_i, meta['file_name'])
+            pred = preprocess.annotations_inverse(pred, meta)
 
             if args.output_directory is None:
-                splits = os.path.split(image_paths[0])
+                splits = os.path.split(meta['file_name'])
                 output_path = os.path.join(splits[0], 'out_' + splits[1])
             else:
-                file_name = os.path.basename(image_paths[0])
+                file_name = os.path.basename(meta['file_name'])
                 output_path = os.path.join(args.output_directory, 'out_' + file_name)
-            print('image', idx, image_path, output_path)
-            keypoint_sets, scores, pifpaf_out = pifpaf.forward(image, processed_image_cpu, fields)
+            print('image', batch_i, meta['file_name'], output_path)
+            pifpaf_out = [ann.json_data() for ann in pred]
 
-            if ii == 0:
+            if batch_i == 0:
                 pifpaf_outputs = [keypoint_sets, scores, pifpaf_out]  # keypoints_sets and scores for pifpaf printing
-                images_outputs = [image]  # List of 1 or 2 elements with pifpaf tensor and monoloco original image
+                images_outputs = [cpu_image]  # List of 1 or 2 elements with pifpaf tensor and monoloco original image
                 pifpaf_outs = {'left': pifpaf_out}
-                image_path_l = image_path
             else:
                 pifpaf_outs['right'] = pifpaf_out
 
-        if args.mode in ('stereo', 'mono'):
-            # Extract calibration matrix and ground truth file if present
-            with open(image_path_l, 'rb') as f:
-                pil_image = Image.open(f).convert('RGB')
-                images_outputs.append(pil_image)
+        # Load the original image
+        if args.net in ('monoloco_pp', 'monstereo'):
+            with open(meta['file_name'], 'rb') as f:
+                cpu_image = PIL.Image.open(f).convert('RGB')
 
-            im_name = os.path.basename(image_path_l)
-            im_size = (float(image.size()[1] / args.scale), float(image.size()[0] / args.scale))  # Original
+            im_name = os.path.basename(meta['file_name'])
+            im_size = (cpu_image.size()[1], cpu_image.size()[0])  # Original
             kk, dic_gt = factory_for_gt(im_size, name=im_name, path_gt=args.path_gt)
 
             # Preprocess pifpaf outputs and run monoloco
             boxes, keypoints = preprocess_pifpaf(pifpaf_outs['left'], im_size, enlarge_boxes=False)
 
-            if args.mode == 'mono':
+            if args.net == 'monoloco_pp':
                 print("Prediction with MonoLoco++")
-                dic_out = monoloco.forward(keypoints, kk)
-                dic_out = monoloco.post_process(dic_out, boxes, keypoints, kk, dic_gt)
+                dic_out = net.forward(keypoints, kk)
+                dic_out = net.post_process(dic_out, boxes, keypoints, kk, dic_gt)
 
             else:
                 print("Prediction with MonStereo")
                 boxes_r, keypoints_r = preprocess_pifpaf(pifpaf_outs['right'], im_size)
-                dic_out = monstereo.forward(keypoints, kk, keypoints_r=keypoints_r)
-                dic_out = monstereo.post_process(dic_out, boxes, keypoints, kk, dic_gt)
+                dic_out = net.forward(keypoints, kk, keypoints_r=keypoints_r)
+                dic_out = net.post_process(dic_out, boxes, keypoints, kk, dic_gt)
 
         else:
             dic_out = defaultdict(list)
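The loop above replaces the old PifPaf wrapper with openpifpaf's own factories, loader collation and decoder. The same calls can be exercised outside the monstereo wrapper roughly as follows (a minimal sketch assuming the openpifpaf release this commit targets, ~0.11; `run_pifpaf` and `paths` are illustrative names, and `args` is a namespace like the one produced by factory_from_args above):

import torch
import openpifpaf.datasets as datasets
from openpifpaf.predict import processor_factory, preprocess_factory


def run_pifpaf(args, paths):
    # Build the decoder + network and the preprocessing transforms from the args namespace
    processor, model = processor_factory(args)
    preprocess = preprocess_factory(args)
    data = datasets.ImageList(paths, preprocess=preprocess)
    loader = torch.utils.data.DataLoader(
        data, batch_size=args.batch_size, shuffle=False,
        collate_fn=datasets.collate_images_anns_meta)
    for image_tensors, _, metas in loader:
        pred_batch = processor.batch(model, image_tensors, device=args.device)
        for pred, meta in zip(pred_batch, metas):
            # Map annotations back to original image coordinates before exporting them
            pred = preprocess.annotations_inverse(pred, meta)
            yield meta['file_name'], [ann.json_data() for ann in pred]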
@@ -107,6 +148,10 @@ def factory_outputs(args, images_outputs, output_path, pifpaf_outputs, dic_out=N
 
     # Save json file
     if args.mode == 'pifpaf':
+        with show.image_canvas(cpu_image, image_out_name) as ax:
+            annotation_painter.annotations(ax, pred)
+
+
         keypoint_sets, scores, pifpaf_out = pifpaf_outputs[:]
 
         # Visualizer
monstereo/predict_old.py (new file, 146 lines)
@@ -0,0 +1,146 @@
+
+# pylint: disable=too-many-statements, too-many-branches, undefined-loop-variable
+
+import os
+import json
+from collections import defaultdict
+
+
+import torch
+from PIL import Image
+
+from .visuals.printer import Printer
+from .visuals.pifpaf_show import KeypointPainter, image_canvas
+from .network import PifPaf, ImageList, Loco
+from .network.process import factory_for_gt, preprocess_pifpaf
+
+
+def predict(args):
+
+    cnt = 0
+
+    # Load Models
+    pifpaf = PifPaf(args)
+    assert args.mode in ('mono', 'stereo', 'pifpaf')
+
+    if 'mono' in args.mode:
+        monoloco = Loco(model=args.model, net='monoloco_pp',
+                        device=args.device, n_dropout=args.n_dropout, p_dropout=args.dropout)
+
+    if 'stereo' in args.mode:
+        monstereo = Loco(model=args.model, net='monstereo',
+                         device=args.device, n_dropout=args.n_dropout, p_dropout=args.dropout)
+
+    # data
+    data = ImageList(args.images, scale=args.scale)
+    if args.mode == 'stereo':
+        assert len(data.image_paths) % 2 == 0, "Odd number of images in a stereo setting"
+        bs = 2
+    else:
+        bs = 1
+    data_loader = torch.utils.data.DataLoader(
+        data, batch_size=bs, shuffle=False,
+        pin_memory=args.pin_memory, num_workers=args.loader_workers)
+
+    for idx, (image_paths, image_tensors, processed_images_cpu) in enumerate(data_loader):
+        images = image_tensors.permute(0, 2, 3, 1)
+
+        processed_images = processed_images_cpu.to(args.device, non_blocking=True)
+        fields_batch = pifpaf.fields(processed_images)
+
+        # unbatch stereo pair
+        for ii, (image_path, image, processed_image_cpu, fields) in enumerate(zip(
+                image_paths, images, processed_images_cpu, fields_batch)):
+
+            if args.output_directory is None:
+                splits = os.path.split(image_paths[0])
+                output_path = os.path.join(splits[0], 'out_' + splits[1])
+            else:
+                file_name = os.path.basename(image_paths[0])
+                output_path = os.path.join(args.output_directory, 'out_' + file_name)
+            print('image', idx, image_path, output_path)
+            keypoint_sets, scores, pifpaf_out = pifpaf.forward(image, processed_image_cpu, fields)
+
+            if ii == 0:
+                pifpaf_outputs = [keypoint_sets, scores, pifpaf_out]  # keypoints_sets and scores for pifpaf printing
+                images_outputs = [image]  # List of 1 or 2 elements with pifpaf tensor and monoloco original image
+                pifpaf_outs = {'left': pifpaf_out}
+                image_path_l = image_path
+            else:
+                pifpaf_outs['right'] = pifpaf_out
+
+        if args.mode in ('stereo', 'mono'):
+            # Extract calibration matrix and ground truth file if present
+            with open(image_path_l, 'rb') as f:
+                pil_image = Image.open(f).convert('RGB')
+                images_outputs.append(pil_image)
+
+            im_name = os.path.basename(image_path_l)
+            im_size = (float(image.size()[1] / args.scale), float(image.size()[0] / args.scale))  # Original
+            kk, dic_gt = factory_for_gt(im_size, name=im_name, path_gt=args.path_gt)
+
+            # Preprocess pifpaf outputs and run monoloco
+            boxes, keypoints = preprocess_pifpaf(pifpaf_outs['left'], im_size, enlarge_boxes=False)
+
+            if args.mode == 'mono':
+                print("Prediction with MonoLoco++")
+                dic_out = monoloco.forward(keypoints, kk)
+                dic_out = monoloco.post_process(dic_out, boxes, keypoints, kk, dic_gt)
+
+            else:
+                print("Prediction with MonStereo")
+                boxes_r, keypoints_r = preprocess_pifpaf(pifpaf_outs['right'], im_size)
+                dic_out = monstereo.forward(keypoints, kk, keypoints_r=keypoints_r)
+                dic_out = monstereo.post_process(dic_out, boxes, keypoints, kk, dic_gt)
+
+        else:
+            dic_out = defaultdict(list)
+            kk = None
+
+        factory_outputs(args, images_outputs, output_path, pifpaf_outputs, dic_out=dic_out, kk=kk)
+        print('Image {}\n'.format(cnt) + '-' * 120)
+        cnt += 1
+
+
+def factory_outputs(args, images_outputs, output_path, pifpaf_outputs, dic_out=None, kk=None):
+    """Output json files or images according to the choice"""
+
+    # Save json file
+    if args.mode == 'pifpaf':
+        keypoint_sets, scores, pifpaf_out = pifpaf_outputs[:]
+
+        # Visualizer
+        keypoint_painter = KeypointPainter(show_box=False)
+        skeleton_painter = KeypointPainter(show_box=False, color_connections=True, markersize=1, linewidth=4)
+
+        if 'json' in args.output_types and keypoint_sets.size > 0:
+            with open(output_path + '.pifpaf.json', 'w') as f:
+                json.dump(pifpaf_out, f)
+
+        if 'keypoints' in args.output_types:
+            with image_canvas(images_outputs[0],
+                              output_path + '.keypoints.png',
+                              show=args.show,
+                              fig_width=args.figure_width,
+                              dpi_factor=args.dpi_factor) as ax:
+                keypoint_painter.keypoints(ax, keypoint_sets)
+
+        if 'skeleton' in args.output_types:
+            with image_canvas(images_outputs[0],
+                              output_path + '.skeleton.png',
+                              show=args.show,
+                              fig_width=args.figure_width,
+                              dpi_factor=args.dpi_factor) as ax:
+                skeleton_painter.keypoints(ax, keypoint_sets, scores=scores)
+
+    else:
+        if any((xx in args.output_types for xx in ['front', 'bird', 'multi'])):
+            print(output_path)
+            if dic_out['boxes']:  # Only print in case of detections
+                printer = Printer(images_outputs[1], output_path, kk, args)
+                figures, axes = printer.factory_axes()
+                printer.draw(figures, axes, dic_out, images_outputs[1])
+
+        if 'json' in args.output_types:
+            with open(os.path.join(output_path + '.monoloco.json'), 'w') as ff:
+                json.dump(dic_out, ff)