compatibility with pifpaf v0.12.1 (#7)

* update with new pifpaf version

* pylint

* pylint
Lorenzo Bertoni 2021-02-24 11:44:57 +01:00 committed by GitHub
parent cee8050add
commit bbaf32d9e2
6 changed files with 18 additions and 15 deletions

View File

@@ -1,4 +1,4 @@
 """Open implementation of MonoLoco++ / MonStereo."""
-__version__ = '0.2.2'
+__version__ = '0.2.3'

View File

@@ -7,11 +7,11 @@ import numpy as np
 import torch
 import torchvision
+from ..utils import get_keypoints, pixel_to_camera, to_cartesian, back_correct_angles
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
-from ..utils import get_keypoints, pixel_to_camera, to_cartesian, back_correct_angles
 BF = 0.54 * 721
 z_min = 4
 z_max = 60

View File

@@ -16,7 +16,7 @@ import PIL
 import openpifpaf
 import openpifpaf.datasets as datasets
 from openpifpaf.predict import processor_factory, preprocess_factory
-from openpifpaf import decoder, network, visualizer, show
+from openpifpaf import decoder, network, visualizer, show, logger
 from .visuals.printer import Printer
 from .network import Loco
@@ -45,14 +45,15 @@ def factory_from_args(args):
                  "Using a ShuffleNet backbone")
         args.checkpoint = 'shufflenetv2k30'
+    logger.configure(args, LOG) # logger first
     # Devices
     args.device = torch.device('cpu')
     args.disable_cuda = False
     args.pin_memory = False
     if torch.cuda.is_available():
         args.device = torch.device('cuda')
         args.pin_memory = True
     args.loader_workers = 8
     LOG.debug('neural network device: %s', args.device)
     # Add visualization defaults
     args.figure_width = 10
@@ -69,7 +70,7 @@ def factory_from_args(args):
     # Configure
     decoder.configure(args)
-    network.configure(args)
+    network.Factory.configure(args)
     show.configure(args)
     visualizer.configure(args)
@@ -105,7 +106,7 @@ def predict(args):
     # unbatch (only for MonStereo)
     for idx, (pred, meta) in enumerate(zip(pred_batch, meta_batch)):
         print('batch %d: %s', batch_i, meta['file_name'])
-        pred = preprocess.annotations_inverse(pred, meta)
+        pred = [ann.inverse_transform(meta) for ann in pred]
         if args.output_directory is None:
             splits = os.path.split(meta['file_name'])

View File

@@ -2,7 +2,7 @@
 import argparse
-from openpifpaf import decoder, network, visualizer, show
+from openpifpaf import decoder, network, visualizer, show, logger
 def cli():
@@ -39,15 +39,17 @@ def cli():
     predict_parser.add_argument('--dpi', help='image resolution', type=int, default=150)
     predict_parser.add_argument('--long-edge', default=None, type=int,
                                 help='rescale the long side of the image (aspect ratio maintained)')
     predict_parser.add_argument('--disable-cuda', action='store_true', help='disable CUDA')
     predict_parser.add_argument('--focal',
                                 help='focal length in mm for a sensor size of 7.2x5.4 mm. Default nuScenes sensor',
                                 type=float, default=5.7)
     # Pifpaf parsers
-    decoder.cli(predict_parser)
-    network.cli(predict_parser)
-    show.cli(predict_parser)
-    visualizer.cli(predict_parser)
+    decoder.cli(parser)
+    logger.cli(parser)
+    network.Factory.cli(parser)
+    show.cli(parser)
+    visualizer.cli(parser)
     # Monoloco
     predict_parser.add_argument('--net', help='Choose network: monoloco, monoloco_p, monoloco_pp, monstereo')

View File

@@ -111,7 +111,7 @@ class Printer:
         # Initialize multi figure, resizing it for aesthetic proportion
         if 'multi' in self.output_types:
-            assert 'bird' and 'front' not in self.output_types, \
+            assert 'bird' not in self.output_types and 'front' not in self.output_types, \
                 "multi figure cannot be print together with front or bird ones"
             self.y_scale = self.width / (self.height * 2) # Defined proportion

View File

@@ -28,7 +28,7 @@ setup(
     zip_safe=False,
     install_requires=[
-        'openpifpaf==v0.12b1',
+        'openpifpaf==v0.12.1',
         'matplotlib'
     ],
     extras_require={