List of journal and conference papers published within the project. For a list of MSc and BSc theses, click here.
2018
Ian Cherabier, Johannes L. Schönberger, Martin Oswald, Marc Pollefeys, Andreas Geiger. Learning Priors for Semantic 3D Reconstruction. In: European Conference on Computer Vision (ECCV), 2018. https://demuc.de/papers/cherabier2018learning.pdf https://demuc.de/papers/cherabier2018learning_supp.pdf https://demuc.de/papers/cherabier2018learning.bib
Wei Zeng, Theo Gevers. 3DContextNet: K-d Tree Guided Hierarchical Learning of Point Clouds Using Local and Global Contextual Cues. In: Geometry Meets Deep Learning Workshop, ECCV, 2018.
Nicola Strisciuglio, George Azzopardi, Nicolai Petkov. Brain-Inspired Robust Delineation Operator. In: European Conference on Computer Vision Workshops, Springer, 2018, pp. 555–565. https://arxiv.org/pdf/1811.10240.pdf
Can Pu, Nanbo Li, Radim Tylecek, Robert B. Fisher. DUGMA: Dynamic Uncertainty-Based Gaussian Mixture Alignment. In: International Conference on 3D Vision (3DV), 2018. http://homepages.inf.ed.ac.uk/rbf/PAPERS/DUGMA18.pdf
Hoang-An Le, Anil S. Baslamisli, Thomas Mensink, Theo Gevers. Three for one and one for three: Flow, Segmentation, and Surface Normals. In: British Machine Vision Conference (BMVC), 2018.
Radim Tylecek, Robert B. Fisher. Consistent Semantic Annotation of Outdoor Datasets via 2D/3D Label Transfer. Sensors, 18(7), 2018. ISSN 1424-8220, doi:10.3390/s18072249. https://github.com/rtylecek/rosemat
The advance of scene understanding methods based on machine learning relies on the availability of large ground truth datasets, which are essential for their training and evaluation. Construction of such datasets with imagery from real sensor data, however, typically requires much manual annotation of semantic regions, delivered by substantial human labour. To speed up this process, we propose a framework for semantic annotation of scenes captured by moving camera(s), e.g., mounted on a vehicle or robot. It makes use of an available 3D model of the traversed scene to project segmented 3D objects into each camera frame to obtain an initial annotation of the associated 2D image, which is then refined manually by the user. The refined annotation can be transferred to the next consecutive frame using optical flow estimation. We have evaluated the efficiency of the proposed framework during the production of a labelled outdoor dataset. The analysis of annotation times shows that up to 43% less effort is required on average, and the consistency of the labelling is also improved.
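As an illustration only (this is not the authors' rosemat code), the two steps described in the abstract can be sketched as follows, assuming a standard pinhole camera model, a set of labelled 3D points, and a precomputed forward optical flow field; all function and variable names here are hypothetical.

```python
# Illustrative sketch: project semantically labelled 3D points into a camera
# frame to obtain an initial 2D annotation, then propagate the labels to the
# next frame with a precomputed optical flow field.
import numpy as np

def project_labels(points_xyz, labels, K, R, t, height, width):
    """points_xyz: (N, 3) world points, labels: (N,) class ids,
    K: (3, 3) intrinsics, R, t: world-to-camera rotation/translation."""
    cam = points_xyz @ R.T + t                     # world -> camera coordinates
    keep = cam[:, 2] > 0                           # points in front of the camera
    cam, lab = cam[keep], labels[keep]
    uvw = cam @ K.T
    uv = uvw[:, :2] / uvw[:, 2:3]                  # perspective division
    u, v = np.round(uv[:, 0]).astype(int), np.round(uv[:, 1]).astype(int)
    valid = (u >= 0) & (u < width) & (v >= 0) & (v < height)
    u, v, lab, depth = u[valid], v[valid], lab[valid], cam[valid, 2]
    label_map = np.zeros((height, width), dtype=np.int32)   # 0 = unlabelled
    order = np.argsort(-depth)                     # far points first, near overwrite
    label_map[v[order], u[order]] = lab[order]
    return label_map

def propagate_labels(label_map, flow):
    """flow[v, u] = (du, dv) displacement of pixel (u, v) from frame t to t+1."""
    h, w = label_map.shape
    next_map = np.zeros_like(label_map)
    vs, us = np.nonzero(label_map)
    u2 = np.round(us + flow[vs, us, 0]).astype(int)
    v2 = np.round(vs + flow[vs, us, 1]).astype(int)
    ok = (u2 >= 0) & (u2 < w) & (v2 >= 0) & (v2 < h)
    next_map[v2[ok], u2[ok]] = label_map[vs[ok], us[ok]]
    return next_map
```

The propagated map would then serve as the initialisation that the user refines manually, as described in the abstract.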
Nicola Strisciuglio, Maria Leyva Vallina, Nicolai Petkov, Rafael Munoz Salinas. Camera Localization in Outdoor Garden Environments Using Artificial Landmarks. In: IEEE International Work Conference on Bioinspired Intelligence, 2018. https://ieeexplore.ieee.org/document/8464139
B.A.J. van Tuijl, A.P.M. Tielen, A. Mencarelli, J. Hemming. Structured design of a novel end-effector for a bush trimming robot. In: European Society of Agricultural Engineers (EurAgEng) Conference, AgEng 2018, 2018.
The European TrimBot2020 project researches the robotics and vision technologies needed to prototype the first autonomous outdoor garden trimming robot. The robot navigates over different terrains, approaches boxwood plants and trims them to a desired shape. The robot platform is based on a modified Bosch robotic lawnmower, which navigates autonomously using 3D vision-based scene analysis. During trimming, a robotic arm is controlled by visual servoing in order to trim the bush. A novel end-effector had to be designed to guarantee flexibility of the manipulator, precision of trimming and smoothness of the trimmed bush surface. This paper describes the structured design of this bush trimmer. When faced with a design problem with many interconnected system elements, structured design guides the designers iteratively, step by step, in making the right design choices at the right moment during the different design phases. First, preliminary research is done to analyse the problem and to assess the goals of the end-effector. Second, the functions are determined, and working principles are found and put into a coherent structure. Finally, this leads to several preliminary designs, from which the most promising one is selected and built as a working prototype. In addition, 3D Computer-Aided Design (CAD) tools and rapid prototyping are used to test ideas throughout the design process. The final design, based on contra-rotating blades, is discussed in terms of how and to what extent it meets the requirements, objectives and functions identified during the structured design process. Lab and field tests have shown the first functional results, and points of improvement have been identified. A novel trimming method, based on contra-rotating blades, was found through structured design that meets the demands and limitations of the other system components of the robot.
Nicola Strisciuglio, Radim Tylecek, Nicolai Petkov, Peter Bieber, Jochen Hemming, Eldert van Henten, Torsten Sattler, Marc Pollefeys, Theo Gevers, Thomas Brox, Robert B. Fisher. TrimBot2020: an outdoor robot for automatic gardening. In: 50th International Symposium on Robotics, VDE Verlag GmbH, Berlin/Offenbach, 2018. http://trimbot2020.webhosting.rug.nl/wp-content/uploads/2018/04/tb_isr.pdf
Nikolaus Mayer, Eddy Ilg, Philipp Fischer, Caner Hazirbas, Daniel Cremers, Alexey Dosovitskiy, Thomas Brox. What Makes Good Synthetic Training Data for Learning Disparity and Optical Flow Estimation? International Journal of Computer Vision, 2018. https://arxiv.org/abs/1801.06397
Johannes L. Schönberger, Andreas Geiger, Marc Pollefeys, Torsten Sattler. Semantic Visual Localization. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. https://arxiv.org/abs/1712.05773
Anil S. Baslamisli, Hoang-An Le, Theo Gevers. CNN based Learning using Reflection and Retinex Models for Intrinsic Image Decomposition. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. https://arxiv.org/abs/1712.01056
2017
Torsten Sattler, Radim Tylecek, Thomas Brox, Marc Pollefeys, Robert B. Fisher. 3D Reconstruction Meets Semantics – Reconstruction Challenge 2017. Technical report, ICCV Workshop, Venice, Italy, 2017. http://trimbot2020.webhosting.rug.nl/wp-content/uploads/2017/11/rms_challenge.pdf
Part of the workshop is a challenge on combining 3D and semantic information in complex scenes. To this end, a challenging outdoor dataset, captured by a robot driving through a semantically rich garden that contains fine geometric details, was released. A multi-camera rig is mounted on top of the robot, enabling the use of both stereo and motion stereo information. Precise ground truth for the 3D structure of the garden has been obtained with a laser scanner, and accurate pose estimates for the robot are available as well. Ground truth semantic labels and ground truth depth from a laser scan are used for benchmarking the quality of the 3D reconstructions.
Sezer Karaoglu, Yang Liu, Theo Gevers, Arnold W.M. Smeulders. Point Light Source Position Estimation from RGB-D Images by Learning Surface Attribute. IEEE Transactions on Image Processing, 2017. http://ieeexplore.ieee.org/document/7990258/ http://trimbot2020.webhosting.rug.nl/wp-content/uploads/2017/08/TIP_LSP.pdf
Light source position estimation is a difficult yet important problem in computer vision. A common approach for estimating the light source position (LSP) assumes Lambert's law. However, in real-world scenes, Lambert's law does not hold for all types of surfaces. Instead of assuming that all surfaces follow Lambert's law, our approach classifies image surface segments based on their photometric and geometric surface attributes (e.g., glossy, matte, curved) and assigns weights to image surface segments based on their suitability for LSP estimation. In addition, we propose the use of the estimated camera pose to globally constrain LSP for RGB-D video sequences. Experiments on the Boom dataset and a newly collected RGB-D video dataset show that the proposed method outperforms the state-of-the-art methods. The results demonstrate that weighting image surface segments based on their attributes outperforms state-of-the-art methods in which all image surface segments contribute equally. In particular, with the proposed surface weighting, the angular error for light source position estimation is reduced from 12.6° to 8.2° and from 24.6° to 4.8° for the Boom and RGB-D video datasets, respectively. Moreover, using the camera pose to globally constrain LSP provides higher accuracy (4.8°) compared to using single frames (8.5°).
Benjamin Ummenhofer, Huizhong Zhou, Jonas Uhrig, Nikolaus Mayer, Eddy Ilg, Alexey Dosovitskiy, Thomas Brox. DeMoN: Depth and Motion Network for Learning Monocular Stereo. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. https://arxiv.org/pdf/1612.02401.pdf
In this paper we formulate structure from motion as a learning problem. We train a convolutional network end-to-end to compute depth and camera motion from successive, unconstrained image pairs. The architecture is composed of multiple stacked encoder-decoder networks, the core part being an iterative network that is able to improve its own predictions. The network estimates not only depth and motion, but additionally surface normals, optical flow between the images and confidence of the matching. A crucial component of the approach is a training loss based on spatial relative differences. Compared to traditional two-frame structure from motion methods, results are more accurate and more robust. In contrast to the popular depth-from-single-image networks, DeMoN learns the concept of matching and, thus, better generalizes to structures not seen during training.
Eddy Ilg, Nikolaus Mayer, T. Saikia, Margret Keuper, Alexey Dosovitskiy, Thomas Brox. FlowNet 2.0: Evolution of Optical Flow Estimation with Deep Networks. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. http://lmb.informatik.uni-freiburg.de/Publications/2017/IMKDB17 https://lmb.informatik.uni-freiburg.de/Publications/2017/IMKDB17/paper-FlowNet_2_0__CVPR.pdf
The FlowNet demonstrated that optical flow estimation can be cast as a learning problem. However, the state of the art with regard to the quality of the flow has still been defined by traditional methods. Particularly on small displacements and real-world data, FlowNet cannot compete with variational methods. In this paper, we advance the concept of end-to-end learning of optical flow and make it work really well. The large improvements in quality and speed are caused by three major contributions: first, we focus on the training data and show that the schedule of presenting data during training is very important. Second, we develop a stacked architecture that includes warping of the second image with intermediate optical flow. Third, we elaborate on small displacements by introducing a sub-network specializing in small motions. FlowNet 2.0 is only marginally slower than the original FlowNet but decreases the estimation error by more than 50%. It performs on par with state-of-the-art methods, while running at interactive frame rates. Moreover, we present faster variants that allow optical flow computation at up to 140 fps with accuracy matching the original FlowNet.
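For illustration only (this is not the FlowNet 2.0 implementation), the warping step mentioned in the abstract, i.e. resampling the second image towards the first with an intermediate flow estimate before feeding it to the next network in the stack, could be sketched as a plain backward bilinear warp; the function name and array layout below are assumptions.

```python
# Illustrative sketch of backward warping with an intermediate optical flow.
import numpy as np

def warp_backward(img2, flow):
    """Bilinearly sample img2 at positions displaced by flow, so that
    warp_backward(img2, flow)[v, u] ~ img2[v + flow[v, u, 1], u + flow[v, u, 0]].
    img2: (H, W, C) float array, flow: (H, W, 2) flow from image 1 to image 2."""
    h, w = flow.shape[:2]
    grid_u, grid_v = np.meshgrid(np.arange(w), np.arange(h))
    u = np.clip(grid_u + flow[..., 0], 0, w - 1.001)   # clamp to stay in bounds
    v = np.clip(grid_v + flow[..., 1], 0, h - 1.001)
    u0, v0 = np.floor(u).astype(int), np.floor(v).astype(int)
    du, dv = (u - u0)[..., None], (v - v0)[..., None]  # fractional offsets per channel
    top = (1 - du) * img2[v0, u0] + du * img2[v0, u0 + 1]
    bot = (1 - du) * img2[v0 + 1, u0] + du * img2[v0 + 1, u0 + 1]
    return (1 - dv) * top + dv * bot
```

With a perfect flow estimate, the warped second image would align with the first, so the residual between the two is what the next network in the stack has to explain.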
Johannes L. Schönberger, Hans Hardmeier, Torsten Sattler, Marc Pollefeys. Comparative Evaluation of Hand-Crafted and Learned Local Features. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. http://trimbot2020.webhosting.rug.nl/wp-content/uploads/2017/04/schoenberger2017comparative-5.pdf http://trimbot2020.webhosting.rug.nl/wp-content/uploads/2017/04/schoenberger2017comparative_supp.pdf
Thomas Schöps, Johannes L. Schönberger, Silvano Galliani, Torsten Sattler, Konrad Schindler, Marc Pollefeys, Andreas Geiger. A Multi-View Stereo Benchmark with High-Resolution Images and Multi-Camera Videos. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. http://trimbot2020.webhosting.rug.nl/wp-content/uploads/2017/04/schoeps2017benchmark-1.pdf http://trimbot2020.webhosting.rug.nl/wp-content/uploads/2017/04/schoeps2017benchmark_supp-4.pdf
Dominik Honegger, Torsten Sattler, Marc Pollefeys. Embedded Real-time Multi-Baseline Stereo. In: Proc. IEEE International Conference on Robotics and Automation (ICRA), 2017. http://trimbot2020.webhosting.rug.nl/wp-content/uploads/2017/05/ICRA_2017_multi_baseline.pdf
L. A. Horna, R. B. Fisher. 3D plane labeling stereo matching with content aware adaptive windows. In: Proc. 12th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications, 2017. http://homepages.inf.ed.ac.uk/rbf/PAPERS/visapphorna.pdf