@misc{10481/107732,
  author       = {Herrera-Granda, Erick P. and Torres-Cantero, Juan C. and Herrera-Granda, Israel D. and Lucio-Naranjo, José F.},
  title        = {Deep-DSO: Improving Mapping of Direct Sparse Odometry Using CNN-Based Single-Image Depth Estimation},
  year         = {2025},
  month        = {10},
  publisher    = {MDPI},
  organization = {CropID Project (Ref. AS No. 25)},
  doi          = {10.3390/math13203330},
  url          = {https://hdl.handle.net/10481/107732},
  keywords     = {CNN direct sparse odometry, Monocular visual odometry, Monocular 3D reconstruction},
  abstract     = {In recent years, SLAM, visual odometry, and structure-from-motion approaches have widely addressed the problems of 3D reconstruction and ego-motion estimation. Among the many input modalities that can be used to solve these ill-posed problems, the purely visual alternative using a single monocular RGB camera has attracted the attention of many researchers due to its low cost and widespread availability in handheld devices. One of the best proposals currently available is the Direct Sparse Odometry (DSO) system, which has demonstrated the ability to accurately recover trajectories and depth maps using monocular sequences as the only source of information. Given the impressive advances in single-image depth estimation using neural networks, this work proposes an extension of the DSO system, named DeepDSO. DeepDSO integrates the state-of-the-art NeW CRFs neural network as a depth estimation module, providing a depth prior for each candidate point, which reduces the point search interval along the epipolar line. This integration improves the DSO algorithm's depth point initialization and allows each proposed point to converge faster to its true depth. Experiments carried out on the TUM-Mono dataset demonstrate that adding the neural network depth estimation module to the DSO pipeline significantly reduces rotation, translation, scale, start-segment alignment, end-segment alignment, and RMSE errors.},
}