fix(openmvs): export the undistorted reconstruction
Fixes #192
paulinus committed Jul 3, 2017
1 parent 41cea9e commit 7bb8248
Showing 14 changed files with 484 additions and 2 deletions.
8 changes: 8 additions & 0 deletions Dockerfile_opencv3
@@ -0,0 +1,8 @@
FROM paulinus/opensfm-docker-base:opencv3

COPY . /source/OpenSfM

WORKDIR /source/OpenSfM

RUN pip install -r requirements.txt && \
    python setup.py build
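
A usage sketch for this file (the image tag is illustrative):

docker build -t opensfm:opencv3 -f Dockerfile_opencv3 .
docker run -it opensfm:opencv3 bash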
12 changes: 12 additions & 0 deletions bundle.py
@@ -0,0 +1,12 @@

from opensfm import dataset
from opensfm import reconstruction

data = dataset.DataSet('data/data/zanzibar_geo1')
graph = data.load_tracks_graph()
rs = data.load_reconstruction()
for r in rs:
    reconstruction.bundle(graph, r, None, data.config)

data.save_reconstruction(rs, 'reconstruction.rebundled.json')
print(data.images())
18 changes: 18 additions & 0 deletions gather_submodels.py
@@ -0,0 +1,18 @@
import sys

from opensfm import dataset
from opensfm.large import metadataset

r = []  # unaligned reconstructions gathered from all submodels
a = []  # aligned reconstructions gathered from all submodels
path = sys.argv[1]
metadata = metadataset.MetaDataSet(path)

for submodel in metadata.get_submodel_paths():
    data = dataset.DataSet(submodel)
    r.extend(data.load_reconstruction('reconstruction.unaligned.json'))
    a.extend(data.load_reconstruction('reconstruction.aligned.json'))

data = dataset.DataSet(path)
data.save_reconstruction(r, 'reconstruction.unaligned.json')
data.save_reconstruction(a, 'reconstruction.aligned.json')
49 changes: 49 additions & 0 deletions generate_gcp.py
@@ -0,0 +1,49 @@
#!/usr/bin/env python
import argparse

import matplotlib.pyplot as plt
import numpy as np

from opensfm import dataset
from opensfm import features
from opensfm import geo

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Generate GCP by sampling points from the reconstruction')
    parser.add_argument(
        'dataset',
        help='path to the dataset to be processed')
    parser.add_argument(
        '--num_points',
        default=3,
        type=int,
        help='number of points to generate')
    args = parser.parse_args()

    data = dataset.DataSet(args.dataset)
    reference = data.load_reference_lla()
    reconstruction = data.load_reconstruction()[0]

    print('WGS84')
    for i in range(args.num_points):
        point = np.random.choice(list(reconstruction.points.values()))

        for shot in reconstruction.shots.values():
            pixel = shot.project(point.coordinates)
            if np.fabs(pixel).max() < 0.5:  # projection falls inside the image frame
                lla = geo.lla_from_topocentric(
                    point.coordinates[0],
                    point.coordinates[1],
                    point.coordinates[2],
                    reference['latitude'],
                    reference['longitude'],
                    reference['altitude'])

                x, y = features.denormalized_image_coordinates(
                    pixel.reshape(1, 2), shot.camera.width, shot.camera.height)[0]

                print("{} {} {} {} {} {}".format(
                    lla[0], lla[1], lla[2],
                    x, y, shot.id))
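
The output is a ground control point list: a projection header ('WGS84') followed by one line per observation with latitude, longitude, altitude, pixel x, pixel y, and the shot id. Illustrative output (the values below are made up):

WGS84
57.706945 11.966305 58.32 2011.4 1432.9 DJI_0042.JPG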
39 changes: 39 additions & 0 deletions killer.py
@@ -0,0 +1,39 @@
from multiprocessing import Pool
from subprocess import check_output
import threading
import time


def get_pid(name):
    return list(map(int, check_output(["pidof", name]).split()))


class Killer(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        print("Wait 3 seconds...")
        time.sleep(3)
        pids = get_pid('python')
        print(pids)
        print("Found " + str(len(pids)) + " python processes")
        print("Killing middle process")

        # check_output(["kill", "-9", str(pids[int(len(pids) / 2)])])
        # Note: the command string passed to bash -c must not carry extra
        # embedded quotes, or bash looks for a command literally named
        # "kill -9 <pid>".
        check_output(["/bin/bash", "-c",
                      "kill -9 {}".format(pids[int(len(pids) / 2)])])

        print("Processes killed, now waiting for termination...")


def f(x):
    time.sleep(6)
    return x * x


if __name__ == '__main__':
    p = Pool(3)
    print("Started")
    Killer().start()
    print(p.map(f, [1, 2, 3]))
    print("Ended")
53 changes: 53 additions & 0 deletions notes_on_thresholds.txt
@@ -0,0 +1,53 @@


Thresholds
==========


For a triangulated point X and projection P

|x - P X| < t_pixels


For a homography between images

|x1 - H x2| < 2 * t_pixels


For a triangulated point and bearing

|b - normalize(R X + t)| < t_angle


For a rotation between bearings

|b1 - R b2| < 2 * t_angle




The threshold for the image-to-image error has to be twice as big as the
threshold for the 3D-to-image error.  This is because the triangulated 3D
point could lie in the middle of the two image points:

x1 <-------> P X <-------> x2
      t             t

x1 <---------------------> x2
            2 * t



To convert between t_pixels and t_angle, we need the focal length of the camera.
We have

tan(t_angle) = t_pixels / focal_length

Here we arbitrarily assume that the threshold is given for a camera of focal length 1.
Also tan(t) \approx t for small t.  So we simply have

t_angle \approx t_pixels
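
As a numeric sanity check, the exact conversion is easy to verify (a minimal sketch in Python; the function name is illustrative):

import numpy as np

def angle_threshold(t_pixels, focal_length=1.0):
    # tan(t_angle) = t_pixels / focal_length, so invert with arctan.
    return np.arctan(t_pixels / focal_length)

# With focal length 1 and a small pixel threshold the approximation holds:
# angle_threshold(0.004) -> 0.0039999787, i.e. t_angle ~= t_pixels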


See also opengv doc on thresholds here: http://laurentkneip.github.io/opengv/page_how_to_use.html


4 changes: 2 additions & 2 deletions opensfm/commands/export_openmvs.py
@@ -19,8 +19,8 @@ def add_arguments(self, parser):

     def run(self, args):
         data = dataset.DataSet(args.dataset)
-        reconstructions = data.load_reconstruction()
-        graph = data.load_tracks_graph()
+        reconstructions = data.load_undistorted_reconstruction()
+        graph = data.load_undistorted_tracks_graph()

         if reconstructions:
             self.export(reconstructions[0], graph, data)
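
For context, OpenMVS densifies the scene from the undistorted images, so the exporter must read the reconstruction and tracks produced by the undistortion step rather than the original, distorted ones. A usage sketch, assuming the undistort step has been run beforehand (the dataset path is illustrative):

bin/opensfm undistort data/berlin
bin/opensfm export_openmvs data/berlin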
8 changes: 8 additions & 0 deletions remove_images_without_gps.py
@@ -0,0 +1,8 @@
from opensfm import dataset

data = dataset.DataSet('.')

for image in data.images():
    e = data.load_exif(image)
    if 'gps' not in e or 'latitude' not in e['gps']:
        print(image)
14 changes: 14 additions & 0 deletions run_lund.sh
@@ -0,0 +1,14 @@
DATA=data/data/lund_large

./setup_odm_metadataset.py $DATA

bin/opensfm extract_metadata $DATA/opensfm
bin/opensfm detect_features $DATA/opensfm
bin/opensfm match_features $DATA/opensfm
bin/opensfm create_submodels $DATA/opensfm --size 10 --dist 20

python run_submodels.py $DATA/opensfm

bin/opensfm align_submodels $DATA/opensfm

python gather_submodels.py $DATA/opensfm
69 changes: 69 additions & 0 deletions run_submodels.py
@@ -0,0 +1,69 @@
#!/usr/bin/env python

import argparse
import logging
import multiprocessing
import os
import subprocess
import sys

from opensfm.large import metadataset

logger = logging.getLogger(__name__)

logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)


class Reconstructor:
    def __init__(self, command, complete):
        self._command = command
        self._complete = complete

    def __call__(self, submodel_path):
        logger.info("===========================================================")
        logger.info("Reconstructing submodel {}".format(submodel_path))
        logger.info("===========================================================")

        if self._complete:
            self._run_command([self._command, 'extract_metadata', submodel_path])
            self._run_command([self._command, 'detect_features', submodel_path])
            self._run_command([self._command, 'match_features', submodel_path])

        self._run_command([self._command, 'create_tracks', submodel_path])
        self._run_command([self._command, 'reconstruct', submodel_path])

        logger.info("===========================================================")
        logger.info("Submodel {} reconstructed".format(submodel_path))
        logger.info("===========================================================")

    def _run_command(self, args):
        result = subprocess.Popen(args).wait()
        if result != 0:
            raise RuntimeError(result)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Reconstruct all submodels')
    parser.add_argument('dataset',
                        help='path to the dataset to be processed')
    parser.add_argument('-c', '--complete',
                        help='Run the complete pipeline on each subset',
                        action='store_true')
    parser.add_argument('-p', '--processes',
                        help='Number of parallel processes to run',
                        type=int, default=1)
    args = parser.parse_args()

    meta_data = metadataset.MetaDataSet(args.dataset)
    exec_dir = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))
    command = os.path.join(exec_dir, "bin/opensfm")

    submodel_paths = meta_data.get_submodel_paths()
    reconstructor = Reconstructor(command, args.complete)

    if args.processes == 1:
        for submodel_path in submodel_paths:
            reconstructor(submodel_path)
    else:
        p = multiprocessing.Pool(args.processes)
        p.map(reconstructor, submodel_paths)
108 changes: 108 additions & 0 deletions setup_odm_metadataset.py
@@ -0,0 +1,108 @@
#!/usr/bin/env python

"""Setup an ODM metadataset.
A metadataset will be split into multiple submodel folders. Each submodel
will be reconstructed independently. Before dense reconstruction
the different submodels are aligned to each other.
"""

import argparse
import errno
import os

import yaml


def mkdir_p(path):
    """Make a directory including parent directories."""
    try:
        os.makedirs(path)
    except os.error as exc:
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise


def is_image_file(filename):
    extensions = {'jpg', 'jpeg', 'png', 'tif', 'tiff', 'pgm', 'pnm', 'gif'}
    return filename.split('.')[-1].lower() in extensions


def create_image_list(image_path, opensfm_path):
    image_files = filter(is_image_file, os.listdir(image_path))

    lines = []
    relpath = os.path.relpath(image_path, opensfm_path)
    for image in image_files:
        lines.append(os.path.join(relpath, image))

    with open(os.path.join(opensfm_path, 'image_list.txt'), 'w') as fout:
        fout.write("\n".join(lines))


def create_config(opensfm_path, args):
    config = {
        "submodels_relpath": "../submodels/opensfm",
        "submodel_relpath_template": "../submodels/submodel_%04d/opensfm",

        "feature_process_size": args.resize_to,
        "feature_min_frames": args.min_num_features,
        "processes": args.num_cores,
        "matching_gps_neighbors": args.matcher_neighbors,
    }
    with open(os.path.join(opensfm_path, 'config.yaml'), 'w') as fout:
        yaml.dump(config, fout, default_flow_style=False)


def parse_command_line():
    parser = argparse.ArgumentParser(description='Setup an ODM metadataset')
    parser.add_argument('dataset',
                        help='path to the dataset to be processed')

    # TODO(pau): reduce redundancy with OpenDroneMap/opendm/config.py

    parser.add_argument('--resize-to',  # currently doesn't support 'orig'
                        metavar='<integer>',
                        default=2400,
                        type=int,
                        help='resizes images by the largest side')

    parser.add_argument('--min-num-features',
                        metavar='<integer>',
                        default=4000,
                        type=int,
                        help=('Minimum number of features to extract per image. '
                              'More features lead to better results but slower '
                              'execution. Default: %(default)s'))

    parser.add_argument('--num-cores',
                        metavar='<positive integer>',
                        default=4,
                        type=int,
                        help=('The maximum number of cores to use. '
                              'Default: %(default)s'))

    parser.add_argument('--matcher-neighbors',
                        type=int,
                        metavar='<integer>',
                        default=8,
                        help='Number of nearest images to pre-match based on GPS '
                             'exif data. Set to 0 to skip pre-matching. '
                             'Neighbors works together with the Distance parameter; '
                             'set both to 0 to disable pre-matching. OpenSfM '
                             'uses both parameters at the same time, while Bundler '
                             'uses only the one that has a value, preferring the '
                             'Neighbors parameter. Default: %(default)s')

    return parser.parse_args()


if __name__ == '__main__':
    args = parse_command_line()
    data_path = args.dataset
    image_path = os.path.join(data_path, 'images')
    opensfm_path = os.path.join(data_path, 'opensfm')

    mkdir_p(opensfm_path)
    create_image_list(image_path, opensfm_path)
    create_config(opensfm_path, args)
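
With the defaults above, the generated config.yaml would look roughly like this (yaml.dump sorts the keys; the values shown are the argparse defaults):

feature_min_frames: 4000
feature_process_size: 2400
matching_gps_neighbors: 8
processes: 4
submodel_relpath_template: ../submodels/submodel_%04d/opensfm
submodels_relpath: ../submodels/opensfm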