Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

TFDSv4 carla_video_tracking upgrade. #1829

Closed
wants to merge 46 commits into from
Closed
Show file tree
Hide file tree
Changes from 25 commits
Commits
Show all changes
46 commits
Select commit Hold shift + click to select a range
1200bbc
fixing configs for new format
Nov 30, 2022
2d12b3c
adding preprocessing for carla_obj_det_dev
Nov 30, 2022
873442d
adding dataset dir for carla_obj_det_dev
Nov 30, 2022
b7bc58e
updating tf import
Nov 30, 2022
df90fbb
update cached_datasets.json
Dec 2, 2022
c0fbbe3
Merge branch 'tfdsv4' into tfdsv4-fix-carla-configs
davidslater Dec 6, 2022
216c4da
handling merge conflict
Dec 6, 2022
bd3621a
formatting
Dec 6, 2022
c9aec26
fix bug resulting from conflict handling
Dec 6, 2022
1787e0e
removing code that got duplicated in merge conflict
Dec 6, 2022
98caa12
renaming a function; fixing a docstring
Dec 6, 2022
c74e710
removing duplicate declaration
Dec 6, 2022
5e92bc3
Merge remote-tracking branch 'lc/tfdsv4-fix-carla-configs' into tfdsv…
jprokos26 Dec 13, 2022
52d9cde
Adding initial files, ArmoryDataGenerator needs updated to include co…
jprokos26 Dec 19, 2022
cda1beb
Merge branch 'tfdsv4' into tfdsv4-carla-video
jprokos26 Dec 19, 2022
80cfa6d
Adding context for Generators. Input shapes causing issue with metric…
jprokos26 Dec 20, 2022
41b1686
Adding unversioned files.
jprokos26 Dec 20, 2022
b7afdd0
Separating dataloader batches into List[Dict[ndarray: (frames, coords…
jprokos26 Dec 20, 2022
f552723
Migrating carla_video_tracking_test and implementing preprocessing.
jprokos26 Dec 20, 2022
69a6498
adding carla_video_tracking_test checksum
jprokos26 Dec 20, 2022
83ae25d
Adding carla_video_tracking_test cached dataset
jprokos26 Dec 20, 2022
3059170
removing carla_video_tracking_dev to readd as rename instead of new file
jprokos26 Dec 20, 2022
bc069bd
marking dev as rename
jprokos26 Dec 20, 2022
b35df54
removing extraneous code
jprokos26 Dec 20, 2022
09e2df3
removing testing changes
jprokos26 Dec 20, 2022
de621f6
user init feature
davidslater Nov 29, 2022
5b70cec
update docs
davidslater Nov 29, 2022
58bec8d
update docs and error
davidslater Dec 2, 2022
1ba2c28
better metrics
davidslater Nov 16, 2022
02834e4
Adding initial files, ArmoryDataGenerator needs updated to include co…
jprokos26 Dec 19, 2022
61a487f
found an easy change to add custom metrics without having to worry as…
ppark-twosixtech Dec 5, 2022
4d8ac63
reverting changes to test loading custom metrics
ppark-twosixtech Dec 6, 2022
f8bc198
Merge pytorch, tf2, and carla mot images (#1786)
Dec 9, 2022
0b7ffa0
fix image link (#1817)
Dec 9, 2022
014e1fc
Adding context for Generators. Input shapes causing issue with metric…
jprokos26 Dec 20, 2022
d718e69
Adding unversioned files.
jprokos26 Dec 20, 2022
ef3b1f7
Separating dataloader batches into List[Dict[ndarray: (frames, coords…
jprokos26 Dec 20, 2022
2577998
Migrating carla_video_tracking_test and implementing preprocessing.
jprokos26 Dec 20, 2022
07741c5
adding carla_video_tracking_test checksum
jprokos26 Dec 20, 2022
631e25b
Adding carla_video_tracking_test cached dataset
jprokos26 Dec 20, 2022
5943015
removing carla_video_tracking_dev to readd as rename instead of new file
jprokos26 Dec 20, 2022
4ca907b
marking dev as rename
jprokos26 Dec 20, 2022
204097e
removing extraneous code
jprokos26 Dec 20, 2022
5757f0b
removing testing changes
jprokos26 Dec 20, 2022
53708ef
removing extraneous code
jprokos26 Dec 21, 2022
e1668e0
Applying rebase from tfdsv4-carla-fix-configs to tfdsv4.
jprokos26 Dec 21, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions armory/datasets/adversarial/carla_obj_det_dev/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
"""carla_obj_det_dev dataset."""

from .carla_obj_det_dev import CarlaObjDetDev
225 changes: 225 additions & 0 deletions armory/datasets/adversarial/carla_obj_det_dev/carla_obj_det_dev.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,225 @@
"""carla_obj_det_dev dataset."""

import collections
import json
import os
from copy import deepcopy
import numpy as np

import tensorflow as tf
import tensorflow_datasets as tfds

# Catalog description shown by TFDS for this builder.
_DESCRIPTION = """
Synthetic multimodality (RGB, depth) dataset generated using CARLA (https://carla.org).
"""

# BibTeX citation for the CARLA simulator paper (Dosovitskiy et al., CoRL 2017).
_CITATION = """
@inproceedings{Dosovitskiy17,
title = { {CARLA}: {An} Open Urban Driving Simulator},
author = {Alexey Dosovitskiy and German Ros and Felipe Codevilla and Antonio Lopez and Vladlen Koltun},
booktitle = {Proceedings of the 1st Annual Conference on Robot Learning},
pages = {1--16},
year = {2017}
}
"""

# Source archive for the v2.0.0 dev split; downloaded by _split_generators.
# fmt: off
_URLS = "https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_od_dev_2.0.0.tar.gz"
# fmt: on


class CarlaObjDetDev(tfds.core.GeneratorBasedBuilder):
    """DatasetBuilder for carla_obj_det_dev dataset.

    Multimodal (RGB + depth) object-detection examples rendered with the
    CARLA simulator.  Each example pairs one RGB frame with its matching
    depth frame, carries COCO-style object annotations shared by both
    modalities, and includes metadata describing the "green screen" patch
    region used for adversarial-patch evaluation.
    """

    VERSION = tfds.core.Version("2.0.0")
    RELEASE_NOTES = {
        "1.0.0": "Initial release.",
        "1.0.1": "Correcting error to RGB and depth image pairing",
        "2.0.0": "Eval5 update with higher resolution, HD textures, accurate annotations, and objects overlapping patch",
    }

    def _info(self) -> tfds.core.DatasetInfo:
        """Returns the dataset metadata (feature spec, description, citation)."""
        features = {
            # sequence of [RGB, depth] images; index 0 is RGB, index 1 is depth
            # (see the ordering of fpath_rgb/fpath_depth in _generate_examples)
            "image": tfds.features.Sequence(
                tfds.features.Image(shape=(960, 1280, 3)),
                length=2,
            ),
            # sequence of image features for [RGB, depth]
            "images": tfds.features.Sequence(
                tfds.features.FeaturesDict(
                    {
                        "file_name": tfds.features.Text(),
                        "height": tf.int64,
                        "width": tf.int64,
                        "id": tf.int64,
                    },
                ),
                length=2,
            ),
            # both modalities share the same categories
            "categories": tfds.features.Sequence(
                tfds.features.FeaturesDict(
                    {
                        "id": tf.int64,  # {'pedestrian':1, 'vehicles':2, 'trafficlight':3}
                        "name": tfds.features.Text(),
                        "supercategory": tfds.features.Text(),
                    }
                )
            ),
            # both modalities share the same objects
            "objects": tfds.features.Sequence(
                {
                    "id": tf.int64,
                    "image_id": tf.int64,
                    "area": tf.int64,  # un-normalized area
                    "boxes": tfds.features.BBoxFeature(),  # normalized bounding box [ymin, xmin, ymax, xmax]
                    "labels": tfds.features.ClassLabel(num_classes=5),
                    "is_crowd": tf.bool,
                }
            ),
            # these data only apply to the "green screen patch" objects, which both modalities share
            "patch_metadata": tfds.features.FeaturesDict(
                {
                    # green screen vertices in (x,y) starting from top-left moving clockwise
                    "gs_coords": tfds.features.Tensor(shape=[4, 2], dtype=tf.int32),
                    # binarized segmentation mask of patch.
                    # mask[x,y] == 1 indicates patch pixel; 0 otherwise
                    "mask": tfds.features.Image(shape=(960, 1280, 3)),
                    # scalar average depth value over the patch region
                    "avg_patch_depth": tfds.features.Tensor(shape=(), dtype=tf.float64),
                }
            ),
        }

        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict(features),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Returns SplitGenerators — this dataset exposes a single "dev" split."""
        path = dl_manager.download_and_extract(_URLS)
        return {"dev": self._generate_examples(os.path.join(path, "dev"))}

    def _generate_examples(self, path):
        """yield examples

        Walks the extracted archive under `path`, pairing each RGB frame with
        its depth frame (same file name, different folder) and attaching the
        COCO annotations and patch metadata for that frame.

        Args:
            path: filesystem path to the extracted "dev" directory.

        Yields:
            (idx, example) tuples where example matches the feature dict in
            `_info`.
        """

        # For each image, gets its annotations and yield relevant data.
        # NOTE(review): these relative folder names are fixed by the archive
        # layout of carla_od_dev_2.0.0.tar.gz — verify if the archive changes.
        depth_folder = "_out/sensor.camera.depth.2"
        foreground_mask_folder = "_out/foreground_mask"
        patch_metadata_folder = "_out/patch_metadata"

        annotation_path = os.path.join(
            path, "_out", "kwcoco_annotations_without_patch_and_sans_tiny_objects.json"
        )

        cocoanno = COCOAnnotation(annotation_path)

        images_rgb = (
            cocoanno.images()
        )  # list of dictionaries of RGB image id, height, width, file_name

        # sort images alphabetically so example ids are deterministic across runs
        images_rgb = sorted(images_rgb, key=lambda x: x["file_name"].lower())

        for idx, image_rgb in enumerate(images_rgb):

            # Discard irrelevant fields; raises KeyError if a field is absent,
            # i.e. every image record is assumed to carry all six keys
            image_rgb.pop("date_captured")
            image_rgb.pop("license")
            image_rgb.pop("coco_url")
            image_rgb.pop("flickr_url")
            image_rgb.pop("video_id")
            image_rgb.pop("frame_index")

            # Pairing RGB and depth: the depth frame shares the RGB file name
            # and lives under depth_folder
            fpath_rgb = image_rgb["file_name"]  # rgb image path
            fname = fpath_rgb.split("/")[-1]
            fname_no_ext = fname.split(".")[0]
            fpath_depth = os.path.join(depth_folder, fname)  # depth image path
            # depth metadata is a copy of the RGB record with only the path swapped,
            # so both entries share id/height/width
            image_depth = deepcopy(image_rgb)
            image_depth["file_name"] = fpath_depth

            # get object annotations for each image
            annotations = cocoanno.get_annotations(image_rgb["id"])

            # For unknown reasons, when kwcoco is saved after removing tiny objects,
            # bbox format changes from [x,y,w,h] to [x1,y1,x2,y2]
            def build_bbox(x1, y1, x2, y2):
                # normalize corner coordinates into a TFDS BBox (ymin, xmin, ymax, xmax)
                return tfds.features.BBox(
                    ymin=y1 / image_rgb["height"],
                    xmin=x1 / image_rgb["width"],
                    ymax=y2 / image_rgb["height"],
                    xmax=x2 / image_rgb["width"],
                )

            example = {
                # [RGB, depth] image paths — order must match the "image" feature doc
                "image": [
                    os.path.join(
                        path,
                        modality,
                    )
                    for modality in [fpath_rgb, fpath_depth]
                ],
                "images": [image_rgb, image_depth],
                "categories": cocoanno.categories(),
                "objects": [
                    {
                        "id": anno["id"],
                        "image_id": anno["image_id"],
                        "area": anno["area"],
                        "boxes": build_bbox(*anno["bbox"]),
                        "labels": anno["category_id"],
                        "is_crowd": bool(anno["iscrowd"]),
                    }
                    for anno in annotations
                ],
                # per-frame patch files are keyed by the frame's base file name
                "patch_metadata": {
                    "gs_coords": np.load(
                        os.path.join(
                            path, patch_metadata_folder, fname_no_ext + "_coords.npy"
                        )
                    ),
                    "avg_patch_depth": np.load(
                        os.path.join(
                            path, patch_metadata_folder, fname_no_ext + "_avg_depth.npy"
                        )
                    ),
                    "mask": os.path.join(path, foreground_mask_folder, fname),
                },
            }

            yield idx, example


class COCOAnnotation:
    """COCO annotation helper class.

    Parses a COCO-format annotation JSON once and builds an index from
    image id to that image's annotation dicts, so per-image lookup during
    example generation is O(1).
    """

    def __init__(self, annotation_path):
        """Load and index the annotation file.

        Args:
            annotation_path: path (or tf.io.gfile-compatible URI) to a
                COCO-format JSON file with "annotations", "categories",
                and "images" keys.
        """
        with tf.io.gfile.GFile(annotation_path) as f:
            data = json.load(f)
        self._data = data

        # for each images["id"], find all annotations such that
        # annotations["image_id"] == images["id"]
        img_id2annotations = collections.defaultdict(list)
        for a in self._data["annotations"]:
            img_id2annotations[a["image_id"]].append(a)
        # sorted() already returns a list, so no extra list() wrapper is needed;
        # sorting by annotation id keeps generation order deterministic
        self._img_id2annotations = {
            k: sorted(v, key=lambda a: a["id"])
            for k, v in img_id2annotations.items()
        }

    def categories(self):
        """Return the category dicts, as sorted in the file."""
        return self._data["categories"]

    def images(self):
        """Return the image dicts, as sorted in the file."""
        return self._data["images"]

    def get_annotations(self, img_id):
        """Return all annotations for the image id (empty list if none)."""
        return self._img_id2annotations.get(img_id, [])
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_od_dev_2.0.0.tar.gz 67507963 30c7593817867eb97b3c7e1358451c576805bb4423599b09ad99f15a2ebdd5c9 carla_od_dev_2.0.0.tar.gz
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
"""carla_video_tracking_dev dataset."""

from .carla_video_tracking_dev import CarlaVideoTrackingDev
15 changes: 5 additions & 10 deletions ...a/adversarial/carla_video_tracking_dev.py → ..._tracking_dev/carla_video_tracking_dev.py
100755 → 100644
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
import glob
import numpy as np
from PIL import Image
import tensorflow.compat.v1 as tf
import tensorflow as tf
import tensorflow_datasets as tfds

_DESCRIPTION = """
Expand All @@ -21,7 +21,7 @@
}
"""

_URLS = "https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_video_tracking_dev_2.0.0.tar.gz"
_URL = "https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_video_tracking_dev_2.0.0.tar.gz"


class CarlaVideoTrackingDev(tfds.core.GeneratorBasedBuilder):
Expand Down Expand Up @@ -70,14 +70,9 @@ def _info(self) -> tfds.core.DatasetInfo:

def _split_generators(self, dl_manager: tfds.download.DownloadManager):
"""Returns SplitGenerators."""
path = dl_manager.download_and_extract(_URLS)

return [
tfds.core.SplitGenerator(
name="dev",
gen_kwargs={"path": os.path.join(path, "dev")},
)
]
path = dl_manager.download_and_extract(_URL)

return {"dev": self._generate_examples(path / "dev")}

def _generate_examples(self, path):
"""Yields examples."""
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_video_tracking_dev_2.0.0.tar.gz 1278862237 8b23ca76bd9602a8e3ff4058335b7fb8ca665660a8a958852715e9a26ffbef20 carla_video_tracking_dev_2.0.0.tar.gz
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
"""carla_video_tracking_test dataset."""

from .carla_video_tracking_test import CarlaVideoTrackingTest
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
import glob
import numpy as np
from PIL import Image
import tensorflow.compat.v1 as tf
import tensorflow as tf
import tensorflow_datasets as tfds


Expand All @@ -22,7 +22,7 @@
}
"""

_URLS = "https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_video_tracking_test_2.0.0.tar.gz"
_URL = "https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_video_tracking_test_2.0.0.tar.gz"


class CarlaVideoTrackingTest(tfds.core.GeneratorBasedBuilder):
Expand Down Expand Up @@ -71,14 +71,8 @@ def _info(self) -> tfds.core.DatasetInfo:

def _split_generators(self, dl_manager: tfds.download.DownloadManager):
"""Returns SplitGenerators."""
path = dl_manager.download_and_extract(_URLS)

return [
tfds.core.SplitGenerator(
name="test",
gen_kwargs={"path": os.path.join(path, "test")},
)
]
path = dl_manager.download_and_extract(_URL)
return {"test": self._generate_examples(path / "test")}

def _generate_examples(self, path):
"""Yields examples."""
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_video_tracking_test_2.0.0.tar.gz 387465525 6bd09f5cf50c0e16f34b5054e9d77f95cb4491a373ecb842431cc58ae50b882e carla_video_tracking_test_2.0.0.tar.gz
1 change: 1 addition & 0 deletions armory/datasets/art_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ class WrappedDataGenerator(DataGenerator):
def __init__(self, gen):
super().__init__(gen.size, gen.batch_size)
self._iterator = gen
self.context = gen.context

def __iter__(self):
return iter(self._iterator)
Expand Down
21 changes: 21 additions & 0 deletions armory/datasets/cached_datasets.json
Original file line number Diff line number Diff line change
@@ -1,11 +1,32 @@
{
"carla_obj_det_dev": {
"sha256": "a7adc2400d1fafb03f6d49d10b61ac6405382bc19df446ee25e3d4afce2775a4",
"size": 64152715,
"subdir": "carla_obj_det_dev/2.0.0",
"url": null,
"version": "2.0.0"
},
"carla_over_obj_det_dev": {
"sha256": "77761f1d5c6eca40984aa40f38fab0568b9bb4a4dca696e876fccaa2dd9be56d",
"size": 59760259,
"subdir": "carla_over_obj_det_dev/1.0.0",
"url": null,
"version": "1.0.0"
},
"carla_video_tracking_dev": {
"sha256": "958d470dcd394928050f4123a7af05b0e389ceeec6fa0a3261df55a65e553b69",
"size": 1281628036,
"subdir": "carla_video_tracking_dev/2.0.0",
"url": null,
"version": "2.0.0"
},
"carla_video_tracking_test": {
"sha256": "8c52281611807243cba425ad3a588f4abca40dfb2b3ab828b9ad8a5191a7df10",
"size": 388218968,
"subdir": "carla_video_tracking_test/2.0.0",
"url": null,
"version": "2.0.0"
},
"digit": {
"sha256": "805fb5e33caf2029e13f4146c9d06fdb437ac5b0f0aa9668e3201922b617c559",
"size": 8349857,
Expand Down
6 changes: 6 additions & 0 deletions armory/datasets/config_load.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
"""

from armory.datasets import load, preprocessing, generator, filtering
from armory.datasets.context import contexts


def load_dataset(
Expand All @@ -20,6 +21,7 @@ def load_dataset(
index=None,
class_ids=None,
drop_remainder=False,
context=None,
):
# All are keyword elements by design
if name is None:
Expand Down Expand Up @@ -64,6 +66,9 @@ def load_dataset(

shuffle_elements = shuffle_files

if context is None and name in contexts:
context = contexts[name]

return generator.ArmoryDataGenerator(
info,
ds_dict,
Expand All @@ -78,4 +83,5 @@ def load_dataset(
element_map=preprocessing_fn,
shuffle_elements=shuffle_elements,
key_map=None,
context=context,
)
Loading