Sensor Simulation
Multi-Camera
The script below creates multiple viewports (render products, each with its own resolution and camera pose). Once created, RGB images from each render product are saved to disk by accessing the data through a custom replicator writer or through the built-in annotators.
from omni.isaac.kit import SimulationApp

# The SimulationApp must be created before any other omni.* imports are usable.
simulation_app = SimulationApp()

import os
import omni.kit
import numpy as np
from PIL import Image
from pxr import UsdGeom

import omni.replicator.core as rep
from omni.replicator.core import Writer, AnnotatorRegistry

# Number of replicator frames to capture before shutting down.
NUM_FRAMES = 5

# Save rgb image to file
def save_rgb(rgb_data, file_name):
    """Convert raw RGBA annotator output to an image and write it as <file_name>.png."""
    flat = np.frombuffer(rgb_data, dtype=np.uint8)
    image = Image.fromarray(flat.reshape(*rgb_data.shape, -1), "RGBA")
    image.save(file_name + ".png")
21
22
# Randomize cube color every frame using a replicator randomizer
def cube_color_randomizer():
    """Return the replicator graph node that assigns a uniformly random RGB color
    to every prim whose path matches 'Cube'."""
    cubes = rep.get.prims(path_pattern="Cube")
    with cubes:
        rep.randomizer.color(colors=rep.distribution.uniform((0, 0, 0), (1, 1, 1)))
    return cubes.node
29
30
# Access data through a custom replicator writer
class MyWriter(Writer):
    """Custom replicator writer that saves the rgb annotator output of every
    attached render product as PNG files under ./_out_writer.

    Args:
        rgb: when True, register the "rgb" annotator so its data is delivered
             to write() each frame.
    """

    def __init__(self, rgb: bool = True):
        self._frame_id = 0
        if rgb:
            self.annotators.append(AnnotatorRegistry.get_annotator("rgb"))
        # Create writer output directory
        self.file_path = os.path.join(os.getcwd(), "_out_writer", "")
        print(f"Writing writer data to {self.file_path}")
        out_dir = os.path.dirname(self.file_path)  # renamed: 'dir' shadowed the builtin
        os.makedirs(out_dir, exist_ok=True)

    def write(self, data):
        """Called by replicator each frame with a dict of annotator name -> data."""
        for annotator in data.keys():
            # With multiple render products the annotator key is suffixed with
            # "-<render_product_name>"; with a single render product there is no
            # suffix, so default to "" (the original left the name unbound and
            # raised NameError in that case).
            annotator_split = annotator.split("-")
            render_product_name = annotator_split[-1] if len(annotator_split) > 1 else ""
            if annotator.startswith("rgb"):
                save_rgb(data[annotator], f"{self.file_path}/{render_product_name}_frame_{self._frame_id}")
        self._frame_id += 1
51
52
rep.WriterRegistry.register(MyWriter)

stage = omni.usd.get_context().get_stage()

# Create cube
cube_prim = stage.DefinePrim("/World/Cube", "Cube")
UsdGeom.Xformable(cube_prim).AddTranslateOp().Set((0., 5., 1.))

# Register cube color randomizer to trigger on every frame
rep.randomizer.register(cube_color_randomizer)
with rep.trigger.on_frame():
    rep.randomizer.cube_color_randomizer()

# Create two cameras with different poses
camera_prim1 = stage.DefinePrim("/World/Camera1", "Camera")
cam1_xform = UsdGeom.Xformable(camera_prim1)
cam1_xform.AddTranslateOp().Set((0., 10., 20.))
cam1_xform.AddRotateXYZOp().Set((-15., 0., 0.))

camera_prim2 = stage.DefinePrim("/World/Camera2", "Camera")
cam2_xform = UsdGeom.Xformable(camera_prim2)
cam2_xform.AddTranslateOp().Set((-10., 15., 15.))
cam2_xform.AddRotateXYZOp().Set((-45., 0., 45.))

# One render product per camera, each with its own resolution
rp1 = rep.create.render_product(str(camera_prim1.GetPrimPath()), resolution=(320, 320))
rp2 = rep.create.render_product(str(camera_prim2.GetPrimPath()), resolution=(640, 640))
rp3 = rep.create.render_product("/OmniverseKit_Persp", (1024, 1024))

# Access the data through the custom writer
writer = rep.WriterRegistry.get("MyWriter")
writer.initialize(rgb=True)
writer.attach([rp1, rp2, rp3])

# Access the data through annotators: one rgb annotator per render product
rgb_annotators = []
for render_product in [rp1, rp2, rp3]:
    annot = rep.AnnotatorRegistry.get_annotator("rgb")
    annot.attach([render_product])
    rgb_annotators.append(annot)

# NOTE A list of render products will be supported in the near future, currently only the first render product in the list will be used
# rgb = rep.AnnotatorRegistry.get_annotator("rgb")
# rgb.attach([rp1, rp2, rp3])

# Create annotator output directory
file_path = os.path.join(os.getcwd(), "_out_annot", "")
print(f"Writing annotator data to {file_path}")
annot_dir = os.path.dirname(file_path)  # renamed: 'dir' shadowed the builtin
os.makedirs(annot_dir, exist_ok=True)

for frame_idx in range(NUM_FRAMES):
    rep.orchestrator.step()
    # Get annotator data after each replicator process step
    for annot_idx, rgb_annot in enumerate(rgb_annotators):
        save_rgb(rgb_annot.get_data(), f"{annot_dir}/rp{annot_idx}_step_{frame_idx}")

simulation_app.close()
Get synthetic data at custom events in simulations
The script below uses annotators to access synthetic data at specific points in time during a simulation.
Note
Triggering built-in Replicator writers at custom events will be supported in future releases.
from omni.isaac.kit import SimulationApp

# Launch with a visible window and the ray-traced-lighting renderer.
# The SimulationApp must exist before any other omni.* imports are usable.
simulation_app = SimulationApp(launch_config={"renderer": "RayTracedLighting", "headless": False})

import omni
import numpy as np
import os
import json
from PIL import Image
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicCuboid
import omni.replicator.core as rep
from omni.isaac.core.utils.semantics import add_update_semantics
14
# Util function to save rgb annotator data
def write_rgb_data(rgb_data, file_path):
    """Convert raw RGBA annotator output to an image and write it as <file_path>.png."""
    flat = np.frombuffer(rgb_data, dtype=np.uint8)
    image = Image.fromarray(flat.reshape(*rgb_data.shape, -1), "RGBA")
    image.save(file_path + ".png")
20
21
# Util function to save semantic segmentation annotator data
def write_sem_data(sem_data, file_path):
    """Write semantic segmentation output: the id-to-label mapping as
    <file_path>.json and the colorized segmentation image as <file_path>.png."""
    with open(file_path + ".json", "w") as f:
        json.dump(sem_data["info"]["idToLabels"], f)
    raw = np.frombuffer(sem_data["data"], dtype=np.uint8)
    Image.fromarray(raw.reshape(*sem_data["data"].shape, -1), "RGBA").save(file_path + ".png")
30
31
# Create a new stage with the default ground plane
omni.usd.get_context().new_stage()
world = World()
world.scene.add_default_ground_plane()
world.reset()

# Let the app tick several frames so materials finish loading
for _ in range(20):
    simulation_app.update()

# Create a camera and render product to collect the data from
cam = rep.create.camera(position=(3, 3, 3), look_at=(0, 0, 0))
rp = rep.create.render_product(cam, (512, 512))

# Set the output directory for the data
out_dir = os.getcwd() + "/_out_sim_get_data"
os.makedirs(out_dir, exist_ok=True)
print(f"Outputting data to {out_dir}..")

# NOTE currently replicator writers do not work correctly with isaac simulations and will interfere with the timeline
# writer = rep.WriterRegistry.get("BasicWriter")
# writer.initialize(output_dir=out_dir, rgb=True, semantic_segmentation=True, colorize_semantic_segmentation=True)
# writer.attach([rp])

# Access the data directly through annotators
rgb_annot = rep.AnnotatorRegistry.get_annotator("rgb")
rgb_annot.attach([rp])
sem_annot = rep.AnnotatorRegistry.get_annotator("semantic_segmentation", init_params={"colorize": True})
sem_annot.attach([rp])

for cuboid_idx in range(5):
    # Drop a semantically-labeled cuboid from increasing heights
    cuboid = world.scene.add(
        DynamicCuboid(
            prim_path=f"/World/Cuboid_{cuboid_idx}",
            name=f"Cuboid_{cuboid_idx}",
            position=(0, 0, 10 + cuboid_idx),
        )
    )
    add_update_semantics(cuboid.prim, "Cuboid")

    for sim_step in range(1000):
        world.step(render=True, step_sim=True)
        # Capture once the cuboid has (almost) come to rest
        if np.linalg.norm(cuboid.get_linear_velocity()) < 0.1:
            print(f"Cube_{cuboid_idx} stopped moving after {sim_step} simulation steps, writing data..")
            # NOTE replicator's step is no longer needed since new data is fed in the annotators every world.step()
            # rep.orchestrator.step()
            write_rgb_data(rgb_annot.get_data(), f"{out_dir}/Cube_{cuboid_idx}_step_{sim_step}_rgb")
            write_sem_data(sem_annot.get_data(), f"{out_dir}/Cube_{cuboid_idx}_step_{sim_step}_sem")
            break

simulation_app.close()
Get Camera Parameters
The script below shows how to get the camera parameters associated with a viewport.
import omni
from omni.syntheticdata import helpers
import math

stage = omni.usd.get_context().get_stage()
viewport_api = omni.kit.viewport.utility.get_active_viewport()
# Set viewport resolution; the change takes effect on the next frame
viewport_api.set_texture_resolution((512, 512))
# Current resolution and aspect ratio
width, height = viewport_api.get_texture_resolution()
aspect_ratio = width / height

# Camera prim currently attached to the viewport
camera = stage.GetPrimAtPath(viewport_api.get_active_camera())
focal_length = camera.GetAttribute("focalLength").Get()
horiz_aperture = camera.GetAttribute("horizontalAperture").Get()
vert_aperture = camera.GetAttribute("verticalAperture").Get()
# Pixels are square so we can also do: vert_aperture = height / width * horiz_aperture
near, far = camera.GetAttribute("clippingRange").Get()
fov = 2 * math.atan(horiz_aperture / (2 * focal_length))
# Helper to compute the projection matrix
proj_mat = helpers.get_projection_matrix(fov, aspect_ratio, near, far)

# Compute focal lengths (pixels) and principal point
# NOTE(review): width/height appear swapped relative to the usual pinhole
# convention (fx = width * F / horiz_aperture, cx = width * 0.5). The values
# coincide here only because the resolution is square and pixels are square —
# confirm before reusing with non-square resolutions.
focal_x = height * focal_length / vert_aperture
focal_y = width * focal_length / horiz_aperture
center_x = height * 0.5
center_y = width * 0.5