
Commit 547e831

PR #13988 from alexk1976: Adding python example for color/depth alignment using SW device
2 parents: a10e3dc + 47b06a6

2 files changed: +304 -1 lines changed
wrappers/python/examples/align-with-software-device.py

Lines changed: 303 additions & 0 deletions
@@ -0,0 +1,303 @@
## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2025 Intel Corporation. All Rights Reserved.

#####################################################################################################
##
##  Align depth to color with precaptured images in a software device
##
##  Purpose:
##  This example first captures depth and color images from a RealSense camera and then
##  demonstrates aligning depth to color with the precaptured images in a software device.
##
##  Steps:
##  1) stream the RealSense camera (e.g. depth 640x480 @ 30 fps and color 1280x720 @ 30 fps)
##  2) capture the camera depth and color intrinsics and extrinsics
##  3) capture depth and color images and save them to files in npy format
##  4) construct a software device from the saved intrinsics, extrinsics, depth and color images
##  5) align the precaptured depth images to the color images
##
#####################################################################################################

import logging
import cv2
import pyrealsense2 as rs
import numpy as np
import os
import time

fps = 30  # frame rate
tv = 1000.0 / fps  # time interval between frames in milliseconds

max_num_frames = 100  # max number of framesets to be captured into npy files and processed with the software device

depth_file_name = "depth"  # depth_file_name + str(i) + ".npy"
color_file_name = "color"  # color_file_name + str(i) + ".npy"

# intrinsics and extrinsics from the camera
camera_depth_intrinsics = rs.intrinsics()  # camera depth intrinsics
camera_color_intrinsics = rs.intrinsics()  # camera color intrinsics
camera_depth_to_color_extrinsics = rs.extrinsics()  # camera depth to color extrinsics

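# note: with these defaults the capture loop below writes depth0.npy ... depth99.npy and
# color0.npy ... color99.npy to the current working directory; the second part of the
# example reads them back from there.
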
######################## start of first part - capture images from live device #######################################
# stream depth and color on the attached realsense camera and save depth and color frames into files in npy format
try:
    # create a context object; this object owns the handles to all connected realsense devices
    ctx = rs.context()
    devs = list(ctx.query_devices())

    if len(devs) > 0:
        print("Devices: {}".format(devs))
    else:
        print("No camera detected. Please connect a realsense camera and try again.")
        exit(0)

    pipeline = rs.pipeline()

    # configure streams
    config = rs.config()
    config.enable_stream(rs.stream.depth)
    config.enable_stream(rs.stream.color)

    # start streaming
    cfg = pipeline.start(config)

    # get depth scale
    depth_sensor = cfg.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
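    # note: depth_scale converts raw z16 depth values to meters (typically 0.001 on
    # D400-series cameras); for example, a raw value of 1500 then corresponds to ~1.5 m:
    #   distance_in_meters = depth_image[y, x] * depth_scale
    # the same factor is reused below as depth_units for the software depth frames.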
    # get intrinsics and extrinsics
    camera_depth_profile = cfg.get_stream(rs.stream.depth)                                     # fetch depth stream profile
    camera_depth_intrinsics = camera_depth_profile.as_video_stream_profile().get_intrinsics()  # downcast to video_stream_profile and fetch intrinsics

    camera_color_profile = cfg.get_stream(rs.stream.color)                                     # fetch color stream profile
    camera_color_intrinsics = camera_color_profile.as_video_stream_profile().get_intrinsics()  # downcast to video_stream_profile and fetch intrinsics

    camera_depth_to_color_extrinsics = camera_depth_profile.get_extrinsics_to(camera_color_profile)

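    # note: the extrinsics hold a 3x3 rotation (column-major) and a translation in meters
    # that map a point from the depth camera's 3D frame into the color camera's 3D frame:
    #   p_color = R * p_depth + t
    # rs.align uses these, together with both intrinsics, to re-project depth onto color.
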
print("camera depth intrinsic:", camera_depth_intrinsics)
79+
print("camera color intrinsic:", camera_color_intrinsics)
80+
print("camera depth to color extrinsic:", camera_depth_to_color_extrinsics)
81+
82+
print("streaming attached camera and save depth and color frames into files in npy format ...")
83+
84+
i = 0
85+
while i < max_num_frames:
86+
# wait until a new coherent set of frames is available on the device
87+
frames = pipeline.wait_for_frames()
88+
depth = frames.get_depth_frame()
89+
color = frames.get_color_frame()
90+
91+
# Validate that both frames are valid
92+
if not depth or not color:
93+
continue
94+
95+
# convert images to numpy arrays
96+
depth_image = np.asanyarray(depth.get_data())
97+
color_image = np.asanyarray(color.get_data())
98+
# save images in npy format
99+
depth_file = depth_file_name + str(i) + ".npy"
100+
color_file = color_file_name + str(i) + ".npy"
101+
print("saving frame set ", i, depth_file, color_file)
102+
103+
with open(depth_file, 'wb') as f1:
104+
np.save(f1,depth_image)
105+
106+
with open(color_file, 'wb') as f2:
107+
np.save(f2,color_image)
108+
109+
# next frameset
110+
i = i +1
111+
112+
except Exception as e:
113+
logging.error("An error occurred: %s", e, exc_info=True)
114+
exit(1)
115+
116+
######################## end of first part - capture images from live device #######################################
117+
118+
119+
120+
######################## start of second part - align depth to color in software device #############################
# align depth to color with the above precaptured images in a software device

# software device
sdev = rs.software_device()

# software depth sensor
depth_sensor: rs.software_sensor = sdev.add_sensor("Depth")

# depth intrinsics
depth_intrinsics = rs.intrinsics()

depth_intrinsics.width = camera_depth_intrinsics.width
depth_intrinsics.height = camera_depth_intrinsics.height

depth_intrinsics.ppx = camera_depth_intrinsics.ppx
depth_intrinsics.ppy = camera_depth_intrinsics.ppy

depth_intrinsics.fx = camera_depth_intrinsics.fx
depth_intrinsics.fy = camera_depth_intrinsics.fy

depth_intrinsics.coeffs = camera_depth_intrinsics.coeffs  # e.g. [0.0, 0.0, 0.0, 0.0, 0.0]
depth_intrinsics.model = camera_depth_intrinsics.model    # e.g. rs.distortion.brown_conrady

# depth stream
depth_stream = rs.video_stream()
depth_stream.type = rs.stream.depth
depth_stream.width = depth_intrinsics.width
depth_stream.height = depth_intrinsics.height
depth_stream.fps = fps
depth_stream.bpp = 2  # depth z16, 2 bytes per pixel
depth_stream.fmt = rs.format.z16
depth_stream.intrinsics = depth_intrinsics
depth_stream.index = 0
depth_stream.uid = 1  # unique stream id within the software device

depth_profile = depth_sensor.add_video_stream(depth_stream)

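# note: the declared width, height, bpp and fmt must describe the saved npy arrays
# exactly - z16 depth is 2 bytes per pixel, so one depth frame occupies
# width * height * 2 bytes and its row stride is width * 2 bytes.
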
# software color sensor
color_sensor: rs.software_sensor = sdev.add_sensor("Color")

# color intrinsics
color_intrinsics = rs.intrinsics()
color_intrinsics.width = camera_color_intrinsics.width
color_intrinsics.height = camera_color_intrinsics.height

color_intrinsics.ppx = camera_color_intrinsics.ppx
color_intrinsics.ppy = camera_color_intrinsics.ppy

color_intrinsics.fx = camera_color_intrinsics.fx
color_intrinsics.fy = camera_color_intrinsics.fy

color_intrinsics.coeffs = camera_color_intrinsics.coeffs
color_intrinsics.model = camera_color_intrinsics.model

# color stream
color_stream = rs.video_stream()
color_stream.type = rs.stream.color
color_stream.width = color_intrinsics.width
color_stream.height = color_intrinsics.height
color_stream.fps = fps
color_stream.bpp = 3  # color stream rgb8, 3 bytes per pixel in this example
color_stream.fmt = rs.format.rgb8
color_stream.intrinsics = color_intrinsics
color_stream.index = 0
color_stream.uid = 2  # unique stream id within the software device

color_profile = color_sensor.add_video_stream(color_stream)

# depth to color extrinsics
depth_to_color_extrinsics = rs.extrinsics()
depth_to_color_extrinsics.rotation = camera_depth_to_color_extrinsics.rotation
depth_to_color_extrinsics.translation = camera_depth_to_color_extrinsics.translation
depth_profile.register_extrinsics_to(color_profile, depth_to_color_extrinsics)

# start software sensors
depth_sensor.open(depth_profile)
color_sensor.open(color_profile)

# synchronize frames from the depth and color streams
camera_syncer = rs.syncer()
depth_sensor.start(camera_syncer)
color_sensor.start(camera_syncer)

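# note: the syncer matches depth and color frames into framesets by timestamp, which is
# why both software frames in each loop iteration below are stamped with the same i * tv.
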
# create a depth alignment object
# rs.align allows us to perform alignment of depth frames to other frames
# "align_to" is the stream type to which we plan to align depth frames;
# here we align the depth frame to the color frame
align_to = rs.stream.color
align = rs.align(align_to)

# colorizer for depth rendering
colorizer = rs.colorizer()

paused = False

# loop through the pre-captured frames
for i in range(0, max_num_frames):
    print("\nframe set:", i)

    # precaptured depth and color image files in npy format
    df = depth_file_name + str(i) + ".npy"
    cf = color_file_name + str(i) + ".npy"

    if (not os.path.exists(cf)) or (not os.path.exists(df)): continue

    # load depth frame from the precaptured npy file
    print('loading depth frame ', df)
    depth_npy = np.load(df, mmap_mode='r')

    # create a software depth frame
    depth_swframe = rs.software_video_frame()
    depth_swframe.stride = depth_stream.width * depth_stream.bpp  # row size in bytes
    depth_swframe.bpp = depth_stream.bpp
    depth_swframe.timestamp = i * tv
    depth_swframe.pixels = depth_npy
    depth_swframe.domain = rs.timestamp_domain.hardware_clock
    depth_swframe.frame_number = i
    depth_swframe.profile = depth_profile.as_video_stream_profile()
    depth_swframe.depth_units = depth_scale  # meters per raw depth unit, taken from the live sensor
    depth_sensor.on_video_frame(depth_swframe)

    # load color frame from precaptured npy file
    print('loading color frame ', cf)
    color_npy = np.load(cf, mmap_mode='r')

    # create software color frame
    color_swframe = rs.software_video_frame()
    color_swframe.stride = color_stream.width * color_stream.bpp
    color_swframe.bpp = color_stream.bpp
    color_swframe.timestamp = i * tv
    color_swframe.pixels = color_npy
    color_swframe.domain = rs.timestamp_domain.hardware_clock
    color_swframe.frame_number = i
    color_swframe.profile = color_profile.as_video_stream_profile()
    color_sensor.on_video_frame(color_swframe)

    # synchronize depth and color, receive as a frameset
    frames = camera_syncer.wait_for_frames()
    print("frame set:", frames.size(), " ", frames)

    # get the unaligned depth frame
    unaligned_depth_frame = frames.get_depth_frame()
    if not unaligned_depth_frame: continue

    # align depth frame to color
    aligned_frames = align.process(frames)

    aligned_depth_frame = aligned_frames.get_depth_frame()
    color_frame = aligned_frames.get_color_frame()

    if (not aligned_depth_frame) or (not color_frame): continue

    # colorize the aligned depth for rendering
    aligned_depth_frame = colorizer.colorize(aligned_depth_frame)
    npy_aligned_depth_image = np.asanyarray(aligned_depth_frame.get_data())

    npy_color_image = np.asanyarray(color_frame.get_data())

    # render the aligned images:
    #   depth aligned to color
    #   aligned depth on the left, color on the right
    # note: both images are rgb8 while OpenCV expects BGR, so red and blue appear swapped
    # unless converted with cv2.cvtColor
    images = np.hstack((npy_aligned_depth_image, npy_color_image))
    cv2.namedWindow('Align Example', cv2.WINDOW_NORMAL)
    cv2.imshow('Align Example', images)
    key = cv2.waitKey(1)

    # render the original unaligned depth as a reference
    # colorized_unaligned_depth_frame = colorizer.colorize(unaligned_depth_frame)
    # npy_unaligned_depth_image = np.asanyarray(colorized_unaligned_depth_frame.get_data())
    # cv2.imshow("Unaligned Depth", npy_unaligned_depth_image)

    # press the ENTER or SPACEBAR key to pause the image window for 5 seconds
    if key == 13 or key == 32: paused = not paused

    if paused:
        print("Paused for 5 seconds ...", i, ", press ENTER or SPACEBAR key anytime for additional pauses.")
        time.sleep(5)
        paused = not paused

# end of second part - align depth to color with the precaptured images in software device
######################## End of second part - align depth to color in software device #############################

cv2.destroyAllWindows()
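
Once depth is aligned to color, depth and color pixels share the same image coordinates, so the depth at any color pixel can be read straight from the aligned depth frame. Below is a minimal sketch of that, assuming it is placed inside the loop above, right after align.process(frames) and before the colorizer overwrites aligned_depth_frame; cx, cy and distance_m are illustrative names, while aligned_depth_frame and color_intrinsics come from the example.

    # read the distance (in meters) at the center color pixel from the aligned depth frame
    cx = color_intrinsics.width // 2
    cy = color_intrinsics.height // 2
    distance_m = aligned_depth_frame.get_distance(cx, cy)
    print("distance at color pixel ({}, {}): {:.3f} m".format(cx, cy, distance_m))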

wrappers/python/examples/readme.md

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ These Examples demonstrate how to use the python wrapper of the SDK.
8. [Realsense over Ethernet](./ethernet_client_server/README.md) - This example shows how to stream depth data from RealSense depth cameras over ethernet.
9. [D400 self-calibration demo](./depth_auto_calibration_example.py) - Provides a reference implementation for the D400 Self-Calibration Routines flow. The script performs On-Chip Calibration, followed by Focal-Length calibration and finally, the Tare Calibration sub-routines. Follow the [White Paper Link](https://dev.intelrealsense.com/docs/self-calibration-for-depth-cameras) for an in-depth description of the provided calibration methods.
10. [Numpy To Pyrealsense Frame](./numpy_to_pyrealsense_frame.py) - Example of how to convert a numpy array to a pyrealsense frame using a software device.
11. [Stream alignment using SW device](./align-with-software-device.py) - Demonstrates how to align depth and RGB images using a software device.

## Pointcloud Visualization

1. [OpenCV software renderer](https://github.com/IntelRealSense/librealsense/blob/development/wrappers/python/examples/opencv_pointcloud_viewer.py)
