37from dataclasses
import dataclass
38from pathlib
import Path
39from typing
import List, Optional, Tuple
45from visp.core
import CameraParameters, HomogeneousMatrix
46from visp.core
import Color, Display, ImageConvert
47from visp.core
import ImageGray, ImageUInt16, ImageRGBa, ImageFloat
48from visp.io
import ImageIo
49from visp.rbt
import RBTracker, RBFeatureTracker, RBFeatureTrackerInput
53import pyrealsense2
as rs
54import matplotlib.pyplot
as plt
58 The base structure that a class should have to implement a render based feature tracker
61 RBFeatureTracker.__init__(self)
76 def extractFeatures(self, frame: RBFeatureTrackerInput, previousFrame: RBFeatureTrackerInput, cMo: HomogeneousMatrix):
79 def trackFeatures(self, frame: RBFeatureTrackerInput, previousFrame: RBFeatureTrackerInput, cMo: HomogeneousMatrix):
82 def initVVS(self, frame: RBFeatureTrackerInput, previousFrame: RBFeatureTrackerInput, cMo: HomogeneousMatrix):
83 print(
'INITVVS Was called')
86 def computeVVSIter(self, frame: RBFeatureTrackerInput, cMo: HomogeneousMatrix, iteration: int):
92 def display(self, cam: CameraParameters, I: ImageGray, IRGB: ImageRGBa, I_depth: ImageGray):
@dataclass
class FrameData:
    """Images acquired for a single RealSense frame.

    NOTE(review): reconstructed from a truncated source fragment. Field order
    matches the positional construction ``FrameData(I, IRGB, ImageFloat(...))``
    used by ``read_data``; confirm against the original file.
    """
    I: ImageGray  # grayscale view converted from the color frame
    IRGB: ImageRGBa  # RGBA color frame
    I_depth: Optional[ImageFloat]  # depth image in meters; None when depth is disabled
def read_data(depth_scale: Optional[float], IRGB: ImageRGBa, I: ImageGray, pipe: rs.pipeline):
    """Generator yielding aligned color (and optional depth) frames from a RealSense pipeline.

    :param depth_scale: Factor converting raw uint16 depth values to meters,
        or None to disable depth acquisition entirely.
    :param IRGB: RGBA image, updated in place with each new color frame.
    :param I: Grayscale image, updated in place by converting IRGB.
    :param pipe: An already-started RealSense pipeline.
    :yield: A FrameData holding the color, grayscale and (optional) float depth images.
    """
    use_depth = depth_scale is not None
    # Align the depth frames onto the color stream so pixels correspond 1:1.
    align_to = rs.align(rs.stream.color)
    while True:
        frames = pipe.wait_for_frames()
        frames = align_to.process(frames)
        I_np = np.asanyarray(frames.get_color_frame().as_frame().get_data())
        # RealSense delivers RGB8; append an opaque alpha channel to obtain RGBA.
        I_np = np.concatenate((I_np, np.ones_like(I_np[..., 0:1], dtype=np.uint8)), axis=-1)
        IRGB.resize(I_np.shape[0], I_np.shape[1])
        I_rgba_ref = IRGB.numpy()
        I_rgba_ref[...] = I_np
        ImageConvert.convert(IRGB, I, 0)
        I_depth_float = None
        if use_depth:
            # Guard: without a depth stream, depth_scale is None and this
            # multiplication would fail.
            I_depth_raw = np.asanyarray(frames.get_depth_frame().as_frame().get_data())
            I_depth_float = I_depth_raw.astype(np.float32) * depth_scale
        # NOTE(review): ImageFloat(None) path is untested here — confirm behavior
        # when running without a depth stream.
        yield FrameData(I, IRGB, ImageFloat(I_depth_float))
def cam_from_rs_profile(profile) -> Tuple[CameraParameters, int, int]:
    """Build ViSP camera parameters from a RealSense stream profile.

    :param profile: RealSense stream profile, convertible to a video stream profile.
    :return: Tuple of (pinhole intrinsics without distortion, image height, image width).
    """
    intr = profile.as_video_stream_profile().get_intrinsics()
    return CameraParameters(intr.fx, intr.fy, intr.ppx, intr.ppy), intr.height, intr.width
127if __name__ ==
'__main__':
128 parser = argparse.ArgumentParser()
129 parser.add_argument(
'--tracker', type=str, required=
True,
130 help=
'Path to the json file containing the tracker configuration.')
131 parser.add_argument(
'--model', type=str, required=
False,
132 help=
'Path to the .obj/.bam file describing the CAD model.')
134 args = parser.parse_args()
135 tracker_path: str = args.tracker
136 assert Path(tracker_path).exists(),
'Tracker file not found'
137 model_path = args.model
138 if model_path
is not None:
139 assert Path(model_path).exists(),
'3D CAD model file not found'
144 config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 60)
145 config.enable_stream(rs.stream.color, 848, 480, rs.format.rgb8, 60)
147 cfg = pipe.start(config)
148 depth_scale = cfg.get_device().first_depth_sensor().get_depth_scale()
151 tracker = RBTracker()
153 tracker.loadConfigurationFile(tracker_path)
154 extensions = PythonRBExtensions()
155 extensions.parse_python_extensions(tracker, Path(tracker_path))
156 if model_path
is not None:
157 tracker.setModelPath(model_path)
160 tracker.addTracker(custom_feature)
164 tracker.setCameraParameters(cam_color, color_height, color_width)
168 print(
'Color intrinsics:', cam_color)
171 I_depth_display = ImageGray()
173 frame_data = next(data_generator)
177 dI.init(I, 0, 0,
'Color image')
180 dRGB.init(IRGB, I.getWidth(), 0,
'Color image')
182 I_depth = ImageGray()
183 dDepth = get_display()
185 ImageConvert.createDepthHistogram(frame_data.I_depth, I_depth)
186 dDepth.init(I_depth, I.getWidth() * 2, 0,
'Depth')
188 for frame
in data_generator:
190 Display.displayText(I, 50, 0,
'Click to initialize tracking', Color.red)
192 Display.display(IRGB)
194 event = Display.getClick(I, blocking=
False)
197 tracker.startTracking()
198 tracker.initClick(I, tracker_path.replace(
'.json',
'.init'),
True)
199 start_time = time.time()
200 for frame_data
in data_generator:
201 if frame_data.I_depth
is not None:
202 I_depth_np = I_depth.numpy()
203 I_depth_np[...] = ((np.minimum(frame_data.I_depth, 0.5) / 0.5) * 255.0).astype(np.uint8)
205 displayed = [I, IRGB, I_depth]
207 for display_image
in displayed:
208 Display.display(display_image)
209 Display.displayText(I, 50, 0,
'Click to stop tracking', Color.red)
215 tracker.track(I=frame.I, IRGB=frame_data.IRGB, depth=frame_data.I_depth)
216 tracking_time = np.round((time.time() - t1) * 1000.0, 2)
218 Display.displayText(I, 60, 0, f
'Tracking time: {tracking_time}', Color.red)
219 cMo = HomogeneousMatrix()
222 tracker.display(I, IRGB, I_depth)
223 Display.displayFrame(I, cMo, cam_color, 0.05, Color.none, 2)
226 for display_image
in displayed:
227 Display.flush(display_image)
229 event = Display.getClick(I, blocking=
False)
232 end_time = time.time()
233 print(f
'total time = {end_time - start_time}s')
# Interface reference (recovered from extraction residue) — overridable
# RBFeatureTracker methods and module-level helpers:
#   initVVS(self, frame: RBFeatureTrackerInput, previousFrame: RBFeatureTrackerInput, cMo: HomogeneousMatrix)
#   display(self, cam: CameraParameters, I: ImageGray, IRGB: ImageRGBa, I_depth: ImageGray)
#   requiresSilhouetteCandidates(self) -> bool
#   onTrackingIterEnd(self, cMo: HomogeneousMatrix)
#   computeVVSIter(self, frame: RBFeatureTrackerInput, cMo: HomogeneousMatrix, iteration: int)
#   trackFeatures(self, frame: RBFeatureTrackerInput, previousFrame: RBFeatureTrackerInput, cMo: HomogeneousMatrix)
#   onTrackingIterStart(self, frame: RBFeatureTrackerInput, cMo: HomogeneousMatrix)
#   extractFeatures(self, frame: RBFeatureTrackerInput, previousFrame: RBFeatureTrackerInput, cMo: HomogeneousMatrix)
#   cam_from_rs_profile(profile) -> Tuple[CameraParameters, int, int]
#   read_data(depth_scale: Optional[float], IRGB: ImageRGBa, I: ImageGray, pipe: rs.pipeline)