Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1,897 changes: 1,897 additions & 0 deletions Robodriver.txt

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion components/arms/dora-arm-piper/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ authors = [
readme = "README.md"
requires-python = ">=3.8,<3.14"
dependencies = [
"dora-rs (>=0.3.11,<0.4.0)",
"dora-rs (>=0.4.0,<0.5.0)",
"piper_sdk >= 0.0.8",
"numpy>=1.24.4",
]
Expand Down
280 changes: 42 additions & 238 deletions components/cameras/dora-camera-orbbec-v1/dora_camera_orbbec_v1/main.py
Original file line number Diff line number Diff line change
@@ -1,261 +1,65 @@
"""TODO: Add docstring."""
"""Orbbec Gemini 335 color camera node — V4L2 backend (OpenCV).

import os
Uses the kernel uvcvideo driver via /dev/videoN instead of pyorbbecsdk/libusb,
because libob_usb.so fails to parse UVC payload headers on this device,
producing all-constant (green) frames regardless of scene content.
"""

import os
import cv2
import numpy as np
import pyarrow as pa
from dora import Node

import time


try:
from pyorbbecsdk import (
Config,
Context,
FrameSet,
OBError,
OBFormat,
OBSensorType,
Pipeline,
VideoFrame,
VideoStreamProfile,
)
except ImportError as err:
print(
"Please install pyorbbecsdk first by following the instruction at: https://github.com/orbbec/pyorbbecsdk",
)
raise err


class TemporalFilter:
    """Exponential moving-average smoothing across successive frames."""

    def __init__(self, alpha):
        """Remember *alpha*, the blend weight given to the newest frame."""
        self.alpha = alpha
        self.previous_frame = None

    def process(self, frame):
        """Blend *frame* with the previous output and return the result.

        The very first frame is returned unchanged (there is nothing to
        blend with yet); every later call returns
        ``alpha * frame + (1 - alpha) * previous_output``.
        """
        if self.previous_frame is None:
            blended = frame
        else:
            blended = cv2.addWeighted(
                frame, self.alpha, self.previous_frame, 1 - self.alpha, 0,
            )
        self.previous_frame = blended
        return blended


def yuyv_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:
    """Decode a packed YUYV (YUY2) buffer into a BGR image."""
    packed = frame.reshape((height, width, 2))
    return cv2.cvtColor(packed, cv2.COLOR_YUV2BGR_YUY2)


def uyvy_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:
    """Decode a packed UYVY buffer into a BGR image."""
    packed = frame.reshape((height, width, 2))
    return cv2.cvtColor(packed, cv2.COLOR_YUV2BGR_UYVY)


def i420_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:
    """Decode a planar I420 (Y, then U, then V plane) buffer into BGR."""
    luma = frame[:height, :]
    chroma_u = frame[height : height + height // 4].reshape(height // 2, width // 2)
    chroma_v = frame[height + height // 4 :].reshape(height // 2, width // 2)
    merged = cv2.merge([luma, chroma_u, chroma_v])
    return cv2.cvtColor(merged, cv2.COLOR_YUV2BGR_I420)


def nv21_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:
    """Decode a semi-planar NV21 (Y plane + interleaved VU) buffer into BGR."""
    luma = frame[:height, :]
    interleaved_vu = frame[height : height + height // 2].reshape(height // 2, width)
    merged = cv2.merge([luma, interleaved_vu])
    return cv2.cvtColor(merged, cv2.COLOR_YUV2BGR_NV21)


def nv12_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:
    """Decode a semi-planar NV12 (Y plane + interleaved UV) buffer into BGR."""
    luma = frame[:height, :]
    interleaved_uv = frame[height : height + height // 2].reshape(height // 2, width)
    merged = cv2.merge([luma, interleaved_uv])
    return cv2.cvtColor(merged, cv2.COLOR_YUV2BGR_NV12)


def frame_to_bgr_image(frame: VideoFrame):
    """Convert a pyorbbecsdk color ``VideoFrame`` to a BGR numpy image.

    Supports RGB, BGR, YUYV, UYVY, MJPG, I420, NV12 and NV21 source
    formats.

    Args:
        frame: Color frame exposing ``get_width``/``get_height``/
            ``get_format``/``get_data``.

    Returns:
        An ``(height, width, 3)`` uint8 BGR image, or ``None`` when the
        frame's pixel format is unsupported.
    """
    width = frame.get_width()
    height = frame.get_height()
    color_format = frame.get_format()
    data = np.asanyarray(frame.get_data())
    if color_format == OBFormat.RGB:
        image = np.resize(data, (height, width, 3))
        return cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    if color_format == OBFormat.BGR:
        # Bug fix: the data is already in BGR order, so no channel swap is
        # needed. The previous code applied COLOR_BGR2RGB here, returning
        # an RGB image from a function whose contract is to return BGR.
        return np.resize(data, (height, width, 3))
    if color_format == OBFormat.YUYV:
        image = np.resize(data, (height, width, 2))
        return cv2.cvtColor(image, cv2.COLOR_YUV2BGR_YUYV)
    if color_format == OBFormat.MJPG:
        # imdecode already yields BGR channel order.
        return cv2.imdecode(data, cv2.IMREAD_COLOR)
    if color_format == OBFormat.I420:
        return i420_to_bgr(data, width, height)
    if color_format == OBFormat.NV12:
        return nv12_to_bgr(data, width, height)
    if color_format == OBFormat.NV21:
        return nv21_to_bgr(data, width, height)
    if color_format == OBFormat.UYVY:
        image = np.resize(data, (height, width, 2))
        return cv2.cvtColor(image, cv2.COLOR_YUV2BGR_UYVY)
    print(f"Unsupported color format: {color_format}")
    return None


# Depth readings outside this range (in meters) are zeroed out.
MIN_DEPTH_METERS = 0.01
MAX_DEPTH_METERS = 15.0

# Device-selection knobs, all overridable via environment variables.
GET_DEVICE_FROM = os.getenv("GET_DEVICE_FROM", "SN")  # SN or INDEX
DEVICE_SN = os.getenv("DEVICE_SN")  # serial number used when GET_DEVICE_FROM == "SN"
DEVICE_INDEX = int(os.getenv("DEVICE_INDEX", "0"))  # used when GET_DEVICE_FROM == "INDEX"
V4L2_DEVICE = os.getenv("V4L2_DEVICE", "/dev/video16")  # /dev/videoN node for the V4L2 backend
IMAGE_WIDTH = int(os.getenv("IMAGE_WIDTH", "640"))  # requested capture resolution
IMAGE_HEIGHT = int(os.getenv("IMAGE_HEIGHT", "480"))


def main():
    """Capture color frames from a V4L2 device and publish them as JPEG.

    Opens ``V4L2_DEVICE`` through OpenCV's V4L2 backend, requests an MJPG
    stream at ``IMAGE_WIDTH`` x ``IMAGE_HEIGHT`` @ 30 fps, and emits one
    JPEG-encoded ``image`` output per incoming ``tick`` event.

    NOTE(review): as committed, this function interleaved the removed
    pyorbbecsdk pipeline code with the new V4L2 capture code; only the
    coherent V4L2 path is kept here.
    """
    node = Node()

    cap = cv2.VideoCapture(V4L2_DEVICE, cv2.CAP_V4L2)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, IMAGE_WIDTH)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, IMAGE_HEIGHT)
    cap.set(cv2.CAP_PROP_FPS, 30)
    # Request MJPG: low USB bandwidth and good decode quality.
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))

    if not cap.isOpened():
        print(f"[CAM_TOP] ERROR: cannot open {V4L2_DEVICE}", flush=True)
        return

    print(f"[CAM_TOP] opened {V4L2_DEVICE} "
          f"{int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))}x"
          f"{int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))} "
          f"@ {cap.get(cv2.CAP_PROP_FPS):.0f}fps", flush=True)

    try:
        for event in node:
            if event["type"] == "INPUT" and event["id"] == "tick":
                ret, bgr_image = cap.read()
                if not ret or bgr_image is None:
                    print("[CAM_TOP] frame read failed", flush=True)
                    continue

                mean_val = bgr_image.mean()
                print(f"[CAM_TOP] mean={mean_val:.1f} std={bgr_image.std():.1f} size={bgr_image.shape}", flush=True)

                # A near-black frame indicates a bad capture (cf. the
                # all-constant-frame failure mode in the module docstring);
                # skip it rather than publish garbage.
                if mean_val < 5:
                    continue

                ret2, jpeg_buf = cv2.imencode(".jpg", bgr_image)
                if not ret2:
                    continue

                jpeg_array = np.ascontiguousarray(jpeg_buf).ravel()
                node.send_output("image", pa.array(jpeg_array), {
                    "encoding": "jpeg",
                    "width": IMAGE_WIDTH,
                    "height": IMAGE_HEIGHT,
                })
    except KeyboardInterrupt:
        pass
    finally:
        # Release the camera even on interrupt so the device node is freed.
        cap.release()


if __name__ == "__main__":
Expand Down
2 changes: 1 addition & 1 deletion components/cameras/dora-camera-orbbec-v1/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ authors = [
readme = "README.md"
requires-python = ">=3.10,<3.11"
dependencies = [
"dora-rs (>=0.3.11,<0.4.0)",
"dora-rs (>=0.4.0,<0.5.0)",
"sb-pyorbbecsdk",
"opencv-python",
"numpy (<2.0)"
Expand Down
Loading