Upload real-robot deployment code.
This commit is contained in:
64
unitree_deploy/test/camera/test_image_client_camera.py
Normal file
64
unitree_deploy/test/camera/test_image_client_camera.py
Normal file
@@ -0,0 +1,64 @@
|
||||
import time
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
from tqdm import tqdm
|
||||
|
||||
from unitree_deploy.robot_devices.cameras.configs import ImageClientCameraConfig
|
||||
from unitree_deploy.robot_devices.cameras.utils import make_cameras_from_configs
|
||||
from unitree_deploy.utils.rich_logger import log_success
|
||||
|
||||
|
||||
# ============================From configs============================
|
||||
def run_camera():
    """Connect the image-client cameras, warm them up, and stream a live preview.

    Builds the default ``ImageClientCameraConfig`` (one head camera plus two
    wrist cameras), connects every camera, runs a short warm-up, then shows a
    horizontally stacked preview. Press "q" in the window to exit.
    """

    # ============================From configs============================
    def image_client_default_factory():
        # Single combined image client exposing head and wrist streams.
        return {
            "imageclient": ImageClientCameraConfig(
                head_camera_type="opencv",
                head_camera_id_numbers=[4],
                head_camera_image_shape=[480, 1280],  # Head camera resolution
                wrist_camera_type="opencv",
                wrist_camera_id_numbers=[0, 2],
                wrist_camera_image_shape=[480, 640],  # Wrist camera resolution
                aspect_ratio_threshold=2.0,
                fps=30,
                mock=False,
            ),
        }

    # ===========================================

    cameras = make_cameras_from_configs(image_client_default_factory())
    print(cameras)
    for name in cameras:
        cameras[name].connect()
        log_success(f"Connecting {name} cameras.")

    # Warm-up: let the async readers start producing frames before display.
    for _ in tqdm(range(20), desc="Camera warming up"):
        for name in cameras:
            cameras[name].async_read()
        time.sleep(1 / 30)

    try:
        while True:
            images = {}
            for name in cameras:
                output = cameras[name].async_read()
                if isinstance(output, dict):
                    # A client may return several named frames at once.
                    images.update(output)
                else:
                    images[name] = output

            # FIX: keep frames as numpy arrays. The original wrapped each
            # frame with torch.from_numpy() only to call .numpy() right back
            # on it — a pure numpy -> torch -> numpy round-trip with no effect
            # on the displayed data.
            image_list = [
                np.stack([img] * 3, axis=-1) if img.ndim == 2 else img  # grayscale -> 3-channel
                for img in images.values()
            ]

            stacked_image = np.hstack(image_list)
            cv2.imshow("Stacked Image", stacked_image)

            if (cv2.waitKey(1) & 0xFF) == ord("q"):
                break
    finally:
        # FIX: always close the preview window, even if the loop raises
        # (originally only executed on the "q" key path).
        # NOTE(review): cameras are never released here; if the camera class
        # exposes a disconnect() method, call it too — confirm against the API.
        cv2.destroyAllWindows()
|
||||
|
||||
|
||||
# Script entry point: start the camera preview loop when run directly.
if __name__ == "__main__":
    run_camera()
|
||||
66
unitree_deploy/test/camera/test_realsense_camera.py
Normal file
66
unitree_deploy/test/camera/test_realsense_camera.py
Normal file
@@ -0,0 +1,66 @@
|
||||
import time
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
from unitree_deploy.robot_devices.cameras.configs import IntelRealSenseCameraConfig
|
||||
from unitree_deploy.robot_devices.cameras.utils import make_cameras_from_configs
|
||||
from unitree_deploy.utils.rich_logger import log_success
|
||||
|
||||
|
||||
def run_camera():
    """Connect two Intel RealSense cameras and show a live tiled preview.

    Builds the default per-camera ``IntelRealSenseCameraConfig`` (fixed serial
    numbers, 640x480 @ 30 FPS), connects every camera, runs a short warm-up,
    then tiles frames two per row into one window. Press "q" to exit.
    """

    # ===========================================
    def intelrealsense_camera_default_factory():
        # Two fixed-serial RealSense cameras at 640x480 @ 30 FPS.
        return {
            "cam_high": IntelRealSenseCameraConfig(
                serial_number="044122071036",
                fps=30,
                width=640,
                height=480,
            ),
            "cam_wrist": IntelRealSenseCameraConfig(
                serial_number="419122270615",
                fps=30,
                width=640,
                height=480,
            ),
        }

    # ===========================================

    cameras = make_cameras_from_configs(intelrealsense_camera_default_factory())
    for name in cameras:
        cameras[name].connect()
        log_success(f"Connecting {name} cameras.")

    # Warm-up: let the async readers start producing frames before display.
    for _ in range(20):
        for name in cameras:
            cameras[name].async_read()
        time.sleep(1 / 30)

    try:
        while True:
            images = []
            for name in cameras:
                frame = cameras[name].async_read()
                if frame is not None:
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    # Overlay the camera name so tiles are identifiable.
                    cv2.putText(frame, name, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
                    images.append(frame)

            if images:
                # Tile frames two per row.
                rows = [np.hstack(images[i : i + 2]) for i in range(0, len(images), 2)]
                # FIX: with an odd number of frames the last row is narrower
                # than the others and np.vstack would raise a ValueError.
                # Pad narrow rows with black pixels so stacking always works.
                max_width = max(row.shape[1] for row in rows)
                rows = [
                    row
                    if row.shape[1] == max_width
                    else np.pad(row, ((0, 0), (0, max_width - row.shape[1]), (0, 0)))
                    for row in rows
                ]
                canvas = np.vstack(rows)

                cv2.imshow("All Cameras", canvas)

            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        # FIX: close windows even if the loop raises (was only after break).
        cv2.destroyAllWindows()
|
||||
|
||||
|
||||
# Script entry point: start the RealSense preview loop when run directly.
if __name__ == "__main__":
    run_camera()
|
||||
95
unitree_deploy/test/camera/test_usb_camera.py
Normal file
95
unitree_deploy/test/camera/test_usb_camera.py
Normal file
@@ -0,0 +1,95 @@
|
||||
import time
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import tyro
|
||||
from tqdm import tqdm
|
||||
|
||||
from unitree_deploy.robot_devices.cameras.configs import OpenCVCameraConfig
|
||||
from unitree_deploy.robot_devices.cameras.utils import make_camera, make_cameras_from_configs
|
||||
from unitree_deploy.utils.rich_logger import log_success
|
||||
|
||||
|
||||
def usb_camera_default_factory():
    """Return the default USB camera configuration mapping (name -> config).

    Three OpenCV cameras on fixed V4L2 device paths, all at 640x480 @ 30 FPS.
    """
    # Keep the per-camera differences (device path) in one small table and
    # build the identical-looking configs from it.
    device_by_name = {
        "cam_high": "/dev/video1",
        "cam_left_wrist": "/dev/video3",
        "cam_right_wrist": "/dev/video5",
    }
    return {
        name: OpenCVCameraConfig(
            camera_index=device,
            fps=30,
            width=640,
            height=480,
        )
        for name, device in device_by_name.items()
    }
|
||||
|
||||
|
||||
def run_cameras(camera_style: int = 0):
    """
    Runs camera(s) based on the specified style.

    Args:
        camera_style (int):
            0 - Single camera (OpenCV).
            1 - Multiple cameras from config.

    Raises:
        ValueError: If camera_style is neither 0 nor 1.
    """

    if camera_style == 0:
        # ========== Single camera ==========
        camera_kwargs = {"camera_type": "opencv", "camera_index": "/dev/video5", "mock": False}
        camera = make_camera(**camera_kwargs)
        camera.connect()
        log_success("Connecting camera.")

        while True:
            color_image = camera.read()
            color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
            cv2.imshow("Camera", color_image)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                # FIX: close the preview window on exit (the multi-camera
                # branch did this, the single-camera branch did not).
                cv2.destroyAllWindows()
                break

    elif camera_style == 1:
        # ========== Multi-camera from configs ==========
        cameras = make_cameras_from_configs(usb_camera_default_factory())

        for name in cameras:
            cameras[name].connect()
            log_success(f"Connecting {name} camera.")

        # Camera warm-up
        for _ in tqdm(range(20), desc="Camera warming up"):
            for name in cameras:
                cameras[name].async_read()
            time.sleep(1 / 30)

        while True:
            images = {}
            for name in cameras:
                images[name] = cameras[name].async_read()

            # FIX: async_read() returns numpy arrays here — the sibling test
            # wraps the same output with torch.from_numpy() before ever
            # calling .numpy() — so the original `img.numpy()` would raise
            # AttributeError on np.ndarray. Use the arrays directly,
            # expanding grayscale frames to 3 channels for hstack/imshow.
            image_list = [
                np.stack([img] * 3, axis=-1) if img.ndim == 2 else img
                for img in images.values()
            ]

            stacked_image = np.hstack(image_list)
            cv2.imshow("Multi-Camera View", stacked_image)

            if (cv2.waitKey(1) & 0xFF) == ord("q"):
                cv2.destroyAllWindows()
                break

    else:
        raise ValueError(f"Unsupported camera_style: {camera_style}")
|
||||
|
||||
|
||||
# Script entry point: expose run_cameras' arguments as a CLI via tyro.
if __name__ == "__main__":
    tyro.cli(run_cameras)
|
||||
Reference in New Issue
Block a user