# graduation-design/orbbec_camera/OrbbecCamera.py

import threading
import cv2
import numpy as np
import sys
import argparse
from pyorbbecsdk import *
from .utils import frame_to_bgr_image

class OrbbecCamera:
    # Class constant used as the exit key (ESC)
    ESC_KEY = 27

    def __init__(self, mode='HW', enable_sync=True, image_width=640):
        # Initialize the Orbbec camera and its configuration
        self.pipeline = Pipeline()  # Create the Pipeline object
        self.device = self.pipeline.get_device()  # Get the device object
        self.config = Config()  # Create the configuration object
        self.mode = mode  # Alignment mode
        self.enable_sync = enable_sync  # Whether to enable frame synchronization
        self.center_distance = 0  # Depth value at the image center
        self.MIN_DEPTH = 20  # Minimum depth: 20 mm
        self.MAX_DEPTH = 10000  # Maximum depth: 10000 mm
        self.color_image = None  # Latest color image
        self.depth_image = None  # Latest depth image
        self.combined_image = None  # Color/depth overlay image
        self.depth_intrinsics = 0  # Depth camera intrinsics (placeholder; must be populated before use)
        # Thread control
        self.thread = None  # Thread object that runs the capture loop in parallel
        self.active = False  # Flag that keeps the capture loop running
        # Display settings
        self.width = image_width  # Image width
        self.fps = 30  # Frame rate
        # Initialize camera settings
        device_info = self.device.get_device_info()  # Get device info
        device_pid = device_info.get_pid()  # Get device PID
        try:
            # Get and enable the color stream profile
            profile_list = self.pipeline.get_stream_profile_list(OBSensorType.COLOR_SENSOR)
            # color_profile = profile_list.get_default_video_stream_profile()
            color_profile = profile_list.get_video_stream_profile(self.width, 0, OBFormat.RGB, self.fps)
            self.config.enable_stream(color_profile)
            # Get and enable the depth stream profile
            profile_list = self.pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
            # depth_profile = profile_list.get_default_video_stream_profile()
            depth_profile = profile_list.get_video_stream_profile(self.width, 0, OBFormat.Y16, self.fps)
            self.config.enable_stream(depth_profile)
        except Exception as e:
            print(e)
            sys.exit(1)
        # Set the alignment mode according to self.mode:
        #   HW = hardware D2C alignment
        #   SW = software D2C alignment
        if self.mode == 'HW':
            if device_pid == 0x066B:
                # For this device PID, fall back to software D2C alignment
                self.config.set_align_mode(OBAlignMode.SW_MODE)
            else:
                self.config.set_align_mode(OBAlignMode.HW_MODE)
        elif self.mode == 'SW':
            self.config.set_align_mode(OBAlignMode.SW_MODE)
        else:
            self.config.set_align_mode(OBAlignMode.DISABLE)
        # Enable frame synchronization
        if self.enable_sync:
            try:
                self.pipeline.enable_frame_sync()
            except Exception as e:
                print(e)
                sys.exit(1)

    def stop(self):
        # Stop capturing: signal the loop to exit, wait for the thread, then stop the pipeline
        self.active = False
        if self.thread is not None:
            self.thread.join()  # Wait for the capture thread to finish
        self.pipeline.stop()

    def run(self):
        if not self.active:
            self.active = True
            # Create and start the capture thread; its target is _capture_loop
            self.thread = threading.Thread(target=self._capture_loop)
            self.thread.start()

    def _capture_loop(self):
        try:
            self.pipeline.start(self.config)  # Start streaming
        except Exception as e:
            print(e)
            return
        while self.active:
            try:
                # Wait for a frame set; if the configured frame rate is too low,
                # this wait may time out
                frames: FrameSet = self.pipeline.wait_for_frames(500)
                if frames is None:
                    continue
                color_frame = frames.get_color_frame()  # Get the color frame
                if color_frame is None:
                    continue
                color_image = frame_to_bgr_image(color_frame)  # Convert the color frame to a BGR image
                if color_image is None:
                    print("failed to convert frame to image")
                    continue
                self.color_image = color_image
                depth_frame = frames.get_depth_frame()  # Get the depth frame
                if depth_frame is None:
                    continue
                # Process the depth data
                self.process_depth_data(depth_frame)
                # depth_image = self.process_depth_data(depth_frame)
                # self.depth_image = depth_image
                # # Blend the depth image with the color image
                # combined_image = cv2.addWeighted(color_image, 0.5, depth_image, 0.5, 0)
                # self.combined_image = combined_image
            except KeyboardInterrupt:
                break  # Exit on interrupt

    def process_depth_data(self, depth_frame):
        # Convert the raw depth data into a visualizable image
        width = depth_frame.get_width()
        height = depth_frame.get_height()
        scale = depth_frame.get_depth_scale()
        depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16).reshape((height, width))
        depth_data = depth_data.astype(np.float32) * scale  # Depth in millimetres
        depth_image = cv2.normalize(depth_data, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        # Read the depth value at the image center
        center_y = int(height / 2)
        center_x = int(width / 2)
        self.center_distance = depth_data[center_y, center_x]
        return cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)

    def get_color_image(self):
        return self.color_image

    def get_depth_image(self):
        return self.depth_image

    def get_combined_image(self):
        return self.combined_image

    def get_center_distance(self):
        return self.center_distance

    # Convert pixel coordinates to coordinates in the camera frame
    # Returns: camera coordinates (x, y, z)
    def covert_pixel_to_carmera_coordinates(self, u, v, depth):
        # u: pixel x coordinate
        # v: pixel y coordinate
        # depth: depth value at (u, v)
        # K[i] are the entries of the row-major 3x3 intrinsic matrix;
        # self.depth_intrinsics must be populated before calling this method
        fx = self.depth_intrinsics.K[0]
        fy = self.depth_intrinsics.K[4]
        cx = self.depth_intrinsics.K[2]
        cy = self.depth_intrinsics.K[5]
        # Units are millimetres
        camera_x = (u - cx) * depth / fx
        camera_y = (v - cy) * depth / fy
        return camera_x, camera_y, depth
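
    # Minimal sketch of how self.depth_intrinsics could be populated so that the
    # K[i] indexing above works, assuming pyorbbecsdk exposes
    # Pipeline.get_camera_param() with a depth_intrinsic carrying fx, fy, cx, cy;
    # attribute names may differ between SDK versions, and the call is typically
    # made after the pipeline has been started.
    def update_depth_intrinsics(self):
        from types import SimpleNamespace  # lightweight container for the K matrix
        param = self.pipeline.get_camera_param()  # assumed pyorbbecsdk call
        intr = param.depth_intrinsic              # assumed fields: fx, fy, cx, cy
        # Row-major 3x3 intrinsic matrix: K[0]=fx, K[2]=cx, K[4]=fy, K[5]=cy
        self.depth_intrinsics = SimpleNamespace(K=[intr.fx, 0.0, intr.cx,
                                                   0.0, intr.fy, intr.cy,
                                                   0.0, 0.0, 1.0])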

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--mode", help="Alignment mode: HW=hardware, SW=software, NONE=disabled", type=str, default='HW')
    # Note: argparse's type=bool treats any non-empty string (including "False") as True
    parser.add_argument("-s", "--enable_sync", help="Enable frame synchronization", type=bool, default=True)
    args = parser.parse_args()
    camera = OrbbecCamera(mode=args.mode, enable_sync=args.enable_sync)
    camera.run()
    # The capture thread has only just started, so this prints 0 until the
    # first depth frame has been processed
    print("Center-point depth: ", camera.center_distance, "mm")