ZED2 Camera + NVIDIA NX: Usage and Object Detection (Part 2)
Using YOLO for object recognition on the NX together with the ZED camera.
1. Setting up a venv virtual environment
The Python that ships with the device is Python 2.7, so first create and activate a virtual environment:
 sudo apt-get install software-properties-common
 sudo apt install python3.6 (available once the appropriate repository has been added)
 sudo aptitude install python3.6-venv
 python3 -m venv torch2
 source torch2/bin/activate
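 To confirm the environment is active, the interpreter should now point into torch2 and report Python 3.6 (a quick sanity check):
 python3 --version
 which python3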
 
 
 
 
2. Installing torch and torchvision
2.1 Installing torch and its dependencies
NVIDIA forum thread: https://forums.developer.nvidia.com/t/pytorch-for-jetson/72048. The .whl file can be downloaded from that page, and the matching torchvision version can be downloaded from GitHub.
 
sudo apt-get install python3-pip libopenblas-base libopenmpi-dev libomp-dev
 pip3 install Cython
 pip3 install torch-1.8.0-cp36-cp36m-linux_aarch64.whl (substitute the filename of the wheel you actually downloaded; I used the torch 1.7 wheel, which pairs with torchvision 0.8.1 below)
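 After installing the wheel, a quick check (a minimal sketch; the exact version string depends on which wheel you chose) confirms that PyTorch can see the GPU:
 python3 -c "import torch; print(torch.__version__, torch.cuda.is_available())"
 The second value should be True on the NX.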
 
 
 
2.2 Installing the matching torchvision version
Since I installed torch 1.7, git clone the corresponding torchvision 0.8.1. After downloading, run:
 cd torchvision
 export BUILD_VERSION=0.8.1
 python3 setup.py install (do not add --user inside a venv virtual environment)
 
 If invalid command 'bdist_wheel' appears during the build, the fix is: pip3 install wheel
 After installation, torchvision may still fail to import, complaining that future feature annotations is not defined. This is probably caused by an incompatible (too new) Pillow version; with the versions used here the fix was: pip install pillow==8.1.0
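 A quick import test (a minimal sketch) shows whether the torch/torchvision/Pillow combination is consistent; an error at this point usually means a version mismatch:
 python3 -c "import torch, torchvision, PIL; print(torch.__version__, torchvision.__version__, PIL.__version__)"
 With the versions above this should print something like 1.7.0 0.8.1 8.1.0.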
 
 Installation complete.
 
3. Installing the ZED Python API
cd /usr/local/zed
 python3 get_python_api.py
 
 Other issues: the error illegal instruction (core dumped) may appear. On Jetson this is commonly traced to newer numpy/OpenBLAS builds, and a frequently reported workaround is to run export OPENBLAS_CORETYPE=ARMV8 before starting Python.
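 To confirm the Python API works before moving on, a minimal open/close test can be run (a sketch that only uses calls also appearing in the full script below):
 import pyzed.sl as sl

 zed = sl.Camera()
 init_params = sl.InitParameters()
 init_params.camera_resolution = sl.RESOLUTION.HD720
 status = zed.open(init_params)
 print(repr(status))   # ERROR_CODE.SUCCESS means the SDK and pyzed are working
 zed.close()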
 
4. Installing YOLO
To keep the first tests simple, YOLOv5 is installed here for a quick experiment.
 Just git clone the YOLO release you want to use. Some releases cannot be installed or run properly; switch to another release first rather than changing the environment. If you want to use the official ZED camera sample, releases from 2022 onward only need a few function names or parameters renamed.
 To install YOLO, simply run pip install -r requirements.txt.
 If downloads are very slow or the install hangs at building, the fix is:
 pip install --upgrade pip
 
 Install the OpenGL-related packages:
 pip install PyOpenGL PyOpenGL_accelerate
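 Before connecting YOLOv5 to the ZED, a standalone smoke test helps confirm the repository itself works. For example, inside the cloned yolov5 folder, run the stock detect.py on the bundled sample images (this uses the default yolov5s.pt weights, not the custom model used later):
 python3 detect.py --weights yolov5s.pt --source data/images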
 
5. Once everything is installed, test normally with your own modified and trained model. YOLO detection can run in a torch thread, while a ROS thread controls the device and interacts with the camera.
Code for the small experiment:
#!/usr/bin/env python3
import sys
import numpy as np
import argparse
import torch
import cv2
import pyzed.sl as sl
import torch.backends.cudnn as cudnn

sys.path.insert(0, './yolov5')  ###problem
from models.experimental import attempt_load
#######################################################
from utils.general import check_img_size, non_max_suppression, scale_coords, xyxy2xywh
from utils.torch_utils import select_device
from utils.augmentations import letterbox
###
from threading import Lock, Thread
from time import sleep

import ogl_viewer.viewer as gl
import cv_viewer.tracking_viewer as cv_viewer

###ros
from sys import maxunicode
from turtle import position
import rospy
from std_msgs.msg import String
from std_msgs.msg import Float64
from zed_interfaces.msg import Object
from zed_interfaces.msg import ObjectsStamped
from geometry_msgs.msg import Twist
import math
###

lock = Lock()
run_signal = False
run_signal2 = False
exit_signal = False


def img_preprocess(img, device, half, net_size):
    net_image, ratio, pad = letterbox(img[:, :, :3], net_size, auto=False)
    net_image = net_image.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
    net_image = np.ascontiguousarray(net_image)
    img = torch.from_numpy(net_image).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)
    return img, ratio, pad


def xywh2abcd(xywh, im_shape):
    output = np.zeros((4, 2))

    # Center / Width / Height -> BBox corners coordinates
    x_min = (xywh[0] - 0.5*xywh[2]) * im_shape[1]
    x_max = (xywh[0] + 0.5*xywh[2]) * im_shape[1]
    y_min = (xywh[1] - 0.5*xywh[3]) * im_shape[0]
    y_max = (xywh[1] + 0.5*xywh[3]) * im_shape[0]

    # A ------ B
    # | Object |
    # D ------ C
    output[0][0] = x_min
    output[0][1] = y_min
    output[1][0] = x_max
    output[1][1] = y_min
    output[2][0] = x_min
    output[2][1] = y_max
    output[3][0] = x_max
    output[3][1] = y_max
    return output


def detections_to_custom_box(detections, im, im0):
    output = []
    for i, det in enumerate(detections):
        if len(det):
            det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh

            for *xyxy, conf, cls in reversed(det):
                xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh

                # Creating ingestable objects for the ZED SDK
                obj = sl.CustomBoxObjectData()
                obj.bounding_box_2d = xywh2abcd(xywh, im0.shape)
                obj.label = cls
                obj.probability = conf
                obj.is_grounded = False
                output.append(obj)
    return output


def torch_thread(weights, img_size, conf_thres=0.2, iou_thres=0.45):
    global image_net, exit_signal, run_signal, detections

    print("Intializing Network...")

    device = select_device()
    half = device.type != 'cpu'  # half precision only supported on CUDA
    imgsz = img_size

    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32
    stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # check img_size
    if half:
        model.half()  # to FP16
    cudnn.benchmark = True

    # Run inference
    if device.type != 'cpu':
        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once

    while not exit_signal:
        if run_signal:
            lock.acquire()
            img, ratio, pad = img_preprocess(image_net, device, half, imgsz)
            pred = model(img)[0]
            det = non_max_suppression(pred, conf_thres, iou_thres)
            # ZED CustomBox format (with inverse letterboxing tf applied)
            detections = detections_to_custom_box(det, img, image_net)
            lock.release()
            run_signal = False
        sleep(0.01)


def distance(position):
    x = -position[2]
    y = -position[0] + 0.06
    z = position[1]
    dis = math.sqrt(x*x + y*y + z*z)
    return dis, x, y, z


#if people_exist==TRUE
def movecar1(dis, x, y, z):
    #linear_x angular_z
    if dis >= 1.5:
        linear_x = 0.3
    elif dis <= 0.7:
        linear_x = -0.3
    else:
        linear_x = 0
    if abs(y) < 0.35:
        angular_z = -0.2
        linear_y = -0.1
    elif abs(y) > 1:
        angular_z = 0.3
        linear_y = 0.1
    else:
        angular_z = 0
        linear_y = 0
    return linear_x, linear_y, angular_z


#if people_exist==FALSE
def movecar2():
    angular_z = 0.5
    return angular_z


def find_object(object_list):
    object_tot = object_list
    max_con = object_tot[0]
    for obj_tmp in object_tot:
        if obj_tmp.confidence > max_con.confidence:
            max_con = obj_tmp
    return max_con


def ros_thread():
    #global image_net, exit_signal, run_signal, detections
    global objects_total, run_signal2
    ###
    print("rostopic publish!!!!!!!!!!!!!!!!!!!!!!")
    try:
        #rospy.init_node('pylistener',anonymous=True)
        while (not rospy.is_shutdown()) and (not exit_signal):
            if run_signal2:
                lock.acquire()
                obj = objects_total
                rospy.loginfo("Listener:")
                rospy.loginfo("people num: %d", len(obj.object_list))
                pub_vel_cmd = rospy.Publisher('/tianbot_01/cmd_vel', Twist, queue_size=10)
                pub_info = rospy.Publisher('detect_position_info', Float64, queue_size=10)  ###trans info
                if len(obj.object_list) > 0:
                    object_max = find_object(obj.object_list)
                    if object_max.confidence > 90:
                        #the closest people position: x,y,z
                        ###num=closest_people(obj)
                        #position=obj.object_list[0].position  ###consider index 0 first
                        position = object_max.position
                        rospy.loginfo("the closest car position(x,y,z): %f %f %f", -position[2], -position[0]+0.06, position[1])
                        dis, x, y, z = distance(position)
                        linear_x, linear_y, angular_z = movecar1(dis, x, y, z)
                        #move
                        vel_cmd = Twist()
                        vel_cmd.linear.x = linear_x
                        vel_cmd.linear.y = linear_y
                        vel_cmd.angular.z = angular_z
                        pub_vel_cmd.publish(vel_cmd)
                        pub_info.publish(dis)  ###trans info
                else:
                    angular_z = movecar2()
                    #move
                    vel_cmd = Twist()
                    vel_cmd.linear.x = 0.2
                    vel_cmd.angular.z = 0.5
                    pub_vel_cmd.publish(vel_cmd)
                lock.release()
                run_signal2 = False
            sleep(0.01)
    except rospy.ROSInterruptException:
        pass


def main():
    global image_net, exit_signal, run_signal, detections, objects_total, run_signal2

    capture_thread = Thread(target=torch_thread,
                            kwargs={'weights': opt.weights, 'img_size': opt.img_size, "conf_thres": opt.conf_thres})
    capture_thread.start()
    capture_thread2 = Thread(target=ros_thread)
    capture_thread2.start()

    print("Initializing Camera...")

    zed = sl.Camera()

    input_type = sl.InputType()
    if opt.svo is not None:
        input_type.set_from_svo_file(opt.svo)

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters(input_t=input_type, svo_real_time_mode=True)
    init_params.camera_resolution = sl.RESOLUTION.HD720
    init_params.coordinate_units = sl.UNIT.METER
    init_params.depth_mode = sl.DEPTH_MODE.ULTRA  # QUALITY
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP
    init_params.depth_maximum_distance = 50

    runtime_params = sl.RuntimeParameters()
    status = zed.open(init_params)

    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    image_left_tmp = sl.Mat()  ###self

    print("Initialized Camera")

    positional_tracking_parameters = sl.PositionalTrackingParameters()
    # If the camera is static, uncomment the following line to have better performances and boxes sticked to the ground.
    # positional_tracking_parameters.set_as_static = True
    zed.enable_positional_tracking(positional_tracking_parameters)

    obj_param = sl.ObjectDetectionParameters()
    obj_param.detection_model = sl.DETECTION_MODEL.CUSTOM_BOX_OBJECTS
    obj_param.enable_tracking = True
    zed.enable_object_detection(obj_param)

    objects = sl.Objects()
    obj_runtime_param = sl.ObjectDetectionRuntimeParameters()

    # Display
    camera_infos = zed.get_camera_information()
    # Create OpenGL viewer
    viewer = gl.GLViewer()
    point_cloud_res = sl.Resolution(min(camera_infos.camera_resolution.width, 720),
                                    min(camera_infos.camera_resolution.height, 404))
    point_cloud_render = sl.Mat()
    viewer.init(camera_infos.camera_model, point_cloud_res, obj_param.enable_tracking)
    point_cloud = sl.Mat(point_cloud_res.width, point_cloud_res.height, sl.MAT_TYPE.F32_C4, sl.MEM.CPU)
    image_left = sl.Mat()
    # Utilities for 2D display
    display_resolution = sl.Resolution(min(camera_infos.camera_resolution.width, 1280),
                                       min(camera_infos.camera_resolution.height, 720))
    image_scale = [display_resolution.width / camera_infos.camera_resolution.width,
                   display_resolution.height / camera_infos.camera_resolution.height]
    image_left_ocv = np.full((display_resolution.height, display_resolution.width, 4), [245, 239, 239, 255], np.uint8)

    # Utilities for tracks view
    camera_config = zed.get_camera_information().camera_configuration
    tracks_resolution = sl.Resolution(400, display_resolution.height)
    track_view_generator = cv_viewer.TrackingViewer(tracks_resolution, camera_config.camera_fps,
                                                    init_params.depth_maximum_distance)
    track_view_generator.set_camera_calibration(camera_config.calibration_parameters)
    image_track_ocv = np.zeros((tracks_resolution.height, tracks_resolution.width, 4), np.uint8)
    # Camera pose
    cam_w_pose = sl.Pose()

    while viewer.is_available() and not exit_signal:
        if zed.grab(runtime_params) == sl.ERROR_CODE.SUCCESS:
            # -- Get the image
            lock.acquire()
            zed.retrieve_image(image_left_tmp, sl.VIEW.LEFT)
            image_net = image_left_tmp.get_data()
            lock.release()
            run_signal = True

            # -- Detection running on the other thread
            while run_signal:
                sleep(0.001)

            # Wait for detections
            lock.acquire()
            # -- Ingest detections
            zed.ingest_custom_box_objects(detections)
            lock.release()

            lock.acquire()
            zed.retrieve_objects(objects, obj_runtime_param)
            ###print(len(objects.object_list))
            objects_total = objects
            ###
            lock.release()

            ###move car
            run_signal2 = True
            while run_signal2:
                sleep(0.001)

            # -- Display
            # Retrieve display data
            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA, sl.MEM.CPU, point_cloud_res)
            point_cloud.copy_to(point_cloud_render)
            zed.retrieve_image(image_left, sl.VIEW.LEFT, sl.MEM.CPU, display_resolution)
            zed.get_position(cam_w_pose, sl.REFERENCE_FRAME.WORLD)

            # 3D rendering
            viewer.updateData(point_cloud_render, objects)
            # 2D rendering
            np.copyto(image_left_ocv, image_left.get_data())
            cv_viewer.render_2D(image_left_ocv, image_scale, objects, obj_param.enable_tracking)
            global_image = cv2.hconcat([image_left_ocv, image_track_ocv])
            # Tracking view
            track_view_generator.generate_view(objects, cam_w_pose, image_track_ocv, objects.is_tracked)

            cv2.imshow("ZED | 2D View and Birds View", global_image)
            key = cv2.waitKey(10)
            if key == 27:
                exit_signal = True
        else:
            exit_signal = True

    viewer.exit()
    exit_signal = True
    zed.close()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='./test4.pt', help='model.pt path(s)')
    parser.add_argument('--svo', type=str, default=None, help='optional svo file')
    parser.add_argument('--img_size', type=int, default=416, help='inference size (pixels)')
    parser.add_argument('--conf_thres', type=float, default=0.4, help='object confidence threshold')
    opt = parser.parse_args()

    rospy.init_node('pylistener', anonymous=True)

    with torch.no_grad():
        main()
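Assuming the script above is saved as, say, zed_yolo_ros.py (the filename here is just a placeholder) inside the yolov5 directory, and roscore plus the robot's base driver are already running, it can be started with, for example:
 python3 zed_yolo_ros.py --weights ./test4.pt --img_size 416 --conf_thres 0.4
The torch thread then runs YOLOv5 inference on each ZED frame, the ZED SDK tracks the resulting custom boxes in 3D, and the ROS thread publishes velocity commands on /tianbot_01/cmd_vel based on the highest-confidence target's position.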
