python numba.jit 警告:cannot determine Numba type of class 'numba.dispatcher.LiftedLoop'(加速代码)
生活随笔
收集整理的這篇文章主要介紹了
python numba.jit 警告:cannot determine Numba type of class 'numba.dispatcher.LiftedLoop'(加速代码)
小編覺得挺不錯的,現在分享給大家,幫大家做個參考。
原代碼
# -*- coding: utf-8 -*- """ @File : 191218_obstacle_detection_測試加空間過濾器和jit加速器.py @Time : 2019/12/18 11:47 @Author : Dontla @Email : sxana@qq.com @Software: PyCharm """import timeimport numpy as np import pyrealsense2 as rs import cv2 import sys from numba import jit, vectorize, int64, int32, autojit, uint16, float64class ObstacleDetection(object):def __init__(self):# self.cam_serials = ['838212073161', '827312071726']self.cam_serials = ['838212073161']# @jit(nopython=True)@jit# @vectorize([int64(int64, int64)], target='parallel')# @jit('int64(int64,int64[:])')# @jit(int64[:](int64[:], int64[:]))def traversing_pixels(self, depth_image):num_black = 0all_pixels = 0for pixel in depth_image.ravel():all_pixels += 1if pixel == 0:num_black += 1return num_black# return all_pixels, num_blackdef obstacle_detection(self):# 攝像頭個數(在這里設置所需使用攝像頭的總個數)cam_num = 6ctx = rs.context()'''連續驗證機制'''# D·C 1911202:創建最大驗證次數max_veri_times;創建連續穩定值continuous_stable_value,用于判斷設備重置后是否處于穩定狀態max_veri_times = 100continuous_stable_value = 10print('\n', end='')print('開始連續驗證,連續驗證穩定值:{},最大驗證次數:{}:'.format(continuous_stable_value, max_veri_times))continuous_value = 0veri_times = 0while True:devices = ctx.query_devices()connected_cam_num = len(devices)if connected_cam_num == cam_num:continuous_value += 1if continuous_value == continuous_stable_value:breakelse:continuous_value = 0veri_times += 1if veri_times == max_veri_times:print("檢測超時,請檢查攝像頭連接!")sys.exit()print('攝像頭個數:{}'.format(connected_cam_num))'''循環reset攝像頭'''# hardware_reset()后是不是應該延遲一段時間?不延遲就會報錯print('\n', end='')print('開始初始化攝像頭:')for dev in ctx.query_devices():# 先將設備的序列號放進一個變量里,免得在下面for循環里訪問設備的信息過多(雖然不知道它會不會每次都重新訪問)dev_serial = dev.get_info(rs.camera_info.serial_number)# 匹配序列號,重置我們需重置的特定攝像頭(注意兩個for循環順序,哪個在外哪個在內很重要,不然會導致剛重置的攝像頭又被訪問導致報錯)for serial in self.cam_serials:if serial == dev_serial:dev.hardware_reset()# 像下面這條語句居然不會報錯,不是剛剛才重置了dev嗎?莫非區別在于沒有通過for循環ctx.query_devices()去訪問?# 是不是剛重置后可以通過ctx.query_devices()去查看有這個設備,但是卻沒有存儲設備地址?如果是這樣,# 
也就能夠解釋為啥能夠通過len(ctx.query_devices())函數獲取設備數量,但訪問序列號等信息就會報錯的原因了print('攝像頭{}初始化成功'.format(dev.get_info(rs.camera_info.serial_number)))'''連續驗證機制'''# D·C 1911202:創建最大驗證次數max_veri_times;創建連續穩定值continuous_stable_value,用于判斷設備重置后是否處于穩定狀態print('\n', end='')print('開始連續驗證,連續驗證穩定值:{},最大驗證次數:{}:'.format(continuous_stable_value, max_veri_times))continuous_value = 0veri_times = 0while True:devices = ctx.query_devices()connected_cam_num = len(devices)if connected_cam_num == cam_num:continuous_value += 1if continuous_value == continuous_stable_value:breakelse:continuous_value = 0veri_times += 1if veri_times == max_veri_times:print("檢測超時,請檢查攝像頭連接!")sys.exit()print('攝像頭個數:{}'.format(connected_cam_num))'''配置各個攝像頭的基本對象'''for i in range(len(self.cam_serials)):locals()['pipeline' + str(i + 1)] = rs.pipeline(ctx)locals()['config' + str(i + 1)] = rs.config()locals()['config' + str(i + 1)].enable_device(self.cam_serials[i])locals()['config' + str(i + 1)].enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)locals()['config' + str(i + 1)].enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)locals()['pipeline' + str(i + 1)].start(locals()['config' + str(i + 1)])# 創建對齊對象(深度對齊顏色)locals()['align' + str(i + 1)] = rs.align(rs.stream.color)'''運行攝像頭'''try:while True:start_time = time.time()for i in range(len(self.cam_serials)):locals()['frames' + str(i + 1)] = locals()['pipeline' + str(i + 1)].wait_for_frames()# 獲取對齊幀集locals()['aligned_frames' + str(i + 1)] = locals()['align' + str(i + 1)].process(locals()['frames' + str(i + 1)])# 獲取對齊后的深度幀和彩色幀locals()['aligned_depth_frame' + str(i + 1)] = locals()['aligned_frames' + str(i + 1)].get_depth_frame()locals()['color_frame' + str(i + 1)] = locals()['aligned_frames' + str(i + 1)].get_color_frame()if not locals()['aligned_depth_frame' + str(i + 1)] or not locals()['color_frame' + str(i + 1)]:continue# 獲取顏色幀內參locals()['color_profile' + str(i + 1)] = locals()['color_frame' + str(i + 1)].get_profile()locals()['cvsprofile' + str(i + 1)] = 
rs.video_stream_profile(locals()['color_profile' + str(i + 1)])locals()['color_intrin' + str(i + 1)] = locals()['cvsprofile' + str(i + 1)].get_intrinsics()locals()['color_intrin_part' + str(i + 1)] = [locals()['color_intrin' + str(i + 1)].ppx,locals()['color_intrin' + str(i + 1)].ppy,locals()['color_intrin' + str(i + 1)].fx,locals()['color_intrin' + str(i + 1)].fy]# 【空間過濾器】locals()['spatial' + str(i + 1)] = rs.spatial_filter()locals()['spatial' + str(i + 1)].set_option(rs.option.filter_magnitude, 5)locals()['spatial' + str(i + 1)].set_option(rs.option.filter_smooth_alpha, 1)locals()['spatial' + str(i + 1)].set_option(rs.option.filter_smooth_delta, 50)locals()['spatial' + str(i + 1)].set_option(rs.option.holes_fill, 3)locals()['filtered_depth' + str(i + 1)] = locals()['spatial' + str(i + 1)].process(locals()['aligned_depth_frame' + str(i + 1)])locals()['depth_image' + str(i + 1)] = np.asanyarray(locals()['filtered_depth' + str(i + 1)].get_data())# print(locals()['depth_image' + str(i + 1)].dtype) # uint16locals()['color_image' + str(i + 1)] = np.asanyarray(locals()['color_frame' + str(i + 1)].get_data())# locals()['depth_image' + str(i + 1)] = np.asanyarray(# locals()['aligned_depth_frame' + str(i + 1)].get_data())# 【打印深度值看看、全部打印顯示】# np.set_printoptions(threshold=np.inf)# print(locals()['depth_image' + str(i + 1)])# 【計算深度圖數據中的0值】# locals()['all_pixels' + str(i + 1)], locals()['num_black' + str(i + 1)] = self.traversing_pixels(# locals()['depth_image' + str(i + 1)])locals()['num_black' + str(i + 1)] = self.traversing_pixels(locals()['depth_image' + str(i + 1)])# num_black = 0# all_pixels = 0# for row in range(480):# for colume in range(640):# all_pixels += 1# if locals()['depth_image' + str(i + 1)][row, colume] == 0:# num_black += 1print('depth_image分辨率:{}'.format(locals()['depth_image' + str(i + 1)].shape))# print('depth_image:{}'.format(num_black))# print('depth_image:{}'.format(num_black / all_pixels))print('depth_image:{}'.format(locals()['num_black' + str(i + 
1)]))# print('depth_image:{}'.format(# locals()['num_black' + str(i + 1)] / locals()['all_pixels' + str(i + 1)]))# 以下這種卡的不行(get_distance()函數會把窗口搞崩潰(即使不很卡))# for row in range(locals()['aligned_depth_frame' + str(i + 1)].get_height()):# for colume in range(locals()['aligned_depth_frame' + str(i + 1)].get_width()):# all_pixels += 1# if locals()['depth_image' + str(i + 1)][row, colume] == 0:# # if locals()[# # 'aligned_depth_frame' + str(i + 1)].get_distance(row, colume) == 0:# num_black += 1# for pixel in locals()['depth_image' + str(i + 1)].ravel():# all_pixels += 1# if pixel == 0:# num_black += 1# print('depth_image分辨率:{}'.format(locals()['depth_image' + str(i + 1)].shape))# print('depth_image:{}'.format(num_black))# print('depth_image:{}'.format(num_black / all_pixels))locals()['depth_colormap' + str(i + 1)] = cv2.applyColorMap(cv2.convertScaleAbs(locals()['depth_image' + str(i + 1)], alpha=0.0425),cv2.COLORMAP_JET)locals()['image' + str(i + 1)] = np.hstack((locals()['color_image' + str(i + 1)], locals()['depth_colormap' + str(i + 1)]))cv2.imshow('win{}'.format(i + 1), locals()['image' + str(i + 1)])cv2.waitKey(1)end_time = time.time()print('單幀運行時間:{}'.format(end_time - start_time))finally:for i in range(len(self.cam_serials)):locals()['pipeline' + str(i + 1)].stop()if __name__ == '__main__':ObstacleDetection().obstacle_detection()運行結果:
D:/20191211_obstacle_detection/obstacle_detection/191218_obstacle_detection_測試加空間過濾器和jit加速器.py:26: NumbaWarning: Compilation is falling back to object mode WITH looplifting enabled because Function "traversing_pixels" failed type inference due to: non-precise type pyobject [1] During: typing of argument at D:/20191211_obstacle_detection/obstacle_detection/191218_obstacle_detection_測試加空間過濾器和jit加速器.py (32)File "191218_obstacle_detection_測試加空間過濾器和jit加速器.py", line 32:def traversing_pixels(self, depth_image):num_black = 0^@jit D:/20191211_obstacle_detection/obstacle_detection/191218_obstacle_detection_測試加空間過濾器和jit加速器.py:26: NumbaWarning: Compilation is falling back to object mode WITHOUT looplifting enabled because Function "traversing_pixels" failed type inference due to: cannot determine Numba type of <class 'numba.dispatcher.LiftedLoop'>File "191218_obstacle_detection_測試加空間過濾器和jit加速器.py", line 34:def traversing_pixels(self, depth_image):<source elided>all_pixels = 0for pixel in depth_image.ravel():^@jit D:\20191031_tensorflow_yolov3\python\lib\site-packages\numba\object_mode_passes.py:178: NumbaWarning: Function "traversing_pixels" was compiled in object mode without forceobj=True, but has lifted loops.File "191218_obstacle_detection_測試加空間過濾器和jit加速器.py", line 31:# @jit(uint16[:](uint16[:], uint16[:]))def traversing_pixels(self, depth_image):^state.func_ir.loc)) D:\20191031_tensorflow_yolov3\python\lib\site-packages\numba\object_mode_passes.py:187: NumbaDeprecationWarning: Fall-back from the nopython compilation path to the object mode compilation path has been detected, this is deprecated behaviour.For more information visit http://numba.pydata.org/numba-doc/latest/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jitFile "191218_obstacle_detection_測試加空間過濾器和jit加速器.py", line 31:# @jit(uint16[:](uint16[:], uint16[:]))def traversing_pixels(self, depth_image):^warnings.warn(errors.NumbaDeprecationWarning(msg, state.func_ir.loc))原因
原因:從警告信息 "failed type inference due to: non-precise type pyobject [1] During: typing of argument" 可以看出,類型推斷失敗的是方法的第一個參數 self——它是一個普通的 Python 類實例對象,Numba 無法確定其類型,於是回退到 object mode 並對循環做提升(LiftedLoop),從而產生上述一連串警告。之前我是將需 jit 加速的函數放在類中定義,現在將此函數移出類外(不再帶 self 參數),警告就消失了;若一定要放在類中,可改用 numba 的 @jitclass,或將其定義為不依賴 self 的 @staticmethod。
修改後代碼
# -*- coding: utf-8 -*- """ @File : 191219_obstacle_detection_測試加空間過濾器和jit加速器_將需jit加速函數放在類外部.py @Time : 2019/12/19 10:10 @Author : Dontla @Email : sxana@qq.com @Software: PyCharm """import timeimport numpy as np import pyrealsense2 as rs import cv2 import sys from numba import jit, vectorize, int64, int32, autojit, uint16, float64# @jit(nopython=True) # @jit # @vectorize([int64(int64, int64)], target='parallel') # @jit('int64(int64,int64[:])') # @jit(int64[:](int64[:], int64[:])) @jit def traversing_pixels(depth_image):num_black = 0all_pixels = 0for pixel in depth_image.ravel():all_pixels += 1if pixel == 0:num_black += 1return num_black# return all_pixels, num_blackclass ObstacleDetection(object):def __init__(self):# self.cam_serials = ['838212073161', '827312071726']self.cam_serials = ['838212073161']def obstacle_detection(self):# 攝像頭個數(在這里設置所需使用攝像頭的總個數)cam_num = 6ctx = rs.context()'''連續驗證機制'''# D·C 1911202:創建最大驗證次數max_veri_times;創建連續穩定值continuous_stable_value,用于判斷設備重置后是否處于穩定狀態max_veri_times = 100continuous_stable_value = 10print('\n', end='')print('開始連續驗證,連續驗證穩定值:{},最大驗證次數:{}:'.format(continuous_stable_value, max_veri_times))continuous_value = 0veri_times = 0while True:devices = ctx.query_devices()connected_cam_num = len(devices)if connected_cam_num == cam_num:continuous_value += 1if continuous_value == continuous_stable_value:breakelse:continuous_value = 0veri_times += 1if veri_times == max_veri_times:print("檢測超時,請檢查攝像頭連接!")sys.exit()print('攝像頭個數:{}'.format(connected_cam_num))'''循環reset攝像頭'''# hardware_reset()后是不是應該延遲一段時間?不延遲就會報錯print('\n', end='')print('開始初始化攝像頭:')for dev in ctx.query_devices():# 先將設備的序列號放進一個變量里,免得在下面for循環里訪問設備的信息過多(雖然不知道它會不會每次都重新訪問)dev_serial = dev.get_info(rs.camera_info.serial_number)# 匹配序列號,重置我們需重置的特定攝像頭(注意兩個for循環順序,哪個在外哪個在內很重要,不然會導致剛重置的攝像頭又被訪問導致報錯)for serial in self.cam_serials:if serial == dev_serial:dev.hardware_reset()# 像下面這條語句居然不會報錯,不是剛剛才重置了dev嗎?莫非區別在于沒有通過for循環ctx.query_devices()去訪問?# 
是不是剛重置后可以通過ctx.query_devices()去查看有這個設備,但是卻沒有存儲設備地址?如果是這樣,# 也就能夠解釋為啥能夠通過len(ctx.query_devices())函數獲取設備數量,但訪問序列號等信息就會報錯的原因了print('攝像頭{}初始化成功'.format(dev.get_info(rs.camera_info.serial_number)))'''連續驗證機制'''# D·C 1911202:創建最大驗證次數max_veri_times;創建連續穩定值continuous_stable_value,用于判斷設備重置后是否處于穩定狀態print('\n', end='')print('開始連續驗證,連續驗證穩定值:{},最大驗證次數:{}:'.format(continuous_stable_value, max_veri_times))continuous_value = 0veri_times = 0while True:devices = ctx.query_devices()connected_cam_num = len(devices)if connected_cam_num == cam_num:continuous_value += 1if continuous_value == continuous_stable_value:breakelse:continuous_value = 0veri_times += 1if veri_times == max_veri_times:print("檢測超時,請檢查攝像頭連接!")sys.exit()print('攝像頭個數:{}'.format(connected_cam_num))'''配置各個攝像頭的基本對象'''for i in range(len(self.cam_serials)):locals()['pipeline' + str(i + 1)] = rs.pipeline(ctx)locals()['config' + str(i + 1)] = rs.config()locals()['config' + str(i + 1)].enable_device(self.cam_serials[i])locals()['config' + str(i + 1)].enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)locals()['config' + str(i + 1)].enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)locals()['pipeline' + str(i + 1)].start(locals()['config' + str(i + 1)])# 創建對齊對象(深度對齊顏色)locals()['align' + str(i + 1)] = rs.align(rs.stream.color)'''運行攝像頭'''try:while True:start_time = time.time()for i in range(len(self.cam_serials)):locals()['frames' + str(i + 1)] = locals()['pipeline' + str(i + 1)].wait_for_frames()# 獲取對齊幀集locals()['aligned_frames' + str(i + 1)] = locals()['align' + str(i + 1)].process(locals()['frames' + str(i + 1)])# 獲取對齊后的深度幀和彩色幀locals()['aligned_depth_frame' + str(i + 1)] = locals()['aligned_frames' + str(i + 1)].get_depth_frame()locals()['color_frame' + str(i + 1)] = locals()['aligned_frames' + str(i + 1)].get_color_frame()if not locals()['aligned_depth_frame' + str(i + 1)] or not locals()['color_frame' + str(i + 1)]:continue# 獲取顏色幀內參locals()['color_profile' + str(i + 1)] = locals()['color_frame' + str(i + 
1)].get_profile()locals()['cvsprofile' + str(i + 1)] = rs.video_stream_profile(locals()['color_profile' + str(i + 1)])locals()['color_intrin' + str(i + 1)] = locals()['cvsprofile' + str(i + 1)].get_intrinsics()locals()['color_intrin_part' + str(i + 1)] = [locals()['color_intrin' + str(i + 1)].ppx,locals()['color_intrin' + str(i + 1)].ppy,locals()['color_intrin' + str(i + 1)].fx,locals()['color_intrin' + str(i + 1)].fy]# 【空間過濾器】locals()['spatial' + str(i + 1)] = rs.spatial_filter()locals()['spatial' + str(i + 1)].set_option(rs.option.filter_magnitude, 5)locals()['spatial' + str(i + 1)].set_option(rs.option.filter_smooth_alpha, 1)locals()['spatial' + str(i + 1)].set_option(rs.option.filter_smooth_delta, 50)locals()['spatial' + str(i + 1)].set_option(rs.option.holes_fill, 3)locals()['filtered_depth' + str(i + 1)] = locals()['spatial' + str(i + 1)].process(locals()['aligned_depth_frame' + str(i + 1)])locals()['depth_image' + str(i + 1)] = np.asanyarray(locals()['filtered_depth' + str(i + 1)].get_data())# print(locals()['depth_image' + str(i + 1)].dtype) # uint16locals()['color_image' + str(i + 1)] = np.asanyarray(locals()['color_frame' + str(i + 1)].get_data())# locals()['depth_image' + str(i + 1)] = np.asanyarray(# locals()['aligned_depth_frame' + str(i + 1)].get_data())# 【打印深度值看看、全部打印顯示】# np.set_printoptions(threshold=np.inf)# print(locals()['depth_image' + str(i + 1)])# 【計算深度圖數據中的0值】# locals()['all_pixels' + str(i + 1)], locals()['num_black' + str(i + 1)] = self.traversing_pixels(# locals()['depth_image' + str(i + 1)])locals()['num_black' + str(i + 1)] = traversing_pixels(locals()['depth_image' + str(i + 1)])# num_black = 0# all_pixels = 0# for row in range(480):# for colume in range(640):# all_pixels += 1# if locals()['depth_image' + str(i + 1)][row, colume] == 0:# num_black += 1print('depth_image分辨率:{}'.format(locals()['depth_image' + str(i + 1)].shape))# print('depth_image:{}'.format(num_black))# print('depth_image:{}'.format(num_black / 
all_pixels))print('depth_image:{}'.format(locals()['num_black' + str(i + 1)]))# print('depth_image:{}'.format(# locals()['num_black' + str(i + 1)] / locals()['all_pixels' + str(i + 1)]))# 以下這種卡的不行(get_distance()函數會把窗口搞崩潰(即使不很卡))# for row in range(locals()['aligned_depth_frame' + str(i + 1)].get_height()):# for colume in range(locals()['aligned_depth_frame' + str(i + 1)].get_width()):# all_pixels += 1# if locals()['depth_image' + str(i + 1)][row, colume] == 0:# # if locals()[# # 'aligned_depth_frame' + str(i + 1)].get_distance(row, colume) == 0:# num_black += 1# for pixel in locals()['depth_image' + str(i + 1)].ravel():# all_pixels += 1# if pixel == 0:# num_black += 1# print('depth_image分辨率:{}'.format(locals()['depth_image' + str(i + 1)].shape))# print('depth_image:{}'.format(num_black))# print('depth_image:{}'.format(num_black / all_pixels))locals()['depth_colormap' + str(i + 1)] = cv2.applyColorMap(cv2.convertScaleAbs(locals()['depth_image' + str(i + 1)], alpha=0.0425),cv2.COLORMAP_JET)locals()['image' + str(i + 1)] = np.hstack((locals()['color_image' + str(i + 1)], locals()['depth_colormap' + str(i + 1)]))cv2.imshow('win{}'.format(i + 1), locals()['image' + str(i + 1)])cv2.waitKey(1)end_time = time.time()print('單幀運行時間:{}'.format(end_time - start_time))finally:for i in range(len(self.cam_serials)):locals()['pipeline' + str(i + 1)].stop()if __name__ == '__main__':ObstacleDetection().obstacle_detection()```運行結果:```python D:\20191031_tensorflow_yolov3\python\python.exe D:/20191211_obstacle_detection/obstacle_detection/191219_obstacle_detection_測試加空間過濾器和jit加速器_將需jit加速函數放在類外部.py開始連續驗證,連續驗證穩定值:10,最大驗證次數:100: 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6開始初始化攝像頭: 攝像頭838212073161初始化成功開始連續驗證,連續驗證穩定值:10,最大驗證次數:100: 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6 攝像頭個數:6 depth_image分辨率:(480, 640) depth_image:85152 單幀運行時間:0.9076991081237793 depth_image分辨率:(480, 640) depth_image:9551 ...總結
以上是生活随笔為你收集整理的python numba.jit 警告:cannot determine Numba type of class 'numba.dispatcher.LiftedLoop'(加速代码)的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: python numba.jit(该装饰
- 下一篇: numba.jit警告:Compilat