Part Number: TDA4VM

Hello everyone, I am trying to run multiple models with multithreading (Python) through the Edge AI TIDL TFLite runtime.
I have attached sample code below (it closely mimics our scenario). If I run this code, it freezes on exit (no app deinit log). However, if I run the same code without instantiating the second model, it runs fine (app deinit etc. is printed on the terminal).
You can pick whichever models and corresponding artifacts you see fit; I can reproduce this behavior with every model. Unfortunately, I cannot share our models.
I suspect that some shared resource is not being released, which is why the code freezes. Is there a better way to handle a multi-model, multithreaded scenario? (Two sketches of directions I can think of follow the code listing.)
Looking forward to your reply.
Best,
Ashay
import os
import queue
import threading

import numpy as np
import tflite_runtime.interpreter as tflite

# SOC and tools path must be set before the TIDL delegate is loaded.
os.environ["SOC"] = "am68a"
os.environ["TIDL_TOOLS_PATH"] = "/home/root/edgeai-tidl-tools/"


def get_image():
    # Random stand-in for a real frame (NHWC, float32).
    img = np.random.randint(0, 255, (1, 256, 320, 3))
    return img.astype(np.float32)


class ImageProcessor:
    def __init__(self, model_path, model_artifacts, num_threads=2):
        self.model_path = model_path
        self.model_artifacts = model_artifacts
        self.compile_options = {
            "tidl_tools_path": os.environ["TIDL_TOOLS_PATH"],
            "artifacts_folder": model_artifacts,
        }
        self.shared_obj_path = "libtidl_tfl_delegate.so"
        self.tidl_delegate = [
            tflite.load_delegate(self.shared_obj_path, self.compile_options)
        ]
        self.interpreter = tflite.Interpreter(
            model_path=self.model_path,
            experimental_delegates=self.tidl_delegate,
        )
        self.image_queue = queue.Queue()
        self.lock = threading.Lock()
        self.num_threads = num_threads  # was hard-coded to 2

    def run_inference(self, input_image):
        input_details = self.interpreter.get_input_details()
        output_details = self.interpreter.get_output_details()
        self.interpreter.resize_tensor_input(0, (1, 256, 320, 3))
        self.interpreter.allocate_tensors()
        self.interpreter.set_tensor(input_details[0]["index"], input_image)
        self.interpreter.invoke()
        return self.interpreter.get_tensor(output_details[0]["index"])

    def worker(self):
        while True:
            image = self.image_queue.get()
            if image is None:  # sentinel: shut this worker down
                break
            with self.lock:  # serialize access to this instance's interpreter
                self.run_inference(image)

    def process_image(self):
        threads = []
        for _ in range(10):
            self.image_queue.put(get_image())
        for _ in range(self.num_threads):
            thread = threading.Thread(target=self.worker)
            thread.start()
            threads.append(thread)
        for _ in range(self.num_threads):
            self.image_queue.put(None)  # one sentinel per worker
        for thread in threads:
            thread.join()
        print("Image processed")


def main():
    first_model_filename = "some_model_1.tflite"
    first_model_artifact = "some_model_1_artifacts"
    second_model_filename = "some_model_2.tflite"
    second_model_artifact = "some_model_2_artifacts"

    image = ImageProcessor(first_model_filename, first_model_artifact)
    # The script freezes at exit when this second instance exists;
    # without it, everything runs and the deinit logs appear.
    image_1 = ImageProcessor(second_model_filename, second_model_artifact)

    image.process_image()
    image_1.process_image()

    for thread in threading.enumerate():
        print(thread.name)
    return "Successful"


if __name__ == "__main__":
    status = main()
    print(status)
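
To make the question concrete: is serializing every inference call across all models the expected usage? Below is a minimal sketch of that idea, reusing the ImageProcessor class from the sample above. The cross-delegate contention hypothesis is purely my guess, and SerializedImageProcessor / GLOBAL_TIDL_LOCK are names invented for illustration.

import threading

# One process-wide lock shared by every model instance, in case the two
# delegate instances contend for the same underlying TIDL/C7x resources
# (an unverified guess on my part).
GLOBAL_TIDL_LOCK = threading.Lock()

class SerializedImageProcessor(ImageProcessor):
    def run_inference(self, input_image):
        with GLOBAL_TIDL_LOCK:  # serialize across *all* models, not per instance
            return super().run_inference(input_image)

# Usage: swap the class in main(), e.g.
#   image = SerializedImageProcessor(first_model_filename, first_model_artifact)
#   image_1 = SerializedImageProcessor(second_model_filename, second_model_artifact)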
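
Alternatively, is isolating each model in its own process the recommended pattern? Here is a rough sketch that would replace main() above, again reusing ImageProcessor. The run_model helper and the "spawn" start method are my assumptions; I have not confirmed that the TIDL delegate behaves well under multiprocessing.

import multiprocessing as mp

def run_model(model_path, artifacts):
    # Each process builds its own delegate and interpreter, so nothing is
    # shared across models; teardown happens at process exit.
    processor = ImageProcessor(model_path, artifacts)
    processor.process_image()

if __name__ == "__main__":
    mp.set_start_method("spawn")  # avoid forking after the delegate is loaded
    jobs = [
        mp.Process(target=run_model,
                   args=("some_model_1.tflite", "some_model_1_artifacts")),
        mp.Process(target=run_model,
                   args=("some_model_2.tflite", "some_model_2_artifacts")),
    ]
    for job in jobs:
        job.start()
    for job in jobs:
        job.join()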