Howdy, Stranger!

It looks like you're new here. If you want to get involved, click one of these buttons!

Sign In

Howdy, Stranger!

It looks like you're new here. If you want to get involved, click one of these buttons!

RuntimeError: Failed to queue inference: NC_ERROR - after 100 Frames // NCS2 Raspberry pi

Hi there,

I tried to change the classification sample to work with my own video and model, and it runs on my Pi with 2 NCS2, but after approximately 100 frames it starts to freeze and then gives me the following error msg:

Traceback (most recent call last):
File "/home/pi/NCS_OpenVino/", line 131, in
res = exec_net.infer(inputs={input_blob: images})
File "ie_api.pyx", line 146, in openvino.inference_engine.ie_api.ExecutableNetwork.infer
E: [watchdog] [ 990982] sendPingMessage:164 Failed send ping message: X_LINK_ERROR
File "ie_api.pyx", line 179, in openvino.inference_engine.ie_api.InferRequest.infer
File "ie_api.pyx", line 183, in openvino.inference_engine.ie_api.InferRequest.infer
RuntimeError: Failed to queue inference: NC_ERROR
E: [ncAPI] [ 991550] ncFifoDestroy:2888 Failed to write to fifo before deleting it!

I tried to delete my variables and load the model again every 40 frames, because I thought it might be an allocation error, but it didn't change a thing.

any clues??

from future import print_function
import imutils
import sys
import os
from argparse import ArgumentParser
import cv2

import numpy as np
import logging as log
from time import time
from openvino.inference_engine import IENetwork, IEPlugin
from import FileVideoStream

from import FPS
from import VideoStream
from import FPS
import argparse
import imutils
import cv2 as cv
import numpy as np
from import FileVideoStream
import os

import matplotlib.pyplot as plt

def build_argparser():
parser = ArgumentParser()
parser.add_argument("-m", "--model", help="Path to an .xml file with a trained model.", required=True, type=str)
# parser.add_argument("-i", "--input", help="Path to a folder with images or path to an image files", required=True,
# type=str, nargs="+")
parser.add_argument("-l", "--cpu_extension",
help="MKLDNN (CPU)-targeted custom layers.Absolute path to a shared library with the kernels "
"impl.", type=str, default=None)
parser.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder", type=str, default=None)
parser.add_argument("-d", "--device",
help="Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
"will look for a suitable plugin for device specified (CPU by default)", default="CPU",
parser.add_argument("--labels", help="Labels mapping file", default=None, type=str)
parser.add_argument("-nt", "--number_top", help="Number of top results", default=10, type=int)
parser.add_argument("-ni", "--number_iter", help="Number of inference iterations", default=1, type=int)
parser.add_argument("-pc", "--perf_counts", help="Report performance counters", default=False, action="store_true")

return parser

if 1 == True:

#cap = VideoStream(usePiCamera=True).start()

cap = FileVideoStream('/media/videopath.mp4').start()

log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + ".bin"

# Plugin initialization for specified device and load extensions library if specified
plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
if args.cpu_extension and 'CPU' in args.device:
# Read IR"Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = IENetwork(model=model_xml, weights=model_bin)

if plugin.device == "CPU":
    supported_layers = plugin.get_supported_layers(net)
    not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    if len(not_supported_layers) != 0:
        log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                  format(plugin.device, ', '.join(not_supported_layers)))
        log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                  "or --cpu_extension command line argument")

assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
assert len(net.outputs) == 1, "Sample supports only single output topologies""Loading model to the plugin")
exec_net = plugin.load(network=net)
framenr = 0
net.batch_size = 1
width1 = 1080
pxli = round(width1 / 2 - width1 * 0.022)
pxre = round(width1 / 2 + width1 * 0.022)


    spf = time()  

    frame1 =

    frame2 = imutils.resize(frame1, width=width1)

    frame1 = frame2[350:398, pxli:pxre]
    frame1 = cv2.resize(frame1, (224, 224))
    args.input = frame1"Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    images = frame1

    images = images.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        #images[i] = image"Batch size is {}".format(1))
    t1 = time()
    res = exec_net.infer(inputs={input_blob: images})
    cv2.imshow("Frame", frame1)"Average running time of one iteration: {} ms".format((time()-t1)*1000))

    # Processing output blob"Processing output blob")
    res = res[out_blob]"Top {} results: ".format(args.number_top))
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
        labels_map = None
    for i, probs in enumerate(res):

        probs = np.squeeze(probs)
        top_ind = np.argsort(probs)[-args.number_top:][::-1]
        #print("Image {}\n".format(args.input))

        for id in top_ind:
            det_label = labels_map[id] if labels_map else "#{}".format(id)
            print("{:.7f} label {}".format(probs[id], det_label))

    key = cv.waitKey(1)
    if key == ord("q"):
    print("FPS {} ".format(1/(time()-spf)))
    framenr +=1
    print("#FRAME {}".format(framenr))
    del images
    del input_blob
    del out_blob
    del res
    del frame1
    del probs
    del frame2
    if framenr > 40:
        del exec_net
        exec_net = plugin.load(network=net)
        framenr = 0



  • 1 Comment sorted by Votes Date Added
  • Hi @Tonto5000

    The Failed send ping message: X_LINK_ERROR message is probably due to a connection issue between the NCS2 and the system. Since you're using a Raspberry Pi, are you plugging the NCS2 straight in to the Pi, or are you using a powered USB hub?

    Best Regards,

This discussion has been closed.