
Using the Pi Camera, not a USB camera. Some sample code.

I've got to admit I was struggling with the ncappzoo and getting the stick running. After installing version 1, I deleted my workspace folder, upgraded to version 2 of the SDK, and then installed version 2 of the ncappzoo; now my Pi 3+ is running and sees the stick. The very next issue was using a Pi Camera on the ribbon connector instead of a USB camera.
I noticed the examples that take video input work with a USB cam but not the Pi Camera I am using, so I reworked this simple example. I am no Python expert, but I am pasting the code here in case someone needs or wants to use the Pi Camera and wants a little help getting started. Happy using the Neural Compute Stick!

This is a modified live-image-classifier.py
```python

#!/usr/bin/python3

# ****************************************************************************
# Copyright(c) 2017 Intel Corporation.
# License: MIT See LICENSE file in root directory.
# ****************************************************************************

# Perform inference on a LIVE camera feed using DNNs on
# Intel® Movidius™ Neural Compute Stick (NCS)

from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import os
import cv2
import sys
import numpy
import ntpath
import argparse
import skimage.io
import skimage.transform

import mvnc.mvncapi as mvnc

# Variable to store commandline arguments
ARGS = None

# PiCamera object for video capture
cam = None

# Buffer for the captured image array
rawCapture = None

# ---- Step 1: Open the enumerated device and get a handle to it -------------

def open_ncs_device():

    # Look for enumerated NCS device(s); quit program if none found.
    devices = mvnc.enumerate_devices()
    if len( devices ) == 0:
        print( "No devices found" )
        quit()

    # Get a handle to the first enumerated device and open it
    device = mvnc.Device( devices[0] )
    device.open()

    return device

# ---- Step 2: Load a graph file onto the NCS device -------------------------

def load_graph( device ):

    # Read the graph file into a buffer
    with open( ARGS.graph, mode='rb' ) as f:
        blob = f.read()

    # Load the graph buffer into the NCS and set up the input/output fifos
    graph = mvnc.Graph( ARGS.graph )
    fifo_in, fifo_out = graph.allocate_with_fifos( device, blob )

    return graph, fifo_in, fifo_out

# ---- Step 3: Pre-process the images ----------------------------------------

def pre_process_image( frame ):

    height, width, channels = frame.shape

    # Extract/crop a section of the frame
    x1 = int( width / 3 )
    y1 = int( height / 4 )
    x2 = int( width * 2 / 3 )
    y2 = int( height * 3 / 4 )

    cv2.rectangle( frame, ( x1, y1 ), ( x2, y2 ), ( 0, 255, 0 ), 2 )
    img = frame[ y1 : y2, x1 : x2 ]

    # Resize image [Image size is defined by the chosen network, during training]
    img = cv2.resize( img, tuple( ARGS.dim ) )

    # Convert between RGB and BGR if needed [the original sample read RGB via
    # skimage; this version captures BGR from the Pi Camera]
    if( ARGS.colormode == "BGR" ):
        img = img[:, :, ::-1]

    # Mean subtraction & scaling [A common technique used to center the data]
    img = img.astype( numpy.float16 )
    img = ( img - numpy.float16( ARGS.mean ) ) * ARGS.scale

    return img

# ---- Step 4: Read & print inference results from the NCS -------------------

def infer_image( graph, img, frame, fifo_in, fifo_out ):

    # Load the labels file
    labels = [ line.rstrip( '\n' ) for line in
                   open( ARGS.labels ) if line != 'classes\n' ]

    # Queue the image for inference [v2 fifos expect 32-bit floats by default]
    graph.queue_inference_with_fifo_elem( fifo_in, fifo_out,
                                          img.astype( numpy.float32 ), None )

    # Get the results from NCS
    output, userobj = fifo_out.read_elem()

    # Find the index of highest confidence
    top_prediction = output.argmax()

    # Get execution time
    inference_time = graph.get_option( mvnc.GraphOption.RO_TIME_TAKEN )

    print( "I am %3.1f%%" % ( 100.0 * output[top_prediction] ) + " confident"
           + " you are " + labels[top_prediction]
           + " ( %.2f ms )" % ( numpy.sum( inference_time ) ) )

    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        frame = cv2.flip( frame, 1 )
        cv2.imshow( 'NCS live inference', frame )

# ---- Step 5: Close/clean up fifos, graph, and device ------------------------

def clean_up( device, graph, fifo_in, fifo_out ):
    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    device.close()
    device.destroy()
    cv2.destroyAllWindows()
    cam.close()

# ---- Main function (entry point for this script) ----------------------------

def main():

    device = open_ncs_device()
    graph, fifo_in, fifo_out = load_graph( device )

    while( True ):
        print( "Capture frame" )

        # Grab one frame from the Pi Camera as a BGR numpy array
        rawCapture = PiRGBArray( cam, size=( 640, 480 ) )
        cam.capture( rawCapture, format="bgr" )
        frame = rawCapture.array
        print( frame.shape )

        img = pre_process_image( frame )
        infer_image( graph, img, frame, fifo_in, fifo_out )

        # Display the frame for 5ms, and close the window so that the next
        # frame can be displayed. Exit the loop if 'q' is pressed.
        if( cv2.waitKey( 5 ) & 0xFF == ord( 'q' ) ):
            break

    print( "Clean up" )
    clean_up( device, graph, fifo_in, fifo_out )

# ---- Define 'main' function as the entry point for this script -------------

if __name__ == '__main__':

    parser = argparse.ArgumentParser(
                         description="Image classifier using \
                         Intel® Movidius™ Neural Compute Stick." )

    parser.add_argument( '-g', '--graph', type=str,
                         default='../../caffe/GenderNet/graph',
                         help="Absolute path to the neural network graph file." )

    parser.add_argument( '-l', '--labels', type=str,
                         default='../../data/age_gender/gender_categories.txt',
                         help="Absolute path to labels file." )

    parser.add_argument( '-M', '--mean', type=float,
                         nargs='+',
                         default=[78.42633776, 87.76891437, 114.89584775],
                         help="',' delimited floating point values for image mean." )

    parser.add_argument( '-S', '--scale', type=float,
                         default=1,
                         help="Scaling factor applied after mean subtraction." )

    parser.add_argument( '-D', '--dim', type=int,
                         nargs='+',
                         default=[227, 227],
                         help="Image dimensions. ex. -D 224 224" )

    parser.add_argument( '-c', '--colormode', type=str,
                         default="RGB",
                         help="RGB vs BGR color sequence. \
                               Defined during model training." )

    parser.add_argument( '-v', '--video', type=int,
                         default=0,
                         help="Index of your computer's V4L2 video device. \
                               ex. 0 for /dev/video0 [unused with the Pi Camera]" )

    ARGS = parser.parse_args()

    # Construct (open) the Pi Camera
    cam = PiCamera()
    cam.resolution = ( 640, 480 )

    print( "passed raw capture" )
    main()

# ==== End of file ===========================================================

```
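One design note on the loop above: it allocates a fresh PiRGBArray on every iteration, which works but adds per-frame overhead. picamera can instead reuse a single buffer via capture_continuous(). Below is a minimal, untested sketch of that variant of main(), assuming the same open_ncs_device(), load_graph(), pre_process_image(), infer_image(), and clean_up() helpers from the script above:

```python
def main():

    device = open_ncs_device()
    graph, fifo_in, fifo_out = load_graph( device )

    # Reuse one buffer instead of allocating a PiRGBArray per frame
    rawCapture = PiRGBArray( cam, size=( 640, 480 ) )

    # use_video_port=True trades some image quality for a faster frame rate
    for _ in cam.capture_continuous( rawCapture, format="bgr",
                                     use_video_port=True ):
        frame = rawCapture.array
        img = pre_process_image( frame )
        infer_image( graph, img, frame, fifo_in, fifo_out )

        # Reset the buffer so it can be reused for the next frame
        rawCapture.truncate( 0 )

        if( cv2.waitKey( 5 ) & 0xFF == ord( 'q' ) ):
            break

    clean_up( device, graph, fifo_in, fifo_out )
```

As with the original sample, run the script from the app's folder so the default graph and labels paths (under ../../caffe/ and ../../data/) resolve.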

Comments

  • Sorry folks, I am not sure how to get this to format correctly.

  • I noticed the examples that take video input work with a USB cam but not the Pi Camera I am using

    @chicagobob123, here's sample code that runs SSD object detection on an RPi Zero W with the PiCam - https://github.com/movidius/ncappzoo/blob/master/apps/security-cam/security-picam.py. I had a blast putting this project together on an RPi Zero W; it should work on an RPi 3 without any mods.

    Some supporting documentation for this source code:
    1. Instructions to run this source code (use security-picam.py instead of security-cam.py) - https://github.com/movidius/ncappzoo/blob/master/apps/security-cam/README.md
    2. Blog explaining how I put this project together - https://ai.intel.com/practical-applications-of-deep-learning-build-a-diy-smart-security-camera-using-the-intel-movidius-neural-compute-stick/
    3. Slide deck from one of my hands-on training sessions - https://goo.gl/t59vq9
    4. I installed just the API framework on my RPi Zero, instead of installing the entire SDK and OpenCV - https://movidius.github.io/blog/ncs-apps-on-rpi/

  • Thanks for the leg up. I am doing the git clone now.

  • I guess this will take some work to get going, but it's something I am really interested in. First I downloaded the regular version 1 ncappzoo directory, ran a make all, and found that that version does not work, I think because I now have the version 2 SDK loaded. Here are the errors:

    Traceback (most recent call last):
      File "security-picam.py", line 213, in <module>
        main()
      File "security-picam.py", line 149, in main
        device = open_ncs_device()
      File "security-picam.py", line 41, in open_ncs_device
        devices = mvnc.EnumerateDevices()
    AttributeError: module 'mvnc.mvncapi' has no attribute 'EnumerateDevices'

    The current version 2 of the SDK does not include a security-cam folder, so I took the version 1 code and made it version 2 compatible. I am grateful, as it's teaching me the basics. I am now modifying it so it shows a window with the person detected; the img.show() call did not work (it said img has no show member or something). The code I modified is below, followed by a rough summary of the v1 → v2 API calls. I am adding an OpenCV display window (the quickest way I know to toss up a window) and will play with this for a while. Thanks so much.

    ```python

    #!/usr/bin/python3

    # ****************************************************************************
    # Copyright(c) 2017 Intel Corporation.
    # License: MIT See LICENSE file in root directory.
    # ****************************************************************************

    # DIY smart security camera PoC using Raspberry Pi Camera and
    # Intel® Movidius™ Neural Compute Stick (NCS)

    import os
    import sys
    import cv2                  # used for the display window below
    import numpy
    import select
    import ntpath
    import argparse
    import picamera
    import picamera.array

    import mvnc.mvncapi as mvnc

    from PIL import Image
    from time import localtime, strftime
    from utils import visualize_output
    from utils import deserialize_output

    "Class of interest" - Display detections only if they match this class ID

    CLASS_PERSON = 15

    Detection threshold: Minimum confidance to tag as valid detection

    CONFIDANCE_THRESHOLD = 0.60 # 60% confidant

    Variable to store commandline arguments

    ARGS = None

    # ---- Step 1: Open the enumerated device and get a handle to it -------------

    def open_ncs_device():

        # Look for enumerated NCS device(s); quit program if none found.
        devices = mvnc.enumerate_devices()
        if len( devices ) == 0:
            print( "No devices found" )
            quit()

        # Get a handle to the first enumerated device and open it
        device = mvnc.Device( devices[0] )
        device.open()

        return device
    

    # ---- Step 2: Load a graph file onto the NCS device -------------------------

    def load_graph( device ):

        # Read the graph file into a buffer
        with open( ARGS.graph, mode='rb' ) as f:
            blob = f.read()

        # Load the graph buffer into the NCS and set up the input/output fifos
        graph = mvnc.Graph( 'graph1' )
        fifo_in, fifo_out = graph.allocate_with_fifos( device, blob )

        return graph, fifo_in, fifo_out
    

    # ---- Step 4: Read & print inference results from the NCS -------------------

    def infer_image( graph, input_fifo, output_fifo, img, frame ):

        # Queue the image for inference [replaces v1's graph.LoadTensor()]
        graph.queue_inference_with_fifo_elem( input_fifo, output_fifo,
                                              img.astype( numpy.float32 ), None )

        # Get the results from NCS
        output, userobj = output_fifo.read_elem()

        # Get execution time
        inference_time = graph.get_option( mvnc.GraphOption.RO_TIME_TAKEN )

        # Deserialize the output into a python dictionary
        output_dict = deserialize_output.ssd(
                          output,
                          CONFIDANCE_THRESHOLD,
                          frame.shape )

        # Print the results (each image/frame may have multiple objects)
        for i in range( 0, output_dict['num_detections'] ):

            # Filter a specific class/category
            if( output_dict.get( 'detection_classes_' + str(i) ) == CLASS_PERSON ):

                cur_time = strftime( "%Y_%m_%d_%H_%M_%S", localtime() )
                print( "Person detected on " + cur_time )

                # Extract top-left & bottom-right coordinates of detected objects
                (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
                (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]

                # Prep string to overlay on the image
                # [labels is loaded from ARGS.labels in the original security-cam.py]
                display_str = (
                    labels[output_dict.get('detection_classes_' + str(i))]
                    + ": "
                    + str( output_dict.get('detection_scores_' + str(i) ) )
                    + "%" )

                # Overlay bounding boxes, detection class and scores
                frame = visualize_output.draw_bounding_box(
                            y1, x1, y2, x2,
                            frame,
                            thickness=4,
                            color=(255, 255, 0),
                            display_str=display_str )

                # Saves image capture snapshots
                #img = Image.fromarray( frame )
                #photo = ( os.path.dirname(os.path.realpath(__file__))
                #          + "/captures/photo_"
                #          + cur_time + ".jpg" )
                #img.save( photo )

        # If a display is available, show the frame in an OpenCV window.
        # [img is a numpy array, so img.show() does not exist; use cv2.imshow]
        if 'DISPLAY' in os.environ:
            cv2.imshow( 'NCS live inference', frame )
            cv2.waitKey( 1 )
    

    # ---- Step 5: Clean up fifos, unload the graph, and close the device --------

    def close_ncs_device( device, graph, fifo_in, fifo_out ):
        fifo_in.destroy()
        fifo_out.destroy()
        graph.destroy()
        device.close()
        device.destroy()

    # ---- Main function (entry point for this script) ----------------------------

    def main():

        device = open_ncs_device()
        graph, fifo_in, fifo_out = load_graph( device )

        # Main loop: Capture live stream & send frames to NCS
        # [pre_process_image() is unchanged from the original security-picam.py]
        with picamera.PiCamera() as camera:
            with picamera.array.PiRGBArray( camera ) as frame:
                camera.resolution = ( 640, 480 )
                while( True ):
                    camera.capture( frame, ARGS.colormode, use_video_port=True )
                    img = pre_process_image( frame.array )
                    infer_image( graph, fifo_in, fifo_out, img, frame.array )

                    # Clear PiRGBArray, so you can re-use it for next capture
                    frame.seek( 0 )
                    frame.truncate()

                    # Run the program until <ENTER> is pressed
                    i, o, e = select.select( [sys.stdin], [], [], 0.1 )
                    if( i ):
                        break

        close_ncs_device( device, graph, fifo_in, fifo_out )
    

    ```
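    For anyone else porting a v1 ncappzoo sample, this is the rough call-for-call mapping I ended up with between NCSDK v1 and v2. It is a sketch reconstructed from the changes above, not official documentation; the graph path and dummy input shape are placeholders:

    ```python
    # Rough NCSDK v1 -> v2 mapping (sketch; check Intel's migration docs).
    import numpy
    import mvnc.mvncapi as mvnc

    # v1: devices = mvnc.EnumerateDevices()
    devices = mvnc.enumerate_devices()

    # v1: device = mvnc.Device(devices[0]); device.OpenDevice()
    device = mvnc.Device( devices[0] )
    device.open()

    # v1: graph = device.AllocateGraph(blob)  [no fifos in v1]
    with open( 'graph', mode='rb' ) as f:   # placeholder graph path
        blob = f.read()
    graph = mvnc.Graph( 'graph1' )
    fifo_in, fifo_out = graph.allocate_with_fifos( device, blob )

    # v1: graph.LoadTensor(img, 'user object') then graph.GetResult()
    img = numpy.zeros( ( 300, 300, 3 ), dtype=numpy.float32 )  # dummy input
    graph.queue_inference_with_fifo_elem( fifo_in, fifo_out, img, None )
    output, userobj = fifo_out.read_elem()

    # v1: graph.DeallocateGraph() then device.CloseDevice()
    fifo_in.destroy()
    fifo_out.destroy()
    graph.destroy()
    device.close()
    device.destroy()
    ```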

  • Just to keep this thread going a bit longer: I have modified the threaded stream_ty_gn_threaded app to work with the Pi Camera, altering camera_processor.py to use the Pi camera instead of the USB camera (a sketch of that kind of swap is below).
    I won't get my second stick until Friday, so I won't be able to test more, but at least I am getting results.
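    For reference, a minimal sketch of the kind of change involved: assuming the processor's capture thread originally pulled frames with cv2.VideoCapture.read() and pushed them onto a queue, the Pi Camera version keeps the same queue-feeding loop but sources frames from picamera. The names here (FrameFeeder, frame_queue) are illustrative, not the actual camera_processor.py internals:

    ```python
    # Hypothetical sketch: feeding a frame queue from the Pi Camera instead
    # of cv2.VideoCapture. Class and variable names are illustrative only.
    import queue
    import threading

    from picamera import PiCamera
    from picamera.array import PiRGBArray

    class FrameFeeder( threading.Thread ):
        def __init__( self, frame_queue ):
            threading.Thread.__init__( self )
            self._queue = frame_queue
            self._stop_event = threading.Event()

        def run( self ):
            with PiCamera() as camera:
                camera.resolution = ( 640, 480 )
                raw = PiRGBArray( camera, size=( 640, 480 ) )
                # use_video_port=True keeps the frame rate up for live streams
                for _ in camera.capture_continuous( raw, format="bgr",
                                                    use_video_port=True ):
                    if self._stop_event.is_set():
                        break
                    if not self._queue.full():
                        self._queue.put( raw.array.copy() )
                    raw.truncate( 0 )

        def stop( self ):
            self._stop_event.set()

    # Usage: the inference thread reads BGR numpy frames from frame_queue
    frame_queue = queue.Queue( maxsize=2 )
    feeder = FrameFeeder( frame_queue )
    feeder.start()
    ```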
