Commit 21cf4375 authored by Michael Murtaugh

livefaces

parent 649fd8ce
import numpy as np
import cv2, os
from time import sleep
#tpath = os.path.expanduser("~/opencv/opencv-3.0.0/data/haarcascades")
tpath = os.path.expanduser("~/opencv/data/haarcascades")
face_cascade = cv2.CascadeClassifier(os.path.join(tpath, 'haarcascade_frontalface_default.xml'))
eye_cascade = cv2.CascadeClassifier(os.path.join(tpath, 'haarcascade_eye.xml'))
# eye_cascade = cv2.CascadeClassifier(os.path.join(tpath, 'haarcascade_profileface.xml'))
from argparse import ArgumentParser
import thread
from numpy import zeros, array
p = ArgumentParser("")
args = p.parse_args()
cur_frame = None
def get_cur_frame():
    return cur_frame

def process_image(img):
    # detect faces in the grayscale frame, then look for eyes inside each face region
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)

def process():
    # detection loop run in a background thread: annotate the most recent frame
    while True:
        img = get_cur_frame()
        if img is None:
            sleep(0.1)
            continue
        process_image(img)
        cv2.imshow('img', img)

cap = cv2.VideoCapture(1)
pthread = None
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    cur_frame = frame
    if pthread is None:
        pthread = thread.start_new_thread(process, ())
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    # cv2.imwrite("output.jpg", frame)

cap.release()
cv2.destroyAllWindows()
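# Editor's sketch (not part of this commit): an alternative main loop for the
# script above, using the portable `threading` module instead of the Python 2-only
# `thread`, and keeping all cv2 GUI calls in the main thread (imshow from a worker
# thread is unreliable on some platforms). It reuses process_image() and the
# module-level cur_frame defined above; the function names, the camera index
# default, and the 0.05 s pacing sleep are assumptions. Nothing here runs unless
# run_alternative_loop() is called explicitly.
import threading

def detection_worker():
    # annotate whatever frame the main loop captured most recently
    while True:
        img = cur_frame
        if img is None:
            sleep(0.1)
            continue
        process_image(img)
        sleep(0.05)

def run_alternative_loop(device=1):
    global cur_frame
    cap = cv2.VideoCapture(device)
    worker = threading.Thread(target=detection_worker)
    worker.daemon = True  # let the process exit even if the worker is still looping
    worker.start()
    while True:
        ret, frame = cap.read()
        cur_frame = frame
        if frame is not None:
            cv2.imshow('img', frame)  # GUI stays in the main thread
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()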
#!/usr/bin/env python
from __future__ import print_function
import cv2, os, datetime, sys
from argparse import ArgumentParser
from time import sleep
from analyze_image import analyze_image
p = ArgumentParser()
p.add_argument("--faces", type=int, default=1, help="number of consecutive faces to require")
p.add_argument("--camera", type=int, default=0, help="camera number, default: 0")
p.add_argument("--cascades", default="./haarcascades", help="location of the cascade XML files, default: ./haarcascades")
p.add_argument("--scaleFactor", type=float, default=None, help="scaleFactor, float, default: None (1.1)")
p.add_argument("--minNeighbors", type=int, default=None, help="minNeighbors, int, default: None (3)")
args = p.parse_args()
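# Example invocation (editor's illustration; the script filename, camera index,
# and cascade path are assumptions, not taken from this commit):
#   python livefaces.py --camera 1 --cascades ~/opencv/data/haarcascades --faces 3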
tpath = os.path.expanduser(args.cascades)
face_cascade = cv2.CascadeClassifier(os.path.join(tpath, 'haarcascade_frontalface_default.xml'))
eye_cascade = cv2.CascadeClassifier(os.path.join(tpath, 'haarcascade_eye.xml'))
cv2.namedWindow("display", cv2.cv.CV_WINDOW_NORMAL)
cv2.setWindowProperty("display", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
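# Editor's note: the cv2.cv.* constants above exist only in OpenCV builds that
# still ship the legacy cv2.cv module (2.4.x). On OpenCV 3+ the equivalent calls
# would be (an assumption about this setup, not part of the commit):
#   cv2.namedWindow("display", cv2.WINDOW_NORMAL)
#   cv2.setWindowProperty("display", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)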
cap = cv2.VideoCapture(args.camera)
face_count = 0
detect = True
while True:
    ret, frame = cap.read()
    if frame is None:
        print("ERROR CAPTURING FRAME. CHECK CAMERA CONNECTION AND SETTINGS", file=sys.stderr)
        sys.exit(0)
    # faces_p, faces, eyes = analyze_image(frame, face_cascade, eye_cascade, args.scaleFactor, args.minNeighbors)
    if detect:
        # detect faces on the grayscale frame, then eyes within each face region
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.5, minSize=(150, 150))
        for (x, y, w, h) in faces:
            # print((x, y, w, h))
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = frame[y:y+h, x:x+w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    cv2.imshow('display', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
# def process_image (img):
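# Editor's sketch (not part of this commit): the analyze_image module imported
# above is not shown in this diff. Going only by the commented-out call
#   faces_p, faces, eyes = analyze_image(frame, face_cascade, eye_cascade, scaleFactor, minNeighbors)
# a compatible helper could look like the function below. The name, the reading
# of faces_p as a "faces present" flag, and the full-frame eye coordinates are
# assumptions, not the repository's actual implementation.
def analyze_image_sketch(img, face_cascade, eye_cascade, scaleFactor=None, minNeighbors=None):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # only pass the tuning parameters that were actually given on the command line
    kwargs = {}
    if scaleFactor is not None:
        kwargs["scaleFactor"] = scaleFactor
    if minNeighbors is not None:
        kwargs["minNeighbors"] = minNeighbors
    faces = face_cascade.detectMultiScale(gray, **kwargs)
    eyes = []
    for (x, y, w, h) in faces:
        roi_gray = gray[y:y+h, x:x+w]
        for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(roi_gray):
            # report eye boxes in full-frame coordinates
            eyes.append((x + ex, y + ey, ew, eh))
    return len(faces) > 0, faces, eyes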