Commit fba02efe authored by Michael Murtaugh

shifting to jorn

parent 8af80f84
@@ -81,6 +81,7 @@ document.addEventListener("DOMContentLoaded", function () {
     })
     socket.on("annotatedcameraframe", function (d) {
         img.src = d.src;
+        video.style.display = "none";
         img.style.display = "block";
     });
     socket.on("collage", function (d) {
......
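For context on the handler above: the browser simply swaps the img source whenever an "annotatedcameraframe" event arrives, so the server side only needs to emit that event with a src value the img tag can display. A minimal Python-side sketch of such an emit, assuming a Flask-SocketIO server and a BGR frame from OpenCV; the module layout, the helper name send_annotated_frame, and the data-URL payload are assumptions for illustration, not part of this commit:

import base64
import cv2
from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app)

def send_annotated_frame(frame):
    # frame is a BGR numpy array, e.g. the annotated output of the face detector
    ok, jpeg = cv2.imencode(".jpg", frame)
    if not ok:
        return
    b64 = base64.b64encode(jpeg.tobytes()).decode("ascii")
    # the client handler above reads d.src and assigns it to img.src
    socketio.emit("annotatedcameraframe", {"src": "data:image/jpeg;base64," + b64})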
<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <style type="text/css">
        body {
            background: #888;
        }
        iframe {
            width: 700px;
            height: 400px;
            float: left;
            margin: 10px;
            border: none;
        }
    </style>
</head>
<body>
    <iframe src="camera.html"></iframe>
    <iframe src="collage.html"></iframe>
    <iframe src="archive.html"></iframe>
</body>
</html>
 import cv2
-def analyze_image (img, face_cascade, eye_cascade, scaleFactor=None, minNeighbors=None):
+def analyze_image (img, face_cascade, eye_cascade, scaleFactor=None, minNeighbors=None, minSize=None):
     ret = False
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     # faces = face_cascade.detectMultiScale(gray, 1.3, 5)
-    if scaleFactor !=None and minNeighbors != None:
-        faces = face_cascade.detectMultiScale(gray, scaleFactor, minNeighbors)
-    elif scaleFactor != None:
-        faces = face_cascade.detectMultiScale(gray, scaleFactor)
-    else:
-        faces = face_cascade.detectMultiScale(gray)
+    args = dict()
+    if scaleFactor:
+        args['scaleFactor'] = scaleFactor
+    if minNeighbors:
+        args['minNeighbors'] = minNeighbors
+    if minSize:
+        args['minSize'] = (minSize, minSize)
+    # face_cascade.detectMultiScale(**args)
+    faces = face_cascade.detectMultiScale(gray, **args)
+    # if scaleFactor !=None and minNeighbors != None:
+    #     faces = face_cascade.detectMultiScale(gray, scaleFactor, minNeighbors, minSize=(300, 300))
+    # elif scaleFactor != None:
+    #     faces = face_cascade.detectMultiScale(gray, scaleFactor)
+    # else:
+    #     faces = face_cascade.detectMultiScale(gray)
     ar_faces=[]
     ar_eyes=[]
     for (x,y,w,h) in faces:
......
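The rewritten block above collects only the options that were actually supplied into a dict and forwards them with **args, so detectMultiScale falls back to OpenCV's own defaults for anything left at None, and minSize (a single integer on the command line) is expanded to a (minSize, minSize) tuple. A minimal calling sketch, assuming the cascade XML files sit under ./haarcascades as in the argument parser below and that analyze_image returns a (detection, faces, eyes) tuple as the other hunk unpacks it; the stock OpenCV cascade file names and the test image path are assumptions, not taken from this repo:

import cv2
# cascades loaded as the main script presumably does (file names assumed)
face_cascade = cv2.CascadeClassifier("./haarcascades/haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier("./haarcascades/haarcascade_eye.xml")

img = cv2.imread("photo.jpg")  # hypothetical test image
# minSize=300 becomes minSize=(300, 300) inside analyze_image
detection, faces, eyes = analyze_image(img, face_cascade, eye_cascade,
                                       scaleFactor=1.3, minNeighbors=5, minSize=300)
if detection:
    print(len(faces), "face(s),", len(eyes), "eye region(s) found")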
@@ -8,6 +8,7 @@ p = ArgumentParser("")
 p.add_argument("--cascades", default="./haarcascades", help="location of the cascade XML files, default: ./haarcascades")
 p.add_argument("--scaleFactor", type=float, default=None, help="scaleFactor, float, default: None (1.1)")
 p.add_argument("--minNeighbors", type=int, default=None, help="minNeighbors, int, default: None (3)")
+p.add_argument("--minSize", type=int, default=None, help="minSize, int")
 p.add_argument("input", nargs="*", default=[])
 args = p.parse_args()
@@ -27,7 +28,7 @@ numinput = len(inputs)
 for i, path in enumerate(inputs):
     print ("{0}/{1} {2}".format(i, numinput, path), file=sys.stderr)
     img = cv2.imread(path)
-    detection, faces, eyes = analyze_image(img, face_cascade, eye_cascade, args.scaleFactor, args.minNeighbors)
+    detection, faces, eyes = analyze_image(img, face_cascade, eye_cascade, args.scaleFactor, args.minNeighbors, args.minSize)
     item={'path': path}
     if detection:
         item['faces'] = faces
......
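For reference, the per-image records built in the loop above are what the join script below concatenates: the detector appears to write a JSON list of objects keyed by path, with faces and eyes present only when something was detected. A sketch of the assumed shape of one record; the values are made up and the rectangles follow OpenCV's x, y, w, h convention:

item = {
    "path": "images/0001.jpg",        # hypothetical input path
    "faces": [[210, 95, 180, 180]],   # one face rectangle (x, y, w, h)
    "eyes": [[240, 130, 40, 40],      # eye rectangles, if any were found
             [330, 132, 38, 38]],
}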
#!/usr/bin/env python
from __future__ import print_function
import json
import sys
from argparse import ArgumentParser
p = ArgumentParser("join json lists")
p.add_argument("input", nargs="+", default=[])
args = p.parse_args()
od = []
for f in args.input:
    with open(f) as fin:
        data = json.load(fin)
        od.extend(data)
print ("Combining {0} sources, {1} items total".format(len(args.input), len(od)), file=sys.stderr)
facecount = 0
eyecount = 0
for x in od:
    if 'faces' in x:
        facecount += len(x['faces'])
    if 'eyes' in x:
        eyecount += len(x['eyes'])
print ("{0} faces, {1} eyes".format(facecount, eyecount))