Commit 11f74708 by murtaugh

vitrine/scripts

parent acf5c619
import cv2


def analyze_image(img, face_cascade, eye_cascade, scaleFactor=None, minNeighbors=None, minSize=None):
    """Detect faces (and eyes within each face) in a BGR image.

    Draws blue rectangles around faces and green rectangles around eyes
    directly on *img* (the image is mutated in place).

    Args:
        img: BGR image array (as returned by cv2.imread / VideoCapture.read).
        face_cascade: cv2.CascadeClassifier used for face detection.
        eye_cascade: cv2.CascadeClassifier used for eye detection.
        scaleFactor: optional detectMultiScale scaleFactor (OpenCV default: 1.1).
        minNeighbors: optional detectMultiScale minNeighbors (OpenCV default: 3).
        minSize: optional minimum feature size; a single int applied to both
            width and height.

    Returns:
        (detected, faces, eyes): detected is True when at least one face was
        found; faces and eyes are lists of [x, y, w, h] ints in full-image
        coordinates (eye rects are translated from face-relative coords).
    """
    detected = False
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Forward only explicitly-given options so OpenCV's own defaults apply
    # for anything left as None.
    params = {}
    if scaleFactor:
        params['scaleFactor'] = scaleFactor
    if minNeighbors:
        params['minNeighbors'] = minNeighbors
    if minSize:
        params['minSize'] = (minSize, minSize)
    faces = face_cascade.detectMultiScale(gray, **params)
    ar_faces = []
    ar_eyes = []
    for (x, y, w, h) in faces:
        detected = True
        ar_faces.append([int(x), int(y), int(w), int(h)])
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        # Search for eyes only inside the detected face region.
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            # Translate eye coordinates back to full-image space.
            ar_eyes.append([int(ex) + int(x), int(ey) + int(y), int(ew), int(eh)])
    return detected, ar_faces, ar_eyes
#!/usr/bin/env python
"""Draw one or more rectangles on an image and save the result.

Each --rect takes a comma-separated x,y,w,h plus an optional color and
optional line thickness, falling back to the --color / --thickness defaults.
"""
from __future__ import print_function
import cv2, os, json, sys
from argparse import ArgumentParser
from analyze_image import analyze_image

p = ArgumentParser("draw feature rectangles")
p.add_argument("input", help="image to annotate")
p.add_argument("output", help="image to save to")
p.add_argument("--thickness", type=int, default=1, help="default line thickness, e.g. 2")
p.add_argument("--color", default="0,0,255", help="default draw color as rgb triple, e.g. 0,0,255 (blue)")
p.add_argument("--rect", action="append", nargs="+", default=[], help="rect with optional color and thickness, ex: --rect 0,0,100,100 0,0,255 2")
args = p.parse_args()
print(args)

im = cv2.imread(args.input)
for spec in args.rect:
    if len(spec) == 1:
        # rect only: fall back to default color and thickness
        rect = spec[0]
        color = args.color
        thickness = args.thickness
    elif len(spec) == 2:
        rect, color = spec
        thickness = args.thickness
    else:
        rect, color, thickness = spec[:3]
        thickness = int(thickness)
    x, y, w, h = [int(d) for d in rect.split(",")]
    # colors are given as r,g,b but OpenCV expects b,g,r ordering
    r, g, b = [int(d) for d in color.split(",")]
    cv2.rectangle(im, (x, y), (x + w, y + h), (b, g, r), thickness)
cv2.imwrite(args.output, im)
#!/bin/bash
# Draw an ImageMagick shape primitive onto an image, then letterbox the
# result to a fixed output size.
# Usage: SRC SHAPE COLOR OUTPUT
#   SHAPE is an ImageMagick -draw primitive, e.g. "rectangle 10,10 100,100"
src=$1
shape=$2
color=$3
output=$4
# http://imagemagick.org/Usage/crop/#border
size=1824x984
# Quote all user-supplied values so colors/shapes containing spaces or
# shell metacharacters survive word splitting.
convert "$src" -fill none -stroke "$color" -strokewidth 3 -draw "$shape" "$output"
# Fit into $size, centered, padding to the exact extent.
convert "$output" -resize "$size" -gravity center -extent "$size" "$output"
#!/bin/bash
# Transcode the input video ($1) to H.264 MP4 at 1000 kbit/s, writing to $2.
ffmpeg -i "$1" -c:v libx264 -b:v 1000k "$2"
# && rm "$1"
#!/bin/bash
# Transcode $1 to VP8 WebM at 1000 kbit/s as $2; delete the source only on success.
ffmpeg -i "$1" -c:v libvpx -b:v 1000k "$2" && rm "$1"
#!/bin/bash
# Clean up after a capture cycle. Takes the three generated GIF paths as
# arguments; currently only the frames working directory is actually removed
# (the GIF deletion/upload steps are intentionally disabled below).
cameragif=$1
archivegif=$2
collagegif=$3
rm -f frames/*
# rm "$cameragif" # actually this *can not* happen as it needs to be served...
# scp "$archivegif" "$collagegif" aa:/var/www/vhosts/sicv.activearchives.org/httpdocs/vitrine
#!/bin/bash
# Crop a region out of SRC and composite it onto DEST at a given position.
# Usage: SRC SRCRECT DEST DESTRECT OUTPUT
#   SRCRECT / DESTRECT are ImageMagick geometry strings, e.g. 100x100+10+10
src=$1
srcrect=$2
dest=$3
destrect=$4
output=$5
# NOTE(review): this creates a literal directory named "output", not the
# directory of "$output" — looks suspicious; confirm intent before changing.
mkdir -p output
# http://imagemagick.org/Usage/compose/#over
convert "$src" -crop "$srcrect" +repage feature.tmp.png
convert "$dest" feature.tmp.png -geometry "$destrect" -composite "$output"
rm feature.tmp.png
#!/usr/bin/env python
"""Play a slideshow of images with cascade-feature overlays.

Either displays live in a window or, with --output, writes the sequence to
a movie file, holding frames longer when features were detected.
"""
from __future__ import print_function
import cv2, os, datetime, sys
from argparse import ArgumentParser
from time import sleep
from analyze_image import analyze_image

p = ArgumentParser()
p.add_argument("input", nargs="+", default=[])
p.add_argument("--output", default=None, help="path to save movie, default: None (show live)")
p.add_argument("--cascade", default="./haarcascades/haarcascade_frontalface_default.xml", help="location of the cascade XML file to use, default: ./haarcascades/haarcascade_frontalface_default.xml")
p.add_argument("--cascade2", default="./haarcascades/haarcascade_eye.xml", help="optional secondary cascade xml")
p.add_argument("--scaleFactor", type=float, default=None, help="scaleFactor, float, default: None (1.1)")
p.add_argument("--minNeighbors", type=int, default=None, help="minNeighbors, int, default: None (3)")
p.add_argument("--minSize", type=int, default=None, help="minSize, int, default: None")
p.add_argument("--drawColor", default="255,0,0", help="color in b,g,r format, e.g. default: 255,0,0 (blue)")
p.add_argument("--drawWidth", type=int, default=2, help="draw line width, default: 2")
p.add_argument("--drawColor2", default="0,255,0")
p.add_argument("--drawWidth2", type=int, default=2)
p.add_argument("--delayFeature", type=int, default=1000)
p.add_argument("--delayDefault", type=int, default=10)
p.add_argument("--width", type=int, default=640, help="pre-detect resize width")
p.add_argument("--height", type=int, default=480, help="pre-detect resize height")
p.add_argument("--background", default="255,255,255")
p.add_argument("--nopad", default=False, action="store_true")
p.add_argument("--fourcc", default="XVID", help="MJPG,mp4v,XVID")
p.add_argument("--framerate", type=float, default=25, help="output frame rate")
args = p.parse_args()

fourcc = None
if args.output:
    # OpenCV 2.x exposes CV_FOURCC under cv2.cv; OpenCV 3+ uses VideoWriter_fourcc.
    try:
        fourcc = cv2.cv.CV_FOURCC(*args.fourcc)
    except AttributeError:
        fourcc = cv2.VideoWriter_fourcc(*args.fourcc)

cascade = cv2.CascadeClassifier(os.path.expanduser(args.cascade))
cascade2 = None
if args.cascade2:
    cascade2 = cv2.CascadeClassifier(os.path.expanduser(args.cascade2))

# b,g,r colors parsed from the comma-separated option strings
color = tuple([int(x) for x in args.drawColor.split(",")])
color2 = tuple([int(x) for x in args.drawColor2.split(",")])
background = tuple([int(x) for x in args.background.split(",")])
def detectMultiScale(cascade, img, scaleFactor=None, minNeighbors=None, minSize=None):
    """Run cascade.detectMultiScale, forwarding only explicitly-set options.

    Options left as None fall through to OpenCV's built-in defaults.
    minSize is a single int used for both width and height.
    """
    params = {}
    if scaleFactor:
        params['scaleFactor'] = scaleFactor
    if minNeighbors:
        params['minNeighbors'] = minNeighbors
    if minSize:
        params['minSize'] = (minSize, minSize)
    return cascade.detectMultiScale(img, **params)
def resize(img, w, h, interpolation=cv2.INTER_CUBIC):
    """Shrink img to fit within w x h, preserving aspect ratio.

    Images already within bounds are returned unchanged (never upscaled).
    """
    ih, iw, ic = img.shape
    if (ih > h) or (iw > w):
        # try fitting the width first
        sw = w
        sh = int(sw * (float(ih) / iw))
        if sh > h:
            # result too tall: fit the height instead
            sh = h
            sw = int(sh * (float(iw) / ih))
        return cv2.resize(img, (sw, sh), interpolation=interpolation)
    return img
def pad(img, w, h, color=(0, 0, 0)):
    """Center img on a w x h canvas, filling the border with color.

    Uses integer (floor) division for the border sizes: on Python 3 the
    original `/` produced floats, which cv2.copyMakeBorder rejects.
    """
    ih, iw, ic = img.shape
    top = (h - ih) // 2
    bottom = h - ih - top
    left = (w - iw) // 2
    right = w - iw - left
    return cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
out = None
count = 0
for i in args.input:
    frame = cv2.imread(i)
    frame = resize(frame, args.width, args.height)
    if not args.nopad:
        frame = pad(frame, args.width, args.height, background)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    features = detectMultiScale(cascade, gray, scaleFactor=args.scaleFactor, minNeighbors=args.minNeighbors, minSize=args.minSize)
    for (x, y, w, h) in features:
        count += 1
        cv2.rectangle(frame, (x, y), (x + w, y + h), color, args.drawWidth)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        # secondary cascade (e.g. eyes), searched only inside the feature
        if cascade2:
            features2 = cascade2.detectMultiScale(roi_gray)
            for (ex, ey, ew, eh) in features2:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), color2, args.drawWidth2)
    # frames with detections linger longer (on screen / in the movie)
    delay = args.delayFeature if len(features) else args.delayDefault
    if args.output:
        # VIDEO OUTPUT: open the writer lazily on the first frame
        if out is None:
            out = cv2.VideoWriter()
            out.open(args.output, fourcc, args.framerate, (args.width, args.height))
        # emulate the display delay by duplicating the frame in the movie
        framecount = max(1, int(args.framerate * (delay / 1000.0)))
        for f in range(framecount):
            out.write(frame)
        print("{0}".format(i), file=sys.stderr)
    else:
        # WINDOW DISPLAY
        cv2.imshow('display', frame)
        if cv2.waitKey(delay) & 0xFF == ord('q'):
            break
if out:
    out.release()
else:
    cv2.destroyAllWindows()
print("Displayed {0} faces".format(count), file=sys.stderr)
#!/usr/bin/env python
"""Detect faces and eyes in a batch of images, emitting results as JSON.

Image paths come from positional args or, when none are given, stdin
(one per line). Progress goes to stderr; the JSON list goes to stdout.
"""
from __future__ import print_function
import cv2, os, json, sys
from argparse import ArgumentParser
from analyze_image import analyze_image

p = ArgumentParser("")
p.add_argument("--cascades", default="./haarcascades", help="location of the cascade XML files, default: ./haarcascades")
p.add_argument("--scaleFactor", type=float, default=None, help="scaleFactor, float, default: None (1.1)")
p.add_argument("--minNeighbors", type=int, default=None, help="minNeighbors, int, default: None (3)")
p.add_argument("--minSize", type=int, default=None, help="minSize, int")
p.add_argument("input", nargs="*", default=[])
args = p.parse_args()

tpath = os.path.expanduser(args.cascades)
face_cascade = cv2.CascadeClassifier(os.path.join(tpath, 'haarcascade_frontalface_default.xml'))
eye_cascade = cv2.CascadeClassifier(os.path.join(tpath, 'haarcascade_eye.xml'))

output = []
inputs = args.input
numinput = len(inputs)
if numinput == 0:
    # no positional args: read filenames from stdin, one per line
    inputs = [x.strip() for x in sys.stdin.read().splitlines() if x.strip()]
    numinput = len(inputs)
for i, path in enumerate(inputs):
    print("{0}/{1} {2}".format(i, numinput, path), file=sys.stderr)
    img = cv2.imread(path)
    detection, faces, eyes = analyze_image(img, face_cascade, eye_cascade, args.scaleFactor, args.minNeighbors, args.minSize)
    item = {'path': path}
    if detection:
        item['faces'] = faces
        item['eyes'] = eyes
    output.append(item)
print(json.dumps(output))
#!/usr/bin/env python
"""Scan images for cascade features described by a JSON settings file.

Outputs a JSON list of {path, features} records to stdout.
"""
from __future__ import print_function
import cv2, os, json, sys
from argparse import ArgumentParser
from analyze_image import analyze_image

p = ArgumentParser("")
p.add_argument("--cascades", default="./haarcascades", help="location of the cascade XML files, default: ./haarcascades")
p.add_argument("--settings", default="settings.json", help="settings file, default settings.json")
p.add_argument("--resize", default=False, action="store_true")
p.add_argument("--width", type=int, default=640, help="pre-detect resize width")
p.add_argument("--height", type=int, default=480, help="pre-detect resize height")
p.add_argument("--background", default="255,255,255")
p.add_argument("--pad", default=False, action="store_true")
p.add_argument("input", nargs="*", default=[])
args = p.parse_args()

background = tuple([int(x) for x in args.background.split(",")])


def resize(img, w, h, interpolation=cv2.INTER_CUBIC):
    """Shrink img to fit within w x h, preserving aspect ratio (never upscales)."""
    ih, iw, ic = img.shape
    if (ih > h) or (iw > w):
        # try fitting the width first
        sw = w
        sh = int(sw * (float(ih) / iw))
        if sh > h:
            # result too tall: fit the height instead
            sh = h
            sw = int(sh * (float(iw) / ih))
        return cv2.resize(img, (sw, sh), interpolation=interpolation)
    return img


def pad(img, w, h, color=(0, 0, 0)):
    """Center img on a w x h canvas, filling the border with color.

    Integer (floor) division keeps border sizes ints on Python 3, where
    `/` would produce floats that cv2.copyMakeBorder rejects.
    """
    ih, iw, ic = img.shape
    top = (h - ih) // 2
    bottom = h - ih - top
    left = (w - iw) // 2
    right = w - iw - left
    return cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)


with open(args.settings) as f:
    settings = json.load(f)

cascades_path = os.path.expanduser(args.cascades)
def init_features(f):
    """Recursively load the CascadeClassifier for each feature definition.

    Mutates each feature dict in place: stores the loaded classifier under
    '_cascade' and guarantees an 'args' dict exists. Lists are processed
    element-wise (a list entry represents a primary/secondary cascade pair).
    """
    if isinstance(f, list):
        return [init_features(x) for x in f]
    f['_cascade'] = cv2.CascadeClassifier(os.path.join(cascades_path, f["cascade"]))
    if 'args' not in f:
        f['args'] = {}
    return f
def ensure_list(f):
    """Return f unchanged if it is already a list, otherwise wrap it in one."""
    return f if isinstance(f, list) else [f]
def tuple_lists(params):
    """Convert list values in a kwargs dict to tuples, in place.

    JSON has no tuple type, but OpenCV options such as minSize must be
    tuples: {"minSize": [30, 30]} -> {"minSize": (30, 30)}. Returns params.
    """
    for key, value in params.items():
        if isinstance(value, list):
            params[key] = tuple(value)
    return params


def process_features(grayimg, features):
    """Run each feature's cascade over grayimg, collecting rects by name.

    Each item of features is either a single feature dict or a
    [primary, secondary] pair; the secondary cascade is searched only inside
    each primary detection and its rects are translated back to full-image
    coordinates.

    Returns: {feature_name: [[x, y, w, h], ...], ...}
    """
    ret = {}
    for entry in features:
        if isinstance(entry, list):
            f1, f2 = entry
        else:
            f1, f2 = entry, None
        rects = ret.setdefault(f1['name'], [])
        if f2:
            rects2 = ret.setdefault(f2['name'], [])
        for (x, y, w, h) in f1['_cascade'].detectMultiScale(grayimg, **tuple_lists(f1['args'])):
            rects.append([int(x), int(y), int(w), int(h)])
            if f2:
                # BUGFIX: slice each secondary search region from the FULL
                # image. The old code re-sliced the previously narrowed roi
                # (`roi = roi[...]`), so every primary detection after the
                # first searched the wrong pixels.
                roi = grayimg[y:y + h, x:x + w]
                for (x2, y2, w2, h2) in f2['_cascade'].detectMultiScale(roi, **tuple_lists(f2['args'])):
                    rects2.append([int(x + x2), int(y + y2), int(w2), int(h2)])
    return ret
output = []
inputs = args.input
numinput = len(inputs)
if numinput == 0:
    # no positional args: read filenames from stdin, one per line
    inputs = [x.strip() for x in sys.stdin.read().splitlines() if x.strip()]
    numinput = len(inputs)

features = init_features(settings['features'])
for i, path in enumerate(inputs):
    print("{0}/{1} {2}".format(i, numinput, path), file=sys.stderr)
    img = cv2.imread(path)
    if args.resize:
        img = resize(img, args.width, args.height)
    if args.pad:
        img = pad(img, args.width, args.height, background)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    output.append({"path": path, "features": process_features(gray, features)})
# JSON result on stdout (progress went to stderr)
print(json.dumps(output, indent=2))
#!/usr/bin/env python
"""Concatenate JSON list files, optionally shuffled, to stdout."""
from __future__ import print_function
import json
import sys
from argparse import ArgumentParser

p = ArgumentParser("join json lists")
p.add_argument("--shuffle", action="store_true", default=False)
p.add_argument("input", nargs="+", default=[])
args = p.parse_args()

od = []
for f in args.input:
    with open(f) as fin:
        od.extend(json.load(fin))
print("Combining {0} sources, {1} items total".format(len(args.input), len(od)), file=sys.stderr)
if args.shuffle:
    from random import shuffle
    shuffle(od)
print(json.dumps(od, indent=2))
#!/usr/bin/env python
"""Live camera preview with face/eye detection overlays (press 'q' to quit)."""
from __future__ import print_function
import cv2, os, datetime, sys
from argparse import ArgumentParser
from time import sleep
from analyze_image import analyze_image

p = ArgumentParser()
p.add_argument("--faces", type=int, default=1, help="number of consecutive faces to require")
p.add_argument("--camera", type=int, default=0, help="camera number, default: 0")
p.add_argument("--cascades", default="./haarcascades", help="location of the cascade XML files, default: ./haarcascades")
p.add_argument("--scaleFactor", type=float, default=None, help="scaleFactor, float, default: None (1.1)")
p.add_argument("--minNeighbors", type=int, default=None, help="minNeighbors, int, default: None (3)")
args = p.parse_args()

tpath = os.path.expanduser(args.cascades)
face_cascade = cv2.CascadeClassifier(os.path.join(tpath, 'haarcascade_frontalface_default.xml'))
eye_cascade = cv2.CascadeClassifier(os.path.join(tpath, 'haarcascade_eye.xml'))

cap = cv2.VideoCapture(args.camera)
face_count = 0
detect = True
while True:
    ret, frame = cap.read()
    # cap.read() yields (False, None) on failure. Use `is None`: comparing
    # a real frame array with == would be an elementwise numpy comparison.
    if frame is None:
        print("ERROR CAPTURING FRAME. CHECK CAMERA CONNECTION AND SETTINGS", file=sys.stderr)
        sys.exit(0)
    if detect:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.05, minSize=(30, 30))
        for (x, y, w, h) in faces:
            print((x, y, w, h))
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    cv2.imshow('display', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
#!/usr/bin/env python
"""Assemble a movie from individual frame images using OpenCV's VideoWriter."""
from argparse import ArgumentParser
import cv2

p = ArgumentParser("make a movie from frames, using pyopencv")
p.add_argument("frame", nargs="+")
p.add_argument("--output", default="output.avi")
p.add_argument("--framerate", type=float, default=25, help="output frame rate")
p.add_argument("--reverseloop", default=False, action="store_true", help="repeat frames in reverse order")
p.add_argument("--repeatlast", default=False, action="store_true", help="repeat the last frame (for slow frame rates this seems important")
p.add_argument("--fourcc", default="XVID", help="MJPG,mp4v,XVID")
args = p.parse_args()

# OpenCV 2.x exposes CV_FOURCC under cv2.cv; OpenCV 3+ uses VideoWriter_fourcc.
try:
    fourcc = cv2.cv.CV_FOURCC(*args.fourcc)
except AttributeError:
    fourcc = cv2.VideoWriter_fourcc(*args.fourcc)

frames = args.frame
if args.reverseloop:
    # append the frames again in reverse (minus the last) for a ping-pong loop
    for f in reversed(args.frame[:-1]):
        frames.append(f)

out = None
for f in frames:
    frame = cv2.imread(f)
    h, w, c = frame.shape
    if out is None:
        # open the writer lazily, sized from the first frame
        out = cv2.VideoWriter()
        out.open(args.output, fourcc, args.framerate, (w, h))
    out.write(frame)
    if args.repeatlast:
        out.write(frame)
out.release()
#!/bin/bash
# Kill any running mplayer, then play $1 fullscreen in an endless loop.
# cvlc --loop $1
killall -s 9 mplayer
mplayer -fs -loop 0 "$1"
#!/bin/bash
# Kill any running mplayer, then play $1 fullscreen (scaled to fit) in an
# endless loop.
# cvlc -loop $1
killall -s 9 mplayer
# wget $1 -o tmp.webm
mplayer -fs -zoom -loop 0 "$1"
#!/usr/bin/env python
"""Capture frames from a camera into JPEGs, printing each saved path to stdout."""
from __future__ import print_function
import cv2, os, datetime, sys
from argparse import ArgumentParser
from time import sleep

p = ArgumentParser()
p.add_argument("--frames", type=int, default=30, help="number of frames to record")
p.add_argument("--path", default="frames", help="output folder")
p.add_argument("--camera", type=int, default=0, help="camera number, default: 0")
args = p.parse_args()

try:
    os.mkdir(args.path)
except OSError:
    # output folder already exists
    pass

cap = cv2.VideoCapture(args.camera)
frameno = 0
# one timestamp per capture session; frameno disambiguates individual frames
dt = datetime.datetime.now()
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Use `is None`: == against a real frame array is an elementwise
    # numpy comparison, not a scalar truth value.
    if frame is None:
        print("ERROR CAPTURING FRAME. CHECK CAMERA CONNECTION AND SETTINGS", file=sys.stderr)
        sys.exit(0)
    tstamp = dt.strftime("%Y%m%d_%H%M%S")
    outpath = os.path.join(args.path, "frame{0}_{1:04d}.jpg".format(tstamp, frameno))
    cv2.imwrite(outpath, frame)
    if (not os.path.exists(outpath)) or os.path.getsize(outpath) == 0:
        # empty/missing file means the frame was bad: discard, wait, retry
        print("Bad frame (camera connected?)", file=sys.stderr)
        try:
            os.remove(outpath)
        except OSError:
            pass
        sleep(0.1)
    else:
        sys.stdout.write(outpath + "\n")
        sys.stdout.flush()
        frameno += 1
        if frameno >= args.frames:
            break
cap.release()
#!/bin/bash
# Set the desktop wallpaper to $1 (scaled to fit) without writing ~/.fehbg.
# Quote the path so filenames with spaces work.
feh --no-fehbg --bg-max "$1"
#!/usr/bin/env python
"""Summarize per-feature detection counts across JSON result files."""
from __future__ import print_function
import json
import sys
from argparse import ArgumentParser

p = ArgumentParser("join json lists")
p.add_argument("input", nargs="+", default=[])
args = p.parse_args()

od = []
for f in args.input:
    with open(f) as fin:
        od.extend(json.load(fin))
print("Combining {0} sources, {1} items total".format(len(args.input), len(od)), file=sys.stderr)

# tally rect counts per feature name across all items
counts = {}
for item in od:
    for name, rects in item['features'].items():
        counts[name] = counts.get(name, 0) + len(rects)
print(", ".join(["{0} {1}".format(counts[name], name) for name in sorted(counts)]))
#!/usr/bin/env python
"""Watch the camera for faces and record frames (setup portion).

The capture loop continues beyond this section; this block only parses
options, loads the cascades, prepares the output folder and opens the camera.
"""
from __future__ import print_function
import cv2, os, datetime, sys
from argparse import ArgumentParser
from time import sleep
from analyze_image import analyze_image

p = ArgumentParser()
p.add_argument("--faces", type=int, default=1, help="number of consecutive faces to require")
p.add_argument("--frames", type=int, default=30, help="number of frames to record")
p.add_argument("--path", default="frames", help="output folder")
p.add_argument("--camera", type=int, default=0, help="camera number, default: 0")
p.add_argument("--cascades", default="./haarcascades", help="location of the cascade XML files, default: ./haarcascades")
p.add_argument("--scaleFactor", type=float, default=None, help="scaleFactor, float, default: None (1.1)")
p.add_argument("--minNeighbors", type=int, default=None, help="minNeighbors, int, default: None (3)")
args = p.parse_args()

tpath = os.path.expanduser(args.cascades)
face_cascade = cv2.CascadeClassifier(os.path.join(tpath, 'haarcascade_frontalface_default.xml'))
eye_cascade = cv2.CascadeClassifier(os.path.join(tpath, 'haarcascade_eye.xml'))

try:
    os.mkdir(args.path)
except OSError:
    # output folder already exists
    pass

cap = cv2.VideoCapture(args.camera)
face_count = 0
while(True):
ret, frame = cap.read()
if frame == None:
print ("ERROR CAPTURING FRAME. CHECK CAMERA CONNECTION AND SETTINGS", file=sys.stderr)
sys.exit(0)
faces_p, faces, eyes = analyze_image(frame, face_cascade, eye_cascade, args.scaleFactor, args.minNeighbors)