Commit 71709299 authored by Michael Murtaugh

Merge branch 'master' of gitlab.constantvzw.org:SICV/recognition_machine

parents c87967b4 6961c96f
Pipeline #240 failed
Situated Notes
========================
Why the project...
the para-archive ... for a missing archive...
create archives as they should be ...
Entice / show institutions / others what could happen...
The normative nature of models...
Colonial example of the photographed fitting the model / performing for the model
What does the software do to the collection?
(the dataset as an archive, a problematic archive)
(examples from ?: Trevor Paglen, Hito Steyerl)
21C19C_Procedures For Anthropometric Image Reversal: Strong Looks
Antje Van Wichelen (2018)

Facial Weaponization Communiqué: Fag Face
Zach Blas (2011)
http://zachblas.info/works/facial-weaponization-suite/
https://vimeo.com/57882032

21C19C_Procedures For Anthropometric Image Reversal: Debate
Antje Van Wichelen (2018)
https://vimeo.com/289833772

MakeUp Tutorial HOW TO HIDE FROM CAMERAS
Jillian Mayer (2013)
https://youtu.be/kGGnnp43uNM

The Coded Gaze: Unmasking Algorithmic Bias
Joy Buolamwini (2016)
https://youtu.be/162VzSzzoPs
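The machine plays a short tone when it takes a photo (see "add a photo tone" in the todo list below). The beep.wav that tone() plays via aplay can be generated with sox: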
sox -c1 -r8000 -n beep.wav synth 0.30 sine 440 vol 0.35
AGE_CHOICES = (
    ('0,5', 'infant'),
    ('5,12', 'child'),
    ('12,20', 'adolescent'),
    ('20,35', 'young'),
    ('35,50', 'middle-aged'),
    ('50,65', 'older'),
    ('65,110', 'elderly')
)

def map_age (n):
    # map a numeric age to its label, e.g. map_age(27.3) -> 'young'
    for val, label in AGE_CHOICES:
        low, high = [float(x) for x in val.split(",")]
        # print ("age_map {0} {1} {2} {3}".format(x, low, high, val, label))
        if n >= low and n < high:
            return label

def get_emotions_sorted (d):
    # return (value, label) pairs, strongest emotion first
    ret = []
    ret.append((d['emotion']['angry'], "angry"))
    ret.append((d['emotion']['disgust'], "disgust"))
    ret.append((d['emotion']['fear'], "fear"))
    ret.append((d['emotion']['happy'], "happy"))
    ret.append((d['emotion']['sad'], "sad"))
    ret.append((d['emotion']['neutral'], "neutral"))
    ret.append((d['emotion']['surprise'], "surprise"))
    ret.sort(reverse=True)
    return ret

def caption(d, max_length=32):
    # build the printed caption for a face dict; lines are truncated to
    # max_length characters (the receipt printer is 32 columns wide)
    lines = []
    if 'gender' in d:
        if d['gender']['label'] == "male":
            n = "man"
        else:
            n = "woman"
        if 'emotion' in d:
            elabel = d['emotion']['label']+" "
        else:
            elabel = ""
        title = "\"{0}{1} {2}\"".format(elabel, map_age(d['age']), n)
        lines.append(title.upper())
        if d['gender']['male'] != None:
            lines.append("male({0:0.01f}%)/female({1:0.01f}%)".format(d['gender']['male']*100.0, d['gender']['female']*100.0))
    if 'age' in d:
        lines.append("age {0:0.1f}".format(d['age']))
    if 'emotion' in d:
        es = get_emotions_sorted(d)
        lines.append("/".join(["{0}({1:0.01f}%)".format(label, value*100.0) for value, label in es[:2]]))
    if max_length:
        lines = [x[:max_length] for x in lines]
    return "\n".join(lines)
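A minimal usage sketch of caption() follows; the face dict is invented for illustration (real ones come from get_emotions/get_genderage in common.py):

    face = {
        'age': 27.3,
        'gender': {'label': 'male', 'male': 0.81, 'female': 0.19},
        'emotion': {'label': 'happy', 'angry': 0.01, 'disgust': 0.01,
                    'fear': 0.02, 'happy': 0.78, 'sad': 0.05,
                    'neutral': 0.10, 'surprise': 0.03},
    }
    print(caption(face))
    # "HAPPY YOUNG MAN"
    # male(81.0%)/female(19.0%)
    # age 27.3
    # happy(78.0%)/neutral(10.0%)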
@@ -8,7 +8,7 @@ import imutils
 # from time import sleep
 # from eventlet.greenthread import sleep
 from imutils import face_utils
-import dlib
+# import dlib
 import numpy as np
 from time import sleep
@@ -82,7 +82,7 @@ def take_photo(camera, rotate, width, height, sleept=2.0, iso=800):
     stream.release()
     return current_image, current_image_bgr

-def get_faces (face_cascade, current_image, scaleFactor=None, minNeighbors=None, minSize=100):
+def get_faces (face_cascade, current_image, scaleFactor=None, minNeighbors=None, minSize=100, use_dlib=True):
     args = dict()
     if scaleFactor:
         args['scaleFactor'] = scaleFactor
@@ -95,7 +95,11 @@ def get_faces (face_cascade, current_image, scaleFactor=None, minNeighbors=None,
     ret=[]
     for rect in faces:
         x, y, w, h = rect
-        ret.append({'x': int(x), 'y': int(y), 'width': int(w), 'height': int(h), 'rect': dlib.rectangle(x, y, x+w, y+h)})
+        d = {'x': int(x), 'y': int(y), 'width': int(w), 'height': int(h)}
+        if use_dlib:
+            import dlib
+            d['rect'] = dlib.rectangle(x, y, x+w, y+h)
+        ret.append(d)
     return ret

 # def get_landmarks(shape_predictor, current_image, current_faces):
@@ -128,8 +132,8 @@ def get_emotions(emotion_classifier, current_image, current_faces):
     dd = []
     for face in current_faces:
-        rect = face['rect']
-        face_coordinates = (rect.left(), rect.top(), rect.width(), rect.height())
+        # rect = face['rect']
+        face_coordinates = (face['x'], face['y'], face['width'], face['height'])
         x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
         gray_face = current_image[y1:y2, x1:x2]
         try:
@@ -171,8 +175,8 @@ def get_genderage(genderage_classifier, current_image_bgr, current_faces):
     if len(detected) > 0:
         for i, face in enumerate(detected):
-            d = face['rect']
-            x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, d.bottom() + 1, d.width(), d.height()
+            # d = face['rect']
+            x1, y1, x2, y2, w, h = face['x'], face['y'], face['x']+face['width'] + 1, face['y']+face['height'] + 1, face['width'], face['height']
             xw1 = max(int(x1 - MARGIN * w), 0)
             yw1 = max(int(y1 - MARGIN * h), 0)
             xw2 = min(int(x2 + MARGIN * w), img_w - 1)
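Together these changes make dlib optional: callers that only need plain bounding boxes can skip it entirely. A minimal sketch of the new call path (variable names assumed, cascade loaded as elsewhere in this repo):

    faces = get_faces(face_cascade, current_image_bgr, use_dlib=False)
    for f in faces:
        # plain ints; no dlib.rectangle is created
        x, y, w, h = f['x'], f['y'], f['width'], f['height']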
notes
========
media.navigator.permission.disabled
set to true (in Firefox's about:config)... no camera permission question!
WSB_WIFI/WoRkSpaCE17
// thinkpad:Tuva
tasks / todo
=========
x Institutions get text field (email Antje?)
x printing!
x fix labels
x display faces only as comments (delete original images ?!)
x camera exposure controls
x center qrcode (386pxls)
D https on recognition machine (Let's Encrypt??)
x dataset faces + labels
x BACKUP IMAGE PI !!!!
x mouse for laptop
x Receipt paper ?!
C 5-6 m usb cable
x stick with live debian ?!
x add a photo tone
* New items ?!
x Create Items for all Rephotos
x fix double qr in title page
* <!> add archive links (make easy to do so! --- edit link in interface)
* Detected Faces in interface
leave a message...
x white background transparent...
* Leave comments
* link to original images when present
* delete face interface
VIDEO PLAYER ... give button to move to next one ....
RM... add sensor for presence of someone ???
... or a button to take the picture... ???? (rough sketch below)
BUTTON ...
POSITIONING ...
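For the button idea above, a rough sketch of what that could look like on the Pi (pin number, wiring, and the choice of RPi.GPIO are assumptions; nothing here is decided):

    import RPi.GPIO as GPIO

    GPIO.setmode(GPIO.BCM)
    # push button wired between GPIO 17 and ground, internal pull-up enabled
    GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)

    def wait_for_button():
        # block until the button is pressed (falling edge with pull-up wiring)
        GPIO.wait_for_edge(17, GPIO.FALLING)

The capture loop could then call wait_for_button() instead of sleeping between photos.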
from __future__ import print_function
from imutils.video import VideoStream
import cv2, os, sys, json
import argparse
import base64
import imutils
# from time import sleep
# from eventlet.greenthread import sleep
from imutils import face_utils
import dlib
import numpy as np
import json
from common import *
from printing import *
# import subprocess
from PIL import Image, ImageOps
def fit_size (im, box):
    # scale im to fit inside box, preserving aspect ratio
    # e.g. a 1000x750 image in a (400, 400) box -> (400, 300)
    bw, bh = box
    iw, ih = im.size
    w = bw
    h = int((ih / float(iw)) * w)  # float() so this also works under Python 2
    if h <= bh:
        return (w, h)
    h = bh
    w = int((iw / float(ih)) * h)
    return (w, h)
# if __name__ == "__main__":
ap = argparse.ArgumentParser("")
ap.add_argument("--emotion", default=False, action="store_true")
ap.add_argument("--genderage", default=False, action="store_true")
ap.add_argument("--models", default="models")
ap.add_argument("image", nargs="+")
args = ap.parse_args()
def p (msg=""):
    if msg.strip():
        os.system("espeak \"{0}\" 2> /dev/null".format(msg))
    print (msg)
if args.emotion:
    p("loading emotion classifier")
    # print ("Loading emotion classifier", file=sys.stderr)
    from keras.models import load_model
    emotion_classifier = load_model(os.path.join(args.models, "emotion_model.hdf5"))

if args.genderage:
    p ("loading genderage classifier")
    # from keras.models import load_model
    from keras.utils.data_utils import get_file
    weight_file = os.path.join(args.models, "weights.28-3.73.hdf5")
    from wide_resnet import WideResNet
    genderage_classifier = WideResNet(IMG_SIZE, depth=DEPTH, k=K)()
    genderage_classifier.load_weights(weight_file)

p("all loaded")
AGE_CHOICES = (
    ('0,5', 'infant'),
    ('5,12', 'child'),
    ('12,20', 'adolescent'),
    ('20,35', 'young'),
    ('35,50', 'middle-aged'),
    ('50,65', 'older'),
    ('65,110', 'elderly')
)

def map_age (n):
    for val, label in AGE_CHOICES:
        low, high = [float(x) for x in val.split(",")]
        # print ("age_map {0} {1} {2} {3}".format(x, low, high, val, label))
        if n >= low and n < high:
            return label

def get_emotions_sorted (d):
    ret = []
    ret.append((d['emotion']['angry'], "angry"))
    ret.append((d['emotion']['disgust'], "disgust"))
    ret.append((d['emotion']['fear'], "fear"))
    ret.append((d['emotion']['happy'], "happy"))
    ret.append((d['emotion']['sad'], "sad"))
    ret.append((d['emotion']['neutral'], "neutral"))
    ret.append((d['emotion']['surprise'], "surprise"))
    ret.sort(reverse=True)
    return ret

def caption(d, max_length=32):
    lines = []
    if 'gender' in d:
        if d['gender']['label'] == "male":
            n = "man"
        else:
            n = "woman"
        if 'emotion' in d:
            elabel = d['emotion']['label'] + " "
        else:
            elabel = ""
        title = "\"{0}{1} {2}\"".format(elabel, map_age(d['age']), n)
        lines.append(title.upper())
        if d['gender']['male'] != None:
            lines.append("male({0:0.01f}%)/female({1:0.01f}%)".format(d['gender']['male']*100.0, d['gender']['female']*100.0))
    if 'age' in d:
        lines.append("age {0:0.1f}".format(d['age']))
    if 'emotion' in d:
        es = get_emotions_sorted(d)
        lines.append("/".join(["{0}({1:0.01f}%)".format(label, value*100.0) for value, label in es[:2]]))
    if max_length:
        lines = [x[:max_length] for x in lines]
    return "\n".join(lines)
def make_print_image (pin, pout):
    # grayscale, fit to the printer's 400px width, equalize, then 1-bit
    im = Image.open(pin)
    im = im.convert("L")
    rw, rh = fit_size(im, (400, 400))
    im = im.resize((rw, rh), resample=Image.BICUBIC) # Image.BICUBIC
    im = ImageOps.equalize(im)
    im = im.convert("1")
    im.save(pout)
# note: this rebinds p from the speak helper above to the print payload;
# p() is not called again after this point
p = {}
p['items'] = items = []
for n in args.image:
    print (n)
    current_image_bgr = cv2.imread(n)
    h, w, _ = current_image_bgr.shape
    current_image = cv2.cvtColor(current_image_bgr, cv2.COLOR_BGR2GRAY)
    current_faces = []
    # treat the whole image as a single "face"
    face = {}
    face['x'] = 0
    face['y'] = 0
    face['width'] = w
    face['height'] = h
    face['rect'] = dlib.rectangle(0, 0, w, h)
    current_faces.append(face)
    if args.emotion:
        get_emotions(emotion_classifier, current_image, current_faces)
    if args.genderage:
        get_genderage(genderage_classifier, current_image_bgr, current_faces)
    print(face)
    base, ext = os.path.splitext(n)
    nprint = base + ".print.png"
    make_print_image(n, nprint)
    items.append({'image': 'file://{0}'.format(nprint)})
    items.append({'text': caption(face)})
    items.append({'text': ''})

print (json.dumps(p, indent=2))
print_photostrip(p)
# print (face)
@@ -3,6 +3,8 @@ from escpos.printer import Usb
 from urllib2 import urlopen
 from PIL import Image
 import sys
+from escpos.exceptions import USBNotFoundError

 # 0416:5011
@@ -44,23 +46,26 @@ def centered_text (text, width=32):
     return "\n".join(lines)

 def print_photostrip(data):
-    p = Usb(USB1,USB2,0)
-    for i in data['items']:
-        if "text" in i:
-            p.text(centered_text(i['text'])+"\n")
-            # p.text(i['text']+"\n")
-        elif "image" in i:
-            f = urlopen(i['image'])
-            # path = save_file(f)
-            im = Image.open(f)
-            p.image(im)
-            # p.image(path)
-            # p.image("00001Tris.400x.dither.png")
-        elif "url" in i:
-            # p.text(centered_text(i['url']))
-            p.text(i['url']+"\n")
-    p.cut()
+    try:
+        p = Usb(USB1,USB2,0)
+        for i in data['items']:
+            if "text" in i:
+                p.text(centered_text(i['text'])+"\n")
+                # p.text(i['text']+"\n")
+            elif "image" in i:
+                f = urlopen(i['image'])
+                # path = save_file(f)
+                im = Image.open(f)
+                p.image(im)
+                # p.image(path)
+                # p.image("00001Tris.400x.dither.png")
+            elif "url" in i:
+                # p.text(centered_text(i['url']))
+                p.text(i['url']+"\n")
+        p.cut()
+        return True
+    except USBNotFoundError:
+        return False

 #p.text("Hello World\n")
 # p.image("00001Tris.400x.jpg")
 #p.image("00001Tris.400x.dither.png")
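With USBNotFoundError caught, a caller can now tell that the receipt printer is missing or unplugged instead of crashing. A minimal sketch, using the same payload format as the photostrip script above:

    ok = print_photostrip({'items': [{'text': 'hello'}]})
    if not ok:
        print("receipt printer not found; skipping print")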
@@ -36,6 +36,9 @@ def p (msg=""):
         os.system("espeak \"{0}\" 2> /dev/null".format(msg))
     print (msg)

+def tone ():
+    os.system("aplay beep.wav 2> /dev/null")
+
 # print ("Loading face detector", file=sys.stderr)
 # face_detector = dlib.get_frontal_face_detector()
 face_cascade = cv2.CascadeClassifier(os.path.join(MODELS, 'haarcascade_frontalface_default.xml'))
@@ -62,6 +65,7 @@ from math import floor
 while True:
+    tone()
     current_image, current_image_bgr = take_photo(args.camera, args.rotate, args.width, args.height)
     current_faces = get_faces(face_cascade, current_image_bgr)
     if len(current_faces) > 0:
@@ -123,5 +127,5 @@ while True:
     # print("sleeping")
     if len(current_faces) > 0:
         p()
-    sleep(3.0)
+    sleep(15.0)
from __future__ import print_function
from imutils.video import VideoStream
import cv2, os, sys
import argparse
import base64
import imutils
# from time import sleep
# from eventlet.greenthread import sleep
from imutils import face_utils
import dlib
import numpy as np
from time import sleep
from common import *
# if __name__ == "__main__":
ap = argparse.ArgumentParser("")
ap.add_argument("--camera", type=int, default=-1)
ap.add_argument("--width", type=int, default=640)
ap.add_argument("--height", type=int, default=480)
ap.add_argument("--rotate", type=int, default=0)
ap.add_argument("--vflip", default=False, action="store_true")
ap.add_argument("--models", default="./models", help="location of the cascade XML files, default: ./models")
ap.add_argument("--emotion", default=False, action="store_true")
ap.add_argument("--genderage", default=False, action="store_true")
ap.add_argument("--post", default=None, help="post images to given URL")
args = ap.parse_args()
MODELS = os.path.expanduser(args.models)
# print ("Loading face detector", file=sys.stderr)
# face_detector = dlib.get_frontal_face_detector()
face_cascade = cv2.CascadeClassifier(os.path.join(MODELS, 'haarcascade_frontalface_default.xml'))
# print ("Loading shape predictor")
# shape_predictor = dlib.shape_predictor(os.path.join(MODELS, "shape_predictor_68_face_landmarks.dat"))
if args.emotion:
    print ("Loading emotion classifier", file=sys.stderr)
    from keras.models import load_model
    emotion_classifier = load_model(os.path.join(MODELS, "emotion_model.hdf5"))

if args.genderage:
    print ("Loading genderage classifier", file=sys.stderr)
    # from keras.models import load_model
    from keras.utils.data_utils import get_file
    weight_file = os.path.join(MODELS, "weights.28-3.73.hdf5")
    from wide_resnet import WideResNet
    genderage_classifier = WideResNet(IMG_SIZE, depth=DEPTH, k=K)()
    genderage_classifier.load_weights(weight_file)

print ("All loaded", file=sys.stderr)
import datetime
from math import floor
def p (msg=""):
    if msg.strip():
        os.system("espeak \"{0}\"".format(msg))
    print (msg)
while True:
    current_image, current_image_bgr = take_photo(args.camera, args.rotate, args.width, args.height)
    current_faces = get_faces(face_cascade, current_image_bgr)
    if len(current_faces) > 0:
        n = datetime.datetime.now()
        base = n.strftime("%Y%m%d_%H%M%S")
        print (datetime.datetime.now())
        if len(current_faces) > 1:
            s = 's'
        else:
            s = ''
        p ("{0} face{1}".format(len(current_faces), s))
        if args.emotion:
            get_emotions(emotion_classifier, current_image, current_faces)
        if args.genderage:
            get_genderage(genderage_classifier, current_image_bgr, current_faces)
        for i, face in enumerate(current_faces):
            # cv2.imwrite("image.jpg", current_image)
            fr = face['rect']
            # fr = current_faces[0]['rect']
            x1, y1, x2, y2 = fr.left(), fr.top(), fr.right(), fr.bottom()
            current_face = current_image[y1:y2, x1:x2]
            # cv2.imwrite("face.jpg", current_face)
            # landmarks = get_landmarks(shape_predictor, current_image, current_faces)
            # filename encodes face index, gender, age decade and emotion,
            # e.g. 20181001_120000_f0gma2eha.jpg
            fname = base+"_f{0}".format(i)
            if 'gender' in face:
                gl = face['gender']['label']
                fname += "g"+gl[0]
                gp = face['gender'][gl] * 100.0
                p("Face {0}: {1} ({2:0.02f}%)\nestimated age: {3:0.02f}".format(i+1, gl, gp, face['age']))
                fname += "a{0}".format(int(floor(face['age']/10.0)))
            if 'emotion' in face:
                p("predominant emotion: {0}".format(face['emotion']['label']))
                fname += "e{0}".format(face['emotion']['label'][:2])
            fname += ".jpg"
            print ("Face saved to {0}".format(fname), file=sys.stderr)
            cv2.imwrite(os.path.join("faces", fname), current_face)
            if args.post:
                # POST IT
                print ("posting face to {0}".format(args.post), file=sys.stderr)
                import post
                _, imagedata = cv2.imencode(".jpg", current_image)
                rescode = post.post(imagedata, face, args.post)
                print (u"post result {0}, {1}".format(rescode, rescode.text).encode("utf-8"), file=sys.stderr)
    # print("sleeping")
    if len(current_faces) > 0:
        p()
    sleep(3.0)
from __future__ import print_function
from imutils.video import VideoStream
import cv2, os, sys, json
import argparse
import base64
import imutils
# from time import sleep
# from eventlet.greenthread import sleep
from imutils import face_utils
import numpy as np
from time import sleep
import datetime
from common import *
# from printing import *
# import subprocess
from caption import caption
# if __name__ == "__main__":
ap = argparse.ArgumentParser("")
ap.add_argument("--camera", type=int, default=-1)
ap.add_argument("--width", type=int, default=640)
ap.add_argument("--height", type=int, default=480)
ap.add_argument("--rotate", type=int, default=0)
ap.add_argument("--vflip", default=False, action="store_true")
ap.add_argument("--models", default="./models", help="location of the cascade XML files, default: ./models")
ap.add_argument("--emotion", default=False, action="store_true")
ap.add_argument("--genderage", default=False, action="store_true")
ap.add_argument("--post", default=None, help="post images to given URL")
args = ap.parse_args()
MODELS = os.path.expanduser(args.models)
def p (msg=""):
    if msg.strip():
        os.system("espeak \"{0}\" 2> /dev/null".format(msg))
    print (msg)

def tone ():
    os.system("aplay beep.wav 2> /dev/null")
# print ("Loading face detector", file=sys.stderr)
# face_detector = dlib.get_frontal_face_detector()
face_cascade = cv2.CascadeClassifier(os.path.join(MODELS, 'haarcascade_frontalface_default.xml'))
# print ("Loading shape predictor")
# shape_predictor = dlib.shape_predictor(os.path.join(MODELS, "shape_predictor_68_face_landmarks.dat"))
if args.emotion:
    p("loading emotion classifier")
    # print ("Loading emotion classifier", file=sys.stderr)
    from keras.models import load_model
    emotion_classifier = load_model(os.path.join(MODELS, "emotion_model.hdf5"))

if args.genderage:
    p ("loading genderage classifier")
    # from keras.models import load_model
    from keras.utils.data_utils import get_file
    weight_file = os.path.join(MODELS, "weights.28-3.73.hdf5")
    from wide_resnet import WideResNet
    genderage_classifier = WideResNet(IMG_SIZE, depth=DEPTH, k=K)()
    genderage_classifier.load_weights(weight_file)

p("all loaded")
import datetime
from math import floor
try:
    os.makedirs("images")
except OSError:
    # directory already exists
    pass
while True:
    tone()
    current_image, current_image_bgr = take_photo(args.camera, args.rotate, args.width, args.height)
    current_faces = get_faces(face_cascade, current_image_bgr, use_dlib=False)
    if len(current_faces) > 0:
        n = datetime.datetime.now()
        base = n.strftime("%Y%m%d_%H%M%S")
        print (datetime.datetime.now())
        if len(current_faces) > 1:
            s = 's'
        else:
            s = ''
        p ("{0} face{1}".format(len(current_faces), s))
        if args.emotion:
            get_emotions(emotion_classifier, current_image, current_faces)
        if args.genderage:
            get_genderage(genderage_classifier, current_image_bgr, current_faces)
        for i, face in enumerate(current_faces):
            # cv2.imwrite("image.jpg", current_image)
            # draw bounding rectangle
            # fr = face['rect']
            x1, y1, x2, y2 = face['x'], face['y'], face['x'] + face['width'], face['y'] + face['height']
            cv2.rectangle(current_image_bgr, (x1, y1), (x2, y2), (255, 255, 0), 2)
            # text = "face{0}".format(i)
            text = caption(face)
            if "gender" in face:
                text += ""
            font_scale = 0.50
            thickness = 2
            color = (255, 0, 255)
            line_height = 18
            # j (not i) so the outer face index is not shadowed;
            # each caption line is placed below the face box
            for j, line in enumerate(text.splitlines()):
                cv2.putText(current_image_bgr, line,
                    (x1, y2 + (line_height * (j+1))),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    font_scale, color, thickness) # cv2.LINE_AA
    # print("sleeping")
    if len(current_faces) > 0:
        p()
        ts = os.path.join("images", datetime.datetime.now().strftime("%Y%m%d_%H%M%S.jpg"))
        cv2.imwrite(ts, current_image_bgr)
    cv2.imshow('window_frame', current_image_bgr)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    # sleep(1.0)
@@ -113,11 +113,11 @@ class Face (models.Model):
     )

     @classmethod
-    def map_age (cls, x, display=False):
+    def map_age (cls, n, display=False):
         for val, label in cls.AGE_CHOICES:
             low, high = [float(x) for x in val.split(",")]
             print ("age_map {0} {1} {2} {3}".format(x, low, high, val, label))
-            if x >= low and x < high:
+            if n >= low and n < high:
                 if display:
                     return label
                 else:
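The rename matters because the list comprehension also binds x (low, high = [float(x) for x in val.split(",")]); under Python 2 that x leaks out of the comprehension and shadows the parameter, so the range test compared the wrong value. Renaming the parameter to n sidesteps the clash (the leftover debug print still refers to x).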
@@ -70,3 +70,8 @@ Ads tab
 (Example: Bicycle route on 16 Aug 2016)

 * Request to download data you think is relevant, and once you have it, try to locate data relevant to your starting point.
+
+=================================