...
 
Commits (2)
Installation for the exhibition 'Bye Bye Future', early 2020 in Mariemont.
Nature words: 2004
Technique words: 1989
Trained nature words: 1912
Trained technique words: 1868
Score classifier: 0.8783068783068783
Lexique vocabulary: 143089 words
Unscored words, not in corpus embeddings: 12258
\ No newline at end of file
# Loop through all epub files in subdirs and convert using ebook-convert
# https://stackoverflow.com/a/9612560
# IFS= and -r keep leading/trailing whitespace and backslashes in paths intact;
# -print0 / read -d '' handles file names containing spaces or newlines.
find . -name "*.epub" -type f -print0 | while IFS= read -r -d '' path
do
    name=$(basename "${path}")
    newname="${name%.epub}.txt"
    echo "******"
    echo "Converting: ${path}"
    ebook-convert "${path}" "txt/${newname}"
done
102137437268
ESC.O
0 → Paper feed mode
0: Manual
1: Automatic
1 → Paper state
0: Clean
1: Not clean
2 → Paper load
0: Paper has not been loaded
1: Paper has been loaded
3 → I/O Buffer
0: Not empty
1: Empty
4&5
0 0: Processing hpgl
1 0: View state
0 1: Not Ready state
6
0: Cover lowered
1: Cover raised
# Autostart entry: launch Chromium in kiosk mode pointing at the local interface.
[Desktop Entry]
Type=Application
Exec=/usr/bin/chromium-browser --incognito --noerrdialogs --disable-session-crashed-bubble --disable-infobars --kiosk http://localhost
Hidden=false
X-GNOME-Autostart-enabled=true
Name[en_US]=Une-Anthologie
Name=Une-Anthologie
Comment=Open the browser and load the application on start
\ No newline at end of file
server {
    listen 80;

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    # Static interface files served straight from disk.
    location / {
        alias /srv/une-anthologie/interface;
    }

    # API requests are proxied to the local gunicorn instance (see BASEURL='/api').
    # NOTE(review): proxying to 0.0.0.0 works on Linux but 127.0.0.1 is conventional.
    location /api {
        proxy_pass http://0.0.0.0:5555;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
[Unit]
Description=Should plot the queue
After=multi-user.target

[Service]
User=pi
Group=pi
Type=idle
# The '+' prefix runs this single step with full privileges; 'sudo' is neither
# needed nor reliable inside a systemd unit.
ExecStartPre=+/bin/chown pi:pi /dev/ttyUSB0
# systemd does not interpret shell redirections ('> file 2>&1' would be passed
# to the program as arguments); route output through StandardOutput instead.
ExecStart=/srv/une-anthologie/venv-plotter/bin/python /srv/une-anthologie/runner.py
StandardOutput=append:/srv/une-anthologie/logs/plotter/plotter.log
StandardError=inherit

[Install]
WantedBy=multi-user.target
\ No newline at end of file
python-escpos==3.0a6
flask
\ No newline at end of file
[Unit]
Description=The service starting Une Anthologie
After=network.target

[Service]
User=pi
Group=pi
Restart=on-failure
WorkingDirectory=/srv/une-anthologie
# Fixed config file extension: gunicorn config files are Python scripts
# ('.py'); '.pi' was almost certainly a typo and would fail to load.
ExecStart=/srv/une-anthologie/venv/bin/gunicorn --config /srv/une-anthologie/gunicorn_app.py --bind 0.0.0.0:5555 --workers 1 app:app

[Install]
WantedBy=multi-user.target
\ No newline at end of file
pattern
nltk
gensim
pyemd
\ No newline at end of file
from db import connect, find_one_word, find_words
from sonnets import generate_queneau_sonnet
import re
"""
API for the nature sonnet.
- Allows to generate sonnets
- Allows to plot them
"""
from flask import Flask, jsonify, request
from nature_sonnet import generate_nature_sonnet
from settings import BASEURL, HPGL_CACHEDIR, PLOT_PAGE, PLOT_HEADER_TEXT, PLOT_FOOTER_TEXT, PLOT_BODY_CHARACTER_SIZE, PLOT_FOOTER_CHARACTER_SIZE
import json
import random
import sys
from utils import hpgl_mm
from tempfile import mkstemp
import os.path
from os import makedirs
import time
from datetime import datetime
import re
import textwrap
from settings import THERMAL_CACHEDIR, THERMAL_FOOTER_TEXT, THERMAL_HEADER_TEXT
app = Flask(__name__)

# Make sure the job-queue directories exist before any request comes in:
# HPGL files for the pen plotter, text files for the thermal printer.
if not os.path.exists(HPGL_CACHEDIR):
    makedirs(HPGL_CACHEDIR)
if not os.path.exists(THERMAL_CACHEDIR):
    makedirs(THERMAL_CACHEDIR)
@app.after_request
def add_headers(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', 'Content-Type,Authorization'),
    )
    for header_name, header_value in cors_headers:
        response.headers.add(header_name, header_value)
    return response
@app.route("{}/generate".format(BASEURL), methods=["POST"])
def sonnet ():
    """Generate a sonnet for the posted nature 'score' and return it as JSON."""
    requested_score = float(request.form['score'])
    payload = {
        'sonnet': generate_nature_sonnet(requested_score),
        'score': requested_score,
    }
    return jsonify(payload)
"""
word CHARACTER(50),
lemma CHARACTER(50),
tag CHARACTER(5),
syllable_count INTEGER,
rhyme_end CHARACTER(5),
nature_score FLOAT,
has_nature_score BOOLEAN
Special chars for charset 12, with 17 as alternative
"""
conn = connect()
def load_queneau_replacement_words (path='../data/queneau_words.txt'):
    """Load the list of replaceable words, one per line, stripped.

    The path is parameterized (defaulting to the original hard-coded
    location) and the file is read as UTF-8 explicitly instead of relying
    on the platform default encoding.
    """
    with open(path, 'r', encoding='utf-8') as source:
        return [l.strip() for l in source]
def find_replacement(word, should_rhyme=False, nature_score=None):
    """Find up to 10 candidate nouns that could replace *word*.

    Candidates must match the original word's gender, plurality and
    syllable count. When *nature_score* is a (low, high) tuple the search
    is restricted to scored words inside that range; when *should_rhyme*
    is True the candidates must share the word's rhyme ending.
    Returns a list of words, or None when the word is unknown or no
    candidate matches.
    """
    # Find information about this word
    word_data = find_one_word(conn, [('tag', 'NOM'), ('word', word)])
    if word_data:
        # Find words with an equal amount of syllables and the same tag
        parameters = [('tag', 'NOM'),
                      ('gender', word_data['gender']),
                      ('plural', word_data['plural']),
                      ('syllable_count', word_data['syllable_count'])]
        if nature_score:
            # (low, high) tuple rendered into the query's BETWEEN clause.
            parameters.append(('nature_score', 'BETWEEN', '{} AND {}'.format(nature_score[0], nature_score[1])))
            parameters.append(('has_nature_score', 1))
        if should_rhyme:
            parameters.append(('rhyme_end', word_data['rhyme_end']))
        # Never propose the word itself.
        parameters.append(('word', '!=', word))
        words = find_words(conn, parameters, limit=10)
        if words:
            # [:10] is redundant given limit=10, but kept for safety.
            return [w['word'] for w in words[:10]]
    return None
words_to_replace = load_queneau_replacement_words()


def generate_replaced_sonnet (score):
    """Generate a Queneau sonnet and collect replacements for candidate words.

    Lines are walked in reverse so the first replaceable/real word seen is
    the line-final (rhyming) word. Returns a list of lines; each line is a
    list of tuples: (word,) for untouched words, (word, [candidates]) for
    replaceable ones.
    """
    # generate a sonnet
    sonnet = generate_queneau_sonnet()
    new_sonnet = []
    # per line replace candidate words with a word that is similar
    for line in sonnet:
        new_line = []
        # True until the rhyme word (last real word of the line) is handled.
        should_rhyme = True
        line_words = line.split(' ')
        line_words.reverse()
        for word in line_words:
            if word in words_to_replace:
                replacements = None
                # Earlier attempt, kept for reference: widen an integer band downwards.
                # start = 8
                # i = 0
                # # First try to find a replacement within the nature selection
                # while not replacements and i <= 5:
                #     replacements = find_replacement(word, should_rhyme, nature_score=(start, 9999))
                #     start -= 1
                #     i += 1
                window = 0.25
                # First try to find a replacement within the nature selection,
                # widening the score window until something matches.
                while not replacements and window <= 10:
                    replacements = find_replacement(word, should_rhyme, nature_score=(score-window, score+window))
                    window += .25
                # If we can't find a replacement within that range select
                # based on syllable count and rhyme preference
                if not replacements:
                    replacements = find_replacement(word, should_rhyme)
                # If still no match and we where looking for a rhyming word
                # drop rhyme preference
                if not replacements and should_rhyme:
                    replacements = find_replacement(word, should_rhyme=False)
                # Finally give up
                if not replacements:
                    replacements = ['NO REPLACEMENTS']
                new_line.append((word, replacements))
                should_rhyme = False
            else:
                # Any other real word also counts as the line's rhyme word.
                if re.match(r'\w+', word):
                    should_rhyme = False
                new_line.append((word,))
        new_line.reverse()
        new_sonnet.append(new_line)
    return new_sonnet
# for debug take one line, see how many options there are
# what is the correct pos-tag?

if __name__ == '__main__':
    # Optional CLI argument: the desired nature score (defaults to neutral 0.0).
    if len(sys.argv) > 1:
        score = float(sys.argv[1])
    else:
        score = 0.0
    new_sonnet = generate_replaced_sonnet(score)
    # Fixed typo in the output message: 'scorce' -> 'score'.
    print('A generated sonnet with nature score: {}'.format(score))
    for line in new_sonnet:
        output = []
        for word in line:
            if len(word) > 1:
                # Replaced word: pick one candidate, show the original in parens.
                output.append('{replacement} ({old})'.format(old=word[0], replacement=random.choice(word[1])))
            else:
                output.append(word[0])
        print(' '.join(output))
\ No newline at end of file
# Accented characters mapped to their code points in the plotter's alternate
# character set. Each is emitted wrapped in SO (chr(14)) / SI (chr(15)) shift
# codes that switch to and back from the alternate set.
_SPECIAL_CHAR_CODES = {
    'é': 69, 'É': 93, 'è': 73, 'È': 35, 'Ê': 36, 'ê': 65, 'ë': 77,
    'á': 68, 'Á': 96, 'à': 72, 'À': 33, 'â': 64, 'Â': 34, 'Ä': 88,
    'ó': 70, 'Ó': 103, 'ò': 74, 'Ò': 104, 'ô': 66, 'Ô': 94,
    # NOTE(review): 'Ö' maps to the same code as 'ö' in the original — confirm.
    'ö': 78, 'Ö': 78, 'Õ': 105, 'õ': 105,
    'û': 67, 'ú': 71, 'ù': 75, 'ü': 79, 'Ü': 91, 'Ù': 45, 'Ú': 46,
    'í': 85, 'ï': 93, 'î': 81, 'Ï': 39, 'Î': 38,
    'Ÿ': 110, 'ÿ': 111,
    'ç': 53, 'Ç': 52,
    '«': 123, '»': 125,
}


def fix_special_char (c):
    """Return the plotter-charset escape sequence for character *c*.

    Accented characters are replaced by SO + alternate-set code + SI;
    any unmapped character is returned unchanged. Replaces the original
    forty-branch if-chain with a single table lookup.
    """
    code = _SPECIAL_CHAR_CODES.get(c)
    if code is None:
        return c
    return chr(14) + chr(code) + chr(15)
def plot_safe (dirty):
    """Return *dirty* with every character passed through fix_special_char.

    Uses a single ''.join over a generator instead of repeated string
    concatenation (which is quadratic in the worst case).
    """
    return ''.join(fix_special_char(c) for c in dirty)
@app.route("{}/plot".format(BASEURL), methods=["POST"])
def plot():
    """Queue a pen-plotter job: render the posted sonnet to an HPGL file.

    Form fields: 'score' (float nature score) and 'sonnet' (text). The file
    is written into HPGL_CACHEDIR, where the plotter runner picks it up.
    Returns the string '100'.
    """
    score = float(request.form['score'])
    sonnet = request.form['sonnet']
    # Unique cache file for this job.
    _, path = mkstemp(suffix='.hpgl', text=True, dir=HPGL_CACHEDIR)
    now = datetime.now()
    # Creation date line, with accents mapped to the plotter charset.
    date = plot_safe(now.strftime('Créé le %d-%m-%Y à %H:%M'))
    with open(path, 'w') as h:
        # Select pen 1, rotate the coordinate system 90 degrees.
        hpgl = 'SP1;RO90;'
        # Character set 10 with 17 as alternate set (for accented characters).
        hpgl += 'CS10;CA17;'
        # Pen force and velocity.
        hpgl += 'FS2;VS15;'
        # Header: position (PA), character size (SI), label text (LB);
        # chr(3) is the ETX label terminator.
        hpgl += 'PA{},{};'.format(PLOT_PAGE['left'] + hpgl_mm(2), PLOT_PAGE['top'] - hpgl_mm(5))
        hpgl += 'SI{},{};'.format(*PLOT_FOOTER_CHARACTER_SIZE)
        hpgl += 'LB{}{};'.format(plot_safe(PLOT_HEADER_TEXT), chr(3))
        # Sonnet body.
        hpgl += 'PA{},{};'.format(PLOT_PAGE['left'] + hpgl_mm(2), PLOT_PAGE['top'] - hpgl_mm(25))
        hpgl += 'SI{},{};'.format(*PLOT_BODY_CHARACTER_SIZE)
        hpgl += 'LB{}{};'.format(plot_safe(sonnet), chr(3))
        # Footer: score, creation date and venue credits.
        hpgl += 'PA{},{};'.format(PLOT_PAGE['left'] + hpgl_mm(2), PLOT_PAGE['bottom'] + hpgl_mm(20))
        hpgl += 'SI{},{};'.format(*PLOT_FOOTER_CHARACTER_SIZE)
        hpgl += 'LBNature: {}\r\n{}\r\n{}{};'.format(score, date, plot_safe(PLOT_FOOTER_TEXT), chr(3))
        # Put the pen away.
        hpgl += 'SP0;'
        h.write(hpgl)
    return '100'
@app.route("{}/print".format(BASEURL), methods=["POST"])
def print():
    """Queue a thermal-printer job: write the posted sonnet as a text file.

    Form fields: 'score' and 'sonnet'. The file lands in THERMAL_CACHEDIR,
    where the thermal-printer runner picks it up. Returns the string '100'.

    NOTE(review): this handler shadows the builtin `print` for the rest of
    the module — consider renaming it (the route URL would be unaffected).
    """
    score = float(request.form['score'])
    # Drop the (French typographic) space before '!' and '?' —
    # presumably to avoid ugly wraps at the 44-char width; confirm intent.
    sonnet, _ = re.subn(r'\s([!?])', r'\1', request.form['sonnet'])
    _, path = mkstemp(suffix='.txt', text=True, dir=THERMAL_CACHEDIR)
    with open(path, 'w') as h:
        h.write('\n')
        h.write(THERMAL_HEADER_TEXT)
        h.write('\n\n')
        # Wrap each sonnet line to the printer's 44-character width.
        for line in sonnet.split('\n'):
            h.write('\n'.join(textwrap.wrap(line, width=44, subsequent_indent=' ', break_on_hyphens=False)))
            h.write('\n')
        # h.write('\n'.join(sonnet_wrapped))
        h.write('\n\n')
        h.write(datetime.now().strftime('Créé le %d-%m-%Y à %H:%M\n'))
        h.write('Nature: {}\n'.format(score))
        h.write('\n')
        h.write(THERMAL_FOOTER_TEXT)
    return '100'
\ No newline at end of file
def build_dataset(words, vocab_size=None):
    """Build a word2vec-style integer dataset from a token list.

    Returns (data, count, dictionary, reverse_dictionary, disregarded):
    - data: *words* mapped to integer ids (0 = 'UNK')
    - count: [word, frequency] pairs, with ['UNK', n_unknown] first
    - dictionary: word -> id, ids assigned in descending frequency order
    - reverse_dictionary: id -> word
    - disregarded: every token outside the vocabulary (custom Algolit
      addition, for logging disregarded words)

    *vocab_size* defaults to the module-level `vocabulary_size`, so
    existing single-argument calls behave exactly as before.
    """
    if vocab_size is None:
        vocab_size = vocabulary_size
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(vocab_size - 1))
    # ids follow position in `count`: UNK=0, most common word=1, ...
    dictionary = {word: index for index, (word, _) in enumerate(count)}
    data = []
    disregarded = []
    unk_count = 0
    for word in words:
        try:
            index = dictionary[word]
        except KeyError:
            index = 0  # dictionary['UNK']
            unk_count += 1
            disregarded.append(word)
        data.append(index)
    count[0][1] = unk_count
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary, disregarded
def read_input_text(trainingset):
    """Yield each line of *trainingset*, stripped of surrounding whitespace.

    Reads the corpus as UTF-8 explicitly instead of relying on the
    platform default encoding, and logs progress every million lines.
    Dead commented-out variants (word-level tokenisation, readlines)
    were removed.
    """
    # NOTE(review): buffering=4048 looks like a typo for 4096; kept as-is.
    with open(trainingset, 'r', buffering=4048, encoding='utf-8') as source:
        line_cnt = 0
        print('File opened')
        for line in source:
            if line:
                yield line.strip()
            line_cnt += 1
            if (line_cnt % 1000000) == 0:
                print(line_cnt)
This diff is collapsed.
/* http://meyerweb.com/eric/tools/css/reset/
v2.0 | 20110126
License: none (public domain)
*/
html, body, div, span, applet, object, iframe,
h1, h2, h3, h4, h5, h6, p, blockquote, pre,
a, abbr, acronym, address, big, cite, code,
del, dfn, em, img, ins, kbd, q, s, samp,
small, strike, strong, sub, sup, tt, var,
b, u, i, center,
dl, dt, dd, ol, ul, li,
fieldset, form, label, legend,
table, caption, tbody, tfoot, thead, tr, th, td,
article, aside, canvas, details, embed,
figure, figcaption, footer, header, hgroup,
menu, nav, output, ruby, section, summary,
time, mark, audio, video {
margin: 0;
padding: 0;
border: 0;
font-size: 100%;
font: inherit;
vertical-align: baseline;
}
/* HTML5 display-role reset for older browsers */
article, aside, details, figcaption, figure,
footer, header, hgroup, menu, nav, section {
display: block;
}
body {
line-height: 1;
}
ol, ul {
list-style: none;
}
blockquote, q {
quotes: none;
}
blockquote:before, blockquote:after,
q:before, q:after {
content: '';
content: none;
}
table {
border-collapse: collapse;
border-spacing: 0;
}
\ No newline at end of file
@font-face {
font-family: 'Gelasio';
src: url('fonts/Gelasio-Regular.ttf');
font-weight: 400;
font-style: normal;
}
@font-face {
font-family: 'Necto Mono';
src: url('fonts/Necto-Mono.woff2');
font-weight: 400;
font-style: normal;
}
:root {
--font-size: 15pt;
--font-size-small: 10pt;
--font-size-button: 11pt;
--font-size-about: 12pt;
}
body {
font-size: var(--font-size);
font-family: 'Gelasio';
/* cursor: none; */
}
.replaced .queneau {
color: rgb(93, 166, 168);
}
button {
font-family: 'Necto Mono';
background: none;
border: none;
font-size: var(--font-size-button);
border: 1px solid;
padding: .3em 1em;
color: #333;
border-radius: 3px;
}
button:active {
color: black;
}
button:disabled {
color: #999;
}
.replaced .anthologie {
/* letter-spacing: 1px; */
color: #705c00;
/* font-family: monospace; */
}
.anthologie .replaced .queneau {
display: none;
}
.queneau .replaced .anthologie {
display: none;
}
.poem--body.anthologie::before {
    content: "Une Anthologie";
    display: block;
    /* text-align: center; */
    margin-top: -2em;
    margin-bottom: 2em;
    font-size: var(--font-size-small);
    /* Consistent casing with the @font-face declaration ('Necto Mono');
       family matching is case-insensitive so rendering is unchanged. */
    font-family: 'Necto Mono';
}
.poem--body.queneau::before {
    content: "Cent mille milliards de poèmes, Raymond Queneau";
    display: block;
    /* text-align: center; */
    margin-top: -2em;
    margin-bottom: 2em;
    font-size: var(--font-size-small);
    /* Consistent casing with the @font-face declaration ('Necto Mono');
       family matching is case-insensitive so rendering is unchanged. */
    font-family: 'Necto Mono';
}
#about {
display: none;
position: fixed;
z-index: 1;
top: 0;
left: 0;
bottom: 0;
right: 0;
background: #efefef;
padding-top: 3em;
column-width: 350pt;
column-fill: auto;
font-size: var(--font-size);
line-height: 1.3;
padding: 5vh 3em;
}
#about[data-active] {
display: initial;
}
h2 {
column-span: all;
}
h2 + h2 {
margin-bottom: 1.3em;
}
#about--close {
position: absolute;
top: 5vh;
right: 3em;
}
#title {
grid-row: title;
}
#controls {
grid-row: controls;
padding: 1em 1em 5vh 1em;
text-align: center;
display: grid;
grid-template-columns: [left] 1fr [range] 50em [right] 1fr;
}
#middle {
display: flex;
flex-direction: row;
grid-row: poem;
}
#poem {
display: flex;
flex-direction: row;
justify-content: center;
flex: 3 0;
}
#about-short {
flex: 1 0;
line-height: 1.3;
/* font-size: var(--font-size-about); */
padding: 1em;
color: #444;
display: none;
}
h2, h3 {
font-family: 'Necto Mono';
}
p {
margin-bottom: 1.3em;
}
#poem--meta {
text-align: center;
font-family: monospace;
}
.poem--body {
margin: 1em 0 4em 0;
background: white;
padding: 3em;
flex: 0 0 25em;
line-height: 1.6;
position: relative;
}
.poem--body.queneau {
background: transparent;
}
.poem--body.queneau .poem--metadata {
display: none;
}
.poem--body.queneau .line {
visibility: hidden;
}
.poem--body.queneau .line .queneau {
visibility: visible;
}
body {
background: #efefef;
}
main {
height: 100vh;
display: grid;
grid-template-rows: [title] 4vh [poem] 1fr [controls] min-content;
}
main, body {
height: 100vh;
margin: 0;
padding: 0;
}
:root {
--control-color: #666;
}
.range--wrapper {
display: flex;
flex-direction: row;
}
.range--wrapper label {
flex: 0 0 12em;
padding-top: .4em;
font-family: 'Necto Mono';
font-size: 85%;
}
.range--input--wrapper {
/* margin: auto; */
margin-left: 1em;
margin-right: 1em;
flex: 1 0;
/* grid-column: range; */
height: 1.5em;
position: relative;
background: linear-gradient(to right,
transparent 0%,
var(--control-color) 0%,
var(--control-color) calc(0% + 1px),
transparent calc(0% + 1px),
transparent 12.5%,
var(--control-color) 12.5%,
var(--control-color) calc(12.5% + 1px),
transparent calc(12.5% + 1px),
transparent 25%,
var(--control-color) 25%,
var(--control-color) calc(25% + 1px),
transparent calc(25% + 1px),
transparent 37.5%,
var(--control-color) 37.5%,
var(--control-color) calc(37.5% + 1px),
transparent calc(37.5% + 1px),
transparent 50%,
var(--control-color) 50%,
var(--control-color) calc(50% + 1px),
transparent calc(50% + 1px),
transparent 62.5%,
var(--control-color) 62.5%,
var(--control-color) calc(62.5% + 1px),
transparent calc(62.5% + 1px),
transparent 75%,
var(--control-color) 75%,
var(--control-color) calc(75% + 1px),
transparent calc(75% + 1px),
transparent 87.5%,
var(--control-color) 87.5%,
var(--control-color) calc(87.5% + 1px),
transparent calc(87.5% + 1px),
transparent calc(100% - 1px),
var(--control-color) calc(100% - 1px)),
linear-gradient(to bottom, transparent 50%, var(--control-color) 50%, var(--control-color) calc(50% + 1px), transparent calc(50% + 1px));
}
input[type="range"] {
position: absolute;
top: 0;
/* right: -12px; */
bottom: 0;
left: -9px;
width: calc(100% + 14px);
height: 100%;
}
/*
#label--technique {
position: absolute;
right: 100%;
padding-right: 1em;
}
#label--nature {
position: absolute;
left: 100%;
padding-left: 1em;
} */
.side-column {
text-align: center;
}
.poem--metadata {
font-family: 'Necto Mono';
font-size: var(--font-size-small);
position: absolute;
bottom: 3em;
}
\ No newline at end of file
......@@ -11,6 +11,17 @@ import csv
from db import connect, create_table, create_indexes, insert_many_words, drop_table_and_indexes
from nature_classifier import predict
def load_lexicon(filename):
lexicon = []
with open(filename, encoding='utf-8') as infile:
for line in infile:
lexicon.append(line.strip())
return lexicon
nature_words = load_lexicon('../data/synonymes_nature_clean.txt')
technology_words = load_lexicon('../data/synonymes_technique_clean.txt')
def extract_rhyme (syllables):
## To do more intelligent
consonants = ['b', 'c', 'd', 'f', 'g', 'G', 'h', 'j', 'k', 'l', 'm', 'n', 'N', 'p', 'r', 'R', 's', 'S', 't', 'v', 'w', 'x', 'z', 'Z']
......@@ -58,9 +69,16 @@ with open('../data/Lexique383_met_alles.csv', 'r') as source:
nature_score = predict(row[2])
if nature_score is None:
nature_score = 0
has_nature_score = False
unscored += 1
if row[0] in nature_words or row[2] in nature_words:
print('Found word in nature list: {}'.format(row[0]))
nature_score = 8
if row[0] in technology_words or row[2] in technology_words:
print('Found word in techonology list: {}'.format(row[0]))
nature_score = -8
else:
nature_score = 0
has_nature_score = False
unscored += 1
words.append({
'word': row[0],
......
......@@ -14,13 +14,13 @@ import sys
has_nature_score BOOLEAN
"""
conn = connect()
vowels = 'aeiouyáéíóúýàèìòùỳâêîôûŷäëïöüÿ'
def load_queneau_replacement_words ():
with open('../data/queneau_words.txt', 'r') as source:
return [l.strip() for l in source]
def find_replacement(word, should_rhyme=False, nature_score=None):
def find_replacement(conn, word, should_rhyme=False, nature_score=None):
# Find information about this word
tag = 'NOM'
word_data = find_one_word(conn, [('tag', tag), ('word', word)])
......@@ -55,8 +55,11 @@ def find_replacement(word, should_rhyme=False, nature_score=None):
words_to_replace = load_queneau_replacement_words()
def generate_replaced_sonnet (score):
def generate_nature_sonnet (score):
# generate a sonnet
conn = connect()
sonnet = generate_queneau_sonnet()
new_sonnet = []
......@@ -67,6 +70,8 @@ def generate_replaced_sonnet (score):
line_words = line.split(' ')
line_words.reverse()
for word in line_words:
word = word.lower()
if word in words_to_replace:
replacements = None
# start = 8
......@@ -78,35 +83,58 @@ def generate_replaced_sonnet (score):
# start -= 1
# i += 1
window = 0.25
window = 0.5
# First try to find a replacement within the nature selection
while not replacements and window <= 10:
replacements = find_replacement(word, should_rhyme, nature_score=(score-window, score+window))
window += .25
replacements = find_replacement(conn, word, should_rhyme, nature_score=(score-window, score+window))
window += .33
# If we can't find a replacement within that range select
# based on syllable count and rhyme preference
if not replacements:
replacements = find_replacement(word, should_rhyme)
replacements = find_replacement(conn, word, should_rhyme)
# If still no match and we where looking for a rhyming word
# drop rhyme preference
if not replacements and should_rhyme:
replacements = find_replacement(word, should_rhyme=False)
replacements = find_replacement(conn, word, should_rhyme=False)
# Finally give up
if not replacements:
replacements = ['NO REPLACEMENTS']
new_line.append((word, replacements))
new_line.append([word, random.choice(replacements)])
should_rhyme = False
else:
if re.match(r'\w+', word):
should_rhyme = False
new_line.append((word,))
# this word is le or la
# previous word is a replacement
# previous word starts with a vowel
if len(new_line) > 0 \
and (word == 'le' or word == 'la') \
and len(new_line[-1]) > 1 \
and new_line[-1][1][0].lower() in vowels:
new_line[-1][1] = 'l\''+ new_line[-1][1]
# replace de & du for d' if replacement word starts with a vowel
elif len(new_line) > 0 \
and (word == 'de' or word == 'du') \
and len(new_line[-1]) > 1 \
and new_line[-1][1][0].lower() in vowels:
new_line[-1][1] = 'd\''+ new_line[-1][1]
# Replace sa for son if replacement word starts with a vowel
elif len(new_line) > 0 \
and (word == 'sa') \
and len(new_line[-1]) > 1 \
and new_line[-1][1][0].lower() in vowels:
new_line.append(('son',))
else:
new_line.append((word,))
new_line[-1] = [w.capitalize() for w in new_line[-1]]
new_line.reverse()
new_sonnet.append(new_line)
......@@ -123,7 +151,7 @@ if __name__ == '__main__':
else:
score = 0.0
new_sonnet = generate_replaced_sonnet(score)
new_sonnet = generate_nature_sonnet(score)
print ('A generated sonnet with nature scorce: {}'.format(score))
......@@ -131,7 +159,7 @@ if __name__ == '__main__':
output = []
for word in line:
if len(word) > 1:
output.append('{replacement} ({old})'.format(old=word[0], replacement=random.choice(word[1])))
output.append('{replacement} ({old})'.format(old=word[0], replacement=word[1]))
else:
output.append(word[0])
......
# Run the Flask API locally in development mode.
export FLASK_APP=app.py
export FLASK_ENV=development
flask run
\ No newline at end of file
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import chiplotle
import os.path
import time
import glob
from settings import HPGL_CACHEDIR
from shutil import move
from settings import VIRTUAL_PLOTTER
from os import makedirs
import codecs
# Bit masks for the plotter's ESC.O output-status byte
# (see the status notes: bit 2 = paper load, bit 3 = I/O buffer, bit 5 = not ready).
PAPER_LOADED = 4
BUFFER_EMPTY = 8
NR_STATE = 32  # Not ready state
def get_state (plotter):
    """Query the plotter's status byte (ESC.O) and return it as an int.

    Returns None when the serial port yields no reply.
    """
    port = plotter._serial_port
    port.flushInput()
    port.write('{}.O'.format(chr(27)))
    raw = plotter._read_port()
    if raw is None:
        return None
    return int(raw.strip('\r\n'))
def get_pen_position (plotter):
    """Ask the plotter for its pen position ('OA') and return the raw reply.

    The trailing CR/LF is stripped; returns None when nothing was read.
    """
    port = plotter._serial_port
    port.flushInput()
    port.write('OA')
    reply = plotter._read_port()
    return reply.strip('\r\n') if reply is not None else None
# Make sure the HPGL job queue and its 'plotted' archive directory exist.
if not os.path.exists(HPGL_CACHEDIR):
    makedirs(HPGL_CACHEDIR)
if not os.path.exists(os.path.join(HPGL_CACHEDIR, 'plotted')):
    makedirs(os.path.join(HPGL_CACHEDIR, 'plotted'))
def get_oldest_cachefile():
    """Return the next .hpgl file from the cache queue, or None when empty.

    The original passed a Python-2 `cmp=` function that ignored its second
    argument and therefore did not define a valid ordering; sort on the
    file's mtime with `key=` instead, matching the thermal-printer runner.
    NOTE(review): with reverse=True this picks the NEWEST file despite the
    function's name — kept identical to the thermal sibling; confirm the
    intended queue order.
    """
    files = glob.glob(os.path.join(HPGL_CACHEDIR, '*.hpgl'))
    if not files:
        return None
    return sorted(files, key=lambda p: int(os.path.getmtime(p)), reverse=True)[0]
# Main runner loop (Python 2): connect to a plotter, then drain the HPGL
# job queue, re-trying forever on failure. Indentation reconstructed —
# confirm against the original file.
while True:
    try:
        if VIRTUAL_PLOTTER:
            plotter = chiplotle.tools.plottertools.instantiate_virtual_plotter()
        else:
            plotters = chiplotle.tools.plottertools.instantiate_plotters()
            if plotters:
                plotter = plotters[0]
            else:
                plotter = None
        # Not able to instantiate a plotter now. Trying again in 30 seconds.
        if not plotter:
            time.sleep(30)
            continue
        while True:
            cachefile = get_oldest_cachefile()
            if cachefile:
                print(cachefile)
                with codecs.open(cachefile, 'r', encoding='utf-8') as h:
                    try:
                        hpgl = h.read()
                        # Initialise (IN), send the job, advance page (PG).
                        # NOTE(review): unicode.decode('utf-8').encode('ascii')
                        # is Python-2 only and will raise on non-ASCII — the
                        # accented chars are pre-escaped by plot_safe upstream.
                        plotter.write(u'IN;{};PG;'.format(hpgl).decode("utf-8").encode('ascii'))
                        plotter._serial_port.flushInput()
                        # Archive the job so it is not plotted twice.
                        move(cachefile, os.path.join(HPGL_CACHEDIR, 'plotted', os.path.basename(cachefile)))
                        if VIRTUAL_PLOTTER:
                            chiplotle.tools.io.view(plotter)
                            plotter = chiplotle.tools.plottertools.instantiate_virtual_plotter()
                        else:
                            # Poll the status byte until the I/O buffer drains.
                            state = 0
                            while not state & BUFFER_EMPTY:
                                state = get_state(plotter)
                                print('Buffer not yet empty.')
                                time.sleep(10)
                            # Earlier pen-position-based wait, kept for reference:
                            # last_position = None
                            # current_position = get_pen_position(plotter)
                            # while last_position <> current_position:
                            #     last_position = current_position
                            #     print(current_position)
                            #     time.sleep(5)
                            #     current_position = get_pen_position(plotter)
                            # print('Pen stopped moving')
                    except Exception as e:
                        print('An error occured. Clearing plotter. Trying again in 30 seconds.')
                        print(str(e))
                        plotter = None
                        break
            # except:
            #     print 'Something went wrong while plotting'
            #     pass
            time.sleep(1)
    except Exception as e:
        print('Could not initiate plotter, waiting one minute')
        print(str(e))
        time.sleep(60)
\ No newline at end of file
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os.path
import time
import glob
from escpos import printer
from settings import THERMAL_CACHEDIR
from shutil import move
from os import makedirs
import codecs
def find_printer ():
    """Return an escpos serial printer on the first /dev/ttyUSB* device.

    Returns None when no USB serial device is present.
    """
    devices = glob.glob('/dev/ttyUSB*')
    if not devices:
        return None
    return printer.Serial(devices[0], 19200)
# Make sure the thermal job queue and its 'printed' archive directory exist.
if not os.path.exists(THERMAL_CACHEDIR):
    makedirs(THERMAL_CACHEDIR)
if not os.path.exists(os.path.join(THERMAL_CACHEDIR, 'printed')):
    makedirs(os.path.join(THERMAL_CACHEDIR, 'printed'))
def get_oldest_cachefile():
    """Return the next .txt job from the thermal cache, or None when empty.

    NOTE(review): with reverse=True this returns the file with the LARGEST
    mtime, i.e. the NEWEST file, despite the function's name — confirm the
    intended queue order.
    """
    files = [path for path in glob.glob(
        os.path.join(THERMAL_CACHEDIR, '*.txt'))]
    if files:
        return sorted(files, key=lambda p: int(os.path.getmtime(p)), reverse=True)[0]
    else:
        return None
# Main runner loop: connect to the thermal printer, then drain the text job
# queue, re-trying forever on failure. Indentation reconstructed — confirm
# against the original file.
while True:
    try:
        thermal_printer = find_printer()
        # Not able to instantiate a printer now. Trying again in 30 seconds.
        if not thermal_printer:
            time.sleep(30)
            continue
        while True:
            cachefile = get_oldest_cachefile()
            if cachefile:
                with open(cachefile, 'r') as h:
                    try:
                        thermal_printer.set(custom_size=False, height=1, width=1, font='a', flip=True)
                        thermal_printer.line_spacing(65)
                        # flip=True prints upside-down, so the lines are fed
                        # in reverse order to come out readable — presumably;
                        # confirm against the installed printer.
                        lines = h.read().split('\n')
                        lines.reverse()
                        thermal_printer.text(' ' + '\n '.join(lines))
                        thermal_printer.cut()
                    except Exception as e:
                        print('An error occured. Clearing printer. Trying again in 30 seconds.')
                        print(str(e))
                        thermal_printer = None
                        time.sleep(1)
                        break
                # Archive the job only after a successful print
                # (the except branch breaks out before reaching this move).
                move(cachefile, os.path.join(THERMAL_CACHEDIR, 'printed', os.path.basename(cachefile)))
            time.sleep(1)
    except Exception as e:
        print('Could not initiate printer, waiting one minute')
        print(str(e))
        time.sleep(60)
\ No newline at end of file
# -*- coding: utf-8 -*-
import os.path

# Enable debug behaviour.
DEBUG = True
# Absolute path of the directory containing this settings file.
BASEPATH = os.path.dirname(os.path.realpath(__file__))
# Word database built from the Lexique corpus (opened via db.connect).
DATABASE = os.path.join(BASEPATH, '../data/mariemont.db')
\ No newline at end of file
# Word database built from the Lexique corpus (opened via db.connect).
DATABASE = os.path.join(BASEPATH, '../data/mariemont.db')
# Directory where generated .hpgl plot jobs are queued for the plotter runner.
HPGL_CACHEDIR = os.path.join(BASEPATH, '../data/plots')
# Plot area bounds, in HPGL plotter units (40 units per mm — see utils.hpgl_mm).
PLOT_PAGE = { 'top': 10430, 'right': 7400, 'bottom': 430, 'left': 200 }
PLOT_HEADER_TEXT = 'Une Anthologie'
PLOT_FOOTER_TEXT = 'Bye Bye Future! Musée royal de Mariemont, 2020\r\nGijs de Heij & Anaïs Berck'
# (width, height) character sizes passed to the HPGL 'SI' instruction.
PLOT_BODY_CHARACTER_SIZE = (0.3, 0.385)
PLOT_FOOTER_CHARACTER_SIZE = (0.2, 0.275)
# URL prefix under which the Flask API routes are registered.
BASEURL = '/api'
# When True the plotter runner uses chiplotle's virtual plotter (on-screen preview).
VIRTUAL_PLOTTER = True
THERMAL_HEADER_TEXT = 'Une Anthologie'
THERMAL_FOOTER_TEXT = 'Bye Bye Future! Musée Royal de Mariemont\nGijs de Heij & Anaïs Berck'
# Directory where thermal-printer .txt jobs are queued.
THERMAL_CACHEDIR = os.path.join(BASEPATH, '../data/thermal')
\ No newline at end of file
# Alias the corpus import so the helper function below can safely reuse the
# public name `stopwords`. In the original, `def stopwords(lang)` rebound the
# name, so the body's `stopwords.words(lang)` called `.words` on the function
# itself and raised AttributeError on every call.
from nltk.corpus import stopwords as _nltk_stopwords


def stopwords(lang):
    """Return the NLTK stopword list for *lang* (e.g. 'french')."""
    return _nltk_stopwords.words(lang)
\ No newline at end of file
......@@ -8,4 +8,10 @@ def log (text):
print("Log : {}".format(text))
def error (text):
print("Error : {}".format(text))
\ No newline at end of file
print("Error : {}".format(text))
"""
Convert length in mm to plotter units
"""
def hpgl_mm (l):
return l * 40
\ No newline at end of file