Add SVG generation, get rid of sankeymatic dependency, create a heuristic algorithm for enhancing the sankey graph
This commit is contained in:
parent 2de96116bc
commit 3d6d126abc

.gitignore (vendored): 3 lines changed
@@ -1,7 +1,8 @@
 /.env
 /venv
-/cache.json
+/*.json
 /*.csv
+/*.svg

 # ---> Python
 # Byte-compiled / optimized / DLL files
README.md: 22 lines changed
@@ -40,6 +40,21 @@ partirait dans une autre filière après. Le cas paraît beaucoup plus douteux q

 ## FICHIERS

+### get.py
+
+The main program. It takes department acronyms as arguments (for example GEA or INFO) and builds a graph of the BUT programmes of that department (or of those departments in a single graph, if several are given on the command line).
+
+A virtual environment is needed so that the Python libraries pycairo, drawsvg and requests are available. In principle libcairo is optional, but the graph comes out worse without it; the system library `libcairo2` should therefore be installed as well (`apt install libcairo-dev` or equivalent).
+
+The `--techno` option can be added to keep only the technological baccalaureates.
+
+### .env
+
+A very important file, since it contains all the connection information for the Scodoc database.
+It is a CSV file (comma `,` separator) with 4 lines: username, password, server, baseyear.
+
+baseyear is the starting year of the cohort under study.
+
 ### redirect.csv

 Some students never receive a jury decision when they leave the cohort, even though they have not dropped out. These are administrative errors, but a fictitious *jury result* can be recorded for such students. Most of the time they are students who abandon the programme, and giving them the result NAR or DEM is enough. In other cases they may be students still waiting for a decision because the jury has not met yet, when the outcome is already known (for example high marks and an internship that is going well, or on the contrary no internship found by September).
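As an aside on the `.env` format documented above: a hypothetical example (all values invented, the real file must of course stay private) could look like this; each row is `key,value` and `read_secrets()` in get.py matches rows by their first field, so the order of the four lines does not matter.

```
username,jdupont
password,s3cret
server,https://scodoc.example.edu
baseyear,2021
```

A run such as `python3 get.py GEA` would then pull the 2021 cohort from that server (again, a hypothetical invocation based on the README description).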
@@ -67,3 +82,10 @@ Possibilité de choisir les couleurs pour chacune des catégories.
 TRANSPARENT,#FFFFFF.0
 RED,#000000

+### <dept>.json
+
+### <dept>.svg
+
+### best-<dept>.json
+
+This file contains the result of a heuristic search for a better graph. It can be deleted if the graph does not improve over successive runs. It can also be edited by hand.
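To make the `best-<dept>.json` file concrete: in the new code, `write_conf()` simply serialises the `orders` list returned by `genetic_optimize()`, that is five lists (one per column of the Sankey graph) giving the top-to-bottom order of the node labels. A hand-editable sketch, with invented labels, might look like:

```json
[
  ["BAC GENERAL", "BAC TECHNO"],
  ["BUT 1 GEA"],
  ["BUT 2 GEA", "FAIL NAR"],
  ["BUT 3 GEA", "EXT"],
  ["DIPL GEA", "FAIL DEM"]
]
```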
get.py: 786 lines changed
@@ -1,10 +1,12 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
 import requests
 from requests.auth import HTTPBasicAuth
 import csv, os, sys
 import json
 from datetime import datetime
 import re
+import drawsvg
+

 debug = True  # Not used
 techno = False  # global flag
@@ -52,23 +54,40 @@ def read_conf(key):
     return {}


+def write_conf(key, obj):
+    with open(f"{key}.json", "w") as f:
+        return json.dump(obj, f)
+    return {}
+
+
 conf = read_conf(orderkey)


 # Manage default values
 def conf_value(xkey: str):
+    defaults = {
+        "spacing": 14,
+        "thickness": 6,
+        "fontsize_name": 10,
+        "fontsize_count": 14,
+        "width": 1300,
+        "height": 900,
+        "hmargin": 20,
+        "parcours_separator": "/",
+        "year_separator": "",
+        "rank_separator": "",
+        "diplome_separator": "",
+    }
     if xkey in conf:
         return conf[xkey]
-    if xkey == "width":
-        return 1300
-    if xkey == "height":
-        return 900
+    if xkey in defaults:
+        return defaults[xkey]
     if xkey[-9:] == "separator":
         return " "
     if xkey == "nick":
-        return "{diplome}{rank}{department}{modalite}{parcours}"
+        return "{diplome}{rank}{multidepartment}{modalite}{parcours}"
     if xkey == "extnick":
-        return "{rank}{diplomenobut}{modaliteshort}"
+        return "{rank}{multidepartment}{diplomenobut}{modaliteshort}"
     if xkey == "orders":
         return [[], [], [], [], []]
     return {}
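Side note on the hunk above: the per-department `<dept>.json` loaded by `read_conf(orderkey)` still takes precedence over the new `defaults` table, so a setting can be overridden without touching the code. A toy, self-contained sketch of that lookup order (the `conf` dict stands in for a hypothetical `<dept>.json`):

```python
# Toy reimplementation of the lookup order used by conf_value() in get.py.
defaults = {"width": 1300, "height": 900, "spacing": 14}
conf = {"width": 1920}  # hypothetical content of <dept>.json


def conf_value(xkey: str):
    if xkey in conf:  # explicit per-department setting wins
        return conf[xkey]
    if xkey in defaults:  # otherwise fall back to the built-in default
        return defaults[xkey]
    if xkey[-9:] == "separator":  # any *_separator key defaults to a space
        return " "
    return {}


print(conf_value("width"))   # 1920, from the JSON file
print(conf_value("height"))  # 900, from the defaults table
```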
@@ -78,21 +97,38 @@ def conf_value(xkey: str):
 # This file should be kept really safe
 # Currently, only baseyear = 2021 has been tested

-# TODO: a CSV file would be more declarative and easier to manage

-from dotenv import dotenv_values
+server, username, password, baseyear = "", "", "", 0


-def read_secrets():
-    global server, username, password, baseyear
-    config = dotenv_values(".env")
-    server = config["server"]
-    username = config["username"]
-    password = config["password"]
-    baseyear = int(config["baseyear"])
+def read_secrets(filename):
+    keys = ["server", "username", "password", "baseyear"]
+    integers = ["baseyear"]
+    if os.path.exists(filename):
+        with open(filename, newline="") as csvfile:
+            csvreader = csv.reader(csvfile, delimiter=",", quotechar='"')
+            found = 0
+            for row in csvreader:
+                for p, k in enumerate(keys):
+                    if row[0] == k:
+                        if k in integers:
+                            globals()[k] = int(row[1])
+                        else:
+                            globals()[k] = row[1]
+                        found |= 0x1 << p
+    if found != 0xF:
+        print(f'Des paramètres manquent dans "{filename}" ({found}).')
+        for p, k in enumerate(keys):
+            if found & (0x1 << p) == 0:
+                if k in globals():
+                    g = '"' + globals()[k] + '"'
+                else:
+                    g = None
+                print(f"{k} = {g}")
+        sys.exit(1)


-read_secrets()
+read_secrets(".env")


 student = {}
@@ -327,6 +363,12 @@ def analyse_student(semobj, etud, year=None):
         )
     else:
         nick = nick.replace("{department}", "")
+    if len(department) > 0 and len(depts) > 1:
+        nick = nick.replace(
+            "{multidepartment}", conf_value("department_separator") + department
+        )
+    else:
+        nick = nick.replace("{multidepartment}", "")
     if len(diplome) > 0:
         nick = nick.replace("{diplome}", conf_value("diplome_separator") + diplome)
     else:
@@ -344,6 +386,9 @@ def analyse_student(semobj, etud, year=None):
     else:
         nick = nick.replace("{parcours}", "")
     formsem_department[str(semobj["id"])] = department
+    if nick == " BUT 1 GEA EXT":
+        print(department, diplome, rank, modalite, parcours, nick)
+        sys.exit(0)
     return department, diplome, rank, modalite, parcours, nick


@@ -384,7 +429,9 @@ def analyse_depts():
             futuresemsdept[semid] = dept
         if offset != 0:
             continue
-        if offset == 0 and sem["formation"]["type_parcours"] != 700:
+        if sem["formation"]["type_parcours"] != 700:
+            continue
+        if sem["modalite"] == "EXT":
             continue
         # This is a BUT semester, part of the cohort
         # 0,1 : preceding year ; 2-7 : cohort ; 8+ : future
@@ -506,10 +553,9 @@ def allseeingodin():
             student[etudid]["oldsem"] = oldstudents[unid][0]
         if unid in futurestudents:
             student[etudid]["future"] = futurestudents[unid]
+    for unid in duplicates:
         lastsem = -1
         best = []
-    for unid in duplicates:
         for suppidx in duplicates[unid][1:]:
             supp = student[suppidx]
             if str(lastsem) in supp["cursus"]:
@@ -520,10 +566,11 @@ def allseeingodin():
                     best = [suppidx]
                     break
         if len(best) > 1:
-            print(
-                f"// Warning: cannot chose last semester for NIP {unid}: "
-                + ", ".join(best)
-            )
+            print(f"// Error: cannot chose last semester for NIP {unid}: ")
+            print(repr(best))
+            for x in best:
+                print(cache["sem"][str(x)])
+            sys.exit(6)
         bestid = best[0]
         base = student[bestid]
         for suppidx in duplicates[unid]:
@@ -542,6 +589,7 @@ def allseeingodin():
             "old",
             "oldsem",
         ):
+            if skey in supp:
                 for bucket in supp[skey]:
                     if bucket not in base[skey]:
                         base[skey][bucket] = supp[skey][bucket]
@@ -592,19 +640,19 @@ for etudid in student.keys():
 nextbest = {}
 nextnickbest = {}
 for key in next:
-    max = 0
+    imax = 0
     best = None
     for key2 in next[key]:
-        if next[key][key2] > max:
-            max = next[key][key2]
+        if next[key][key2] > imax:
+            imax = next[key][key2]
             best = key2
     nextbest[key] = best
 for key in nextnick:
-    max = 0
+    imax = 0
     best = None
     for key2 in nextnick[key]:
-        if nextnick[key][key2] > max:
-            max = nextnick[key][key2]
+        if nextnick[key][key2] > imax:
+            imax = nextnick[key][key2]
             best = key2
     nextnickbest[key] = best

@@ -664,8 +712,6 @@ for etudid in student.keys():
     if ddd not in depts:
         depts.append(ddd)

-dd = len(depts)
-
 badred = {}
 goodred = {}
 failure = {}
@@ -786,7 +832,10 @@ for etudid in student.keys():
                 firstyear - 1 - i
             )
         else:
-            if etud["cursus"][str(firstyear * 2 - 2)] is not None:
+            if (
+                str(firstyear * 2 - 2) in etud["cursus"]
+                and etud["cursus"][str(firstyear * 2 - 2)] is not None
+            ):
                 startsem = str(firstyear * 2 - 2)
             else:
                 startsem = str(firstyear * 2 - 1)
@@ -802,6 +851,12 @@ for etudid in student.keys():
         )
     else:
         nick = nick.replace("{department}", "")
+    if len(department) > 0 and len(depts) > 1:
+        nick = nick.replace(
+            "{multidepartment}", conf_value("department_separator") + department
+        )
+    else:
+        nick = nick.replace("{multidepartment}", "")
     if len(diplome) > 0:
         nick = nick.replace("{diplome}", conf_value("diplome_separator") + diplome)
     else:
@@ -857,121 +912,94 @@ for etudid in student.keys():
         bags[i][nstart][nend] += 1


-layers = [[], [], [], [], []]
-finallayers = [[], [], [], [], []]
-alllayers = []
-flatbags = []
-for i in range(4):
-    for u in bags[i]:
-        if u not in alllayers:
-            alllayers.append(u)
-            layers[i].append(u)
-        for v in bags[i][u]:
-            if v not in alllayers:
-                alllayers.append(v)
-                layers[i + 1].append(v)
-            flatbags.append([u, v, bags[i][u][v]])
-allowed = []
-nextallowed = [[], [], [], [], []]
-weights = {}
+# layers = [[], [], [], [], []]
+# alllayers = []
+# flatbags = []
+# for i in range(4):
+#     for u in bags[i]:
+#         if u not in alllayers:
+#             alllayers.append(u)
+#             layers[i].append(u)
+#         for v in bags[i][u]:
+#             if v not in alllayers:
+#                 alllayers.append(v)
+#                 layers[i + 1].append(v)
+#             flatbags.append([u, v, bags[i][u][v]])
+# allowed = []
+# nextallowed = [[], [], [], [], []]
+# weights = {}


-orders = conf_value("orders")
+# orders = conf_value("orders")

-x = set(alllayers)
-y = set()
-for i in orders:
-    y = y.union(set(i))
+# x = set(alllayers)
+# y = set()
+# for i in orders:
+#     y = y.union(set(i))

-for i in range(5):
-    if len(orders[i]) > 0:
-        allowed.append(orders[i][0])
-        for j in orders[i]:
-            if j in alllayers:
-                nextallowed[i].append(j)
-        for j, k in enumerate(orders[i]):
-            weights[k] = j + 1
-        for u in layers[i]:
-            if u not in allowed and u not in nextallowed[i]:
-                allowed.append(u)
-    else:
-        for i in range(5):
-            allowed.extend(layers[i])
+# for i in range(5):
+#     if len(orders[i]) > 0:
+#         allowed.append(orders[i][0])
+#         for j in orders[i]:
+#             if j in alllayers:
+#                 nextallowed[i].append(j)
+#         for j, k in enumerate(orders[i]):
+#             weights[k] = j + 1
+#         for u in layers[i]:
+#             if u not in allowed and u not in nextallowed[i]:
+#                 allowed.append(u)
+#     else:
+#         for i in range(5):
+#             allowed.extend(layers[i])


-for bag in flatbags:
-    w = 0
-    if bag[0] in weights:
-        w += weights[bag[0]]
-    if bag[1] in weights:
-        w += weights[bag[1]]
-    bag.append(w)
-flatbags = sorted(flatbags, key=lambda x: x[-1])
+# for bag in flatbags:
+#     w = 0
+#     if bag[0] in weights:
+#         w += weights[bag[0]]
+#     if bag[1] in weights:
+#         w += weights[bag[1]]
+#     bag.append(w)
+# flatbags = sorted(flatbags, key=lambda x: x[-1])


-orderedflatbags = []
+# orderedflatbags = []
+# finallayers = [[], [], [], [], []]

-while len(flatbags) > 0:
-    gotone = False
-    for x in flatbags:
-        if x[0] in allowed and x[1] in allowed:
-            # print(f"{x} est pris")
-            gotone = True
-            orderedflatbags.append(x)
-            flatbags.remove(x)
-            # print(f"Choosing {x}")
-            for i in range(5):
-                if x[0] in layers[i] and x[0] not in finallayers[i]:
-                    finallayers[i].append(x[0])
-                if i < 4 and x[1] in layers[i + 1] and x[1] not in finallayers[i + 1]:
-                    finallayers[i + 1].append(x[1])
-                if x[0] in nextallowed[i]:
-                    # print(f"[{i}] Removing {x[0]} from {nextallowed[i]}")
-                    nextallowed[i].remove(x[0])
-                if x[1] in nextallowed[i]:
-                    # print(f"[{i}] Removing {x[1]} from {nextallowed[i]}")
-                    nextallowed[i].remove(x[1])
-                # print(f"[{i}] {nextallowed[i]}")
-                if len(nextallowed[i]) > 0 and nextallowed[i][0] not in allowed:
-                    # print(f"[{i}] Allowing now {nextallowed[i][0]}")
-                    allowed.append(nextallowed[i][0])
-            break
-    if not gotone:
-        print("BUG")
-        print(flatbags)
-        print("---", allowed)
-        print(nextallowed)
-        sys.exit(3)
+# while len(flatbags) > 0:
+#     gotone = False
+#     for x in flatbags:
+#         if x[0] in allowed and x[1] in allowed:
+#             # print(f"{x} est pris")
+#             gotone = True
+#             orderedflatbags.append(x)
+#             flatbags.remove(x)
+#             # print(f"Choosing {x}")
+#             for i in range(5):
+#                 if x[0] in layers[i] and x[0] not in finallayers[i]:
+#                     finallayers[i].append(x[0])
+#                 if i < 4 and x[1] in layers[i + 1] and x[1] not in finallayers[i + 1]:
+#                     finallayers[i + 1].append(x[1])
+#                 if x[0] in nextallowed[i]:
+#                     # print(f"[{i}] Removing {x[0]} from {nextallowed[i]}")
+#                     nextallowed[i].remove(x[0])
+#                 if x[1] in nextallowed[i]:
+#                     # print(f"[{i}] Removing {x[1]} from {nextallowed[i]}")
+#                     nextallowed[i].remove(x[1])
+#                 # print(f"[{i}] {nextallowed[i]}")
+#                 if len(nextallowed[i]) > 0 and nextallowed[i][0] not in allowed:
+#                     # print(f"[{i}] Allowing now {nextallowed[i][0]}")
+#                     allowed.append(nextallowed[i][0])
+#             break
+#     if not gotone:
+#         print("BUG")
+#         print(flatbags)
+#         print("---", allowed)
+#         print(nextallowed)
+#         sys.exit(3)


-def printout():
-    with open(f"sankeymatic_{orderkey}.txt", "w") as fout:
-
-        def output(*a, **b):
-            b["file"] = fout
-            print(*a, **b)
-
-        date_actuelle = datetime.now()
-        date_formatee = date_actuelle.strftime("%m/%d/%Y %H:%M:%S")
-
-        output(
-            f"// SankeyMATIC diagram inputs - Saved: {date_formatee}\n// https://sankeymatic.com/build/\n\n// === Nodes and Flows ===\n\n"
-        )
-
-        output("// THEME INFO")
-        for c, cc in colors.items():
-            output(f"// !{c}:{cc}")
-        output()
-
-        allnodes = []
-        for y in orderedflatbags:
-            output(f"{y[0]} [{y[2]}] {y[1]}")
-            allnodes.append(y[0])
-            allnodes.append(y[1])
-        allnodes = list(set(allnodes))
-
-        nodes = {}
-        for x in allnodes:
+def node_color(x):
     color = colors["NORMAL"]
     if x[0:4] == "FAIL":
         color = f"{colors['FAIL']} <<"
@@ -991,57 +1019,449 @@ def printout():
         color = f"{colors['OLD']} >>"
     if x[-1] == "*":
         color = f"{colors['TRANSPARENT']} >>"
-            if len(color):
-                nodes[x] = color
-
-        for u in sorted(nodes.keys()):
-            output(f":{u} {nodes[u]}")
-
-        height = conf_value("height")
-        width = conf_value("width")
-        output("\n\n// === Settings ===\n")
-        output(f"size w {width}")
-        output(f" h {height}")
-        with open("trailer.txt", "r") as fichier:
-            contenu = fichier.read()
-        output(contenu)
-        for ddd in depts:
-            p1 = round(100 * diploma[ddd] / entries[ddd])
-            p2 = round(100 * (diploma[ddd] + reor2[ddd]) / entries[ddd])
-            p3 = round(100 * (failure[ddd] / entries[ddd]))
-            p4 = round(100 * (failure[ddd] + badred[ddd] + reor1[ddd]) / entries[ddd])
-
-            output(f"// Département {ddd}")
-            output(f"// {entries[ddd]} Entrées")
-            output(f"// {diploma[ddd]} Diplômes")
-            output(f"// {reor2[ddd]} DUT")
-            output(f"// {p1}-{p2}% de réussite")
-            output(f"// {goodred[ddd]} Redoublements")
-            output(f"// {reor1[ddd]} départs de la formation")
-            output(f"// {badred[ddd]} redoublements autorisés non actés")
-            output(f"// {failure[ddd]} échecs")
-            output(f"// {p3}-{p4}% d'échecs")
-            output(f"// {unknown[ddd]} inconnus")
-        for x in strangecases:
-            output(f"// {x}")
-
-        output(f'// orders["{orderkey}"] = {finallayers}')
-        output(f"// bacs: {bacs}")
-        # output("\nhttps://observablehq.com/@mbostock/flow-o-matic\n\n")
-
-        # for y in range(4):
-        #     for u in bags[y]:
-        #         for v in bags[y][u]:
-        #             color = ""
-        #             if v[0:4] == "FAIL":
-        #                 color = ",red"
-        #             elif v[0:4] == "+DUT":
-        #                 color = ",green"
-        #             elif v[0:4] == "DIPL":
-        #                 color = ",cyan"
-        #             elif u[0:3] == "EXT":
-        #                 color = ",yellow"
-        #             output(f"{u},{v},{bags[y][u][v]}{color}")
-
-
-printout()
+    return color
+
+
+# def printout():
+#     with open(f"sankeymatic_{orderkey}.txt", "w") as fout:
+#
+#         def output(*a, **b):
+#             b["file"] = fout
+#             print(*a, **b)
+#
+#         date_actuelle = datetime.now()
+#         date_formatee = date_actuelle.strftime("%m/%d/%Y %H:%M:%S")
+#
+#         output(
+#             f"// SankeyMATIC diagram inputs - Saved: {date_formatee}\n// https://sankeymatic.com/build/\n\n// === Nodes and Flows ===\n\n"
+#         )
+#
+#         output("// THEME INFO")
+#         for c, cc in colors.items():
+#             output(f"// !{c}:{cc}")
+#         output()
+#
+#         allnodes = []
+#         for y in orderedflatbags:
+#             output(f"{y[0]} [{y[2]}] {y[1]}")
+#             allnodes.append(y[0])
+#             allnodes.append(y[1])
+#         allnodes = list(set(allnodes))
+#
+#         nodes = {}
+#         for x in allnodes:
+#             color = node_color(x)
+#             if len(color):
+#                 nodes[x] = color
+#
+#         for u in sorted(nodes.keys()):
+#             output(f":{u} {nodes[u]}")
+#
+#         height = conf_value("height")
+#         width = conf_value("width")
+#         output("\n\n// === Settings ===\n")
+#         output(f"size w {width}")
+#         output(f" h {height}")
+#         with open("trailer.txt", "r") as fichier:
+#             contenu = fichier.read()
+#         output(contenu)
+#         for ddd in depts:
+#             if entries[ddd] == 0:
+#                 continue
+#             p1 = round(100 * diploma[ddd] / entries[ddd])
+#             p2 = round(100 * (diploma[ddd] + reor2[ddd]) / entries[ddd])
+#             p3 = round(100 * (failure[ddd] / entries[ddd]))
+#             p4 = round(100 * (failure[ddd] + badred[ddd] + reor1[ddd]) / entries[ddd])
+#
+#             output(f"// Département {ddd}")
+#             output(f"// {entries[ddd]} Entrées")
+#             output(f"// {diploma[ddd]} Diplômes")
+#             output(f"// {reor2[ddd]} DUT")
+#             output(f"// {p1}-{p2}% de réussite")
+#             output(f"// {goodred[ddd]} Redoublements")
+#             output(f"// {reor1[ddd]} départs de la formation")
+#             output(f"// {badred[ddd]} redoublements autorisés non actés")
+#             output(f"// {failure[ddd]} échecs")
+#             output(f"// {p3}-{p4}% d'échecs")
+#             output(f"// {unknown[ddd]} inconnus")
+#         for x in strangecases:
+#             output(f"// {x}")
+#
+#         output(f'// orders["{orderkey}"] = {finallayers}')
+#         output(f"// bacs: {bacs}")
+
+
+# printout()
+
+
+def textwidth(text, font="Arial", fontsize=14):
+    try:
+        import cairo
+    except:
+        return len(text) * fontsize
+    surface = cairo.SVGSurface("undefined.svg", 1280, 200)
+    cr = cairo.Context(surface)
+    cr.select_font_face(font, cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
+    cr.set_font_size(fontsize)
+    xbearing, ybearing, width, height, xadvance, yadvance = cr.text_extents(text)
+    return width
+
+
+def crossweight(node_position, node_layer, edges):
+    w = 0
+    for e in edges:
+        for ee in edges:
+            if node_layer[e[0]] != node_layer[ee[0]]:
+                continue
+            if node_layer[e[1]] != node_layer[ee[1]]:
+                continue
+            if (node_position[e[0]] - node_position[ee[0]]) * (
+                node_position[e[1]] - node_position[ee[1]]
+            ) < 0:
+                w += e[2] * ee[2]
+    return w
+
+
+import random
+
+
+def genetic_optimize(node_position, node_layer, edges):
+    oldcandidates = []
+    l_indices = list(range(5))
+    lays = []
+    randomness_l = []
+    for index in range(5):
+        lays.append([x for x in node_layer.keys() if node_layer[x] == index])
+        if len(lays[index]) > 1:
+            for i in lays[index]:
+                randomness_l.append(index)
+    w = crossweight(node_position, node_layer, edges)
+    for i in range(20):
+        oldcandidates.append([node_position.copy(), w])
+    w = crossweight(node_position, node_layer, edges)
+    for i in range(10):
+        n = node_position.copy()
+        l_idx = random.choice(randomness_l)
+        q = lays[l_idx].copy()
+        k = 0
+        while len(q) > 0:
+            nn = random.choice(q)
+            q.remove(nn)
+            n[nn] = k
+            k += 1
+        oldcandidates.append([n, w])
+    candidates = oldcandidates
+    for i in range(300):
+        oldcandidates = candidates
+        oldcandidates.sort(key=lambda x: x[1])
+        candidates = oldcandidates[:30]
+        while len(candidates) < 60:
+            # mutate some random candidate
+            candidate = random.choice(candidates)[0]
+            new_position = candidate.copy()  # Copy the position in order to mutate it
+            l_idx = random.choice(randomness_l)
+            swapa = random.choice(lays[l_idx])
+            swapb = random.choice(lays[l_idx])
+            while swapa == swapb:
+                swapb = random.choice(lays[l_idx])
+            tmp = new_position[swapa]
+            new_position[swapa] = new_position[swapb]
+            new_position[swapb] = tmp
+            w = crossweight(new_position, node_layer, edges)
+            candidates.append([new_position, w])
+        while len(candidates) < 90:
+            # mutate some random candidate
+            candidate = random.choice(candidates)[0]
+            new_position = candidate.copy()  # Copy the position in order to mutate it
+            l_idx = random.choice(randomness_l)
+            startidx = random.randrange(len(lays[l_idx]) - 1)
+            stopidx = random.randrange(startidx + 1, len(lays[l_idx]))
+            for n in lays[l_idx]:
+                if new_position[n] >= startidx and new_position[n] < stopidx:
+                    new_position[n] += 1
+                elif new_position[n] == stopidx:
+                    new_position[n] = startidx
+            w = crossweight(new_position, node_layer, edges)
+            candidates.append([new_position, w])
+        while len(candidates) < 100:
+            # mutate some random candidate
+            candidate = random.choice(candidates)[0]
+            candidate2 = random.choice(candidates)[0]
+            new_position = candidate.copy()  # Copy the position in order to mutate it
+            l_idx = random.choice(randomness_l)
+            for n in lays[l_idx]:
+                new_position[n] = candidate2[n]
+            w = crossweight(new_position, node_layer, edges)
+            candidates.append([new_position, w])
+    candidates.sort(key=lambda x: x[1])
+    orders = []
+    best = candidates[0][0]
+    for i in range(5):
+        b = lays[i].copy()
+        b.sort(key=lambda x: best[x])
+        orders.append(b)
+    print(orders)
+    print(candidates[0][1])
+    return orders
+
+
+def printsvg():
+    padding = 4
+    unit_ratio = 96 / 72
+    thickness = conf_value("thickness")
+    fontsize_name = conf_value("fontsize_name")
+    fontsize_count = conf_value("fontsize_count")
+    spacing = conf_value("spacing")
+    height = conf_value("height")
+    hmargin = conf_value("hmargin")
+    width = conf_value("width")
+    node_structure = {}
+    layers = [[], [], [], [], []]
+    edges = []
+    for layer, layernodes in enumerate(bags):
+        for startnode in layernodes:
+            if startnode[-1] == "*":
+                continue
+            for endnode in layernodes[startnode]:
+                if endnode[-1] == "*":
+                    continue
+                weight = layernodes[startnode][endnode]
+                if endnode not in node_structure:
+                    node_structure[endnode] = {
+                        "prev": [[startnode, weight]],
+                        "next": [],
+                        "layer": layer + 1,
+                    }
+                    layers[layer + 1].append(endnode)
+                else:
+                    node_structure[endnode]["prev"].append([startnode, weight])
+                if startnode not in node_structure:
+                    node_structure[startnode] = {
+                        "prev": [],
+                        "next": [[endnode, weight]],
+                        "layer": layer,
+                    }
+                    layers[layer].append(startnode)
+                else:
+                    node_structure[startnode]["next"].append([endnode, weight])
+                edges.append([startnode, endnode, weight])
+    node_position = {}
+    node_layer = {}
+    layer_structure = [
+        {"olayer": []},
+        {"olayer": []},
+        {"olayer": []},
+        {"olayer": []},
+        {"olayer": []},
+    ]
+
+    lastorders = read_conf("best-" + orderkey)
+    if lastorders != {}:
+        for i in range(5):
+            ls = layer_structure[i]
+            ord = lastorders[i]
+            for node in lastorders[i]:
+                if node in layers[i]:
+                    ls["olayer"].append(node)
+            for node in layers[i]:
+                if node not in ls["olayer"]:
+                    ls["olayer"].append(node)
+        for layer, layernodes in enumerate(layer_structure):
+            for j, n in enumerate(layernodes["olayer"]):
+                node_position[n] = j
+                node_layer[n] = layer
+        print(crossweight(node_position, node_layer, edges))
+    else:
+        for layer, layernodes in enumerate(layers):
+            for j, n in enumerate(layernodes):
+                node_position[n] = j
+                node_layer[n] = layer
+        orders = genetic_optimize(node_position, node_layer, edges)
+        write_conf("best-" + orderkey, orders)
+        layer_structure = [
+            {"olayer": []},
+            {"olayer": []},
+            {"olayer": []},
+            {"olayer": []},
+            {"olayer": []},
+        ]
+        for i in range(5):
+            ls = layer_structure[i]
+            ord = orders[i]
+            for node in orders[i]:
+                if node in layers[i]:
+                    ls["olayer"].append(node)
+            for node in layers[i]:
+                if node not in ls["olayer"]:
+                    ls["olayer"].append(node)
+        for layer, layernodes in enumerate(layer_structure):
+            for j, n in enumerate(layernodes["olayer"]):
+                node_position[n] = j
+                node_layer[n] = layer
+        print(crossweight(node_position, node_layer, edges))
+    density = []
+    for i in range(5):
+        ls = layer_structure[i]
+        ls["num"] = len(ls["olayer"])
+        ls["inout"] = 0
+        for j in ls["olayer"]:
+            lhi = 0
+            lho = 0
+            k = node_structure[j]
+            for prev_node in k["prev"]:
+                lhi += prev_node[1]
+            for next_node in k["next"]:
+                lho += next_node[1]
+            k["size"] = max(lhi, lho)
+            k["in"] = lhi
+            k["out"] = lho
+            if lhi != lho and lhi * lho != 0:
+                print(f"BUG1: {j} {k} {lhi} {lho}")
+            ls["inout"] += k["size"]
+        ls["density"] = ls["inout"] / (
+            spacing + height - spacing * ls["num"] - 2 * hmargin
+        )
+        density.append(ls["density"])
+    realdensity = max(density)
+    columns = []
+    l = 0
+    for i in range(5):
+        l += width / 6
+        columns.append(l)
+    for i in range(5):
+        ls = layer_structure[i]
+        supp_spacing = (
+            spacing
+            + height
+            - 2 * hmargin
+            - spacing * ls["num"]
+            - ls["inout"] / realdensity
+        ) / (ls["num"] + 1)
+        cs = hmargin - spacing
+        for j in ls["olayer"]:
+            ns = node_structure[j]
+            ns["top"] = supp_spacing + spacing + cs
+            h = ns["size"] / realdensity
+            cs = ns["bottom"] = ns["top"] + h
+    d = drawsvg.Drawing(width, height, origin=(0, 0), id_prefix=orderkey)
+    g1 = drawsvg.Group()
+    g2 = drawsvg.Group()
+    g3 = drawsvg.Group()
+    g4 = drawsvg.Group()
+    font_offset = max(fontsize_count, fontsize_name)
+    for n in node_structure:
+        ns = node_structure[n]
+        col = node_color(n).split(" ")[0].split(".")[0]
+        ns["color"] = col
+        xpos = width / 6 * (ns["layer"] + 1)
+        r = drawsvg.Rectangle(
+            xpos - thickness,
+            ns["top"],
+            2 * thickness,
+            ns["bottom"] - ns["top"],
+            fill=col,
+            stroke_width=0.2,
+            stroke="black",
+        )
+        g1.append(r)
+        nw = textwidth(n, "Arial", fontsize_name) * unit_ratio
+        cw = textwidth(str(ns["size"]), "Arial", fontsize_count) * unit_ratio
+        gw = nw + cw + padding
+        ggw = gw + 2 * padding
+        nxpos = xpos - gw / 2 + cw + padding + nw / 2
+        ypos = (ns["top"] + ns["bottom"]) / 2
+        cxpos = cw / 2 - gw / 2 + xpos
+        rxpos = xpos
+        if ns["in"] == 0:
+            nxpos -= gw / 2 + padding + thickness
+            cxpos -= gw / 2 + padding + thickness
+            rxpos -= gw / 2 + padding + thickness
+        if ns["out"] == 0:
+            nxpos += gw / 2 + padding + thickness
+            cxpos += gw / 2 + padding + thickness
+            rxpos += gw / 2 + padding + thickness
+        t = drawsvg.Text(
+            n,
+            str(fontsize_name) + "pt",
+            nxpos,
+            ypos + fontsize_name / 2,
+            fill="black",
+            text_anchor="middle",
+            font_family="Arial",
+        )
+        tt = drawsvg.Text(
+            str(ns["size"]),
+            str(fontsize_count) + "pt",
+            cxpos,
+            ypos + fontsize_count / 2,
+            fill="black",
+            text_anchor="middle",
+            font_family="Arial",
+        )
+        g3.append(t)
+        g3.append(tt)
+        g4.append(
+            drawsvg.Rectangle(
+                rxpos - gw / 2 - padding,
+                ypos - font_offset / 2 - padding,
+                ggw,
+                font_offset + 2 * padding,
+                stroke="black",
+                stroke_width=0,
+                fill="white",
+                fill_opacity=".5",
+                rx=padding,
+                ry=padding,
+            )
+        )
+    for n in node_structure:
+        ns = node_structure[n]
+        ns["prev"].sort(key=lambda x: node_structure[x[0]]["top"])
+        ns["next"].sort(key=lambda x: node_structure[x[0]]["top"])
+        start = ns["top"]
+        for link in ns["prev"]:
+            ysize = link[-1]
+            link.append(start)
+            start += ysize / realdensity
+            link.append(start)
+    for n in node_structure:
+        ns = node_structure[n]
+        start = ns["top"]
+        for link in ns["next"]:
+            ysize = link[-1]
+            link.append(start)
+            start += ysize / realdensity
+            link.append(start)
+            targets = node_structure[link[0]]
+            target = None
+            for t in targets["prev"]:
+                if t[0] == n:
+                    target = t
+            if target == None:
+                print(f"BUG: {n},{ns},{t}")
+                sys.exit(5)
+
+            posxa = columns[ns["layer"]] + thickness
+            posxb = columns[targets["layer"]] - thickness
+            posxc = (3 * posxa + posxb) / 4
+            posxd = (posxa + 3 * posxb) / 4
+            grad = drawsvg.LinearGradient(posxa, 0, posxb, 0)
+            grad.add_stop(0, ns["color"], opacity=0.5)
+            grad.add_stop(1, targets["color"], opacity=0.5)
+            p = drawsvg.Path(fill=grad, stroke_width=0)
+            p.M(posxa, link[-2])
+            p.C(posxc, link[-2], posxd, target[-2], posxb, target[-2])
+            p.L(posxb, target[-1])
+            p.C(posxd, target[-1], posxc, link[-1], posxa, link[-1])
+            p.Z()
+            g2.append(p)
+    d.append(g2)
+    d.append(g1)
+    d.append(g4)
+    d.append(g3)
+    d.save_svg(orderkey + ".svg")
+
+
+printsvg()
+for x in strangecases:
+    print(f"// {x}")
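A quick illustration of the objective the new heuristic works on: `crossweight()` looks at every pair of links whose endpoints lie in the same pair of columns and, whenever their vertical orders are inverted between the two columns (i.e. the links cross), adds the product of their weights. `genetic_optimize()` then keeps the 30 best node orderings each generation and mutates them by swaps, rotations and layer exchanges to drive that score down. The self-contained sketch below (node names and weights invented, the function copied from the diff above) shows the score dropping to zero when the crossing is removed:

```python
# Toy example for the crossing objective; crossweight() is the function from get.py.
def crossweight(node_position, node_layer, edges):
    w = 0
    for e in edges:
        for ee in edges:
            if node_layer[e[0]] != node_layer[ee[0]]:
                continue
            if node_layer[e[1]] != node_layer[ee[1]]:
                continue
            if (node_position[e[0]] - node_position[ee[0]]) * (
                node_position[e[1]] - node_position[ee[1]]
            ) < 0:
                w += e[2] * ee[2]
    return w


# Two columns: A and B on the left, C and D on the right.
# A flows to D (weight 3) and B flows to C (weight 2).
node_layer = {"A": 0, "B": 0, "C": 1, "D": 1}
edges = [["A", "D", 3], ["B", "C", 2]]

crossed = {"A": 0, "B": 1, "C": 0, "D": 1}    # A above B, C above D: the links cross
uncrossed = {"A": 0, "B": 1, "C": 1, "D": 0}  # swapping C and D removes the crossing

print(crossweight(crossed, node_layer, edges))    # 12 (3*2, counted once per direction)
print(crossweight(uncrossed, node_layer, edges))  # 0
```

Lower is better; `printsvg()` prints this same score for the ordering it ends up drawing, whether it comes from `best-<dept>.json` or from a fresh `genetic_optimize()` run.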