* SCODOC_SERVER instead of SCODOC_URL to match documentation
* Ignore EXT semesters for old semesters
* General cleanup
This commit is contained in:
parent 41bf7fd069
commit 6ccb348361

get.py (225 changed lines)
@@ -5,7 +5,7 @@ import argparse
 import csv
 import json
 import os
-import pdb # used for debugging
+import pdb  # used for debugging
 import random
 import sys

@@ -20,17 +20,23 @@ except ModuleNotFoundError:
     print("\nError: dotenv not installed !", file=sys.stderr)
     print("You may install it using:\npip install python-dotenv\n", file=sys.stderr)

-def die(msg:str, status=3):
+
+def die(msg: str, status=3):
     print(msg, file=sys.stderr)
     sys.exit(status)

+
 load_dotenv(".env")

-SCODOC_SERVER = os.environ.get("SCODOC_URL") or "http://localhost:5000"
-SCODOC_USER = os.environ.get("SCODOC_USER") or die("SCODOC_USER must be set in .env or the environment")
-SCODOC_PASSWORD = os.environ.get("SCODOC_PASSWORD") or die("SCODOC_PASSWORD must be set in .env or the environment")
+SCODOC_SERVER = os.environ.get("SCODOC_SERVER") or "http://localhost:5000"
+SCODOC_USER = os.environ.get("SCODOC_USER") or die(
+    "SCODOC_USER must be set in .env or the environment"
+)
+SCODOC_PASSWORD = os.environ.get("SCODOC_PASSWORD") or die(
+    "SCODOC_PASSWORD must be set in .env or the environment"
+)

-API_URL=f"{SCODOC_SERVER}/ScoDoc/api"
+API_URL = f"{SCODOC_SERVER}/ScoDoc/api"

 # TODO : refactor globals
 debug = True  # Not used
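For context, the rename means the server address is now read from SCODOC_SERVER (with a local fallback), while the credentials are still mandatory via die(). A minimal sketch of that lookup; the values below are placeholders, not taken from the repo:

# Sketch: how the renamed variable resolves at startup (placeholder values).
# An example .env next to the script might contain:
#   SCODOC_SERVER=https://scodoc.example.org
#   SCODOC_USER=api_reader
#   SCODOC_PASSWORD=changeme
import os

from dotenv import load_dotenv  # pip install python-dotenv

load_dotenv(".env")  # no-op if the file is absent

server = os.environ.get("SCODOC_SERVER") or "http://localhost:5000"
print(f"API base: {server}/ScoDoc/api")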
@@ -40,30 +46,40 @@ BLOCKING = True # Die if csv is incorrect
 depts = []
 orderkey = ""

+
 def blockordie(status=2):
     if BLOCKING:
         sys.exit(status)

+
 class Options:
     pass

+
 #
 def cli_check():
     """Read args from the command line
     then read config from {orderkey}.json
     """
     global orderkey  # TODO: globales à supprimer
     global depts

-    parser = argparse.ArgumentParser(description='Process some departments.')
-    parser.add_argument('--techno', action='store_true', help='Enable TECHNO mode')
-    parser.add_argument('depts', nargs='+', help='List of departments')
-    parser.add_argument('--base', '-b', type=int, choices=range(2000, 2667), default=2021,
-                        help='base year for the cohort (integer between 2000 and 2666)')
+    parser = argparse.ArgumentParser(description="Process some departments.")
+    parser.add_argument("--techno", action="store_true", help="Enable TECHNO mode")
+    parser.add_argument("depts", nargs="+", help="List of departments")
+    parser.add_argument(
+        "--base",
+        "-b",
+        type=int,
+        choices=range(2000, 2667),
+        default=2021,
+        help="base year for the cohort (integer between 2000 and 2666)",
+    )

     args = parser.parse_args()

     Options.base_year = args.base
     Options.techno = args.techno
     depts = args.depts
     orderkey = "_".join(depts)

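The parser is only reflowed here, not changed. A self-contained sketch of the resulting CLI, with made-up department names:

# Sketch of the CLI accepted by get.py after this change (department names are invented).
import argparse

parser = argparse.ArgumentParser(description="Process some departments.")
parser.add_argument("--techno", action="store_true", help="Enable TECHNO mode")
parser.add_argument("depts", nargs="+", help="List of departments")
parser.add_argument(
    "--base",
    "-b",
    type=int,
    choices=range(2000, 2667),
    default=2021,
    help="base year for the cohort (integer between 2000 and 2666)",
)

# Equivalent to: python get.py --techno --base 2022 RT GEII
args = parser.parse_args(["--techno", "--base", "2022", "RT", "GEII"])
print(args.techno, args.base, args.depts)  # True 2022 ['RT', 'GEII']
print("_".join(args.depts))  # "RT_GEII", which becomes orderkey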
@@ -71,11 +87,13 @@ def cli_check():
         parser.print_help()
         sys.exit(0)

-def api_url(dept:str|None=None):
-    """L'URL de l'API départementale.
-    """
+
+def api_url(dept: str | None = None):
+    """L'URL de l'API départementale."""
     # peut être modifié ici pour n'utiliser que l'API globale
-    return f"{SCODOC_SERVER}/ScoDoc/{dept}/api" if dept else f"{SCODOC_SERVER}/ScoDoc/api"
+    return (
+        f"{SCODOC_SERVER}/ScoDoc/{dept}/api" if dept else f"{SCODOC_SERVER}/ScoDoc/api"
+    )


 cli_check()
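api_url() still returns the same two URL shapes, just reflowed. A small usage illustration with a placeholder server name (the `str | None` annotation requires Python 3.10+, as does the script itself):

# Sketch: the two endpoint forms produced by api_url() (server name is a placeholder).
SCODOC_SERVER = "https://scodoc.example.org"


def api_url(dept: str | None = None):
    """L'URL de l'API départementale."""
    return (
        f"{SCODOC_SERVER}/ScoDoc/{dept}/api" if dept else f"{SCODOC_SERVER}/ScoDoc/api"
    )


print(api_url("RT"))  # https://scodoc.example.org/ScoDoc/RT/api
print(api_url())      # https://scodoc.example.org/ScoDoc/api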
@@ -96,6 +114,7 @@ def write_conf(key, obj):

 conf = read_conf(orderkey)

+
 def conf_value(xkey: str):
     """Manage default values"""
     defaults = {
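Only the first lines of conf_value() appear in this hunk; they suggest a defaults dict consulted when {orderkey}.json does not define a key. A hypothetical sketch of that pattern (the body and the default values here are guesses, not the repo's code):

# Hypothetical sketch only: per-orderkey config consulted before hard-coded defaults.
conf = {"height": 900}  # stand-in for what read_conf(orderkey) might return


def conf_value(xkey: str):
    """Manage default values"""
    defaults = {"height": 600, "width": 1200}
    return conf.get(xkey, defaults.get(xkey))


print(conf_value("height"))  # 900, taken from the loaded config
print(conf_value("width"))   # 1200, falling back to the default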
@@ -129,6 +148,7 @@ def conf_value(xkey: str):
 student = {}
 CACHE_FILE = "cache.json"

+
 def load_cache(cache_file):
     if os.path.exists(cache_file):
         with open(cache_file, "r") as f:
@@ -142,6 +162,7 @@ def save_cache(cache, file=None):
     with open(CACHE_FILE, "w") as f:
         json.dump(cache, f)

+
 cache = load_cache(CACHE_FILE)


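load_cache()/save_cache() persist API responses in cache.json between runs. A minimal standalone sketch of the same JSON round-trip; the empty-dict fallback is an assumption about the branch not shown in this hunk:

# Sketch of the JSON file cache used by get.py (safe to run in a scratch directory).
import json
import os

CACHE_FILE = "cache.json"


def load_cache(cache_file):
    if os.path.exists(cache_file):
        with open(cache_file, "r") as f:
            return json.load(f)
    return {}  # assumption: start with an empty cache when the file is missing


def save_cache(cache, file=None):
    with open(file or CACHE_FILE, "w") as f:
        json.dump(cache, f)


cache = load_cache(CACHE_FILE)
cache.setdefault("sem", {})
save_cache(cache)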
@@ -210,7 +231,9 @@ def get_json(url: str, params=None):
     global token
     if token == None:
         url_token = f"{API_URL}/tokens"
-        response = requests.post(url_token, auth=HTTPBasicAuth(SCODOC_USER, SCODOC_PASSWORD))
+        response = requests.post(
+            url_token, auth=HTTPBasicAuth(SCODOC_USER, SCODOC_PASSWORD)
+        )
         if response.status_code == 200:
             token = response.json().get("token")
         else:
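The wrapped call is the token bootstrap: a POST to /tokens with HTTP Basic credentials, after which get_json() reuses the token. A sketch of that exchange with a placeholder host and credentials; the Bearer header and the /departements endpoint are assumptions about the ScoDoc API, not shown in this diff:

# Sketch: obtaining an API token, then authenticating follow-up requests with it.
import requests
from requests.auth import HTTPBasicAuth

API_URL = "https://scodoc.example.org/ScoDoc/api"  # placeholder server

response = requests.post(
    f"{API_URL}/tokens", auth=HTTPBasicAuth("api_reader", "changeme")
)
response.raise_for_status()
token = response.json().get("token")

# Assumed usage pattern for later calls (endpoint chosen for illustration only):
r = requests.get(
    f"{API_URL}/departements", headers={"Authorization": f"Bearer {token}"}
)
print(r.status_code)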
@@ -282,7 +305,7 @@ def get_etuds_from_formsem(dept, semid):
     return result


-def get_jury_from_formsem(dept:str, semid):
+def get_jury_from_formsem(dept: str, semid):
     if type(semid) == type(0):
         semid = str(semid)
     if "semjury" in cache and semid in cache["semjury"]:
@@ -500,6 +523,9 @@ def allseeingodin():
     for semid in oldsems:
         sem = cache["sem"][semid]
         semlevel = sem["semestre_id"]
+        # For a while, some people registered former semesters (in other places) with "EXT" modalite for a fake semester
+        if sem["modalite"] == "EXT":  # Ignore EXT modalite
+            continue
         semlevel = abs(semlevel)
         dept = oldsemsdept[semid]
         etuds = get_etuds_from_formsem(dept, semid)
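This guard implements the second bullet of the commit message: old semesters registered with the "EXT" modalite are fake external entries and are now skipped when walking past semesters. A tiny standalone illustration of the filter, with invented cache data:

# Sketch of the EXT filter over cached semesters (data below is invented for illustration).
cache = {
    "sem": {
        "101": {"semestre_id": 2, "modalite": "FI"},
        "102": {"semestre_id": -1, "modalite": "EXT"},  # fake external semester
    }
}

for semid, sem in cache["sem"].items():
    if sem["modalite"] == "EXT":  # Ignore EXT modalite
        continue
    semlevel = abs(sem["semestre_id"])
    print(semid, semlevel)  # only semester 101 is processed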
@@ -902,93 +928,6 @@ for etudid in student.keys():
         bags[i][nstart][nend] += 1


-# layers = [[], [], [], [], []]
-# alllayers = []
-# flatbags = []
-# for i in range(4):
-# for u in bags[i]:
-# if u not in alllayers:
-# alllayers.append(u)
-# layers[i].append(u)
-# for v in bags[i][u]:
-# if v not in alllayers:
-# alllayers.append(v)
-# layers[i + 1].append(v)
-# flatbags.append([u, v, bags[i][u][v]])
-# allowed = []
-# nextallowed = [[], [], [], [], []]
-# weights = {}
-
-# orders = conf_value("orders")
-
-# x = set(alllayers)
-# y = set()
-# for i in orders:
-# y = y.union(set(i))
-
-# for i in range(5):
-# if len(orders[i]) > 0:
-# allowed.append(orders[i][0])
-# for j in orders[i]:
-# if j in alllayers:
-# nextallowed[i].append(j)
-# for j, k in enumerate(orders[i]):
-# weights[k] = j + 1
-# for u in layers[i]:
-# if u not in allowed and u not in nextallowed[i]:
-# allowed.append(u)
-# else:
-# for i in range(5):
-# allowed.extend(layers[i])
-
-# for bag in flatbags:
-# w = 0
-# if bag[0] in weights:
-# w += weights[bag[0]]
-# if bag[1] in weights:
-# w += weights[bag[1]]
-# bag.append(w)
-# flatbags = sorted(flatbags, key=lambda x: x[-1])
-
-# orderedflatbags = []
-# finallayers = [[], [], [], [], []]
-
-# while len(flatbags) > 0:
-# gotone = False
-# for x in flatbags:
-# if x[0] in allowed and x[1] in allowed:
-# # print(f"{x} est pris")
-# gotone = True
-# orderedflatbags.append(x)
-# flatbags.remove(x)
-# # print(f"Choosing {x}")
-# for i in range(5):
-# if x[0] in layers[i] and x[0] not in finallayers[i]:
-# finallayers[i].append(x[0])
-# if i < 4 and x[1] in layers[i + 1] and x[1] not in finallayers[i + 1]:
-# finallayers[i + 1].append(x[1])
-# if x[0] in nextallowed[i]:
-# # print(f"[{i}] Removing {x[0]} from {nextallowed[i]}")
-# nextallowed[i].remove(x[0])
-# if x[1] in nextallowed[i]:
-# # print(f"[{i}] Removing {x[1]} from {nextallowed[i]}")
-# nextallowed[i].remove(x[1])
-# # print(f"[{i}] {nextallowed[i]}")
-# if len(nextallowed[i]) > 0 and nextallowed[i][0] not in allowed:
-# # print(f"[{i}] Allowing now {nextallowed[i][0]}")
-# allowed.append(nextallowed[i][0])
-# break
-# if not gotone:
-# print("BUG")
-# print(flatbags)
-# print("---", allowed)
-# print(nextallowed)
-# sys.exit(3)
-
-
 def node_color(x):
     color = colors["NORMAL"]
     if x[0:4] == "FAIL":
@@ -1012,78 +951,6 @@ def node_color(x):
     return color


-# def printout():
-# with open(f"sankeymatic_{orderkey}.txt", "w") as fout:
-
-# def output(*a, **b):
-# b["file"] = fout
-# print(*a, **b)
-
-# date_actuelle = datetime.now()
-# date_formatee = date_actuelle.strftime("%m/%d/%Y %H:%M:%S")
-
-# output(
-# f"// SankeyMATIC diagram inputs - Saved: {date_formatee}\n// https://sankeymatic.com/build/\n\n// === Nodes and Flows ===\n\n"
-# )
-
-# output("// THEME INFO")
-# for c, cc in colors.items():
-# output(f"// !{c}:{cc}")
-# output()
-
-# allnodes = []
-# for y in orderedflatbags:
-# output(f"{y[0]} [{y[2]}] {y[1]}")
-# allnodes.append(y[0])
-# allnodes.append(y[1])
-# allnodes = list(set(allnodes))
-
-# nodes = {}
-# for x in allnodes:
-# color = node_color(x)
-# if len(color):
-# nodes[x] = color
-
-# for u in sorted(nodes.keys()):
-# output(f":{u} {nodes[u]}")
-
-# height = conf_value("height")
-# width = conf_value("width")
-# output("\n\n// === Settings ===\n")
-# output(f"size w {width}")
-# output(f" h {height}")
-# with open("trailer.txt", "r") as fichier:
-# contenu = fichier.read()
-# output(contenu)
-# for ddd in depts:
-# if entries[ddd] == 0:
-# continue
-# p1 = round(100 * diploma[ddd] / entries[ddd])
-# p2 = round(100 * (diploma[ddd] + reor2[ddd]) / entries[ddd])
-# p3 = round(100 * (failure[ddd] / entries[ddd]))
-# p4 = round(100 * (failure[ddd] + badred[ddd] + reor1[ddd]) / entries[ddd])
-
-# output(f"// Département {ddd}")
-# output(f"// {entries[ddd]} Entrées")
-# output(f"// {diploma[ddd]} Diplômes")
-# output(f"// {reor2[ddd]} DUT")
-# output(f"// {p1}-{p2}% de réussite")
-# output(f"// {goodred[ddd]} Redoublements")
-# output(f"// {reor1[ddd]} départs de la formation")
-# output(f"// {badred[ddd]} redoublements autorisés non actés")
-# output(f"// {failure[ddd]} échecs")
-# output(f"// {p3}-{p4}% d'échecs")
-# output(f"// {unknown[ddd]} inconnus")
-# for x in strangecases:
-# output(f"// {x}")
-
-# output(f'// orders["{orderkey}"] = {finallayers}')
-# output(f"// bacs: {bacs}")
-
-
-# printout()
-
-
 def textwidth(text, font="Arial", fontsize=14):
     try:
         import cairo
@@ -1112,8 +979,6 @@ def crossweight(node_position, node_layer, edges):
     return w
-
-


 def genetic_optimize(node_position, node_layer, edges):
     oldcandidates = []
     l_indices = list(range(5))