Commit 31819bf2 authored by bhmeyer, committed by BrunoMeyer

Merge branch 'development' into '218-criar-modulo-da-interface-que-converta-adega-para-weka'

parents 8f2d8500 2dd11a22
Pipeline #23582 passed with stage in 4 minutes and 38 seconds
@@ -67,13 +67,18 @@
<br>
{% for degree in degrees_last_submissions %}
<div class="row">
<div class="col-xl-4">{{degree.name}} ({{degree.code}})</div>
<div class="col-xl-4">
<div class="col-xl-3">{{degree.name}} ({{degree.code}})</div>
<div class="col-xl-3">
<a href="{% url 'degree:index' submission_id=degree.last_submission.id %}">Última análise</a><br>
</div>
<div class="col-xl-4">
<div class="col-xl-3">
<a href="{% url 'grid:GridList' degree_code=degree.code %}">Grade Curricular</a><br>
</div>
{% if degree.download_allowed %}
<div class="col-xl-3">
<a href="{% url 'submission:download' submission_id=degree.last_submission.id %}">Baixar resultados</a><br>
</div>
{% endif %}
</div>
{% endfor %}
......
@@ -20,8 +20,8 @@
<li class="nav-item"><a class="btn-primary disabled text-left" href="#">Turmas</a></li class="nav-item">
{% endcomment %}
<li class="nav-item"><a class="btn-primary text-left" href="{% url 'admission:index' submission_id=submission.id%}">Turmas de Ingresso</a></li class="nav-item">
{% comment %}
<li class="nav-item" style="display: block;" >
{% comment %}
<li class="nav-item" style="display: block;" >
<a href="#" data-toggle="collapse" data-target="#side-outros" style="display:flex" class="drop">
<span style="flex-grow:1">Outros</span>
<span style="padding-top:3px" class="rotate"><i class="fa fa-angle-left"></i></span>
@@ -29,7 +29,14 @@
<ul class="sub-menu collapse" id="side-outros">
<li class="nav-item"><a class="btn-primary disabled text-left" href="#">Cepe 96/15</a></li class="nav-item">
</ul>
</li class="nav-item">
{% endcomment %}
</ul>
</li class="nav-item">
{% endcomment %}
{% if download_allowed %}
<li class="nav-item"><a class="btn-primary text-left" href="{% url 'submission:download' submission_id=submission.id%}">
Baixar resultados<br>
(incluindo formatos padronizados em CSV)
</a></li class="nav-item">
{% endif %}
</ul>
@@ -18,12 +18,17 @@ def dashboard(request):
degrees_last_submissions.append({
"name": degree.name,
"code": degree.code,
"last_submission": last_submission
"last_submission": last_submission,
"download_allowed": last_submission.download_allowed(request.user)
})
return render(request, 'adega/dashboard.html', {"title": "Dashboard",
"degrees_last_submissions":degrees_last_submissions,
"hide_navbar": True
})
return render(request,
'adega/dashboard.html',
{
"title": "Dashboard",
"degrees_last_submissions":degrees_last_submissions,
"hide_navbar": True
}
)
@login_required
......
@@ -51,9 +51,7 @@ def index(request, submission_id):
@permission_required_or_403('view_course', (Submission, 'id', 'submission_id'))
def compare(request, submission_id):
print(request,submission_id)
submission_id = int(submission_id)
print(submission_id)
submission = Submission.objects.get(id=submission_id)
degree = submission.degree
......
@@ -53,6 +53,7 @@ def index(request, submission_id):
return render(request, "degree/index.html", {
"submission": submission,
"download_allowed": submission.download_allowed(request.user),
"degree": degree,
"degree_data": degree_data,
"situations_pass": situations_pass,
......
@@ -359,6 +359,12 @@ class Course(Analysis):
course_dict = {}
course_dict["disciplina_codigo"] = course
course_dict["disciplina_nome"] = self.analysis["courses"][course]
# If the course code is related to more than one name,
# concatenate these names into a single string
if type(course_dict["disciplina_nome"]) != str:
new_course_name = " | ".join(list(course_dict["disciplina_nome"]))
course_dict["disciplina_nome"] = new_course_name
# quantidade de matriculas
count = self.analysis["general_count_submission"][course]
course_dict["qtd_alunos"] = count
......
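For illustration, the concatenation added above flattens a multi-valued course name into one string; the course names below are hypothetical, not taken from the data:

# Hypothetical example of the name-merging behaviour added in the hunk above
names = ("Cálculo 1", "Cálculo Diferencial e Integral I")
merged = " | ".join(list(names))
print(merged)  # Cálculo 1 | Cálculo Diferencial e Integral I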
@@ -137,7 +137,7 @@ def generate_student_data(path, dataframe, student_analysis):
list_situations = student_analysis.list_students_situation()
for fl in files_list:
list_name = EvasionForm.code_to_str(int(fl))
list_content = {"description_name":"", "description_value":""}
list_content = {"student_list":[], "description_name":"Forma de evasão"}
if(fl in list_situations):
list_content = list_situations[fl]
save_json(path + "list/" + list_name + ".json", list_content)
......
import os
from submission.analysis.conversor_de_dados_adega.utils.situations import PeriodType
def get_all_subjson(path, only_subdir=True, ignore_files=None):
    '''
    Return all JSON files located in the subdirectories of a path
    (or directly in the path when only_subdir is False).
    --
    return: list
    '''
    ignore_files = [] if ignore_files is None else ignore_files
return_list = []
if only_subdir:
subpath = next(os.walk(path))[1]
for subpath_name in subpath:
subpath_name = os.path.join(path,subpath_name)
for file in os.listdir(subpath_name):
if file.endswith(".json") and not(file in ignore_files):
return_list.append(os.path.join(subpath_name, file))
else:
for file in os.listdir(path):
if file.endswith(".json") and not(file in ignore_files):
return_list.append(os.path.join(path, file))
return return_list
\ No newline at end of file
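As orientation for the conversors that follow, here is a hedged usage sketch of get_all_subjson; the paths are hypothetical and only illustrate the only_subdir and ignore_files parameters the conversors pass later in this merge:

# Hypothetical paths; mirrors how AdmissionConversor and CourseConversor call the helper
admission_files = get_all_subjson("/tmp/submission/admissions")
course_files = get_all_subjson("/tmp/submission/courses",
                               only_subdir=False,
                               ignore_files=["disciplinas.json"])
print(len(admission_files), len(course_files))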
import numpy as np
from submission.analysis.json_submission_to_csv.common_conversor import get_all_subjson, PeriodType
import os
import pandas as pd
import json
from pathlib import Path
class AdmissionConversor:
def __init__(self, submission_path, not_allowed_keys=None,
min_year=None, max_year=None):
admission_dir = os.path.join(submission_path, "admissions")
self.submission_path = submission_path
self.admission_dir = admission_dir
if not_allowed_keys is None:
    self.not_allowed_keys = ["evasion_per_semester"]
else:
    self.not_allowed_keys = not_allowed_keys
if (min_year is None) or (max_year is None):
self.init_min_max_year()
else:
self.min_year = min_year
self.max_year = max_year
def init_min_max_year(self):
df = pd.read_csv(os.path.join(self.submission_path,"csv_data_file.csv"))
self.min_year = int(df["ANO_ATIV_CURRIC"].min())
self.max_year = int(df["ANO_ATIV_CURRIC"].max())
def convert_graph_data_to_vector(self, graph_data, graph_name,
return_header=False):
# Collect the second element of the list of tuples
# This is the name of each period type (see situations.py)
period_list = [x[1] for x in PeriodType.PERIODS]
if graph_name == "ira_per_semester":
range_size = (self.max_year-self.min_year+1)*len(period_list)
# Consider average and std
vector_size = 2*range_size
vector_data = np.zeros(vector_size)
for key in graph_data:
# Recover the first parameter of the tuple
year = int(key.split(",")[0].replace("(",""))
# Recover the second parameter of the tuple
period = key.split(",")[1].replace(")","").replace("'", "")
period = str(period.replace("'", ""))
# Remove the white space after ','
period = period[1:]
period_id = PeriodType.str_to_code(period)
idx_in_vector = (year - self.min_year)*len(period_list)+period_id
mean_val, std_val = graph_data[key]
vector_data[ idx_in_vector] = mean_val
vector_data[range_size+idx_in_vector] = std_val
if return_header:
header = []
for i, metric_name in enumerate(["Media", "DesvioPadrao"]):
for year in range(self.min_year, self.max_year+1):
for period_id, period_name in enumerate(period_list):
key_name = "{}_{}_{}_{}".format(
graph_name,
metric_name,
year,
period_name
)
header.append(key_name)
return (vector_data, np.array(header))
else:
return vector_data
if graph_name == "students_per_semester":
range_size = (self.max_year-self.min_year+1)*len(period_list)
# Only the student count per (year, period) is stored
vector_size = range_size
vector_data = np.zeros(vector_size)
for key in graph_data:
# Recover the first parameter of the tuple
year = int(key.split(",")[0].replace("(",""))
# Recover the second parameter of the tuple
period = key.split(",")[1].replace(")","").replace("'", "")
period = str(period.replace("'", ""))
# Remove the white space after ','
period = period[1:]
period_id = PeriodType.str_to_code(period)
idx_in_vector = (year - self.min_year)*len(period_list)+period_id
students_count = int(graph_data[key])
vector_data[idx_in_vector] = students_count
if return_header:
header = []
for year in range(self.min_year, self.max_year+1):
for period_id, period_name in enumerate(period_list):
key_name = "{}_{}_{}".format(
graph_name,
year,
period_name
)
header.append(key_name)
return (vector_data, np.array(header))
else:
return vector_data
raise Exception("A graph data cannot be converted: {}".format(
graph_name))
def get_header(self, keys_from_instances=True, keys_from_list=True):
# Get only the first level of admission path (subdirectories)
keys_set = set()
instance_files = get_all_subjson(self.admission_dir)
# Collect the keys of each admission instance
if keys_from_instances:
for json_instance_path in instance_files:
with open(json_instance_path, 'r') as f:
admission_instance = json.load(f)
new_keys = set(admission_instance.keys())
for key in new_keys:
if key in self.not_allowed_keys:
continue
# keys_set = keys_set.union({key})
if type(admission_instance[key]) in [int, float, str]:
keys_set = keys_set.union({key})
else: # is an attribute with graph data
_, graph_header = self.convert_graph_data_to_vector(
admission_instance[key],
key,
return_header=True)
keys_set = keys_set.union(set(graph_header))
# Collect the keys of each admission instance inside list file
if keys_from_list:
json_instance_path = os.path.join(self.admission_dir,
"lista_turma_ingresso.json")
with open(json_instance_path, 'r') as f:
json_list_content = json.load(f)
for admission_instance in json_list_content:
new_keys = set(admission_instance.keys())
for key in new_keys:
if key in self.not_allowed_keys:
continue
if type(admission_instance[key]) in [int, float, str]:
keys_set = keys_set.union({key})
else: # is an attribute with graph data
_, graph_header = self.convert_graph_data_to_vector(
admission_instance[key],
key,
return_header=True)
keys_set = keys_set.union(set(graph_header))
# TODO: Implement not_allowed_keys in analysis or remove it from build_cache
for key in self.not_allowed_keys:
# Remove 'banned' key from the set if it exists
keys_set = keys_set - {key}
keys_list = sorted(list(keys_set))
return keys_list
def get_admission_as_matrix(self):
# header_instances = get_header(self.admission_dir,
# keys_from_instances=True,
# keys_from_list=False)
# header_list = get_header(self.admission_dir,
# keys_from_instances=False,
# keys_from_list=True)
header = self.get_header()
instance_files = get_all_subjson(self.admission_dir)
# Get the period of each admission instance (json name)
instances_year = [os.path.basename(p.replace(".json", "")) for p in instance_files]
# Get the year of each admission instance (parent dir name)
instances_period = [os.path.basename(str(Path(p).parent)) for p in instance_files]
admission_matrix = np.zeros((len(instance_files),len(header)),dtype=object)
admission_matrix[:,:] = None
get_admission_id = {}
for inst_id, instance_path in enumerate(instance_files):
with open(instance_path, 'r') as f:
admission_instance = json.load(f)
for key in admission_instance:
if key in self.not_allowed_keys:
continue
if type(admission_instance[key]) in [int, float, str]:
col_id = header.index(key)
admission_matrix[inst_id,col_id] = admission_instance[key]
else: # is an attribute with graph data
graph_data, graph_header = self.convert_graph_data_to_vector(
admission_instance[key],
key,
return_header=True)
for key, val in zip(graph_header, graph_data):
col_id = header.index(key)
admission_matrix[inst_id,col_id] = val
ano_val = str(admission_instance["ano"])
semestre_val = str(admission_instance["semestre"])
get_admission_id[(ano_val, semestre_val)] = inst_id
###
json_instance_path = os.path.join(self.admission_dir,
"lista_turma_ingresso.json")
with open(json_instance_path, 'r') as f:
json_list_content = json.load(f)
for admission_instance in json_list_content:
ano_val = str(admission_instance["ano"])
semestre_val = str(admission_instance["semestre"])
inst_id = get_admission_id[(ano_val, semestre_val)]
for key in admission_instance:
if key in self.not_allowed_keys:
continue
if type(admission_instance[key]) in [int, float, str]:
col_id = header.index(key)
admission_matrix[inst_id,col_id] = admission_instance[key]
else: # is an attribute with graph data
graph_data, graph_header = self.convert_graph_data_to_vector(
admission_instance[key],
key,
return_header=True)
for key, val in zip(graph_header, graph_data):
col_id = header.index(key)
admission_matrix[inst_id,col_id] = val
instances_year = np.array(instances_year)
instances_period = np.array(instances_period)
header = ["Ano", "Período"] + header
header = np.array(header)
admission_matrix = np.concatenate((instances_year.reshape(-1,1), admission_matrix), axis=1)
admission_matrix = np.concatenate((instances_period.reshape(-1,1), admission_matrix), axis=1)
return admission_matrix, header
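For context, a minimal driver sketch showing how AdmissionConversor might be used to export the matrix as CSV; the import path, submission path and output file name are assumptions, not part of this commit:

# Hypothetical driver; assumes <submission_path> contains csv_data_file.csv and an admissions/ folder
import pandas as pd
from submission.analysis.json_submission_to_csv.admission_conversor import AdmissionConversor  # module path assumed

conversor = AdmissionConversor("/tmp/submission")
matrix, header = conversor.get_admission_as_matrix()
pd.DataFrame(matrix, columns=header).to_csv("admissions_weka.csv", index=False)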
import numpy as np
from submission.analysis.json_submission_to_csv.common_conversor import get_all_subjson, PeriodType
import os
import pandas as pd
import json
from pathlib import Path
class CourseConversor:
def __init__(self, submission_path, not_allowed_keys=None,
min_year=None, max_year=None):
course_dir = os.path.join(submission_path, "courses")
self.submission_path = submission_path
self.course_dir = course_dir
if not_allowed_keys is None:
    self.not_allowed_keys = ["evasion_per_semester"]
else:
    self.not_allowed_keys = not_allowed_keys
if (min_year is None) or (max_year is None):
self.init_min_max_year()
else:
self.min_year = min_year
self.max_year = max_year
def init_min_max_year(self):
df = pd.read_csv(os.path.join(self.submission_path,"csv_data_file.csv"))
self.min_year = int(df["ANO_ATIV_CURRIC"].min())
self.max_year = int(df["ANO_ATIV_CURRIC"].max())
def convert_graph_data_to_vector(self, graph_data, graph_name,
return_header=False):
# Collect the second element of the list of tuples
# This is the name of each period type (see situations.py)
period_list = [x[1] for x in PeriodType.PERIODS]
if graph_name == "grafico_qtd_cursada_aprov":
#TODO: Import range size from analysis
range_size = 5
# One slot per number of times the course was taken until approval
vector_size = range_size
vector_data = np.zeros(vector_size)
for key in graph_data:
coursed_count = graph_data[key]
key_idx = int(key)-1
vector_data[key_idx] = coursed_count
if return_header:
header = []
for i in range(range_size):
count = i+1
if count == 5:
count = "MaisQue4"
key_name = "{}_{}_{}".format(
"grafico_qtd_cursada_aprov",
"QuantidadeDeVezesCursadaAteAprovacao",
count
)
header.append(key_name)
return (vector_data, np.array(header))
else:
return vector_data
if graph_name == "nota":
vector_data = np.array(graph_data)
header = ["NotaMedia", "NotaDesvioPadrao"]
if return_header:
return (vector_data, np.array(header))
else:
return vector_data
if graph_name == "nota_ultimo_ano":
vector_data = np.array(graph_data)
header = ["NotaUltimoAnoMedia", "NotaUltimoAnoDesvioPadrao"]
if return_header:
return (vector_data, np.array(header))
else:
return vector_data
if graph_name == "nota_ultimo_ano":
vector_data = np.array(graph_data)
header = ["NotaUltimoAnoMedia", "NotaUltimoAnoDesvioPadrao"]
if return_header:
return (vector_data, np.array(header))
else:
return vector_data
if graph_name == "aprovacao_semestral":
range_size = (self.max_year-self.min_year+1)*len(period_list)
# Store approval rate, approval count and registration count per (year, period)
vector_size = 3*range_size
vector_data = np.zeros(vector_size)
for key in graph_data:
year = int(key.split("/")[0])
period = key.split("/")[1]
period_id = PeriodType.str_to_code(period)
idx_in_vector = (year - self.min_year)*len(period_list)+period_id
approve_rate, approve_count, register_count = graph_data[key]
approve_rate = float(approve_rate)
approve_count = int(approve_count)
register_count = int(register_count)
vector_data[ idx_in_vector] = approve_rate
vector_data[range_size+idx_in_vector] = approve_count
vector_data[2*range_size+idx_in_vector] = register_count
if return_header:
header = []
metric_list = ["TaxaAprovacao",
"QuantidadeAprovacao",
"QuantidadeMatriculas"]
for i, metric_name in enumerate(metric_list):
for year in range(self.min_year, self.max_year+1):
for period_id, period_name in enumerate(period_list):
key_name = "{}_{}_{}_{}".format(
graph_name,
metric_name,
year,
period_name
)
header.append(key_name)
return (vector_data, np.array(header))
else:
return vector_data
raise Exception(
"A graph data cannot be converted: {}".format(graph_name))
def get_header(self, keys_from_instances=True, keys_from_list=True):
# Get only the first level of course path (subdirectories)
keys_set = set()
instance_files = get_all_subjson(self.course_dir, only_subdir=False,
ignore_files=["disciplinas.json"])
# Collect the keys of each course instance
if keys_from_instances:
for json_instance_path in instance_files:
with open(json_instance_path, 'r') as f:
course_instance = json.load(f)
new_keys = set(course_instance.keys())
for key in new_keys:
if key in self.not_allowed_keys:
continue
# keys_set = keys_set.union({key})
if type(course_instance[key]) in [int, float, str]:
keys_set = keys_set.union({key})
else: # is an attribute with graph data
_, graph_header = self.convert_graph_data_to_vector(
course_instance[key],
key,
return_header=True)
keys_set = keys_set.union(set(graph_header))
# Collect the keys of each course instance inside list file
if keys_from_list:
json_instance_path = os.path.join(self.course_dir,
"disciplinas.json")
with open(json_instance_path, 'r') as f:
# TODO: Remove 'cache' key on analysis
# And also add compara_aprov graph data
# Get only the values of each course as list
# and ignore dict keys (course codes)
json_list_content = list(json.load(f)["cache"].values())
for course_instance in json_list_content:
new_keys = set(course_instance.keys())
for key in new_keys:
if key in self.not_allowed_keys:
continue
if type(course_instance[key]) in [int, float, str]:
keys_set = keys_set.union({key})
else: # is an attribute with graph data
_, graph_header = self.convert_graph_data_to_vector(