Add analysis function
parent 1026cc71a9
commit 3714919ba5
@@ -1,4 +1,4 @@
-from ..models import Dataset
+from ..models import Dataset, Test
 from ..tools.logs import write
 
 from flask import current_app as app
@@ -7,6 +7,8 @@ from flask.helpers import abort, flash, redirect, url_for
 import json
 from pathlib import Path
 from random import shuffle
+from statistics import mean, median, stdev
+from typing import Union
 from functools import wraps
 
 def load(filename:str):
@@ -85,3 +87,62 @@ def check_dataset_exists(function):
             return redirect(url_for('admin._questions'))
         return function(*args, **kwargs)
     return wrapper
+
+def check_test_exists(function):
+    @wraps(function)
+    def wrapper(*args, **kwargs):
+        try: tests = Test.query.all()
+        except Exception as exception:
+            write('system.log', f'Database error when checking existing tests: {exception}')
+            return abort(500)
+        if not tests:
+            flash('There are no exams configured. Please create an exam first.', 'error')
+            return redirect(url_for('admin._tests'))
+        return function(*args, **kwargs)
+    return wrapper
+
+def analyse(subject:Union[Dataset,Test]) -> dict:
+    output = {
+        'answers': {},
+        'entries': 0,
+        'grades': {
+            'merit': 0,
+            'pass': 0,
+            'fail': 0
+        },
+        'scores': {
+            'mean': 0,
+            'median': 0,
+            'stdev': 0
+        }
+    }
+    scores_raw = []
+    dataset = subject if isinstance(subject, Dataset) else subject.dataset
+    if isinstance(subject, Test):
+        for entry in subject.entries:
+            if entry.answers:
+                for question, answer in entry.answers.items():
+                    if int(question) not in output['answers']: output['answers'][int(question)] = {}
+                    if int(answer) not in output['answers'][int(question)]: output['answers'][int(question)][int(answer)] = 0
+                    output['answers'][int(question)][int(answer)] += 1
+            if entry.result:
+                output['entries'] += 1
+                output['grades'][entry.result['grade']] += 1
+                scores_raw.append(int(entry.result['score']))
+    else:
+        for test in subject.tests:
+            output['entries'] += len(test.entries)
+            for entry in test.entries:
+                if entry.answers:
+                    for question, answer in entry.answers.items():
+                        if int(question) not in output['answers']: output['answers'][int(question)] = {}
+                        if int(answer) not in output['answers'][int(question)]: output['answers'][int(question)][int(answer)] = 0
+                        output['answers'][int(question)][int(answer)] += 1
+                if entry.result:
+                    output['entries'] += 1
+                    output['grades'][entry.result['grade']] += 1
+                    scores_raw.append(int(entry.result['score']))
+    output['scores']['mean'] = mean(scores_raw) if scores_raw else None
+    output['scores']['median'] = median(scores_raw) if scores_raw else None
+    output['scores']['stdev'] = stdev(scores_raw, output['scores']['mean']) if len(scores_raw) > 1 else None
+    return output
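
Assuming a Test with three completed entries scoring 50, 60 and 70 and one entry per grade (figures invented for illustration), the dict returned by analyse would look roughly like this:

# Illustrative analyse() output -- all values invented for this example.
{
    'answers': {1: {2: 2, 3: 1}, 2: {1: 3}},            # question id -> chosen answer -> count
    'entries': 3,
    'grades': {'merit': 1, 'pass': 1, 'fail': 1},
    'scores': {'mean': 60, 'median': 60, 'stdev': 10.0}
}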
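
For context, a minimal sketch of how the new helpers might be wired into an admin view follows. The blueprint, route, module path and template names are assumptions for illustration; only check_test_exists and analyse come from this commit.

# Hypothetical usage sketch -- blueprint, route and template names are assumed, not part of the commit.
from flask import Blueprint, render_template

from .helpers import analyse, check_test_exists    # assumed location of the module diffed above
from ..models import Test

admin = Blueprint('admin', __name__)                # assumed blueprint

@admin.route('/tests/<int:test_id>/analysis')
@check_test_exists                                  # aborts with 500 on DB errors, redirects when no exams exist
def _analysis(test_id):
    test = Test.query.get_or_404(test_id)           # Flask-SQLAlchemy convenience lookup
    return render_template('analysis.html', test=test, stats=analyse(test))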