viveksantayana
350c67ab10
Moved most of app definitions out of guard function to use wsgi Updated configuration files and referencing of .env values. Local version needs dotenv or exporting of env variables. Dockerised version works fine without load_dotenv. Ready to test now!
225 lines
8.0 KiB
Python
import os
|
|
import pathlib
|
|
from json import dump, loads
|
|
from datetime import datetime, timedelta
|
|
from glob import glob
|
|
from random import shuffle
|
|
from werkzeug.utils import secure_filename
|
|
|
|
from .security.database import decrypt_find_one
|
|
|
|
def check_data_folder_exists():
    """Ensure the configured data-file directory exists, creating it if needed."""
    from main import app

    # BUG FIX: mkdir previously received the *strings* 'True' for parents and
    # exist_ok, which only worked because non-empty strings are truthy.
    # With exist_ok=True the separate os.path.exists() pre-check is redundant.
    pathlib.Path(app.config['DATA_FILE_DIRECTORY']).mkdir(parents=True, exist_ok=True)
|
|
|
|
def check_default_indicator():
    """Create the hidden '.default.txt' marker file if it does not yet exist.

    The marker stores the filename of the default dataset; an empty marker
    simply means no default has been chosen yet.
    """
    from main import app

    marker = os.path.join(app.config['DATA_FILE_DIRECTORY'], '.default.txt')
    if not os.path.isfile(marker):
        # Context manager guarantees the handle is closed even if the
        # write fails, unlike the previous bare open(...).close() chain.
        with open(marker, 'w'):
            pass
|
|
|
|
def get_default_dataset():
    """Return the filename recorded in the '.default.txt' marker file.

    Ensures the marker exists first; an empty string means no default
    dataset has been selected.
    """
    check_default_indicator()
    from main import app

    marker_path = os.path.join(app.config['DATA_FILE_DIRECTORY'], '.default.txt')
    with open(marker_path, 'r') as marker:
        return marker.read()
|
|
|
|
def available_datasets():
    """List stored dataset files as (filename, label) pairs, newest first.

    The label is the filename without its '.json' extension; the current
    default dataset's label is suffixed with ' (Default)'.
    """
    from main import app

    files = glob(os.path.join(app.config["DATA_FILE_DIRECTORY"], '*.json'))
    default = get_default_dataset()
    output = []
    for file in files:
        # BUG FIX: os.path.basename is portable; splitting on '/' breaks
        # on Windows path separators.
        filename = os.path.basename(file)
        # filename[:-5] strips the '.json' suffix.
        label = f'{filename[:-5]} (Default)' if filename == default else filename[:-5]
        output.append((filename, label))
    output.reverse()
    return output
|
|
|
|
def check_json_format(file):
    """Return True when the upload's filename carries a '.json' extension.

    The check is case-sensitive: only a lowercase 'json' suffix passes.
    """
    name = file.filename
    if '.' not in name:
        return False
    return name.rsplit('.', 1)[-1] == 'json'
|
|
|
|
def validate_json_contents(file):
    """Validate that an uploaded file contains the expected quiz JSON schema.

    The payload must be a JSON object with a 'meta' object and a
    'questions' list. Returns True when valid, False otherwise.
    """
    file.stream.seek(0)
    try:
        data = loads(file.read())
    except ValueError:
        # BUG FIX: malformed JSON previously raised out of the validator;
        # a validator should report invalid input by returning False.
        # (json.JSONDecodeError subclasses ValueError.)
        return False
    if not isinstance(data, dict):
        return False
    if not all(key in data for key in ('meta', 'questions')):
        return False
    if not isinstance(data['meta'], dict):
        return False
    if not isinstance(data['questions'], list):
        return False
    return True
|
|
|
|
def store_data_file(file, default: bool = None):
    """Persist an uploaded dataset under a timestamp-derived filename.

    Stamps the payload's meta block with the upload time, the uploading
    admin's id, and an empty tests list. When *default* is truthy the new
    file is also recorded as the default dataset. Returns the stored
    filename.
    """
    from admin.views import get_id_from_cookie
    from main import app

    check_default_indicator()
    now = datetime.utcnow()
    stored_name = secure_filename('.'.join([now.strftime('%Y%m%d%H%M%S'), 'json']))
    target = os.path.join(app.config['DATA_FILE_DIRECTORY'], stored_name)
    # Rewind before reading: the stream may already have been consumed by
    # an earlier validation pass.
    file.stream.seek(0)
    payload = loads(file.read())
    payload['meta']['timestamp'] = now.strftime('%Y-%m-%d %H%M%S')
    payload['meta']['author'] = get_id_from_cookie()
    payload['meta']['tests'] = []
    with open(target, 'w') as out:
        dump(payload, out, indent=2)
    if default:
        with open(os.path.join(app.config['DATA_FILE_DIRECTORY'], '.default.txt'), 'w') as out:
            out.write(stored_name)
    return stored_name
|
|
|
|
def randomise_list(items: list):
    """Return a shuffled copy of *items* without mutating the original.

    The parameter was renamed from ``list`` to stop shadowing the builtin;
    every call site in this module passes it positionally, so callers are
    unaffected.
    """
    shuffled = items.copy()
    shuffle(shuffled)
    return shuffled
|
|
|
|
def generate_questions(dataset: dict):
    """Build a randomised, flat question list from a dataset's blocks.

    Top-level entries are shuffled; questions inside a 'block' entry are
    shuffled amongst themselves. Multiple-choice option lists are shuffled
    too, while other question types keep their original option order.
    """
    def pick_options(item):
        # Multiple-choice options are presented in random order; any other
        # question type gets a defensive copy in original order.
        if item['q_type'] == 'Multiple Choice':
            return randomise_list(item['options'])
        return item['options'].copy()

    output = []
    for block in randomise_list(dataset['questions']):
        if block['type'] == 'question':
            output.append({
                'type': 'question',
                'q_no': block['q_no'],
                'question_header': '',
                'text': block['text'],
                'options': pick_options(block),
            })
        if block['type'] == 'block':
            header = block.get('question_header', '')
            for position, item in enumerate(randomise_list(block['questions'])):
                output.append({
                    'type': 'block',
                    'q_no': item['q_no'],
                    'question_header': header,
                    'block_length': len(block['questions']),
                    'block_q_no': position,
                    'text': item['text'],
                    'options': pick_options(item),
                })
    return output
|
|
|
|
def evaluate_answers(dataset: dict, answers: dict):
    """Mark *answers* against *dataset*; return grade, per-tag tallies,
    raw score and the maximum attainable score.

    Grading thresholds: >= 85% merit, >= 70% pass, otherwise fail.
    *answers* maps the stringified question number to the chosen option.
    """
    score = 0
    total = 0  # renamed from `max` to stop shadowing the builtin
    tags = {}

    def mark(question):
        # Score a single question and update the per-tag tallies in place.
        # NOTE(review): unanswered questions leave the tag tallies entirely
        # untouched (only answered questions count toward a tag's 'max') —
        # this mirrors the original branch structure; confirm intent.
        nonlocal score
        key = str(question['q_no'])
        if key not in answers:
            return
        correct_answer = question['options'][question['correct']]
        earned = 1 if answers[key] == correct_answer else 0
        score += earned
        for tag in question['tags']:
            entry = tags.setdefault(tag, {'scored': 0, 'max': 0})
            entry['scored'] += earned
            entry['max'] += 1

    for block in dataset['questions']:
        if block['type'] == 'question':
            total += 1
            mark(block)
        if block['type'] == 'block':
            for question in block['questions']:
                total += 1
                mark(question)

    # Guard against an empty dataset instead of dividing by zero.
    ratio = score / total if total else 0
    grade = 'merit' if ratio >= .85 else 'pass' if ratio >= .70 else 'fail'
    return {
        'grade': grade,
        'tags': tags,
        'score': score,
        'max': total,
    }
|
|
|
|
def get_tags_list(dataset: dict):
    """Collect the de-duplicated tags used across every question in *dataset*."""
    collected = set()
    for block in dataset['questions']:
        if block['type'] == 'question':
            collected.update(block['tags'])
        if block['type'] == 'block':
            for question in block['questions']:
                collected.update(question['tags'])
    return list(collected)
|
|
|
|
def get_time_options():
    """Return the selectable time limits as (value, label) pairs.

    The value is the limit in minutes as a string, or 'none' for untimed.
    """
    labels = {
        'none': 'None',
        '60': '1 hour',
        '90': '1 hour 30 minutes',
        '120': '2 hours',
    }
    # dict preserves insertion order, so this matches the original listing.
    return list(labels.items())
|
|
|
|
def get_datasets():
    """Summarise every stored dataset: filename, upload time, author, usage.

    The author id stored in each file's meta block is resolved to a
    username via the encrypted users collection. Returns a list of dicts
    with keys 'filename', 'timestamp', 'author' and 'use'.
    """
    from main import app, db

    data = []
    # The previous `if files:` guard was redundant — the loop simply does
    # not run when the glob is empty.
    for file in glob(os.path.join(app.config["DATA_FILE_DIRECTORY"], '*.json')):
        # BUG FIX: os.path.basename is portable; splitting on '/' breaks
        # on Windows path separators.
        filename = os.path.basename(file)
        with open(file) as _file:
            load = loads(_file.read())
        author_id = load['meta']['author']
        author = decrypt_find_one(db.users, {'_id': author_id})['username']
        data.append({
            'filename': filename,
            'timestamp': datetime.strptime(load['meta']['timestamp'], '%Y-%m-%d %H%M%S'),
            'author': author,
            'use': len(load['meta']['tests']),
        })
    return data