# Test text lengths up to 200k words.
# Thresholds: 3000, 5000, 10k words, then one test every additional 10k words.

import time
from functools import *
from evaluation import luhn_func, textrank_func, lexrank_func, lsa_func, bart_func, llama_3d2_1b_func, llama_3d2_3b_func, llama_3d1_8b_func, qtsumm_bart_func, ctrlsumm_bart_func
from pathlib import Path
import nltk
import json
import math
from summarization_interface import Material
from syllabus_gen import generate_syllabus



# Running word count and the sentence/link lines accumulated for the current chunk.
word_count = 0
sentences, links = [], []

base_dir = Path(__file__).parent.parent.parent / 'datasets/med_rag_textbooks/'
# Maps a sentence index to the group of link lines sharing that index.
link_mapper = {}
for line in (base_dir / 'links/' / 'Immunology_Janeway').read_text(encoding='utf-8').split('\n'):
    # Skip blank lines (e.g. the trailing newline at EOF) so they don't
    # create a bogus '' index entry in the mapper.
    if not line.strip():
        continue
    index = line.split('\t')[-1]
    # setdefault + append avoids rebuilding the list on every insertion.
    link_mapper.setdefault(index, []).append(line)


# Word-count thresholds at which a snapshot of the accumulated text is written:
# 3000, 5000, ~10k, then roughly every 10k-20k words up to 200k.
lengths = [3000, 5000, 8000, 10000, 20000, 30000, 50000, 70000, 80000, 100000, 120000, 140000, 160000, 180000, 200000
        #    , 230000, 260000, 300000, 350000, 400000
           ]

threshold_index = 0
links = []
sentences_out_dir = base_dir / 'various_length/sentences'
links_out_dir = base_dir / 'various_length/links/preprocessed'
# Ensure the output directories exist so write_text below cannot fail
# with FileNotFoundError on a fresh checkout.
sentences_out_dir.mkdir(parents=True, exist_ok=True)
links_out_dir.mkdir(parents=True, exist_ok=True)
for line in (base_dir / 'sentences' / 'Immunology_Janeway').read_text(encoding='utf-8').split('\n'):
    # Blank lines (trailing newline) would make the tuple unpacking raise.
    if not line.strip():
        continue
    sentence, index = line.split('\t')
    word_count += len(nltk.word_tokenize(sentence))

    sentences.append(line)
    # A sentence may have no recorded links; fall back to an empty group
    # instead of raising KeyError.
    links.extend(link_mapper.get(index, []))
    if word_count > lengths[threshold_index]:
        (sentences_out_dir / f'Immunology_Janeway_{lengths[threshold_index]}').write_text('\n'.join(sentences), encoding='utf-8')
        # NOTE(review): link snapshots are written under links/preprocessed/,
        # but the timing loop below reads links/Immunology_Janeway_{length}
        # directly — confirm an external preprocessing step bridges the two.
        (links_out_dir / f'Immunology_Janeway_{lengths[threshold_index]}').write_text('\n'.join(links), encoding='utf-8')

        threshold_index += 1
        # Stop once every requested length has been emitted.
        if threshold_index >= len(lengths):
            break


# TODO: size the summaries by sentence count instead of a fixed N.
# Baseline summarizers to time, each truncated/configured to N=200.
baselines = {
    'luhn': partial(luhn_func, N=200),
    'textrank': partial(textrank_func, N=200),
    'lexrank': partial(lexrank_func, N=200),
    'lsa': partial(lsa_func, N=200),
    'llama_3d1_8b': partial(llama_3d1_8b_func, N=200),
    'llama_3d2_1b': partial(llama_3d2_1b_func, N=200),
    'llama_3d2_3b': partial(llama_3d2_3b_func, N=200),
    'bart': partial(bart_func, N=200),
    # Fix: these two were swapped — 'ctrlsumm' pointed at qtsumm_bart_func and
    # vice versa, so timings were recorded under the wrong model names.
    'ctrlsumm': partial(ctrlsumm_bart_func, N=200),
    'qtsumm': partial(qtsumm_bart_func, N=200),
}

# Runtime measurement: load (or initialise) the per-model timing recorder.

recorder_file = Path(__file__).parent / 'figs/recorder.json'
if recorder_file.exists() and recorder_file.stat().st_size > 0:
    recorder = json.loads(recorder_file.read_text(encoding='utf-8'))
    # Backfill every model key — including 'wsln', which is timed separately.
    # A recorder.json written before a model existed would otherwise cause a
    # KeyError below (the original only backfilled the baseline models, so an
    # existing file without 'wsln' crashed the W-SLN loop).
    for model_name in [*baselines, 'wsln']:
        if model_name not in recorder:
            recorder[model_name] = {}
else:
    recorder = {model_name: {} for model_name in baselines}
    recorder['wsln'] = {}

# Reference material already "learned" by the student model.
learned_material = Material(
    '1st grade',
    base_dir.parent / 'cosmopedia/khanacademy/links/1st grade',
    base_dir.parent / 'cosmopedia/khanacademy/sentences/1st grade',
)
# Run W-SLN syllabus generation and record its wall-clock time per text length.
for text_length in lengths:
    # JSON object keys are strings, so compare against the string form.
    if str(text_length) in recorder['wsln']:
        print(text_length, ' skip')
        continue

    learning_material = Material(text_length,
        base_dir / f'various_length/links/Immunology_Janeway_{text_length}',
        base_dir / f'various_length/sentences/Immunology_Janeway_{text_length}',
    )

    start_time = time.time()
    generate_syllabus(learned_material, learning_material)
    end_time = time.time()
    # Store under a string key so the in-memory dict matches the JSON
    # round-trip (the original stored int keys, so a rerun within the same
    # process would not skip already-timed lengths), and persist immediately
    # so a crash in the baselines loop below cannot lose W-SLN timings
    # (the original only saved inside the baselines loop).
    recorder['wsln'][str(text_length)] = round(end_time - start_time, 2)
    recorder_file.write_text(json.dumps(recorder, indent=2), encoding='utf-8')


# Time each baseline summarizer at each text length, persisting after every run.
for model_name, model_func in baselines.items():
    for text_length in lengths:
        print(f"model: {model_name}, text_length: {text_length}")
        print()

        # JSON object keys are strings.
        if str(text_length) in recorder[model_name]:
            print('skip')
            continue

        # Read and tokenize the corpus only when the measurement is actually
        # needed (the original read the file before the skip check).
        text = (base_dir / f'various_length/sentences/Immunology_Janeway_{text_length}').read_text(encoding='utf-8')
        words = nltk.word_tokenize(text)[:text_length]
        test_text = ' '.join(words)

        start_time = time.time()
        model_func(test_text)
        end_time = time.time()

        # String key keeps the in-memory dict consistent with the skip check
        # above and with the JSON on disk (the original stored int keys).
        recorder[model_name][str(text_length)] = round(end_time - start_time, 2)

        print(f"model: {model_name}, text_length: {text_length}, time: {end_time - start_time}")

        # Persist after every measurement so progress survives a crash.
        recorder_file.write_text(json.dumps(recorder, indent=2), encoding='utf-8')
        