# Evaluate the generated course syllabi
# on three datasets using
# personalized evaluation metrics

import re
import json
import itertools
from syllabus_gen import generate_syllabus
from summarization_framework import load_med_rag, load_khanacademy, load_bigsurvey, load_rsm, data_set_dir
from summarization_interface import Material
from collections import namedtuple
from evaluation import compose_row, word_limit, ctrlsumm_bart_func, qtsumm_bart_func, luhn_func
from tabulate import tabulate
from tqdm import tqdm
from datetime import datetime

class DataPair:
    '''One evaluation sample: the material to be learned, the learner's
    previously-learned material, the reference summary, and an output path.'''

    def __init__(self, learning_material, learned_material, reference, path):
        self.learning_material = learning_material
        self.learned_material = learned_material
        self.reference = reference
        self.path = path

def count_frequency(text: str, query_set: list) -> int:
    '''
    Count how many occurrences of the words/phrases in query_set appear in text.

    Queries are tried longest-first so that an overlapping shorter query does
    not steal a match from a longer one (regex alternation is first-match-wins,
    not longest-match).

    :param text: text to search in
    :param query_set: list of words/phrases to count
    :return: total number of non-overlapping matches
    '''
    if not query_set:
        # an empty alternation '()' would match the empty string at every
        # position, producing a bogus count
        return 0
    queries = sorted(query_set, key=len, reverse=True)
    # escape each query: node names may contain regex metacharacters
    # (e.g. '+', '(', '?') which previously broke or corrupted the pattern
    pattern = '(' + '|'.join(re.escape(query) for query in queries) + ')'
    return len(re.findall(pattern, text))

def load_personalized_test():
    '''
    Build a single-pair smoke-test dataset.

    Uses the "foundations of database" corpus as the learned history and an
    immunology chapter as the material to learn.  The reference summary is a
    throwaway placeholder — this loader only exercises the pipeline, not the
    metric values (previously it was random offensive filler text).

    :return: ([DataPair], output directory Path)
    '''
    foundation_of_database_material = Material(
        'foundations_of_database',
        data_set_dir / 'rsm/foundations_of_database/links/foundations_of_database',
        data_set_dir / 'rsm/foundations_of_database/sentences/foundations_of_database'
    )

    file_name = 'Immunology_Janeway_3000'
    learning_material = Material(
        file_name,
        data_set_dir / f'med_rag_textbooks/various_length/links/{file_name}',
        data_set_dir / f'med_rag_textbooks/various_length/sentences/{file_name}'
    )

    # dummy reference; only used so the pipeline runs end to end
    reference = 'placeholder reference summary for pipeline smoke testing'

    return [DataPair(learning_material, foundation_of_database_material, reference, file_name)], data_set_dir / 'med_rag_textbooks/various_length/output'


def load_personalized_rsm():
    '''
    Return a single RSM data pair: the first RSM material as the material to
    learn, with the "foundations of database" corpus as the learned history.

    :return: ([DataPair], output directory Path)
    '''
    materials, references, output_dir = load_rsm()

    history_material = Material(
        'foundations_of_database',
        data_set_dir / 'rsm/foundations_of_database/links/foundations_of_database',
        data_set_dir / 'rsm/foundations_of_database/sentences/foundations_of_database'
    )

    learning_material = materials[0]
    pair = DataPair(learning_material, history_material, references[0], learning_material.name)
    return [pair], output_dir.parent / 'output'

def load_personalized_med_rag():
    '''
    Return a list of DataPair (learning material, learned material, reference
    summary) plus the output directory.

    The material immediately before each one (wrapping around at index 0)
    is treated as the learner's history material.
    '''
    materials, references, output_dir = load_med_rag()

    # TODO: remove one material
    total = len(materials)
    data_list = []
    for index, (learning_material, reference) in enumerate(zip(materials, references)):
        # previous material acts as the learned history; index 0 wraps
        # around to the last material
        learned_material = materials[(index - 1) % total]

        data_list.append(DataPair(
            learning_material,
            learned_material,
            reference,
            learning_material.name,
        ))

    return data_list, output_dir.parent / 'output'


def load_personalized_khanacademy():
    '''
    Build Khan Academy data pairs where each grade's material uses the
    previous grade's material as the learner's history.

    Materials without an entry in the grade-to-previous-grade table are
    skipped.

    :return: ([DataPair], output directory Path)
    '''
    materials, references, output_dir = load_khanacademy()
    mapper = {m.name: (m, r) for m, r in zip(materials, references)}

    # learning-material name -> name of the grade directly below it
    learned_pairs = {
        '2nd grade': '1st grade',
        '3rd grade': '2nd grade',
        '4th grade': '3rd grade',
        '5th grade': '4th grade',
        '6th grade': '5th grade',
        '7th grade': '6th grade',
        '8th grade': '7th grade',
        '9th grade': '8th grade',
        
        '3rd grade reading & vocabulary': '2nd grade reading & vocabulary',
        '4th grade reading & vocabulary': '3rd grade reading & vocabulary',
        '5th grade reading & vocabulary': '4th grade reading & vocabulary',
        '6th grade reading & vocabulary': '5th grade reading & vocabulary',
        '7th grade reading & vocabulary': '6th grade reading & vocabulary',
        '8th grade reading & vocabulary': '7th grade reading & vocabulary',
        '9th grade reading & vocabulary': '8th grade reading & vocabulary',
        
        '4th grade (Eureka MathEngageNY)': '3rd grade (Eureka MathEngageNY)',
        '5th grade (Eureka MathEngageNY)': '4th grade (Eureka MathEngageNY)',
        '6th grade (Eureka MathEngageNY)': '5th grade (Eureka MathEngageNY)',
        '7th grade (Eureka MathEngageNY)': '6th grade (Eureka MathEngageNY)',
        '8th grade (Eureka MathEngageNY)': '7th grade (Eureka MathEngageNY)',
    }

    data_list = []
    for material in materials:
        previous_name = learned_pairs.get(material.name)
        if previous_name is None:
            # no prior grade on record -> not part of the personalized set
            continue

        learning_material, reference = mapper[material.name]
        learned_material = mapper[previous_name][0]

        data_list.append(DataPair(
            learning_material,
            learned_material,
            reference,
            learning_material.name,
        ))

    return data_list, output_dir.parent / 'output'


def load_personalized_bigsurvey():
    '''
    Build bigsurvey data pairs; the previous material (wrapping around at
    index 0) serves as the learner's history material.

    :return: ([DataPair], output directory Path)
    '''
    materials, references, output_dir = load_bigsurvey()

    data_list = [
        DataPair(
            material,
            # use a different material as the learned history: the previous
            # one, wrapping around to the last for index 0
            materials[(index - 1) % len(materials)],
            reference,
            material.name,
        )
        for index, (material, reference) in enumerate(zip(materials, references))
    ]

    return data_list, output_dir.parent / 'output'

def evaluate(data_pairs, dataset_output_dir, length):
    '''
    Evaluate every model's saved summaries against the references and print
    a score table.

    For each model output directory (under dataset_output_dir plus the
    sibling 'wsln_output' directory) this computes, per data pair:
      - personalized: density of learned-material nodes within the first
        20% of the length-limited prediction
      - coherence: +value when a prerequisite appears before its dependent
        node in the prediction, -value when it appears after
    ROUGE rows are then composed via compose_row and printed sorted by the
    ROUGE1-F column; raw rows are dumped to an 'evaluation' JSON file.

    NOTE(review): each pair must carry `learned_nodes` and `prereq_dict`
    attributes, which are only set by the 'wsln' branch in __main__ —
    confirm before running with other baselines alone.

    :param data_pairs: list of DataPair
    :param dataset_output_dir: directory whose sub-directories hold one
        summary file per learning material, named after the material
    :param length: word limit applied to every prediction
    '''
    prediction_mapper = {}
    # results of the non-ROUGE metrics, keyed by model name
    complement_scores = {}
    references = [pair.reference for pair in data_pairs]

    # renamed from `dir` which shadowed the builtin
    for result_dir in itertools.chain(
        dataset_output_dir.glob('*'),
        (dataset_output_dir.parent / 'wsln_output').glob('*')
    ):
        # skip backups and macOS metadata entries
        if not result_dir or result_dir.name.endswith('bak') or result_dir.name == '.DS_Store':
            continue
        print(f'find model result of {result_dir.name}')
        model_name = result_dir.name
        for pair in data_pairs:
            predict = word_limit(
                (result_dir / pair.learning_material.name).read_text(encoding='utf-8'),
                length
            )
            prediction_mapper.setdefault(model_name, []).append(predict)

            # personalized metric: how densely the first 20% of the
            # prediction mentions nodes from the learned material
            beginning = word_limit(predict, length * 0.2)
            learned_count = count_frequency(beginning, pair.learned_nodes)

            scores = complement_scores.setdefault(model_name, {})
            scores.setdefault('personalized', []).append(
                learned_count / len(beginning.split(' '))
            )

            # coherence metric, based on node dependency order.  Node names
            # are escaped: a name containing regex metacharacters would
            # otherwise crash or corrupt the search pattern.
            score = 0
            for prereq, (node, value) in pair.prereq_dict.items():
                prereq_re, node_re = re.escape(prereq), re.escape(node)
                if re.search(f'{prereq_re}.*{node_re}', predict):
                    score += value
                elif re.search(f'{node_re}.*{prereq_re}', predict):
                    score -= value

            scores.setdefault('coherence', []).append(score)

    if not prediction_mapper:
        # previously this fell through to `tabulate(..., headers=headers)`
        # with `headers` unbound, raising NameError
        print('no model outputs found, nothing to evaluate')
        return

    rows = []
    for model_name, predictions in tqdm(prediction_mapper.items(), desc='calculation rouge'):
        row, headers = compose_row(predictions, references, model_name, complement_scores[model_name])
        rows.append(row)

    # sort by ROUGE1-F (last '/'-separated field of column 1, as formatted
    # by compose_row; note this is a string comparison)
    rows = sorted(
        rows, key=lambda r: r[1].split('/')[-1], reverse=True
    )

    table = tabulate(rows, headers=headers, tablefmt='fancy_grid')
    print(table)
    (dataset_output_dir.parent / 'evaluation').write_text(json.dumps(rows), encoding='utf-8')


if __name__ == '__main__':
    import sys
    # argv[1] selects the dataset; the chosen loader returns the data pairs
    # and the base output directory for baseline summaries
    data_pairs, output_base_dir = {
        'test': load_personalized_test,
        'rsm': load_personalized_rsm,
        'med_rag': load_personalized_med_rag,
        'khan': load_personalized_khanacademy,
        'bigsurvey': load_personalized_bigsurvey,
    }[sys.argv[1]]()

    # argv[2] is a comma-separated list of baseline names, e.g. "wsln,luhn"
    baselines = sys.argv[2].split(',')

    # NOTE(review): currently unused — the code that consumed it is
    # commented out below
    baseline_funcs = {
        # 'ctrlsumm': ctrlsumm_bart_func,
        # 'qtsumm': qtsumm_bart_func,
        'luhn': luhn_func,
        
        # 'llama3.2_1b': llama_3d2_1b_func,
        # 'llama3.2_3b': llama_3d2_3b_func,
        # 'llama3.1_8b': llama_3d1_8b_func,
    }
    
    
    
    for data_pair in tqdm(data_pairs, total=len(data_pairs)):
        for baseline in baselines:
            # if baseline == 'wsln':
            #     continue
            
            # dir = output_base_dir / f'{baseline}'
            # dir.mkdir(parents=True, exist_ok=True)
            # output_file = dir / data_pair.learning_material.name

            # if output_file.exists(): continue

            # func = baseline_funcs[baseline]
            # summary = func(data_pair.learning_material.text)
            # output_file.write_text(summary, encoding='utf-8')

            if baseline == 'wsln':
                # sweep the focused-node count, generating one syllabus per
                # setting into its own timestamped wsln_output directory
                for node in range(10, 1070, 10):
                    print('node', node)
                    dir = output_base_dir.parent / f"wsln_output/focused_node_{node}_{datetime.now().strftime('%y%m%d_%H%M%S')}"
                    dir.mkdir(parents=True, exist_ok=True)
                    summary, nodes_prereq_dict, learned_nodes = generate_syllabus(data_pair.learning_material, data_pair.learned_material, node, exec=False)
                    
                    (dir / data_pair.learning_material.name).write_text(summary, encoding='utf-8')
                    
                    # record the learned-material nodes and the prerequisite
                    # relations between nodes for later inspection
                    record_dir = dir.parent.parent / 'record'
                    record_dir.mkdir(parents=True, exist_ok=True)
                    
                    (record_dir / f'{data_pair.learning_material.name}.prereq').write_text(json.dumps(nodes_prereq_dict, indent=2), encoding='utf-8')
                    (record_dir / f'{data_pair.learning_material.name}.learned_nodes').write_text(
                        '\n'.join(learned_nodes), encoding='utf-8'
                    )
                    
                    # evaluate() reads these attributes off each pair later
                    data_pair.learned_nodes = learned_nodes
                    # NOTE(review): the next line is a no-op expression —
                    # dead code, probably a leftover from an edit
                    (dir / data_pair.learning_material.name)
                # keep the prereq dict from the last node setting for evaluate()
                data_pair.prereq_dict = nodes_prereq_dict

    evaluate(data_pairs, output_base_dir, 1250)



    # model_name = f"syllabus_{datetime.now().strftime('%y%m%d_%H%M%S')}"
    # output_dir = output_base_dir / datetime.now().strftime('%y%m%d_%H%M%S')

    # predictions = []
    # polished_predictions = []
    # for material, reference in tqdm(zip(materials, references), total=len(materials)):
    #     process = psutil.Process(os.getpid())
    #     start_time = time.time()
    #     start_memory = process.memory_info().rss / (1024 ** 2)
        
    #     prediction, dependency_list = wsln_summarize(material, history_material)
        
    #     end_time = time.time()
    #     end_memory = process.memory_info().rss / (1024 ** 2)
    #     print(f'summarizing time: {end_time - start_time:.2f}s')
    #     print(f'memory cost: {end_memory - start_memory:.2f} MB')
        
    #     prediction = word_limit(prediction, 1250)
    #     print(prediction)
        
    #     start_time = time.time()
    #     start_memory = process.memory_info().rss / (1024 ** 2)
        
    #     polished_prediction = word_limit(polish(prediction, dependency_list), 1250)
        
    #     end_time = time.time()
    #     end_memory = process.memory_info().rss / (1024 ** 2)
    #     print(f'polish time: {end_time - start_time:.2f}s')
    #     print(f'memory cost: {end_memory - start_memory:.2f} MB')
        
    #     predictions.append(prediction)
    #     polished_predictions.append(polished_prediction)
        
    #     if not output_dir.exists(): output_dir.mkdir()
        
    #     (output_dir / material.name).write_text(prediction, encoding='utf-8')
    #     (output_dir / f'{material.name}-polished').write_text(polished_prediction, encoding='utf-8')

    # row, headers = compose_row(predictions, references, model_name)
    # polished_row, headers = compose_row(polished_predictions, references, f'{model_name}-polished')
    # table = tabulate([row, polished_row], headers = headers, tablefmt = 'fancy_grid')
    # init_time = time.time()
    # print(table)

