# 2025-03-03 16:23

# write a function that performs the process of syllabus generation
# Input: learned materials, learning materials
# Output: a syllabus
# 1. construct the dependency relations between semantic nodes
# 2. build the dependency relations between sentences
# 3. evaluate the importance of each sentence
# 4. generate the syllabus
import itertools
from summarization_framework import load_rsm, Material
from summarization_interface import LinkType, get_idf_value
from syllabus_prerequisite import construct_node_prerequisite_matrix
from pathlib import Path
from collections import Counter, namedtuple
import numpy as np
import re

data_set_dir = Path(__file__).parent.parent.parent / 'datasets'
SyllabusSentence = namedtuple('SyllabusSentence', ['sentence', 'nodes', 'focused_nodes'])


def generate_syllabus(learning_material, learned_material, focus_node_count=50, summary_sentence_count=50, exec=True):
    '''
    Generate a syllabus (an ordered extractive summary) for ``learning_material``.

    Pipeline (see module header): score semantic nodes, keep the top
    ``focus_node_count`` as focus nodes, build node- and sentence-level
    prerequisite matrices, rank sentences with PageRank, then greedily select
    up to ``summary_sentence_count`` sentences.

    exec: unused in this function body (and shadows the ``exec`` builtin).
        NOTE(review): the original docstring was truncated ("exec: 如果" =
        "exec: if ...") — confirm the intended meaning of this flag.

    Returns a 3-tuple (syllabus_text, nodes_prereq_dict, learned_nodes):
        syllabus_text     -- selected sentence texts joined by newlines,
        nodes_prereq_dict -- node -> [prerequisite_node, weight] (one pair each),
        learned_nodes     -- set of all nodes appearing in ``learned_material``.
    '''
    # data_set_dir / 'rsm/rsm.json'
    # learning_materials, references, output_dir = load_rsm()
    # learned_material = Material('foundations_of_database', data_set_dir / 'rsm/foundations_of_database/links/foundations_of_database', data_set_dir / 'rsm/foundations_of_database/sentences/foundations_of_database')

    # for learning_material, reference in zip(learning_materials, references):
    # rank nodes by importance; keep the most important ones as focus nodes
    sorted_nodes, node_importance_mapper = node_evaluation(learning_material.sentences)
    focus_nodes = sorted_nodes[:focus_node_count]
    node_to_index = {node: i for i, node in enumerate(focus_nodes)}
    index_to_node = {i: node for i, node in enumerate(focus_nodes)}

    nodes_prereq_matrix = construct_node_prerequisite_matrix(learned_material, learning_material, set(focus_nodes), node_to_index)

    sentence_prereq_matrix, filtered_sentences = construct_sentence_prerequisite_matrix(learning_material.sentences, nodes_prereq_matrix, node_to_index)
    sentence_scores = PageRank(sentence_prereq_matrix)
    
    # sort sentences by PageRank score; sentence indices start at 1
    sorted_sentence_scores = sorted([(s_index, score) for s_index, score in enumerate(sentence_scores, 1)], key=lambda x: x[1], reverse=True)
    # map sentence index -> its 1-based rank in the prerequisite ordering
    sentence_prereq_score_mapper = {
        s_index: prereq_index for prereq_index, (s_index, _) in enumerate(sorted_sentence_scores, 1)
    }

    print(len(sorted_sentence_scores))
    print(len(filtered_sentences))

    # greedy selection: find_a_sentence also mutates node_importance_mapper
    # (damping covered nodes, boosting prerequisites) between picks
    summary = []
    while len(summary) < summary_sentence_count and filtered_sentences:
        selected_sentence = find_a_sentence(filtered_sentences, sentence_prereq_score_mapper, node_importance_mapper, nodes_prereq_matrix, node_to_index, index_to_node, len(summary) + 1)
        summary.append(selected_sentence)

        filtered_sentences.remove(selected_sentence)

    # for sentence in summary:
    #     print(sentence.sentence.text)
        
    nodes_prereq_dict = {}
    for i, j in itertools.product(range(len(focus_nodes)), range(len(focus_nodes))):
        if nodes_prereq_matrix[i, j] != 0:
            # NOTE(review): the key is overwritten for every nonzero j, so only
            # the LAST prerequisite pair per node i survives — confirm whether a
            # list of all (prerequisite, weight) pairs was intended here.
            nodes_prereq_dict[(index_to_node[i])] = [index_to_node[j], nodes_prereq_matrix[i, j]]
    
    # collect every node mentioned anywhere in the learned material
    learned_nodes = set()
    for sentence in learned_material.sentences:
        for node in sentence.nodes:
            learned_nodes.add(node)
    
    return '\n'.join(sentence.sentence.text for sentence in summary), nodes_prereq_dict, learned_nodes


def find_a_sentence(sentences, sentence_prereq_score_mapper, node_importance_mapper, node_prerequisite_matrix, node_to_index, index_to_node, summary_index):
    """Pick the next syllabus sentence greedily and update node importances.

    A candidate's score is the sum of its nodes' importances, weighted by a
    positional coefficient that favors sentences whose prerequisite rank is
    near twice the current summary position.  After selection, the importance
    of the chosen sentence's nodes is halved and the prerequisites of its
    focused nodes are boosted — i.e. ``node_importance_mapper`` is mutated.
    """
    total = len(sentences)

    def _candidate_score(position, candidate):
        # base importance: accumulated over every node in the candidate
        base = sum(node_importance_mapper[node] for node in candidate.nodes)
        # rank of this sentence in the prerequisite ordering (1-based)
        rank = sentence_prereq_score_mapper[position]
        coefficient = 2 - abs(rank - 2 * summary_index) / total
        return base * coefficient

    scores = [_candidate_score(position, candidate)
              for position, candidate in enumerate(sentences, 1)]
    best = sentences[np.argmax(scores)]

    # damp nodes the summary now covers so later picks favor new content
    for node in best.nodes:
        node_importance_mapper[node] /= 2

    # boost every prerequisite of the chosen sentence's focused nodes
    for node in best.focused_nodes:
        row = node_prerequisite_matrix[node_to_index[node], :]
        for column, weight in enumerate(row):
            node_importance_mapper[index_to_node[column]] *= 1 + weight

    return best

# def find_a_sentence(pair, sentences, last_sentence, importance_mapper, sentence_dependency_matrix):
#     node1, node2 = pair
#     sentence_scores = []
#     for sentence in sentences:
#         # 1. 找到与node1, node2最接近的句子
#         # 2. 句子要与上一个句子连贯
#         # 3. 句子内包含的词要重要
#         common_words = set(node1.split(' ') + node2.split(' ')) & set(sentence.words)

#         score = len(common_words) * sentence_dependency_matrix(last_sentence, sentence) * [importance_mapper[node] for node in sentence.nodes]

#         sentence_scores.append((sentence, score))

#     return sorted(sentence_scores, key=lambda x: -x[1])[0]


def node_evaluation(sentences):
    """Score every semantic node and return nodes sorted by importance.

    Sentences are mapped onto 100 positional buckets.  For each "active"
    node (the subject of an Action link) we count its occurrences per bucket
    and score it by::

        total_count * idf * 1 / (variance + 1) * normalized_spread

    Variance penalizes bursty nodes; normalized spread (mean pairwise bucket
    distance, scaled by 100) rewards nodes whose occurrences cover a wide
    span of the material.  In a second pass, every node appearing at either
    end of any link earns an additional flat 0.5 per occurrence.

    Returns (sorted_nodes, importance_mapper): node names in descending
    importance, plus the node -> score mapping.
    """
    bucket_count = 100
    occurrences = {}
    importance_mapper = {}

    bucket_width = len(sentences) / bucket_count
    for index, sentence in enumerate(sentences):
        # positional bucket (0..99) of this sentence within the material
        position = int(100 * (index // bucket_width) / bucket_count)

        for pre, ind, rtype, post, _ in sentence.links:
            # only "active" nodes — subjects of Action links — are counted
            if rtype != LinkType.Action:
                continue
            counts = occurrences.setdefault(pre, [0] * bucket_count)
            counts[position] += 1

    print('node count: ', len(occurrences))
    for node, counts in occurrences.items():
        mean = sum(counts) / len(counts)
        variance = sum((count - mean) ** 2 for count in counts) / 10000

        positions = [i for i, count in enumerate(counts) if count != 0]
        if len(positions) == 1:
            # a node confined to a single bucket counts as "adjacent"
            spread = 1
        else:
            # mean pairwise distance between occupied buckets
            spread = sum(later - earlier for earlier, later in itertools.combinations(positions, 2))
            spread /= len(positions) * (len(positions) - 1) / 2
        spread /= 100

        importance_mapper[node] = sum(counts) * get_idf_value(node) * (1 / (variance + 1)) * spread

    # flat bonus: every link endpoint gets 0.5 per occurrence
    for sentence in sentences:
        for pre, ind, rtype, post, _ in sentence.links:
            for endpoint in (pre, post):
                importance_mapper[endpoint] = importance_mapper.get(endpoint, 0) + 0.5

    sorted_nodes = [node for node, _ in sorted(importance_mapper.items(), key=lambda item: -item[1])]

    return sorted_nodes, importance_mapper


def PageRank(transition_matrix, d=0.85, max_iterations=1000, tolerance=1e-6):
    """Compute PageRank scores of a graph by power iteration.

    Parameters:
        transition_matrix: (n, n) array; entry [i, j] is the weight with which
            node j passes score to node i (columns are expected to be
            normalized upstream).
        d: damping factor.
        max_iterations: upper bound on power-iteration steps.
        tolerance: stop when the L2 norm of the update drops below this.

    Returns an array of n scores, initialized uniformly to 1/n.
    """
    n = transition_matrix.shape[0]
    pr = np.ones(n) / n  # start from the uniform distribution
    # BUG FIX: the loop variable was named `iter`, shadowing the builtin and
    # left unbound by the final print when max_iterations <= 0.
    iteration = 0
    for iteration in range(max_iterations):
        new_pr = (1 - d) / n + d * np.dot(transition_matrix, pr)
        # converged: successive iterates differ by less than the tolerance
        if np.linalg.norm(new_pr - pr) < tolerance:
            break
        pr = new_pr

    print(f'PageRank iter: {iteration}')
    return pr


def _sentence_prerequisite(nodes1, nodes2, nodes_prerequisite_matrix, node_to_index):
    prerequisite = 0
    reverse_prerequisite = 0

    for node1, node2 in itertools.product(nodes1, nodes2):
        prerequisite += nodes_prerequisite_matrix[node_to_index[node1], node_to_index[node2]]
        reverse_prerequisite += nodes_prerequisite_matrix[node_to_index[node2], node_to_index[node2]]
    
    return prerequisite, reverse_prerequisite



def construct_sentence_prerequisite_matrix(sentences, nodes_prerequisite_matrix, node_to_index):
    """Build a column-normalized sentence-to-sentence prerequisite matrix.

    Each sentence is wrapped in a SyllabusSentence carrying the set of all
    link-endpoint nodes plus the subset that matches a focus node.  For every
    ordered sentence pair, prerequisite weight is aggregated over the pairs of
    focused nodes; columns are then normalized to sum to 1 so each sentence
    distributes a unit of "being depended on" weight (PageRank-ready).

    Returns (sentence_prereq_matrix, filtered_sentences).
    """
    filtered_sentences = []
    # Reverse lexicographic order puts longer alternatives first for shared
    # prefixes (e.g. "resource space" before "resource") so the regex prefers
    # the more specific focus node.  BUG FIX: node names are now re.escape()d
    # so regex metacharacters cannot break or silently alter the pattern.
    pattern = '(' + '|'.join(re.escape(node) for node in sorted(node_to_index.keys(), reverse=True)) + ')'

    for sentence in sentences:
        nodes = set(); focused_nodes = set()
        for pre, ind, rtype, post, _ in sentence.links:
            for node in [pre, post]:
                # BUG FIX: previously `nodes.add(pre)` — added `pre` twice and
                # never recorded `post`.
                nodes.add(node)
                if (match := re.search(pattern, node)):
                    focused_nodes.add(match.group(0))

        filtered_sentences.append(SyllabusSentence(sentence, nodes, focused_nodes))

    sentence_prereq_matrix = np.zeros((len(filtered_sentences), len(filtered_sentences)))

    # Only the upper triangle is iterated; the helper returns both directions,
    # which fill the two symmetric positions.
    for index1, sentence1 in enumerate(filtered_sentences):
        for index2, sentence2 in enumerate(filtered_sentences):
            if index2 <= index1: continue

            prereq, reverse_prereq = _sentence_prerequisite(sentence1.focused_nodes, sentence2.focused_nodes, nodes_prerequisite_matrix, node_to_index)

            sentence_prereq_matrix[index1, index2] = prereq
            sentence_prereq_matrix[index2, index1] = reverse_prereq

    # Normalize each column to sum to 1 (skip all-zero columns) so every
    # sentence's incoming dependency weight forms a distribution.
    for index in range(len(filtered_sentences)):
        column_sum = np.sum(sentence_prereq_matrix[:, index])
        if column_sum != 0:
            sentence_prereq_matrix[:, index] /= column_sum

    return sentence_prereq_matrix, filtered_sentences


# Heuristics for constructing dependency relations
# (original note said "five aspects" but lists six):
# 1. the object depends on the subject
# 2. within a group of adjacent sentences, later ones depend on earlier ones
# 3. the abstract depends on the concrete, e.g. resource -> resource space
# 4. prefer the node whose first occurrence comes earlier
# 5. the node whose removal loses less mutual information depends on the one whose removal loses more
# 6. learned material depends on learning materials


if __name__ == '__main__':
    # NOTE(review): `main` is not defined or imported anywhere in this module,
    # so running this file directly raises NameError.  Confirm whether an entry
    # point was removed or this should call generate_syllabus(...) directly.
    main()