import itertools
import numpy as np
from collections import Counter
from summarization_interface import LinkType

def RefD(sentences):
    """Placeholder for the RefD prerequisite metric (not yet implemented)."""


def subject_object_prerequisite(sentences):
    """Count (object, subject) prerequisite pairs from Action/Attribute links.

    For every Action or Attribute link ``pre -> post``, record the pair
    ``(post, pre)``: the object depends on the subject.

    Args:
        sentences: iterable of sentence objects exposing a ``links`` attribute
            of ``(pre, ind, rtype, post, position)`` tuples.

    Returns:
        dict mapping ``(post, pre)`` pairs to counts normalized by the maximum
        count; empty dict when no matching link exists.
    """
    counter = Counter()

    for sentence in sentences:
        for pre, ind, rtype, post, position in sentence.links:
            if rtype in [LinkType.Action, LinkType.Attribute]:
                # `post` (object) depends on `pre` (subject).
                counter.update([(post, pre)])

    # Bug fix: max() raises ValueError on an empty Counter (no matching links).
    if not counter:
        return {}

    max_count = max(counter.values())
    norm_counter = {k: v / max_count for k, v in counter.items()}

    return norm_counter


def constraint_prerequisite(sentences):
    """Count (post, pre) prerequisite pairs from Constraint links.

    Args:
        sentences: iterable of sentence objects exposing a ``links`` attribute
            of ``(pre, ind, rtype, post, position)`` tuples.

    Returns:
        dict mapping ``(post, pre)`` pairs to counts normalized by the maximum
        count; empty dict when no Constraint link exists.
    """
    counter = Counter()

    for sentence in sentences:
        for pre, ind, rtype, post, position in sentence.links:
            if rtype in [LinkType.Constraint]:
                # `post` depends on `pre` (same pair convention as
                # subject_object_prerequisite).
                counter.update([(post, pre)])

    # Bug fix: max() raises ValueError on an empty Counter (no matching links).
    if not counter:
        return {}

    max_count = max(counter.values())
    norm_counter = {k: v / max_count for k, v in counter.items()}

    return norm_counter


def adjacent_prerequisite(sentences):
    """Build prerequisite pairs between nodes of nearby sentences.

    Sentences are processed in groups of ``group_size``; within a group, every
    node of a later sentence is recorded as depending on every node that
    appeared in an earlier sentence of the same group.

    Args:
        sentences: list of sentence objects exposing a ``links`` attribute of
            ``(pre, ind, rtype, post, position)`` tuples.

    Returns:
        dict mapping ``(current_node, earlier_node)`` pairs to counts
        normalized by the maximum count; empty dict when no pair was recorded.
    """
    counter = Counter()
    # Split the sentences into groups of group_size (5-10 recommended).
    group_size = 5
    for start in range(0, len(sentences), group_size):
        earlier_nodes = []
        for sentence in sentences[start:start + group_size]:
            # Collect the nodes of the current sentence.
            current_nodes = []
            for pre, ind, rtype, post, position in sentence.links:
                current_nodes.extend([pre, post])

            for curr, prev in itertools.product(current_nodes, earlier_nodes):
                # `curr` depends on `prev`.
                counter.update([(curr, prev)])

            earlier_nodes.extend(current_nodes)

    # Bug fix: max() raises ValueError on an empty Counter (no pairs found).
    if not counter:
        return {}

    max_count = max(counter.values())
    norm_counter = {k: v / max_count for k, v in counter.items()}

    return norm_counter


def abstract_prerequisite(sentences):
    """Record that abstract phrases depend on their concrete forms.

    For Constraint links the combined phrase ``"pre ind post"`` is the
    concrete form; its head (``post`` when the indicator is a preposition,
    otherwise ``pre``) is paired with it. For other links the suffix of a
    multi-word phrase is paired with the full phrase
    (e.g. resource -> resource space).

    Returns:
        dict mapping pairs to counts normalized by the maximum count;
        empty dict when nothing was recorded.
    """
    # Prepositional indicators whose constraint phrase is headed by `post`.
    # Fix: the original set literal listed 'of', 'in' and 'at' twice.
    prepositions = {'of', 'at', 'in', 'to', 'with', 'on', 'from', 'for', 'as'}

    counter = Counter()
    for sentence in sentences:
        for pre, ind, rtype, post, position in sentence.links:
            if rtype == LinkType.Constraint:
                phrase = f'{pre} {ind} {post}'
                if ind in prepositions:
                    counter.update([(post, phrase)])
                else:
                    counter.update([(pre, phrase)])

            else:
                # Multi-word phrases: pair the suffix with the full phrase.
                if len(pre_words := pre.split(' ')) > 1:
                    counter.update([(' '.join(pre_words[1:]), pre)])

                if len(post_words := post.split(' ')) > 1:
                    counter.update([(' '.join(post_words[1:]), post)])

    # Bug fix: max() raises ValueError on an empty Counter.
    if not counter:
        return {}

    max_count = max(counter.values())
    norm_counter = {k: v / max_count for k, v in counter.items()}
    return norm_counter


def avg_position_prerequisite(sentences, focus_nodes):
    """Weight node pairs by how close their average appearance positions are.

    Each node's average (1-based) sentence index is computed; for every pair
    where ``node1`` appears earlier on average than ``node2``, the pair
    ``(node1, node2)`` is weighted ``len(sentences) - distance`` so that
    closer concepts get a stronger dependency.

    NOTE(review): the original comment states "node1 depends on node2", which
    conflicts with the "earlier average position is more fundamental" idea
    stated at the call site — confirm the intended pair orientation.

    Args:
        sentences: list of sentence objects exposing ``links``.
        focus_nodes: only nodes contained here are considered.

    Returns:
        dict mapping ``(node1, node2)`` to normalized weights; empty dict
        when no pair qualifies.
    """
    counter = Counter()
    # Bug fix: the original read the loop variable `index_pos` after the loop
    # (NameError for empty input); its final value equals len(sentences).
    total_sentences = len(sentences)

    node_positions = {}
    for index_pos, sentence in enumerate(sentences, 1):
        # Record every sentence index each node appears at.
        for pre, ind, rtype, post, position in sentence.links:
            node_positions.setdefault(pre, []).append(index_pos)
            node_positions.setdefault(post, []).append(index_pos)

    # Average appearance position, restricted to the focus nodes.
    avg_position = {k: sum(v) / len(v) for k, v in node_positions.items() if k in focus_nodes}

    for node1, node2 in itertools.product(avg_position, avg_position):
        if node1 == node2:
            continue

        # For neighbouring concepts, the closer they are, the stronger the
        # dependency weight.
        if avg_position[node1] < avg_position[node2]:
            counter[(node1, node2)] = total_sentences - (avg_position[node2] - avg_position[node1])

    # Guard against max() on an empty Counter.
    if not counter:
        return {}

    max_count = max(counter.values())
    norm_counter = {k: v / max_count for k, v in counter.items()}
    return norm_counter


# Words with a more specific distribution depend on words with a more
# specific distribution. (sic — the original comment repeats "more specific";
# one side was presumably meant to be "more general".)
def distribution_prerequisite():
    """Placeholder — not yet implemented."""


def citation_prerequisite(learned_material, learning_material, focus_concepts=None):
    """Build prerequisite pairs between nodes unique to each material.

    Nodes appearing only in ``learned_material`` are paired with nodes
    appearing only in ``learning_material`` (learned depends on learning);
    each pair is weighted by the combined occurrence counts.

    Args:
        learned_material: object exposing ``sentences`` (each with ``links``).
        learning_material: same shape as ``learned_material``.
        focus_concepts: optional set restricting both node groups.

    Returns:
        dict mapping ``(learned_node, learning_node)`` to weights normalized
        by the maximum; empty dict when no pair exists.
    """
    counter = Counter()
    learned_nodes_counter = Counter()
    learning_nodes_counter = Counter()
    for sentence in learned_material.sentences:
        for pre, ind, rtype, post, position in sentence.links:
            learned_nodes_counter.update([pre, post])

    for sentence in learning_material.sentences:
        for pre, ind, rtype, post, position in sentence.links:
            learning_nodes_counter.update([pre, post])

    learned_nodes = set(learned_nodes_counter.keys()) - set(learning_nodes_counter.keys())
    learning_nodes = set(learning_nodes_counter.keys()) - set(learned_nodes_counter.keys())
    if focus_concepts is not None:
        # Bug fix: the original computed `a - b & focus_concepts` for the
        # learning nodes, which Python parses as `a - (b & focus_concepts)`;
        # the focus filter must apply to the set difference itself.
        learned_nodes &= focus_concepts
        learning_nodes &= focus_concepts

    for learned_node, learning_node in itertools.product(learned_nodes, learning_nodes):
        # learned_node depends on learning_node.
        counter[(learned_node, learning_node)] = (
            learned_nodes_counter[learned_node] + learning_nodes_counter[learning_node]
        )

    # TODO: after construction, verify the highest-confidence pairs with an LLM.

    if not counter:
        return {}

    max_count = max(counter.values())
    norm_counter = {k: v / max_count for k, v in counter.items()}
    return norm_counter


def construct_node_prerequisite_matrix(learned_material, learning_material, focus_nodes, node_to_index, options=(
    'subject_object_prereq',
    'adjacent_prereq',
    'abstract_prereq',
    'avg_position_prereq',
    'citation_prereq',
)):
    """Construct the prerequisite-relation matrix between semantic nodes.

    Each strategy enabled in ``options`` produces normalized (node1, node2)
    weights; all weights are summed into one mapper and then written into a
    square matrix indexed via ``node_to_index``.

    Args:
        learned_material: material already studied (used by citation_prereq).
        learning_material: material being studied (used by all strategies).
        focus_nodes: nodes of interest; also determines the matrix size.
        node_to_index: mapping node -> row/column index in the matrix.
        options: iterable of strategy names to enable. Default is a tuple
            (not a list) to avoid the mutable-default-argument pitfall.

    Returns:
        numpy array of shape ``(len(focus_nodes), len(focus_nodes))``.
    """
    prerequisite_mapper = {}

    def merge(counts):
        # Accumulate one strategy's normalized counts into the shared mapper.
        for key, value in counts.items():
            prerequisite_mapper[key] = prerequisite_mapper.get(key, 0) + value

    # 1. Objects depend on subjects.
    if 'subject_object_prereq' in options:
        merge(subject_object_prerequisite(learning_material.sentences))

    # 2. Within a group of adjacent sentences, later nodes depend on earlier ones.
    if 'adjacent_prereq' in options:
        merge(adjacent_prerequisite(learning_material.sentences))

    # 3. Abstract depends on concrete, e.g. resource -> resource space.
    if 'abstract_prereq' in options:
        merge(abstract_prerequisite(learning_material.sentences))

    # 4. An earlier average position means a more fundamental concept.
    if 'avg_position_prereq' in options:
        merge(avg_position_prerequisite(learning_material.sentences, focus_nodes))

    # 5. TODO: nodes whose removal loses little mutual information depend on
    #    nodes whose removal loses much (not implemented yet).

    # 6. The learned material depends on the learning material.
    if 'citation_prereq' in options:
        merge(citation_prerequisite(learned_material, learning_material, focus_nodes))

    prerequisite_matrix = np.zeros((len(focus_nodes), len(focus_nodes)))
    for (node1, node2), value in prerequisite_mapper.items():
        # Skip pairs involving nodes outside the focus index.
        if node1 not in node_to_index or node2 not in node_to_index:
            continue
        prerequisite_matrix[node_to_index[node1], node_to_index[node2]] = value

    return prerequisite_matrix


def construct_implicit_node_prerequisite_matrix():
    # NOTE(review): unfinished draft. `nodes` and `prerequisite_matrix` are
    # not defined in this scope (or anywhere visible in this file), so calling
    # this raises NameError. `prerequisite_matrix(...)` is also called like a
    # function here, while the rest of the file treats prerequisite matrices
    # as arrays/dicts — confirm the intended data structure before wiring in.
    for node1, node2 in itertools.product(nodes, nodes):
        prereq = 0
        for node in nodes:
            # P(node1 | node2) = P(node1 | node2) + P(node1 | node) * P(node | node2)
            # NOTE(review): the code below *adds* the two lookups instead of
            # multiplying as the formula above states — confirm the intent.
            prereq += prerequisite_matrix(node, node1) + prerequisite_matrix(node, node2)

        print(prereq)


# Build the prerequisite relations between sentences.
# Judged from the dependencies between all word pairs of the two sentences.
# (Next-sentence prediction could alternatively be used for this judgement.)
def build_sentence_prerequisite_matrix(sentences, node_prerequisite_matrix):
    """Aggregate word-pair prerequisite scores into a sentence-level matrix.

    Args:
        sentences: list of sentence objects exposing a ``words`` attribute.
        node_prerequisite_matrix: mapping from ``(word1, word2)`` pairs to a
            numeric prerequisite score. (Despite the name, this is used as a
            dict keyed by word pairs, not a 2-D array.)

    Returns:
        A ``len(sentences) x len(sentences)`` nested list where entry [i][j]
        is the summed score over all word pairs of sentences i and j.
    """
    count = len(sentences)
    matrix = [[0 for _ in range(count)] for _ in range(count)]

    # Bug fix: the original called itertools.produce (a typo for product),
    # which raises AttributeError at runtime.
    for i, j in itertools.product(range(count), range(count)):
        # Sum the dependency strength over every word pair of the two sentences.
        for word1, word2 in itertools.product(sentences[i].words, sentences[j].words):
            # .get avoids a KeyError for word pairs without a recorded score.
            matrix[i][j] += node_prerequisite_matrix.get((word1, word2), 0)

    return matrix


def construct_syllabus(sentences, node_prerequisite_pairs, desired_concept_count):
    # Build a syllabus by picking one summary sentence per sampled
    # prerequisite pair, consuming sentences as they are selected.
    #
    # NOTE(review): several call sites below look out of sync with the callees:
    # - sample_pairs(...) is defined with three parameters (desired_length,
    #   node_prerequisite_pairs, importance_mapper) but is called with two.
    # - build_sentence_prerequisite_matrix(...) takes (sentences,
    #   node_prerequisite_matrix) but is called with a single argument.
    # - evaluate_node_importance(...) is computed twice with the same input.
    # - find_a_sentence is not defined in this file — presumably provided
    #   elsewhere; verify.
    sentences = [s for s in sentences]  # defensive copy; mutated by .remove below
    summary_sentences = []
    node_importance_mapper = evaluate_node_importance(sentences)
    # Sort the prerequisite relations; each relation maps to one summary sentence.
    
    prerequisite_pairs = sample_pairs(desired_concept_count * 10, node_prerequisite_pairs)
    # Find the sentence most relevant to each prerequisite pair.

    # 2. build the prerequisite relations between sentences
    sentence_prerequisite_matrix = build_sentence_prerequisite_matrix(node_prerequisite_pairs)
    
    # 3. evaluate the importance of each sentence
    node_importance_mapper = evaluate_node_importance(sentences)

    last_sentence = None
    for pair in prerequisite_pairs:
        last_sentence = find_a_sentence(pair, sentences, last_sentence, node_importance_mapper, sentence_prerequisite_matrix)
        summary_sentences.append(last_sentence)
        sentences.remove(last_sentence)

    return summary_sentences


def sample_pairs(desired_length, node_prerequisite_pairs, importance_mapper):
    """Rank prerequisite pairs by combined node importance and strength.

    Each ``(node1, node2, prereq)`` triple is scored as
    ``(importance[node1] + importance[node2]) * prereq``; the top
    ``desired_length`` triples are returned, highest score first.
    """
    scored = [
        (first, second, (importance_mapper[first] + importance_mapper[second]) * strength)
        for first, second, strength in node_prerequisite_pairs
    ]
    scored.sort(key=lambda triple: triple[2], reverse=True)
    return scored[:desired_length]


def evaluate_node_importance(sentences):
    """Score each active node by frequency, IDF, spread, and position variance.

    Sentences are mapped into ``bucket_count`` position bins; for every
    Action/Attribute link the subject (`pre`) node's bin count is incremented.
    A node's importance is then::

        sum(counts) * idf * (1 / (variance + 1)) * normalized_global_distance

    where ``global_distance`` is the average pairwise distance between the
    occupied bins (a measure of how spread out the node's occurrences are).

    Returns:
        A tuple ``(sorted_nodes, importance_mapper)`` — nodes sorted by
        descending importance, and the node -> score mapping.
    """
    bucket_count = 100
    buckets = {}
    for index, sentence in enumerate(sentences):
        # Map the sentence index onto one of `bucket_count` position bins.
        position = int(100 * (index // (len(sentences) / bucket_count)) / bucket_count)

        for pre, ind, rtype, post, _ in sentence.links:
            # Only consider "active" nodes: subjects of Action/Attribute links.
            if rtype not in [LinkType.Action, LinkType.Attribute]: continue
            for node in [pre]:
                buckets[node] = buckets.get(node, [0 for _ in range(bucket_count)])
                buckets[node][position] += 1

    importance_mapper = {}
    for node, counts in buckets.items():
        # Variance of the bin counts, scaled down by 10000
        # (presumably bucket_count ** 2 — TODO confirm the scaling intent).
        mean = sum(counts) / len(counts)
        variance = sum((count - mean) ** 2 for count in counts) / 10000

        # Average pairwise distance between occupied bins.
        positions = [idx for idx, count in enumerate(counts) if count != 0]
        global_distance = 0
        for pos1, pos2 in itertools.product(positions, positions):
            if pos1 >= pos2: continue
            global_distance += abs(pos1 - pos2)
        if len(positions) == 1:
            # Only one occupied bin: treat as adjacent (distance 1) instead of 0.
            global_distance = 1
        else:
            global_distance /= len(positions) * (len(positions) - 1) / 2
        global_distance /= 100

        importance_mapper[node] = (
            sum(counts) * get_idf_value(node) * (1 / (variance + 1)) * global_distance
        )

    sorted_words = [t[0] for t in sorted(importance_mapper.items(), key=lambda x: -x[1])]

    # Bug fix: the original ended with `return sorted_words,` (a 1-tuple)
    # followed by a dead `importance_mapper` line; return both as intended.
    return sorted_words, importance_mapper
