import math
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from retrieval.process import get_text


class Item:
    """One retrieval result: a single corpus document plus its ranking state.

    Parses the document's five header lines ("key: value" format) — title,
    author, date, abstract, url — and accumulates scoring state filled in
    later by the retrieval pipeline (TF-IDF score, cosine similarity,
    occurrence spans of the matched query words).
    """

    def __init__(self, index, text):
        self.index = index
        # NOTE(review): os.listdir order is OS-dependent and not guaranteed
        # sorted; this assumes the same ordering was used when the inverse
        # table / texts list were built — confirm against the indexer.
        self.fname = os.listdir('../data')[index]
        self.text = text
        # Parse the header lines once instead of re-splitting per field.
        lines = text.strip().split('\n')
        self.head = lines[0].split(':')[1].strip()
        self.author = lines[1].split(':')[1].strip()
        self.date = lines[2].split(':')[1].strip()
        self.abstract = lines[3].split(':')[1].strip()
        # URLs contain ':' themselves (e.g. "http://"), so split only once.
        self.url = lines[4].split(':', 1)[1].strip()
        self.similarity = 0.0
        self.score = 0.0
        self.occurrence = []  # list of (start, end) spans within the abstract

    def __str__(self):
        """Human-readable summary: metadata, scores, and a +/-50 char
        context snippet of the abstract around each query-word occurrence."""
        sstr = f"""
        file: {self.fname}
        head: {self.head}
        author: {self.author}
        date: {self.date}
        url: {self.url}
        TF-IDF: {self.score}
        similarity: {self.similarity}
        """
        sstr = "\n".join(line.strip() for line in sstr.split("\n"))
        for index, i in enumerate(self.occurrence):
            sstr += f'[{index}]> ...' + self.abstract[max(0, i[0] - 50):i[0] + 50] + '...\n'
        return sstr

    def get_tfidf(self, inverse_table, word_value, num_docs=500):
        """Return the TF-IDF weight of ``word_value`` for this document.

        Args:
            inverse_table: word -> postings list; each posting is
                (paper_id, count, [(start, end), ...], word).
            word_value: the word to score.
            num_docs: total number of documents in the corpus (IDF numerator;
                defaults to the 500-document corpus this project indexes).

        Returns:
            tf * idf, where tf is the word count normalized by document
            length and idf = log10(num_docs / documents containing the word).
            Returns 0.0 when this document has no posting for the word.
        """
        tf = 0.0
        total_words = len(self.text.split())
        for record in inverse_table[word_value]:
            if record[0] == self.index:
                tf = record[1] / total_words
        paper_included = len(inverse_table[word_value])
        idf = math.log10(num_docs / paper_included)
        return tf * idf

    def bm25(self, inverse_table, word_value, num_docs=500):
        """Return the (Okapi-style) BM25 score of ``word_value`` for this
        document.

        NOTE(review): ``tf`` here is the length-normalized frequency, not the
        raw count used in the textbook BM25 formula — confirm this variant is
        intentional. Also, ``get_text()`` re-reads the whole corpus on every
        call to compute the average document length; consider caching.

        Args:
            inverse_table: word -> postings list (see ``get_tfidf``).
            word_value: the word to score.
            num_docs: total number of documents in the corpus.
        """
        len_D = len(self.text.split())
        # Average document length over the whole corpus.
        texts = get_text()
        avgdl = sum(len(text.split()) for text in texts) / num_docs
        # Term frequency (normalized) and inverse document frequency.
        tf = 0.0
        for record in inverse_table[word_value]:
            if record[0] == self.index:
                tf = record[1] / len_D
        paper_included = len(inverse_table[word_value])
        idf = math.log10(num_docs / paper_included)
        # Standard BM25 tuning constants.
        k = 1.2
        b = 0.75
        numerator = (k + 1) * tf
        denominator = k * ((1 - b) + b * (len_D / avgdl)) + tf
        return (numerator / denominator) * idf



def retrieve(sstr, inverse_table, texts, vec, X):
    """Search the corpus for the query string ``sstr``.

    One Item corresponds to one matching document. Its ``score`` is the sum
    of TF-IDF weights of the query words found in it (one contribution per
    posting hit), and ``similarity`` is the cosine similarity between the
    query's bag-of-words vector and the document's row of ``X``.

    Args:
        sstr: whitespace-separated query string.
        inverse_table: word -> postings list; each posting is
            (paper_id, count, [(start, end), ...], word).
        texts: raw document texts, indexed by paper id.
        vec: fitted CountVectorizer providing the corpus vocabulary.
        X: document-term matrix produced with that vocabulary.

    Returns:
        List of Items sorted by descending TF-IDF score.
    """
    word_list = sstr.split()
    # Use .get so a query word absent from the corpus contributes nothing
    # instead of raising KeyError.
    search_word = [inverse_table.get(word, []).copy() for word in word_list]
    # e.g. 'document': [(0, 1, [(44, 52)], 'document'), (1, 2, [(5, 13), (28, 36)], ...)]
    result_dict = {}
    # Accumulate, per document, the score contribution of every posting hit.
    for records in search_word:
        for record in records:
            paper_id = record[0]
            if paper_id not in result_dict:
                item = Item(paper_id, texts[paper_id])
                item.score = item.get_tfidf(inverse_table, record[3])
                item.occurrence.extend(record[2])
                result_dict[paper_id] = item
            else:
                hit = result_dict[paper_id]
                hit.score += hit.get_tfidf(inverse_table, record[3])
                hit.occurrence.extend(record[2])

    result_list = list(result_dict.values())
    # Vectorize the query against the corpus vocabulary, then score each hit.
    # NOTE(review): cosine_similarity returns a 1x1 matrix, so `[0]` stores a
    # length-1 array rather than a float — confirm whether [0][0] was intended.
    search_vec = CountVectorizer(vocabulary=vec.get_feature_names_out()).fit_transform([sstr]).toarray()
    for item in result_list:
        item.similarity = cosine_similarity(search_vec, X[item.index])[0]
    result_list.sort(key=lambda x: x.score, reverse=True)
    return result_list
