import math
import string

import nltk
import numpy as np
import pandas as pd
from tqdm import tqdm
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from collections import Counter
from nltk.stem.porter import PorterStemmer
from nltk.stem.lancaster import LancasterStemmer
from nltk.stem.snowball import SnowballStemmer

# Download tokenizer and stop-word resources
nltk.download('punkt')
nltk.download('stopwords')


class TFIDF:
    """TF-IDF (term frequency–inverse document frequency) text model.

    TF-IDF scores how important a word is to one document within a corpus:
    the weight rises with the word's frequency inside the document (TF) and
    falls with the number of documents that contain the word (IDF).  Word
    order is ignored; only occurrence counts matter.  It is a simple,
    efficient baseline commonly used for keyword extraction and first-pass
    text cleaning.

    Pipeline:
    1. tokenize each document
    2. filter stop words / punctuation (and stem, for English)
    3. build the vocabulary and the per-document TF-IDF table
    4. label each test document with the label of its nearest training
       document under cosine similarity (1-NN, transductive: the IDF table
       is computed over train + test together)
    """
    # Configuration
    stop_words: set            # stop-word vocabulary used for filtering
    participle_utils: None     # tokenizer: str -> list[str]
    # Data
    _total_data: pd.DataFrame
    _train_data: pd.DataFrame
    _test_data: pd.DataFrame
    _train_label: np.ndarray
    _word_2_idx_dict: dict     # word -> column index ('<unk>' -> 0)
    _total_word_num: int
    _total_file_num: int
    _total_train_file_num: int
    # Tables
    _article_word_table: np.ndarray        # (total docs, vocab)
    _train_article_word_table: np.ndarray  # (train docs, vocab)
    _test_article_word_table: np.ndarray   # (test docs, vocab)
    _word_idf_table: dict                  # word -> idf value

    def __init__(self, stop_words='english',
                 participle_utils=word_tokenize, stemmer='porter'):
        """
        :param stop_words: NLTK stop-word corpus name (e.g. 'english', 'chinese')
        :param participle_utils: tokenizer turning a string into a token list
        :param stemmer: 'porter' (default), 'lancaster' or 'snowball';
                        stemming is only applied when stop_words == 'english'
        :raises ValueError: if participle_utils is None
        """
        if participle_utils is None:
            raise ValueError("arg participle_utils is None. Please set it.")

        # BUG FIX: the stop list must be a word *set*.  The previous code
        # kept the raw corpus text as one string, so `word in stop_words`
        # performed substring matching and wrongly dropped real words
        # (e.g. "use" is a substring of the stop word "because").
        self.stop_words = set(stopwords.words(stop_words))
        self.participle_utils = participle_utils

        if stop_words == 'english':
            if stemmer == 'lancaster':
                self._stemmer = LancasterStemmer().stem
            elif stemmer == 'snowball':
                self._stemmer = SnowballStemmer('english').stem
            else:
                self._stemmer = PorterStemmer().stem
        else:
            # For non-English corpora (e.g. Chinese), skip stemming.
            self._stemmer = lambda w: w

    def _create_tfidf_table(self):
        # Document x vocabulary matrix (_total_file_num * _total_word_num).
        self._article_word_table = np.zeros((self._total_file_num, self._total_word_num))
        self._calc_tfidf(self._article_word_table, self._total_data)

    def _split_tfidf_table(self):
        # First _total_train_file_num rows are the training documents
        # (predict() concatenates train before test).
        self._train_article_word_table = self._article_word_table[:self._total_train_file_num]
        self._test_article_word_table = self._article_word_table[self._total_train_file_num:]

    def _calc_tfidf(self, table: np.ndarray, dataFrame: pd.DataFrame):
        """Fill `table` (docs x vocab) with TF-IDF weights for `dataFrame`."""
        for idx in tqdm(range(len(dataFrame)), desc="正在创建tfidf表"):
            article = dataFrame.iloc[idx]
            dealt_content_list = article["dealt_content"]
            content_word_num = article["content_word_num"]
            counter = Counter(dealt_content_list)
            for word, count in counter.items():
                # TF = count / document length; IDF from the precomputed
                # table, falling back to the '<unk>' entry.
                table[idx, self._word_2_idx_dict.get(word, 0)] = \
                    (float(count) / content_word_num) * \
                    self._word_idf_table.get(word, self._word_idf_table['<unk>'])

    def _idf(self):
        """Compute smoothed IDF, log(N / (1 + df)), for every vocabulary word.

        Document frequencies are gathered in a single pass over the corpus
        (each word counted once per document) instead of scanning every
        document per vocabulary word, which was O(vocab * corpus).
        """
        doc_freq = Counter()
        for token_list in self._total_data['dealt_content']:
            doc_freq.update(set(token_list))

        # '<unk>' never occurs in documents, so its df is 0 -> idf = log(N).
        self._word_idf_table = {
            word: math.log(self._total_file_num / (1 + doc_freq[word]))
            for word in tqdm(self._word_2_idx_dict, desc="正在计算idf表")
        }

    def fit(self, X, y):
        """Tokenize and store the training documents and their labels."""
        self._train_data = pd.DataFrame()
        self._train_label = y.copy()
        self._pre_deal_data(X, self._train_data)
        self._total_train_file_num = len(self._train_data)

    def predict(self, X) -> np.ndarray:
        """Label each document in X with its nearest training document's label.

        Builds the vocabulary and IDF table over train + test combined
        (transductive), computes the TF-IDF matrix, and returns the training
        label of the most cosine-similar training document per test document.
        """
        self._test_data = pd.DataFrame()
        self._pre_deal_data(X, self._test_data)

        # Combine train and test documents; order matters for the later split.
        self._total_data = pd.concat([self._train_data, self._test_data])
        self._total_file_num = len(self._total_data)
        dealt_content_list = np.hstack(self._total_data['dealt_content'].to_numpy())
        dealt_content_set = np.unique(dealt_content_list)

        # Vocabulary: index 0 is reserved for '<unk>'.
        self._word_2_idx_dict = {word: idx + 1 for idx, word in enumerate(dealt_content_set)}
        self._word_2_idx_dict['<unk>'] = 0
        self._total_word_num = len(self._word_2_idx_dict)

        # Build the corpus-wide IDF and TF-IDF tables.
        self._idf()
        self._create_tfidf_table()

        # Split back into train and test TF-IDF tables.
        self._split_tfidf_table()

        # 1-NN classification by cosine similarity.
        similar_table = self._cosine_similar_dist()
        return self._train_label[similar_table.argmax(axis=0)]

    def _cosine_similar_dist(self) -> np.ndarray:
        """Cosine similarity between every train and test TF-IDF vector.

            train table: train_total_file * word_num
            test table:  test_total_file * word_num

        :return: np.ndarray of shape (train_total_file, test_total_file);
                 larger values mean more similar documents.
        """
        # (train, vocab) @ (vocab, test) -> (train, test)
        dot_val = self._train_article_word_table @ self._test_article_word_table.T
        train_norm = np.sqrt((self._train_article_word_table ** 2).sum(axis=1, keepdims=True))
        test_norm = np.sqrt((self._test_article_word_table ** 2).sum(axis=1, keepdims=True))
        norm_prod = train_norm * test_norm.T
        # Guard 0/0 -> NaN for all-zero documents: their similarity becomes 0.
        norm_prod[norm_prod == 0] = 1.0
        return dot_val / norm_prod

    def _pre_deal_data(self, X, dataFrame: pd.DataFrame):
        """Tokenize, lower-case and filter X into dataFrame columns."""
        dataFrame['content'] = X.copy()
        dataFrame['dealt_content'] = dataFrame['content'] \
            .map(lambda x: self._remove_stop_words(self.participle_utils(x.lower())))
        dataFrame['content_word_num'] = dataFrame['dealt_content'].map(lambda x: len(x))

    def _remove_stop_words(self, words):
        """Drop stop words and punctuation, then stem the remaining tokens."""
        return [self._stemmer(word) for word in words if word not in self.stop_words
                and word not in string.punctuation]

    @property
    def tfidf_table(self):
        # Combined (train + test) TF-IDF matrix.
        return self._article_word_table

    @property
    def test_tfidf_table(self):
        # Test-only TF-IDF matrix.
        return self._test_article_word_table
