# some utils
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from textrank4zh import TextRank4Keyword,TextRank4Sentence
from functools import partial
import jieba
import hanlp
import torch


def HelloWorld():
    """Print a greeting to stdout (smoke-test helper)."""
    message = "hello world"
    print(message)
    


def ReverseDic(dic, is_int=False, is_float=False, n=1):
    """Invert a dict: map each value back to its key.

    Duplicate values collapse — the key of the last item wins.

    Args:
        dic (dict): mapping to invert.
        is_int (bool, optional): convert the new keys (the old values) to int.
            (Previously accepted but ignored; now implemented.)
        is_float (bool, optional): convert the new keys to float, rounded to
            ``n`` decimal places. Takes precedence over ``is_int``.
        n (int, optional): number of decimals used with ``is_float``.

    Returns:
        dict: {value: key} for every (key, value) in ``dic``.
    """
    if is_float:
        return {round(float(k), n): v for v, k in dic.items()}
    if is_int:
        # was silently ignored in the original implementation
        return {int(k): v for v, k in dic.items()}
    return {k: v for v, k in dic.items()}


def cut_word(x, method='jieba'):
    """Tokenise a list of sentences.

    NOTE(review): this function is redefined later in this file with a
    different default (``method='hanlp'``); the later definition shadows
    this one at import time.

    Args:
        x (list[str]): sentences to tokenise.
        method (str, optional): 'jieba' or 'hanlp' (stronger tokeniser with
            several pretrained models). Defaults to 'jieba'.

    Returns:
        list[list[str]]: one token list per sentence.

    Raises:
        ValueError: for an unrecognised method (the original silently
            returned None).
    """
    if method == 'jieba':
        return [jieba.lcut(sentence) for sentence in x]
    if method == 'hanlp':
        # Electra (Clark et al. 2020) base model trained on the MSR CWS
        # dataset; well above the MTL models: P 98.71%, R 98.64%.
        tok = hanlp.load(hanlp.pretrained.tok.MSR_TOK_ELECTRA_BASE_CRF)
        return tok(x)
    raise ValueError(f"unknown cut method: {method!r}")
    
# Trie (prefix tree) for efficient word lookup and prefix search
class Trie:
    """Prefix tree over nested dicts; a terminal node carries an "end" key."""

    def __init__(self):
        # root node: each level maps one character to the next level's dict
        self.child = {}

    def insert(self, word):
        """Add ``word`` to the trie, creating nodes as needed."""
        node = self.child
        for ch in word:
            node = node.setdefault(ch, {})
        node["end"] = True

    def search(self, word):
        """Return True iff ``word`` was inserted as a complete word."""
        node = self._walk(word)
        return node is not None and "end" in node

    def startsWith(self, prefix):
        """Return True iff some inserted word starts with ``prefix``."""
        return self._walk(prefix) is not None

    def _walk(self, chars):
        """Follow ``chars`` from the root; return the final node or None."""
        node = self.child
        for ch in chars:
            if ch not in node:
                return None
            node = node[ch]
        return node

def Dropwords(drop_words, sentence_array):
    """Filter stop words out of tokenised sentences using a trie.

    NOTE: with a very large Chinese vocabulary the trie can use a lot
    of memory!

    (Fixes: the original placed this docstring *after* an ``import gc``
    statement, so it was not actually a docstring; it also accumulated an
    ``all_words`` list that was never used, wasting memory.)

    Args:
        drop_words (iterable[str]): words to remove.
        sentence_array (iterable[list[str]]): tokenised sentences.

    Returns:
        list[list[str]]: sentences with the stop words removed.
    """
    import gc  # local import kept from the original; used to reclaim the trie

    trie = Trie()
    for word in drop_words:
        trie.insert(word)

    contents_clean = [
        [word for word in line if not trie.search(word)]
        for line in sentence_array
    ]

    # The trie can be large; drop it and force a collection before returning.
    del trie
    gc.collect()
    return contents_clean

import pandas as pd
def load_stop_words(path=r"D:\code\Python-Project\TeddyCup\teddy-cup\Utils\stopwords.txt"):
    """Load a stop-word list, one word per line.

    Args:
        path (str, optional): text file with one stop word per line. The
            default points at a machine-specific location — pass an explicit
            path when used elsewhere.

    Returns:
        list[str]: the stop words.
    """
    # quoting=3 is csv.QUOTE_NONE: quote characters are kept as literal text.
    frame = pd.read_csv(path, index_col=False, sep="\t", quoting=3,
                        names=["stopword"], encoding="utf-8")
    return frame["stopword"].tolist()




def extra_english(x):
    """Extract runs of ASCII letters from a sentence.

    Args:
        x (str): a sentence.

    Returns:
        list[str]: the English (ASCII-letter) substrings found in ``x``.
    """
    return re.findall(r'[a-zA-Z]+', x)

def change_english(source, target):
    """Replace every run of ASCII letters in ``source`` with ``target``.

    Args:
        source (str): original string.
        target (str): replacement text for each English substring.

    Returns:
        str: ``source`` with all English substrings replaced.
    """
    english_run = re.compile(r'[a-zA-Z]+')
    return english_run.sub(target, source)




def TFIDFGetKeyWord(cut_words, threshold=0.15):
    """Extract keywords per document via TF-IDF.

    Args:
        cut_words (list[list[str]]): tokenised documents.
        threshold (float, optional): minimum TF-IDF score for a term to be
            kept. Defaults to 0.15.

    Returns:
        list[list[[str, float]]]: for each document, the [term, score] pairs
        whose score exceeds ``threshold``.
    """
    tfidf = TfidfVectorizer()
    # TfidfVectorizer expects whitespace-separated text, so re-join tokens.
    documents = [" ".join(tokens) for tokens in cut_words]
    tfidf_matrix = tfidf.fit_transform(documents)

    # get_feature_names() was removed in scikit-learn 1.2; prefer the new
    # API and fall back for older installations.
    try:
        feature_names = tfidf.get_feature_names_out()
    except AttributeError:
        feature_names = tfidf.get_feature_names()

    ans = []
    for i in range(len(documents)):
        res = [
            [feature, tfidf_matrix[i, j]]
            for j, feature in enumerate(feature_names)
            if tfidf_matrix[i, j] > threshold
        ]
        ans.append(res)
    return ans




def TextRank4GetKeyWord(setence, num, word_min_len=2):
    """Extract keywords from a sentence with TextRank.

    Args:
        setence (str): text to analyse.
        num (int): number of keywords to return.
        word_min_len (int, optional): minimum keyword length. Defaults to 2.

    Returns:
        list[str]: the extracted keywords.
    """
    ranker = TextRank4Keyword()
    ranker.analyze(text=setence, window=2)
    keywords = ranker.get_keywords(num, word_min_len=word_min_len)
    return [item['word'] for item in keywords]



def TextRank4GetKeyWords(setence, num, min_occur_num=1):
    """Extract key phrases (combinations of keywords) with TextRank.

    Args:
        setence (str): text to analyse.
        num (int): how many top keywords to combine into phrases.
        min_occur_num (int, optional): minimum frequency a phrase's words
            must have. Defaults to 1.

    Returns:
        list[str]: the extracted key phrases.
    """
    ranker = TextRank4Keyword()
    ranker.analyze(text=setence, window=2)
    phrases = ranker.get_keyphrases(keywords_num=num, min_occur_num=min_occur_num)
    return list(phrases)


def TextRank4GetSentence(sentence, num=3):
    """Extract the top key sentences from a text with TextRank.

    Args:
        sentence (str): text to analyse.
        num (int, optional): number of sentences to return. Defaults to 3.

    Returns:
        list[str]: the key sentences.
    """
    ranker = TextRank4Sentence()
    ranker.analyze(text=sentence, lower=True, source='all_filters')
    # each item also carries .index (position in the text) and .weight
    return [item.sentence for item in ranker.get_key_sentences(num=num)]

import numpy as np

def AltB_axis1(A, B, value):
    """Set A[i, j] to ``value`` wherever it is smaller than B[i].

    Mutates ``A`` in place and also returns it.

    Example:
        >>> A = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
        >>> B = np.array([2, 3, 4])
        >>> AltB_axis1(A, B, -2)
        array([[-2,  2,  3],
               [-2, -2,  3],
               [-2, -2, -2]])

    Args:
        A (ndarray): m * n array.
        B (ndarray): m * 1 (or flat m) per-row thresholds.

    Returns:
        ndarray: ``A`` with sub-threshold entries replaced by ``value``.
    """
    if B.ndim == 1:
        # promote to an (m, 1) column so it broadcasts across A's rows
        B = B.reshape(-1, 1)
    A[A < B] = value
    return A


def AsubstractB(A, B, ignore=False, value=None):
    """Row-wise subtraction: A[i] minus B[i].

    With ``ignore`` set, entries of ``A`` equal to ``value`` are left
    untouched (and ``A`` is mutated in place); otherwise a fresh ``A - B``
    is returned.

    Example:
        >>> A = np.array([[-2, 2, 4], [-2, 5, 8], [-2, -2, 10]])
        >>> B = np.array([2, 5, 9])
        >>> AsubstractB(A, B, True, -2)
        array([[-2,  0,  2],
               [-2,  0,  3],
               [-2, -2,  1]])

    Args:
        A (ndarray): m * n array.
        B (ndarray): same shape as ``A``, or flat m (broadcast per row).
        ignore (bool): whether to skip the sentinel entries of ``A``.
        value: sentinel marking entries of ``A`` excluded from subtraction.

    Returns:
        ndarray: the difference (``A`` itself when ``ignore`` is True).
    """
    if B.ndim == 1:
        # expand to A's shape so masked indexing lines up element-wise
        B = np.broadcast_to(B[:, np.newaxis], A.shape)
    if not ignore:
        return A - B
    keep = A != value
    A[keep] -= B[keep]
    return A


def BreplaceA(A, B, ignore=False, value=None):
    """Replace values of A with the corresponding values of B.

    When ``ignore`` is True, positions where ``A`` equals ``value`` are
    preserved and every other position takes B's value (``A`` is mutated in
    place). When ``ignore`` is False, ``B`` is returned unchanged.

    (Defaults added for consistency with ``AsubstractB``; positional calls
    keep their original meaning.)

    Example:
        >>> A = np.array([[-2, 2, 4], [-2, 5, 8], [-2, -2, 10]])
        >>> B = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 19]])
        >>> BreplaceA(A, B, True, value=-2)
        array([[-2,  2,  3],
               [-2,  2,  3],
               [-2, -2, 19]])

    Args:
        A (ndarray): m * n array, mutated in place when ``ignore`` is True.
        B (ndarray): m * n replacement values.
        ignore (bool, optional): preserve the sentinel entries of ``A``.
        value (optional): sentinel marking the entries of ``A`` to keep.

    Returns:
        ndarray: ``A`` (mutated) when ``ignore`` is True, otherwise ``B``.
    """
    if ignore:
        mask = (A != value)
        A[mask] = B[mask]
        return A
    return B



def AsameB(A,B):
    """
    判断B中第i行的元素与A中第i个的元素，是否相同。相同变为1，不同为0
    a = np.array([["广东省", '广东省', "广东省"],
              ['广东省', '上海省', '广东省'],
              ["广东省", '广东省', "广东省"]
              ])

    b = np.array(['广东省',
                '上海省',
                '广东省'])
    >>> AsameB(b,a)
    [[1 1 1]
    [0 1 0]
    [1 1 1]]
    
    
    Args:
        A (_type_): m*1
        B (_type_): m*n
    """
    return (B==A.reshape(-1,1)).astype(int)



def ArrContainValue(arr, value):
    """Return whether ``value`` (scalar or sequence of candidates) occurs in ``arr``."""
    return np.any(np.isin(arr, value))


def cut_word(x, method='hanlp'):
    """Tokenise a list of sentences.

    NOTE(review): this is a duplicate definition that shadows the earlier
    ``cut_word`` in this file (which defaults to 'jieba').

    Args:
        x (list[str]): sentences to tokenise.
        method (str, optional): 'jieba' or 'hanlp' (stronger tokeniser with
            several pretrained models). Defaults to 'hanlp'.

    Returns:
        list[list[str]]: one token list per sentence.

    Raises:
        ValueError: for an unrecognised method (the original silently
            returned None).
    """
    if method == 'jieba':
        return [jieba.lcut(sentence) for sentence in x]
    if method == 'hanlp':
        # Loading the pretrained model is expensive; cache it on the function
        # instead of reloading on every call as the original did.
        tok = getattr(cut_word, "_hanlp_tok", None)
        if tok is None:
            # Electra (Clark et al. 2020) base model trained on the MSR CWS
            # dataset; well above the MTL models: P 98.71%, R 98.64%.
            tok = hanlp.load(hanlp.pretrained.tok.MSR_TOK_ELECTRA_BASE_CRF)
            cut_word._hanlp_tok = tok
        return tok(x)
    raise ValueError(f"unknown cut method: {method!r}")
    
    
    
import hanlp
def Text2Vec(x, method='word2vec'):
    """Embed tokenised sentences with a pretrained hanlp model.

    (Fixes: the three branches were near-identical copies — deduplicated;
    an unknown ``method`` silently returned None — now raises.)

    Args:
        x (list[list[str]]): [['tok1', 'tok2', ...], ['tok1', 'tok2'], ...]
        method (str, optional): 'word2vec', 'glov' (GloVe) or 'fasttxt'
            (fastText). Defaults to 'word2vec'.

    Returns:
        list: one averaged vector per sentence. Sentences differ in length,
        so token vectors are mean-pooled here (lossy); concatenation would
        preserve more information if the downstream task allows it.

    Raises:
        ValueError: for an unrecognised method.
    """
    # Map the method name to (pretrained model id, move-result-to-cpu flag);
    # the original only called .cpu() in the glove/fasttext branches.
    if method == 'word2vec':
        pretrained, to_cpu = hanlp.pretrained.word2vec.MERGE_SGNS_BIGRAM_CHAR_300_ZH, False
    elif method == 'glov':   # global word vectors (GloVe)
        pretrained, to_cpu = hanlp.pretrained.glove.GLOVE_6B_300D, True
    elif method == 'fasttxt':
        pretrained, to_cpu = hanlp.pretrained.fasttext.FASTTEXT_CC_300_EN, True
    else:
        raise ValueError(f"unknown embedding method: {method!r}")

    model = hanlp.load(pretrained)
    res = []
    for sentence in x:
        vecs = [model(tok).cpu() if to_cpu else model(tok) for tok in sentence]
        res.append(np.sum(vecs) / len(vecs))
    return res



def get_embedding(data, cut_method="hanlp", mode="word2vec"):
    """Tokenise ``data``, drop stop words, and embed with a pretrained model.

    Args:
        data (list[str]): raw sentences.
        cut_method (str, optional): tokeniser for ``cut_word`` ('jieba' or
            'hanlp'). Defaults to 'hanlp'.
        mode (str, optional): embedding model key understood by ``Text2Vec``.
            Defaults to 'word2vec'.

    Returns:
        list: one vector per sentence (see ``Text2Vec``).
    """
    tokens = cut_word(data, method=cut_method)
    cleaned = Dropwords(load_stop_words(), tokens)
    return Text2Vec(cleaned, method=mode)



def calculate_cos_sim(job_embedding, people_embedding, has_nan=True):
    """Cosine similarity of every job vector against every people vector.

    (Fix: the device was hard-coded to CUDA, which crashed on CPU-only
    machines; it is now taken from the input tensors, falling back to CUDA
    when available and CPU otherwise.)

    Args:
        job_embedding (list[tensor]): word2vec vectors (the model used here
            is 300-dimensional); may contain non-tensor placeholders (NaN)
            when ``has_nan`` is True.
        people_embedding (list[tensor]): same format as ``job_embedding``.
        has_nan (bool, optional): replace non-tensor entries with a 300-dim
            zero vector before computing. Defaults to True.

    Returns:
        torch.Tensor: (len(job_embedding), len(people_embedding)) matrix of
        cosine similarities.
    """
    # Derive the device from the data instead of hard-coding 'cuda:0'.
    device = next(
        (t.device for t in list(job_embedding) + list(people_embedding)
         if torch.is_tensor(t)),
        torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    )

    if has_nan:
        # Failed lookups leave non-tensor entries; substitute a zero vector
        # (this embedding model is 300-dimensional).
        zero = torch.zeros(300, device=device)
        job_embedding = [x if torch.is_tensor(x) else zero for x in job_embedding]
        people_embedding = [x if torch.is_tensor(x) else zero for x in people_embedding]

    A_2d = torch.cat([tensor.view(1, -1) for tensor in job_embedding], dim=0)
    B_2d = torch.cat([tensor.view(1, -1) for tensor in people_embedding], dim=0)
    cos_sim = torch.zeros((A_2d.shape[0], B_2d.shape[0]), device=device)

    # One row at a time keeps peak memory low versus a full broadcast.
    for i in range(A_2d.shape[0]):
        cos_sim[i] = torch.nn.functional.cosine_similarity(
            A_2d[i].unsqueeze(0), B_2d, dim=1
        )
    return cos_sim