# from transformers import pipeline
# from text2vec import SentenceModel
# from text2vec import Word2Vec

# Standard library
import glob
import hashlib
import os
import re

# Third-party
import requests
import numpy as np
import pandas as pd
from tqdm import tqdm
from gensim.models import Word2Vec
from gensim.models.callbacks import CallbackAny2Vec
from gensim.models import FastText
from gensim.models.word2vec import LineSentence
from gensim.models import KeyedVectors
from gensim.models.fasttext import FastText
from gensim.test.utils import datapath, get_tmpfile


def compute_emb(model):
    """Encode a fixed list of demo sentences with *model* and print a summary.

    *model* must expose an ``encode(list[str]) -> ndarray`` method; the result
    is expected to be a 2-D array with one embedding row per sentence.
    """
    demo_sentences = [
        '卡',
        '银行卡',
        '如何更换花呗绑定银行卡',
        '花呗更改绑定银行卡',
        'This framework generates embeddings for each input sentence',
        'Sentences are passed as a list of string.',
        'The quick brown fox jumps over the lazy dog.'
    ]
    embeddings = model.encode(demo_sentences)
    print(type(embeddings), embeddings.shape)

    # Walk sentences and their embedding rows in lockstep.
    for text, vec in zip(demo_sentences, embeddings):
        print("Sentence:", text)
        print("Embedding shape:", vec.shape)
        print("Embedding head:", vec[:10])
        print()


def similarity_computation_main():
    """Run compute_emb over three pretrained sentence/word embedding models.

    Downloads the models on first use, so this requires network access.
    """
    # Local import: the top-level text2vec import is commented out, and
    # text2vec's Word2Vec must shadow gensim's Word2Vec inside this function
    # (gensim's constructor cannot load these pretrained model names).
    from text2vec import SentenceModel, Word2Vec

    # Chinese sentence-vector model (CoSENT); recommended for Chinese semantic
    # matching tasks; supports fine-tuning.
    t2v_model = SentenceModel("shibing624/text2vec-base-chinese")
    compute_emb(t2v_model)

    # Multilingual sentence-vector model (CoSENT); recommended for multilingual
    # (incl. Chinese/English) semantic matching; supports fine-tuning.
    sbert_model = SentenceModel("shibing624/text2vec-base-multilingual")
    compute_emb(sbert_model)

    # Chinese word-vector model (word2vec); suited to literal matching and
    # cold-start scenarios.
    w2v_model = Word2Vec("w2v-light-tencent-chinese")
    compute_emb(w2v_model)


def get_md5(input_string):
    """Return the hex MD5 digest of *input_string* (UTF-8 encoded).

    NOTE: MD5 is fine for fingerprinting/deduplication, but must not be used
    for anything security-sensitive.
    """
    # hashlib is already imported at module level; the old function-local
    # re-import was redundant.
    return hashlib.md5(input_string.encode("utf-8")).hexdigest()


def python_rematch():
    """Demo: extract a captured group from a ``[[NN]]``-style string.

    Prints the digits inside the double brackets (here ``11``).
    """
    example_string = '[[11]]'
    # Match once and keep the Match object instead of running the regex twice.
    match = re.match(r'\[\[([0-9]+)\]\]', example_string)
    if match:
        # group(1) is the first capturing group: the digit run.
        print(match.group(1))


def get_files(in_directory):
    """Recursively list every file and directory under *in_directory*.

    Uses ``os.path.join`` so the argument works with or without a trailing
    separator (the old ``in_directory + "**/*"`` form silently broke when the
    trailing slash was missing).
    """
    return glob.glob(os.path.join(in_directory, "**", "*"), recursive=True)


def sentiment_analysis():
    """Run the default Hugging Face sentiment-analysis pipeline on a sample
    sentence and print the classification result.

    Downloads the default model on first use (network required).
    """
    # Local import: the top-level `from transformers import pipeline` is
    # commented out, so the name is otherwise undefined here.
    from transformers import pipeline
    classifier = pipeline("sentiment-analysis")
    # Run inference once (the old code ran the same inference twice and
    # discarded the first result).
    print(classifier("I've been waiting for a HuggingFace course my whole life."))


def text_generation():
    """Run the default Hugging Face text-generation pipeline on a prompt and
    print the generated continuation.

    Downloads the default model on first use (network required).
    """
    # Local import: the top-level `from transformers import pipeline` is
    # commented out, so the name is otherwise undefined here.
    from transformers import pipeline
    generator = pipeline("text-generation")
    # Generate once (the old code ran the same generation twice and discarded
    # the first result).
    print(generator("In this course, we will teach you how to"))


def summarize():
    """Summarize a long passage about US engineering education with the default
    Hugging Face summarization pipeline and print the summary.

    Downloads the default model on first use (network required).
    """
    # Local import: the top-level `from transformers import pipeline` is
    # commented out, so the name is otherwise undefined here.
    from transformers import pipeline
    summarizer = pipeline("summarization")
    # The old code discarded the summary; capture and print it.
    result = summarizer(
        """
    America has changed dramatically during recent years. Not only has the number of 
    graduates in traditional engineering disciplines such as mechanical, civil, 
    electrical, chemical, and aeronautical engineering declined, but in most of 
    the premier American universities engineering curricula now concentrate on 
    and encourage largely the study of engineering science. As a result, there 
    are declining offerings in engineering subjects dealing with infrastructure, 
    the environment, and related issues, and greater concentration on high 
    technology subjects, largely supporting increasingly complex scientific 
    developments. While the latter is important, it should not be at the expense 
    of more traditional engineering.

    Rapidly developing economies such as China and India, as well as other 
    industrial countries in Europe and Asia, continue to encourage and advance 
    the teaching of engineering. Both China and India, respectively, graduate 
    six and eight times as many traditional engineers as does the United States. 
    Other industrial countries at minimum maintain their output, while America 
    suffers an increasingly serious decline in the number of engineering graduates 
    and a lack of well-educated engineers.
"""
    )
    print(result)


def ChineseSentenceSimlarity():
    """Compute masked mean-pooled SBERT embeddings for four Chinese sentences
    and return the cosine similarity of sentence 0 against sentences 1..3.

    Requires the local "./sbert-base-chinese-nli" checkpoint on disk.

    Returns:
        numpy.ndarray of shape (1, 3): similarities of the first sentence to
        each of the remaining three (expected ~[[0.986, 0.390, 0.298]]).
    """
    from transformers import BertTokenizer, BertModel
    from sklearn.metrics.pairwise import cosine_similarity
    import torch
    model = BertModel.from_pretrained("./sbert-base-chinese-nli")
    tokenizer = BertTokenizer.from_pretrained("./sbert-base-chinese-nli")
    sentences = ["那个人很开心", "那个人非常开心", "那只猫很开心", "那个人在吃东西"]
    # Encode each sentence separately, then stack into one batch tensor.
    tokens = {'input_ids': [], 'attention_mask': []}
    for sentence in sentences:
        new_tokens = tokenizer.encode_plus(
            sentence, max_length=15, truncation=True, padding='max_length', return_tensors='pt')
        tokens['input_ids'].append(new_tokens['input_ids'][0])
        tokens['attention_mask'].append(new_tokens['attention_mask'][0])
    tokens['input_ids'] = torch.stack(tokens['input_ids'])          # (4, 15)
    tokens['attention_mask'] = torch.stack(tokens['attention_mask'])  # (4, 15)

    # Inference only: skip gradient tracking.
    with torch.no_grad():
        outputs = model(**tokens)
    embeddings = outputs.last_hidden_state  # (4, 15, 768)

    # Masked mean pooling: zero out padding positions, then average over the
    # real (non-padding) tokens only.
    attention_mask = tokens['attention_mask']
    mask = attention_mask.unsqueeze(-1).expand(embeddings.size()).float()  # (4, 15, 768)
    masked_embeddings = embeddings * mask
    summed = torch.sum(masked_embeddings, 1)                # (4, 768)
    summed_mask = torch.clamp(mask.sum(1), min=1e-9)        # avoid divide-by-zero
    mean_pooled = summed / summed_mask                      # (4, 768)
    mean_pooled = mean_pooled.detach().numpy()
    # Similarity of sentence 0 vs. sentences 1..3. The old code computed this
    # and silently discarded it; print and return it instead.
    result = cosine_similarity([mean_pooled[0]], mean_pooled[1:])
    print(result)
    return result


def translate():
    """Translate a French sentence to English with the Helsinki-NLP MarianMT
    pipeline and print the result.

    Downloads the model on first use (network required).
    """
    # Local import: the top-level `from transformers import pipeline` is
    # commented out, so the name is otherwise undefined here.
    from transformers import pipeline
    translator = pipeline("translation", model="Helsinki-NLP/opus-mt-fr-en")
    # The old code discarded the translation; print it so the demo has output.
    print(translator("Ce cours est produit par Hugging Face."))


def sentence_sim():
    """Score every pairing of two query phrases against a list of candidate
    phrases with text2vec's Similarity model, printing one line per pair.

    Downloads the default similarity model on first use (network required).
    """
    from text2vec import Similarity
    sentences1 = ['买东西',
                  '购物']

    sentences2 = ['歌唱',
                  '歌声',
                  '振鸣',
                  '骑单车', '去买东西', '买', '获得', '买得的东西', '商店',
                  '选购', '购物：购物流程：逛',
                  '购物：购物流程：选', '购物：购物流程：结账',
                  '购物：购物流程：付款', '上街购物', '购买东西', '商店：商店用的包装纸']

    sim_model = Similarity()
    # Iterate the sentences directly instead of the range(len(...)) anti-idiom.
    for query in sentences1:
        for candidate in sentences2:
            score = sim_model.get_score(query, candidate)
            print("{} \t\t {} \t\t Score: {:.4f}".format(
                query, candidate, score))


def text_summerized():
    """Load the Spanish and English Amazon multilingual review datasets plus
    the sacrebleu metric, printing each loaded object.

    Downloads the datasets on first use (network required).
    """
    from datasets import load_dataset, load_metric
    reviews_es = load_dataset("amazon_reviews_multi", "es")
    reviews_en = load_dataset("amazon_reviews_multi", "en")
    bleu_metric = load_metric("sacrebleu")
    # Print in load order: Spanish dataset, English dataset, metric.
    for loaded in (reviews_es, reviews_en, bleu_metric):
        print(loaded)


if __name__ == "__main__":
    # Demo entry point: runs every experiment in sequence. Each call below
    # downloads and runs a heavyweight model or dataset on first use, so
    # expect long runtimes and network access.
    text_summerized()
    sentence_sim()
    similarity_computation_main()
    sentiment_analysis()
    text_generation()
    summarize()
