# -*- coding: utf-8 -*-
"""
@Time ： 2022/11/26 17:26
@Auth ： xlwreally
@File ：datasets.py
@IDE ：PyCharm
"""
import csv
import re
import sys
from argparse import Namespace
from contextlib import ExitStack
import random

import gensim
import nltk
import numpy as np
import torch
from gensim.scripts.glove2word2vec import glove2word2vec
from torch import nn
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from transformers import BertTokenizerFast

from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import TweetTokenizer
from nltk.tokenize import sent_tokenize

# One-time NLTK resource downloads — uncomment on first run of a fresh env.
# nltk.download('stopwords')
# nltk.download('wordnet')
# Module-level NLP utilities shared by the Dataset classes below:
# a lower-casing tweet tokenizer, a WordNet lemmatizer, and the English
# stop-word set used to pre-filter tokens before vectorization.
tokenizer = TweetTokenizer(preserve_case=False)
lemmatizer = WordNetLemmatizer()
stops = set(stopwords.words("english"))


class Vocabulary(object):
    """ Class to process text and extract vocabulary for mapping

        Maintains a bidirectional token<->index mapping with three special
        tokens registered on construction, in a fixed order so their indices
        are stable for an empty starting map: UNK (out-of-vocabulary tokens),
        NUM (shared index for all numeric tokens) and MASK (padding positions
        that must not update the model's parameters).

        Args:
            token_to_idx (dict): a pre-existing map of tokens to indices
            mask_token (str): the MASK token to add into the Vocabulary; indicates
                a position that will not be used in updating the model's parameters
            unk_token (str): the UNK token to add into the Vocabulary
            num_token (str): the token standing in for any numeric literal

    """

    # Matches optionally signed integers and decimals, e.g. "42", "-3.14", "+7."
    # Compiled once at class level instead of on every is_number() call.
    _NUM_REGEX = re.compile(r'^[+-]?[0-9]+\.?[0-9]*$')

    def __init__(self, token_to_idx=None, mask_token="<MASK>", unk_token="<UNK>", num_token='<NUM>'):
        if token_to_idx is None:
            token_to_idx = {}
        self._token_to_idx = token_to_idx

        # Inverse mapping used by lookup_index().
        self._idx_to_token = {idx: token
                              for token, idx in self._token_to_idx.items()}

        self._unk_token = unk_token
        self._mask_token = mask_token
        self._num_token = num_token

        # Registration order matters: callers rely on these three indices.
        self.unk_index = self.add_token(unk_token)
        self.num_index = self.add_token(num_token)
        self.mask_index = self.add_token(self._mask_token)

    def to_serializable(self):
        """Returns a dictionary that can be serialized and later fed to from_serializable()"""
        # BUGFIX: the previous version emitted 'add_unk': self._add_unk — an
        # attribute that was never assigned (AttributeError at call time) and
        # a keyword __init__ does not accept, breaking the round trip.
        return {'token_to_idx': self._token_to_idx,
                'unk_token': self._unk_token,
                'mask_token': self._mask_token,
                'num_token': self._num_token,
                }

    @classmethod
    def from_serializable(cls, contents):
        """Instantiates the Vocabulary from a serialized dictionary"""
        return cls(**contents)

    def add_token(self, token):
        """Update mapping dicts based on the token

        Args:
            token (str): the item to add into the Vocabulary

        Returns:
            index (int): the integer corresponding to the token

        """
        if token in self._token_to_idx:
            index = self._token_to_idx[token]
        else:
            # New tokens always get the next free index.
            index = len(self._token_to_idx)
            self._token_to_idx[token] = index
            self._idx_to_token[index] = token

        return index

    def add_many(self, tokens):
        """Add a list of tokens into the Vocabulary

        Args:
            tokens (list): a list of string tokens

        Returns:
            indices (list): a list of indices corresponding to the tokens

        """
        return [self.add_token(token) for token in tokens]

    def lookup_token(self, token):
        """Retrieve the index associated with the token or the UNK index if token isn't present

        Numeric tokens all collapse onto the shared NUM index.

        Args:
            token (str): the token to look up

        Returns:
            index (int): the index corresponding to the token

        Notes:
            `unk_index` needs to be >=0 (having been added into the Vocabulary) for the UNK functionality

        """
        if self.is_number(token):
            return self.num_index

        return self._token_to_idx.get(token, self.unk_index)

    def is_number(self, token):
        """Returns true if token is a (signed) integer or decimal literal else false"""
        return bool(self._NUM_REGEX.match(token))

    def lookup_index(self, index):
        """Token associated with the index

        Args:
            index (int): the index to look up

        Returns:
            token (str): the token corresponding to the index

        Raises:
            KeyError: if the index is not in the Vocabulary

        """
        if index not in self._idx_to_token:
            raise KeyError("the index (%d) is not in the Vocabulary" % index)
        return self._idx_to_token[index]

    def __str__(self):
        return "<Vocabulary(size=%d)>" % len(self)

    def __len__(self):
        return len(self._token_to_idx)


class Data(Dataset):
    """Dataset of weakly-labelled review sentences.

    Reads sentences, overall ratings and precomputed ABAE attention weights
    in lockstep, keeps only strongly polar reviews (rating 1 -> label 0,
    rating 5 -> label 1), and eagerly precomputes a BERT encoding and a
    GloVe embedding matrix for every kept sentence.
    """

    def vectorize(self, context, vector_length=-1):
        """Map a whitespace-separated string to a fixed-length index vector.

        Args:
            context (str): the string of words separated by a space
            vector_length (int): an argument for forcing the length of index
                vector; -1 means "use the natural length of the context"

        Returns:
            numpy.ndarray[int64]: token indices, truncated or MASK-padded
            to exactly vector_length entries.
        """
        indices = [self.vocab.lookup_token(token) for token in context.split(' ')]
        if vector_length < 0:
            vector_length = len(indices)

        # BUGFIX: truncate overly long contexts; previously a context longer
        # than vector_length raised a broadcast ValueError on assignment.
        indices = indices[:vector_length]

        out_vector = np.zeros(vector_length, dtype=np.int64)
        out_vector[:len(indices)] = indices
        out_vector[len(indices):] = self.vocab.mask_index

        return out_vector

    def neg_attention_generater(self, attention):
        """Return the complementary attention: max(attention) - each weight."""
        # Guard the empty case explicitly so max() below never sees no items.
        if len(attention) == 0:
            return []
        # Hoist the max out of the loop; the original recomputed it per item.
        peak = max(attention)
        return [peak - weight for weight in attention]

    def read_data_batches(self):
        """Load, shuffle, filter and encode up to args.epochs_size samples."""
        att = np.load(self.args.attentions, allow_pickle=True)
        with ExitStack() as stack:
            files = [stack.enter_context(open(fname, encoding='utf-8'))
                     for fname in [self.args.scentences, self.args.overalls]]
            # Renamed from `list`, which shadowed the builtin.
            rows = [row for row in tqdm(zip(*files, att), desc='read data')]
        random.shuffle(rows)

        kept = 0
        for sentence, overall, attention in tqdm(rows, total=self.args.epochs_size, desc='process_data'):
            # Drop overly long sentences.
            if len(sentence.split(" ")) > 96:
                continue
            overall = int(float(overall.replace('\n', '')))
            # Keep only strongly polar ratings: 1 -> negative (0), 5 -> positive (1).
            if overall == 1:
                overall = 0
            elif overall == 5:
                overall = 1
            else:
                continue
            sentence = sentence.replace('\n', '')

            bert_encode = self.tokenizer.encode_plus(
                text=sentence,
                # Truncate when the sentence exceeds max_length.
                truncation=True,
                # Zero-pad every sequence up to max_length.
                padding='max_length',
                max_length=self.args.text_size,
                add_special_tokens=True,
                # Return PyTorch tensors (alternatives: tf, np, plain lists).
                return_tensors="pt",
                return_token_type_ids=True,
                return_attention_mask=True,
                # offset_mapping (per-token character spans) is only
                # supported by BertTokenizerFast.
                return_offsets_mapping=True,
                # Also return the sequence length.
                return_length=True,
            )

            self.vec.append(bert_encode)
            vec = self.vectorize(sentence, self.args.text_size)
            # Build a (text_size, glove_dim) embedding matrix per sentence.
            tmp = torch.zeros(self.args.text_size, self.glove.vector_size)
            for pos, token_index in enumerate(vec):
                # NOTE(review): indices within 10 of mask_index (the special
                # UNK/NUM/MASK tokens appended after the GloVe vocabulary)
                # get zero vectors — confirm the intent of the -10 margin.
                if token_index > self.vocab.mask_index - 10:
                    tmp[pos] = torch.zeros(self.glove.vector_size)
                else:
                    tmp[pos] = torch.from_numpy(self.glove.vectors[token_index])
            self.glove_embedding.append(tmp)
            self.overall.append(overall)
            attention = attention[:self.args.text_size]
            self.ABAE_attention.append(attention)
            self.neg_ABAE_attention.append(self.neg_attention_generater(attention))
            kept += 1
            if kept >= self.args.epochs_size:
                break

    def __init__(self, args):
        """Build the GloVe-backed vocabulary, then eagerly load and encode data.

        Args:
            args (Namespace): must provide scentences, overalls, attentions,
                glove_cache, text_size and epochs_size.
        """
        super(Data, self).__init__()
        self.args = args
        self.tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
        print("加载glove")  # loading GloVe vectors
        self.glove = gensim.models.KeyedVectors.load_word2vec_format(args.glove_cache, binary=False)
        print("创建glove词典")  # building the GloVe-based vocabulary
        token2index_lim = {token: index for index, token in enumerate(self.glove.index_to_key)}
        self.vocab = Vocabulary(token2index_lim)
        self.glove_embedding = []
        self.overall = []
        self.ABAE_attention = []
        self.neg_ABAE_attention = []
        self.vec = []
        self.read_data_batches()
        self.neg_ABAE_attention = torch.tensor(self.neg_ABAE_attention)

    def __getitem__(self, idx):
        """Return one sample: label, attentions, BERT encoding, GloVe matrix."""
        return {
            'overall': self.overall[idx],
            'ABAE_attention': self.ABAE_attention[idx],
            'neg_ABAE_attention': self.neg_ABAE_attention[idx],
            'vector': self.vec[idx],
            "glove_embedding": self.glove_embedding[idx]
        }

    def __len__(self):
        return len(self.overall)

class Fine_tuning_data(Dataset):
    """Fine-tuning dataset backed by a CSV of (label, _, sentence) rows.

    Row column 0 is the sentiment label ('negative' -> 0, anything else -> 1)
    and column 2 is the raw sentence; each kept sentence is cleaned
    (tokenized, stop-word filtered, lemmatized) and then BERT-encoded and
    GloVe-embedded.
    """

    def vectorize(self, context, vector_length=-1):
        """Map a whitespace-separated string to a fixed-length index vector.

        Args:
            context (str): the string of words separated by a space
            vector_length (int): an argument for forcing the length of index
                vector; -1 means "use the natural length of the context"

        Returns:
            numpy.ndarray[int64]: token indices, truncated or MASK-padded
            to exactly vector_length entries.
        """
        indices = [self.vocab.lookup_token(token) for token in context.split(' ')]
        if vector_length < 0:
            vector_length = len(indices)

        # BUGFIX: truncate overly long contexts; previously a context longer
        # than vector_length raised a broadcast ValueError on assignment.
        indices = indices[:vector_length]

        out_vector = np.zeros(vector_length, dtype=np.int64)
        out_vector[:len(indices)] = indices
        out_vector[len(indices):] = self.vocab.mask_index

        return out_vector

    def lemmatize(self, w: str):
        """Lemmatize a word, memoizing results to speed repeated lookups."""
        # The original comment promised caching but none was implemented;
        # a lazy per-instance dict keeps output identical while avoiding
        # repeated WordNet lookups for the same word.
        cache = getattr(self, '_lemma_cache', None)
        if cache is None:
            cache = self._lemma_cache = {}
        if w not in cache:
            cache[w] = lemmatizer.lemmatize(w)
        return cache[w]

    def read_data_batches(self, args, path):
        """Read up to args.epochs_size rows from the CSV at `path` and encode them.

        Args:
            args (Namespace): must provide text_size and epochs_size.
            path (str): path to the CSV file.
        """
        # BUGFIX: open via a context manager so the handle is always closed;
        # newline='' is the csv-module-documented way to open input files.
        with open(path, newline='') as csvfile:
            csvdata = csv.reader(csvfile)
            i = 0
            for row in tqdm(csvdata, desc='读取预训练数据'):
                # Drop overly long sentences.
                if len(row[2].split(" ")) > 96:
                    continue
                # Binary label: 'negative' -> 0, anything else -> 1.
                if row[0] == 'negative':
                    self.overall.append(0)
                else:
                    self.overall.append(1)
                sentence = row[2]
                # Tokenize, drop stop-words and non-alphabetic tokens, lemmatize.
                sentence = [self.lemmatize(word) for word in tokenizer.tokenize(sentence)
                            if word not in stops and word.isalpha()]
                sentence = " ".join(sentence)
                bert_encode = self.tokenizer.encode_plus(
                    text=sentence,
                    # Truncate when the sentence exceeds max_length.
                    truncation=True,
                    # Zero-pad every sequence up to max_length.
                    padding='max_length',
                    max_length=args.text_size,
                    add_special_tokens=True,
                    # Return PyTorch tensors (alternatives: tf, np, plain lists).
                    return_tensors="pt",
                    return_token_type_ids=True,
                    return_attention_mask=True,
                    # offset_mapping (per-token character spans) is only
                    # supported by BertTokenizerFast.
                    return_offsets_mapping=True,
                    # Also return the sequence length.
                    return_length=True,
                )

                self.vec.append(bert_encode)
                vec = self.vectorize(sentence, args.text_size)
                # Build a (text_size, glove_dim) embedding matrix per sentence.
                tmp = torch.zeros(args.text_size, self.glove.vector_size)
                for n, v in enumerate(vec):
                    # NOTE(review): indices within 10 of mask_index (the
                    # special tokens appended after the GloVe vocabulary) get
                    # zero vectors — confirm the intent of the -10 margin.
                    if v > self.vocab.mask_index - 10:
                        tmp[n] = torch.zeros(self.glove.vector_size)
                    else:
                        tmp[n] = torch.from_numpy(self.glove.vectors[v])
                self.glove_embedding.append(tmp)
                i += 1
                if i >= args.epochs_size:
                    break

    def __init__(self, args, path):
        """Build the GloVe-backed vocabulary, then eagerly load and encode data.

        Args:
            args (Namespace): must provide glove_cache, text_size, epochs_size.
            path (str): path to the fine-tuning CSV file.
        """
        super(Fine_tuning_data, self).__init__()
        self.tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
        print("加载glove")  # loading GloVe vectors
        self.glove = gensim.models.KeyedVectors.load_word2vec_format(args.glove_cache, binary=False)
        print("创建glove词典")  # building the GloVe-based vocabulary
        token2index_lim = {token: index for index, token in enumerate(self.glove.index_to_key)}
        self.vocab = Vocabulary(token2index_lim)
        self.glove_embedding = []
        self.overall = []
        self.vec = []
        self.read_data_batches(args, path)

    def __getitem__(self, idx):
        """Return one sample: label, BERT encoding, GloVe embedding matrix."""
        return {
            'overall': self.overall[idx],
            'vector': self.vec[idx],
            "glove_embedding": self.glove_embedding[idx]
        }

    def __len__(self):
        return len(self.overall)

if __name__ == '__main__':
    # Smoke-test entry point: build the weakly-labelled Data dataset and
    # iterate batches for args.epochs passes (no model is trained here).
    args = Namespace(
        # Input corpus: one sentence per line, a parallel per-line ratings
        # file, and precomputed ABAE attention weights (.npy, pickled).
        scentences=r"D:\data\Weakly_labeled_data_1.1M\laptop.txt",
        overalls=r"D:\data\Weakly_labeled_data_1.1M\laptop_overall.txt",
        attentions=r"D:\data\Weakly_labeled_data_1.1M\laptop_attention.npy",

        # Pretrained-model locations (BERT dir; GloVe in word2vec text format).
        bert=r"D:\data\bert_torch",
        glove_cache=r"D:\data\glove.twitter.27B\vectors_wd2.txt",

        wv_dim=200,          # GloVe vector dimensionality
        bert_dim=768,        # BERT hidden size
        batch_size=50,
        epochs_size=10000,   # max number of samples to load into the dataset
        epochs=15,
        text_size=128,       # max token length per encoded sentence
        WCL_loss_size=6,
        f=50,
        m=192,
        u=192,
        t_PL=0.5,            # presumably pseudo-label threshold — confirm with trainer
        t_WCL=0.5,           # presumably WCL threshold — confirm with trainer
        σ1=0.2,

        device="cuda",
        reload_from_files=False,
        learning_rate=1e-5,
        early_stopping_criteria=10,
        catch_keyboard_interrupt=True,
        seed=1234,

        save_dir="model_storage",
        model_state_file="训练.pth",
    )
    # Override the Windows paths with cluster paths when running on Linux.
    if sys.platform == "linux":
        # args.scentences = "/mnt/JuChiYun-XiongLiWei/wml/data/Cell_Phones_and_Accessories_5.json.txt"
        # args.overalls = "/mnt/JuChiYun-XiongLiWei/wml/data/Cell_Phones_and_Accessories_5.json_overall.txt"
        # args.attentions = "/mnt/JuChiYun-XiongLiWei/wml/data/Cell_Phones_and_Accessories_5.json_att.npy"
        args.scentences = "/mnt/JuChiYun-XiongLiWei/wml/data/laptop.txt"
        args.overalls = "/mnt/JuChiYun-XiongLiWei/wml/data/laptop_overall.txt"
        args.attentions = "/mnt/JuChiYun-XiongLiWei/wml/data/laptop_attention.npy"
        args.bert = "/mnt/JuChiYun-XiongLiWei/wml/data"
        args.glove_cache = "/mnt/JuChiYun-XiongLiWei/wml/data/vectors_wd2.txt"
        args.save_dir = "/mnt/JuChiYun-XiongLiWei/wml/model_storage"

    # "Using CUDA: {device}"
    print("使用 CUDA: {}".format(args.device))

    # "Start loading data" — Data.__init__ eagerly reads/encodes everything.
    print("开始加载数据")
    loader = DataLoader(dataset=Data(args), batch_size=args.batch_size, shuffle=True, num_workers=0,
                        drop_last=False)
    # %%
    # "Start training" — progress bars only; no model/optimizer is created here.
    print("开始训练")
    train_bar = tqdm(
        desc='训练',
        total=loader.__len__(),
    )
    epoch_bar = tqdm(
        desc='训练epochs',
        total=args.epochs,
    )

    # Drain the loader, moving each batch tensor to the configured device.
    for epoch_index in range(args.epochs):
        for item_number, batch_dict in enumerate(loader):
            overall = batch_dict['overall']
            ABAE_attention = batch_dict['ABAE_attention'].to(args.device)
            neg_ABAE_attention = batch_dict['neg_ABAE_attention'].to(args.device)
            vector = batch_dict['vector'].to(args.device)
            glove_embedding = batch_dict['glove_embedding'].to(args.device)