import torch
import torch.nn as nn
import numpy as np
import pickle
import os
import datetime
import random
import numpy as np
from tensorboardX import SummaryWriter
import torch.utils.data as Data
import torch.nn.functional as F
from optimization import BERTAdam
import argparse
from transformers import (
    BertTokenizer,
    BertConfig,
    BertForSequenceClassification,
    BertModel,
)
from sklearn.metrics import precision_score, recall_score

# Restrict visible GPUs BEFORE any CUDA context is created, otherwise the
# restriction may not take effect (the original created the device first).
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
n_gpu = torch.cuda.device_count()
# Fall back to CPU when no GPU is present; the original hard-coded "cuda",
# which crashes on CPU-only machines.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def set_seed(num):
    """Seed every RNG source (Python, NumPy, Torch CPU and all GPUs) for reproducibility.

    Args:
        num: the seed value.
    """
    random.seed(num)
    np.random.seed(num)
    torch.manual_seed(num)  # seeds the CPU RNG
    # BUG FIX: the original called torch.cuda.manual_seed in the *else* branch,
    # i.e. exactly when no GPU was present. Seed CUDA only when it is available.
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(num)  # seeds every visible GPU

# Build the herb vocabulary at import time: one herb per line, keep only the
# first whitespace-separated token of each line.
with open("herb_vocab_6888.txt", 'r', encoding='utf-8') as f:
    content = f.readlines()
valid_herbs = [line.strip().split()[0] for line in content]
class MyDataset(Data.Dataset):
    """Dataset of (prescription_text, herb_string) pairs read from a pickle file.

    The file must contain two sequentially dumped objects: a sequence of
    texts `p` and a parallel sequence of herb strings `h`.

    NOTE(review): pickle.load executes arbitrary code from the file — only
    load trusted data files.
    """

    def __init__(self, path):
        # Two consecutive pickle.load calls read the two objects dumped
        # one after another into the same file.
        with open(path, 'rb') as out_file:
            p = pickle.load(out_file)
            h = pickle.load(out_file)
        print("len(p),len(h):", len(p), len(h))
        # Pair the parallel sequences into (text, herb_string) tuples.
        self.train_data = list(zip(p, h))

    def __getitem__(self, idx):
        return self.train_data[idx]

    def __len__(self):
        return len(self.train_data)

def fill_label(item, vocab=None):
    """Convert a '、'-separated herb string into a multi-hot label vector.

    Args:
        item: herb names joined by the Chinese enumeration comma '、'.
        vocab: optional vocabulary list; defaults to the module-level
            `valid_herbs` (backward compatible with the original signature).

    Returns:
        (target_label, fill): an int ndarray of len(vocab) with 1 at each
        herb's index, and the list of those indices in appearance order.

    Raises:
        KeyError if a herb name is not in the vocabulary (the original
        raised ValueError via list.index).
    """
    if vocab is None:
        vocab = valid_herbs
    # O(1) dict lookup per herb; the original ran list.index (O(n)) per herb.
    index_of = {herb: pos for pos, herb in enumerate(vocab)}
    fill = [index_of[x] for x in item.split('、')]
    target_label = np.zeros(len(vocab), dtype=int)
    target_label[fill] = 1
    return target_label, fill

def make_data(batch_data):
    """DataLoader collate_fn: turn (text, herb_string) pairs into model tensors.

    Each text is tokenized to a fixed length of 512 (padded / truncated);
    each herb string becomes a multi-hot label via fill_label. Uses the
    module-level `tokenizer` created in __main__.

    Returns:
        input_ids, token_type_ids, attention_mask: LongTensor [batch, 512]
        labels: LongTensor [batch, herb_size] multi-hot targets
        token: list of per-sample gold herb index lists (ragged, stays a list)
    """
    input_ids, token_type_ids, attention_mask, labels, token = [], [], [], [], []
    for text, herb_str in batch_data:
        encoded_dict = tokenizer(text, return_tensors="pt", padding='max_length', max_length=512, truncation=True)
        target_label, target_token = fill_label(herb_str)

        input_ids.append(encoded_dict['input_ids'])
        token_type_ids.append(encoded_dict['token_type_ids'])
        attention_mask.append(encoded_dict['attention_mask'])
        labels.append(torch.from_numpy(target_label).unsqueeze(0))
        token.append(target_token)

    # torch.cat stacks the [1, ...] per-sample tensors into [batch, ...].
    # .long() guarantees int64 dtype and replaces the original's redundant,
    # deprecated torch.LongTensor(tensor) re-wraps.
    input_ids = torch.cat(input_ids, dim=0).long()
    token_type_ids = torch.cat(token_type_ids, dim=0).long()
    attention_mask = torch.cat(attention_mask, dim=0).long()
    labels = torch.cat(labels, dim=0).long()

    return input_ids, token_type_ids, attention_mask, labels, token


class bert4predict(nn.Module):
    """Multi-label classification head on top of a pickled pretrained BERT.

    NOTE(review): torch.load unpickles arbitrary code — only load trusted
    checkpoints. (This class is currently unused; __main__ builds origin_bert.)
    """

    def __init__(self, model_path, config):
        super().__init__()
        self.pretrain_bert = torch.load(model_path)
        # Projects the pooled [batch, hidden] representation to herb logits.
        self.predict_cls = nn.Linear(config.hidden_size, config.herb_size)

    def forward(self, input_ids, token_type_ids, attention_mask, labels=None, return_dict=None):
        out, last = self.pretrain_bert(input_ids,
                                       attention_mask=attention_mask,
                                       token_type_ids=token_type_ids,
                                       labels=labels,
                                       return_dict=True)
        # BUG FIX: the original sliced last.pooler_output[:, 0], collapsing the
        # pooled [batch_size, hidden_dim] tensor to [batch_size] and breaking the
        # Linear layer. The pooled CLS representation is the whole pooler_output
        # (same as origin_bert below).
        pooled = last.pooler_output  # [batch_size, hidden_dim]
        output = self.predict_cls(pooled)  # [batch_size, herb_size]
        return output
class origin_bert(nn.Module):
    """Hugging Face BertModel plus a linear multi-label classification head."""

    def __init__(self, path, config):
        super().__init__()
        self.pretrain_bert = BertModel.from_pretrained(path)
        # Projects the pooled [batch, hidden] representation to herb logits.
        self.predict_cls = nn.Linear(config.hidden_size, config.herb_size)

    def resize_token_embeddings(self, len):
        # Delegates to the underlying BertModel. (Parameter name `len` shadows
        # the builtin; kept unchanged for caller compatibility.)
        self.pretrain_bert.resize_token_embeddings(len)

    def forward(self, input_ids, token_type_ids, attention_mask, labels=None, return_dict=None):
        """Return herb logits of shape [batch_size, herb_size].

        `labels` and `return_dict` are accepted for interface compatibility
        but not used (loss is computed by the caller).
        """
        out = self.pretrain_bert(input_ids,
                                 attention_mask=attention_mask,
                                 token_type_ids=token_type_ids,
                                 return_dict=True)
        # pooler_output: [batch_size, hidden_dim] pooled CLS representation.
        # (The original printed its shape on every forward — debug noise removed.)
        pooler_output = out.pooler_output
        output = self.predict_cls(pooler_output)  # [batch_size, herb_size]
        return output

def flat_accuracy(preds, labels):
    """Mean per-sample precision: |pred ∩ gold| / |pred|, averaged over samples.

    Args:
        preds: sequence of predicted index lists, one per sample.
        labels: parallel sequence of gold index lists.

    Returns:
        The mean ratio as a float; 0.0 for empty input (the original raised
        ZeroDivisionError on an empty batch or an empty prediction list).
    """
    if not preds:
        print("tmp_eval_accuracy:", 0.0)
        return 0.0
    total = 0.0
    for pred, gold in zip(preds, labels):
        overlap = set(pred) & set(gold)
        # Guard against an empty prediction list (counts as 0 precision).
        total += len(overlap) / len(pred) if pred else 0.0
    mean_acc = total / len(preds)
    print("tmp_eval_accuracy:", mean_acc)
    return mean_acc
if __name__=="__main__":
    # CLI: model path, train/eval pickle files, epochs, batch sizes.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name_or_path", default=None, type=str)
    parser.add_argument("--train_file", default=None, type=str)
    parser.add_argument("--eval_file", default=None, type=str)
    parser.add_argument("--num_train_epochs", default=1, type=int)
    parser.add_argument("--train_batch_size", default=8, type=int)
    parser.add_argument("--eval_batch_size", default=8, type=int)
    args = parser.parse_args()

    start_time = datetime.datetime.now()
    # Seed everything before any model/data initialization for reproducibility.
    set_seed(2021)
    tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path)
    print(tokenizer, tokenizer.vocab_size)

    #  prepare data: make_data is the collate_fn that tokenizes and builds labels
    train_dataset = MyDataset(args.train_file)
    train_sampler = Data.RandomSampler(train_dataset)
    train_dataloader = Data.DataLoader(train_dataset, batch_size=args.train_batch_size, sampler=train_sampler, collate_fn=make_data)
    eval_dataset = MyDataset(args.eval_file)
    eval_dataloader = Data.DataLoader(eval_dataset, batch_size=args.eval_batch_size, collate_fn=make_data)

    #  prepare model & criterion; herb_size extends the config so the linear
    #  head knows the output dimension.
    config = BertConfig.from_pretrained(args.model_name_or_path)
    config.herb_size = len(valid_herbs)
    print("config.herb_size:", config.herb_size)
    # model = bert4predict('model.pkl', config)
    model = origin_bert(args.model_name_or_path, config)
    model.resize_token_embeddings(tokenizer.vocab_size)
    model.to(device)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # NOTE(review): pos_weight ramps linearly 1..herb_size per class — the
    # original inline comment called this per-class weighting / label smooth,
    # but the linear ramp looks arbitrary; confirm the intended weighting.
    a = [x for x in range(1, config.herb_size+1)]
    a = np.array(a)
    pos_weight = torch.from_numpy(a).to(device)  # per-class positive weights for BCE
    print(pos_weight.shape, pos_weight)
    loss_fct = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
    # keras/tensorflow are imported only for the (currently unused)
    # multilabel_categorical_crossentropy below.
    from keras import backend as K
    import tensorflow as tf
    def multilabel_categorical_crossentropy(y_true, y_pred):
        """Multi-label categorical cross-entropy (Keras/TensorFlow backend).

        y_true and y_pred must have the same shape; y_true entries are 0 or 1,
        where 1 marks a target class and 0 a non-target class.
        Currently unused — the training loop uses BCEWithLogitsLoss instead.
        """
        # Flip the sign of target-class logits so one logsumexp handles both sides.
        y_pred = (1 - 2 * y_true) * y_pred
        # Mask out the opposite group with a large negative constant (-1e12).
        y_pred_neg = y_pred - y_true * 1e12
        y_pred_pos = y_pred - (1 - y_true) * 1e12
        zeros = K.zeros_like(y_pred[..., :1])
        y_pred_neg = tf.convert_to_tensor(y_pred_neg)
        y_pred_pos = tf.convert_to_tensor(y_pred_pos)
        # Append a zero logit so each logsumexp includes the implicit threshold.
        y_pred_neg = K.concatenate([y_pred_neg, zeros], axis=-1)
        y_pred_pos = K.concatenate([y_pred_pos, zeros], axis=-1)
        neg_loss = K.logsumexp(y_pred_neg, axis=-1)
        pos_loss = K.logsumexp(y_pred_pos, axis=-1)
        return neg_loss + pos_loss



    #  prepare optimizer: BERTAdam with linear warmup over 10% of total steps
    total_steps = int(len(train_dataloader) * args.num_train_epochs)
    optimizer = BERTAdam(params=model.parameters(),
                         lr=1e-5,
                         warmup=0.1,
                         max_grad_norm=1.0,
                         t_total=total_steps,
                         schedule='warmup_linear',
                         weight_decay_rate=0.01)
    total_steps = 0  # reused below as the running global step counter
    # writer = SummaryWriter(log_dir='show')
    tmp_train_accuracy, train_accuracy = 0, 0  # NOTE(review): currently unused

    for ep in range(args.num_train_epochs):
        for step, batch1 in enumerate(train_dataloader):
            total_steps += 1
            # batch1 = (input_ids, token_type_ids, attention_mask, labels, gold_index_lists)
            out = model(input_ids=batch1[0].to(device),
                        token_type_ids=batch1[1].to(device),
                        attention_mask=batch1[2].to(device),
                        return_dict=True)

            # Debug: top-k logits per sample, k = number of gold herbs for that sample.
            pred = []
            for idx in range(len(batch1[4])):
                pred.append(torch.topk(out[idx], len(batch1[4][idx])).indices.detach().cpu().numpy().tolist())
            print("pred:", pred)
            print("target:", batch1[4])
            print(out.shape, batch1[3].shape)

            # Hard 0/1 predictions: sigmoid probability thresholded at 0.7.
            result = torch.sigmoid(out)
            zero = torch.zeros_like(result)
            one = torch.ones_like(result)
            result = torch.where(result >= 0.7, one, result)
            result = torch.where(result < 0.7, zero, result)
            result = result.detach().cpu().numpy().tolist()
            for item, true in zip(result, batch1[3]):
                print(item[:10])
                num = str(item).count("1.0")
                print("num=:", num)
                # renamed comprehension variable: the original reused `i`,
                # shadowing the batch index of the enclosing loop
                p_index = [pos for pos, x in enumerate(item) if x == 1]
                y_pred = item
                y_true = true.numpy().tolist()
                print("precision_score:", precision_score(y_true, y_pred, average=None))
                print("recall_score:", recall_score(y_true, y_pred, average=None))

            # Multi-label BCE against the multi-hot targets (float required).
            loss = loss_fct(out, batch1[3].float().to(device))
            if n_gpu > 1:
                loss = loss.mean()  # DataParallel returns one loss per GPU
            print("loss:", loss, total_steps)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    print("train done!")
    end_time = datetime.datetime.now()
    during_time = end_time - start_time
    print("during_time:", during_time)
    # BUG FIX: the original printed the undefined name `endtime` (NameError).
    print(end_time)














































# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    # encoder = AutoModel.from_pretrained("bert-base-uncased")
    # # encoder=torch.load('model.pkl')
    # input = tokenizer("my dog is cute", add_special_tokens=True, return_tensors='pt')
    # print(input)
    # encode_out = encoder(input_ids=input['input_ids'],token_type_ids=input['token_type_ids'],attention_mask=input['attention_mask'],return_dict=True)
    # print(encode_out[0],encode_out[0].shape)
    # decoder_input = tokenizer("I like it so much", add_special_tokens=True, return_tensors='pt')
    # decoder = AutoModelForCausalLM.from_pretrained("bert-base-uncased",add_cross_attention=True,is_decoder=True)
    # decoder_output = decoder(input_ids=decoder_input['input_ids'],encoder_hidden_states=encode_out[0],attention_mask=decoder_input['attention_mask'],return_dict=True)
    # print(decoder_output.logits.shape) # past_key_values
    # logits = decoder_output.logits.view(-1, 30522).detach().numpy()
    # pred_flat = np.argmax(logits, axis=1).flatten()
    # print(pred_flat)
    # out = torch.tensor(pred_flat)
    # print(out)
    # out = tokenizer.decode(out)
    # print(out)