#!/usr/bin/env python
# -*- coding: utf-8 -*-


from transformers import AutoTokenizer, AutoModel, BertTokenizer, BertModel, XLNetTokenizer, XLNetModel, \
    XLNetForSequenceClassification
import logging
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from sklearn.model_selection import train_test_split
from transformers import AdamW
from tqdm import tqdm, trange
import pandas as pd
import numpy as np
import pandas as pd
import os
import random
from collections import defaultdict
import numpy as np
import codecs
import json
import time
import re, string
import os
import unicodedata

# Punctuation inventory for text cleanup: ASCII punctuation (string.punctuation)
# plus common full-width / CJK punctuation marks.
start_flag = u'！“#¥%&、‘*+，-。/：；《=》？@【、】……——·「」|～'
start_extended_punctuation = '%s' % string.punctuation + start_flag
# Variant that keeps question marks at the end of a sentence
# (kept for reference, currently unused):
# end_flag = u'！“#¥%&、‘*+，-。/：；《=》@【、】……——·「」|～'
# end_extended_punctuation = '%s' % string.punctuation.replace('?', '') + end_flag

# Matches any run of punctuation and/or whitespace characters.
# NOTE(review): expun_pattern is never used in this file — confirm whether
# external callers rely on it before removing.
expun_pattern = re.compile('[%s\s]+' % re.escape(start_extended_punctuation))
# Three alternatives: leading punctuation/space run, trailing punctuation/space
# run, or any internal whitespace run — everything that
# remove_space_strip_punctuation() deletes.
prep_re_str = '^[%s\s]+|[%s\s]+$|\s+' % (re.escape(start_extended_punctuation), re.escape(start_extended_punctuation))
prep_pattern = re.compile(prep_re_str)


def remove_space_strip_punctuation(text):
    """Strip punctuation from both ends of *text* and delete all whitespace.

    Relies on the module-level ``prep_pattern`` regex, whose alternatives
    cover a leading punctuation run, a trailing punctuation run, and any
    internal whitespace run.

    :param text: input string to clean.
    :return: the cleaned string.
    """
    cleaned = prep_pattern.sub('', text)
    return cleaned


def normalize_str(text, form='NFKC'):
    """Return *text* under the given Unicode normalization form.

    The default NFKC folds compatibility characters (full-width Latin
    letters, circled digits, ideographic spaces, ...) into canonical
    equivalents, which stabilizes downstream tokenization.
    """
    normalized = unicodedata.normalize(form, text)
    return normalized


# logger = logging.getLogger(__file__)
# logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
# logging.root.setLevel(level=logging.INFO)

# Number of target classes for the sequence classifier head.
CLS_NUM = 188
# Maximum token-id sequence length, including the <sep>/<cls> special tokens.
MAX_LENGTH = 512
# NOTE: the data must be shuffled before training (done in text_clean).
fp = '/Users/wengjunjie/works/人工智能/11data/1632652922606/train_cleaned_with_label.xlsx'
# Local path to the pretrained Chinese XLNet base checkpoint.
MODEL_NAME = '/Users/lee/proj/data_favorite/XLNet-Chinese/XLNet-Chinese/chinese_xlnet_base_pytorch'

is_cuda = torch.cuda.is_available()

# Directory where best-so-far checkpoints are written during training.
directory_path = './model'
# Number of training epochs (authors recommend between 2 and 4)
epochs = 500
batch_size = 6

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if is_cuda:
    n_gpu = torch.cuda.device_count()
    # NOTE(review): return value unused — presumably a sanity check that
    # device 0 is reachable.
    torch.cuda.get_device_name(0)

tokenizer = XLNetTokenizer.from_pretrained(MODEL_NAME, do_lower_case=True)
# model = XLNetModel.from_pretrained(MODEL_NAME)
model = XLNetForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=CLS_NUM)
print(model)

if is_cuda:
    model.cuda()


def text_clean(fp):
    """Load and preprocess the labelled training data.

    Reads the Excel file at *fp*, shuffles the rows, builds one ``question``
    text per row by concatenating the ``caseCause``, ``opinion_tiaoli`` and
    ``justice`` columns, then normalizes it: strip, lowercase, NFKC
    normalization, and punctuation/whitespace removal.

    :param fp: path to the training .xlsx file; assumed to contain the
        columns ``caseCause``, ``opinion_tiaoli``, ``justice`` and ``label``.
    :return: shuffled DataFrame with an added, cleaned ``question`` column.
    """
    df = pd.read_excel(fp)
    # NOTE(review): the shuffle is unseeded, so each run sees a different
    # row order (and hence a different train/validation split downstream).
    df = df.sample(frac=1)
    df.reset_index(drop=True, inplace=True)
    print('total: {}'.format(len(df)), flush=True)

    def _clean(txt):
        # One pass: strip -> lower -> NFKC -> drop edge punctuation / spaces.
        return remove_space_strip_punctuation(normalize_str(txt.strip().lower()))

    # Plain per-row string concatenation. The original wrapped each value in
    # a single-element pd.Series, which is redundant and makes apply(axis=1)
    # expand into a DataFrame on some pandas versions.
    df['question'] = df.apply(
        lambda row: '{}{}{}'.format(row['caseCause'], row['opinion_tiaoli'], row['justice']), axis=1)
    # Single map instead of four sequential passes over the column.
    df['question'] = df['question'].map(_clean)
    return df


df = text_clean(fp)

# Character length of each cleaned question — printed for inspection only.
df['question_len'] = df['question'].map(lambda txt: len(txt))
mmax = df['question_len'].max()
mmin = df['question_len'].min()
print('max: {}, min: {}'.format(mmax, mmin))

sentences = df['question'].values.tolist()
labels = df['label'].values.tolist()

# sentences = ['测试编码方式', '中文xlnet预训练模型']
# Single sentence, special tokens appended: sentence + ' <sep> <cls>'
# Sentence pair: sentence_1 + ' <sep> ' + sentence_2 + ' <sep> <cls>'
# <sep>:4
# <cls>:3
# ['<sep>', '<s>', '<pad>', '<mask>', '<eop>', '<unk>', '<eod>', '</s>', '<cls>']
# [4, 1, 5, 6, 8, 0, 7, 2, 3]
# encode: max_length is the total cap INCLUDING the <sep> and <cls> special tokens

encoded = [tokenizer.encode(sentence, max_length=MAX_LENGTH, add_special_tokens=True) for sentence in sentences]
# print(encoded)
# Padding uses the tokenizer object's pad_id
pad_id = tokenizer.pad_token_id
print('label: {}'.format(df['label'].unique()), flush=True)
print('path: {}'.format(fp), flush=True)
print('count: {}'.format(len(df)), flush=True)


def pad(text, maxlen, pad_value=None):
    """Truncate or right-pad every token-id sequence to exactly *maxlen*.

    :param text: iterable of token-id lists.
    :param maxlen: target length for every sequence.
    :param pad_value: id used to pad short sequences; defaults to the
        module-level tokenizer ``pad_id`` when not given, which keeps the
        original call sites working unchanged.
    :return: list of lists, each of length *maxlen*.
    """
    if pad_value is None:
        # Backward-compatible fallback to the tokenizer's pad token id,
        # previously a hard-coded global dependency.
        pad_value = pad_id
    padded = []
    for seq in text:
        if len(seq) >= maxlen:
            # Too long: keep only the first maxlen ids (truncate the tail).
            padded.append(seq[:maxlen])
        else:
            padded.append(seq + [pad_value] * (maxlen - len(seq)))
    return padded


input_ids = pad(encoded, maxlen=MAX_LENGTH)

# input_ids = pad_sequences(encoded, maxlen=MAX_LENGTH, dtype='long', truncating='post', padding='post',
#                           value=float(pad_id))

# Create attention masks
attention_masks = []
# Create a mask of 1s for each token followed by 0s for padding
for seq in input_ids:
    seq_mask = [float(i != pad_id) for i in seq]
    attention_masks.append(seq_mask)

# Use train_test_split to split our data into train and validation sets for training

train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(input_ids, labels,
                                                                                    random_state=56,
                                                                                    test_size=0.1)
# Same random_state as above, so the mask split stays row-aligned with the
# input/label split; the second return pair is discarded.
train_masks, validation_masks, _, _ = train_test_split(attention_masks, input_ids, random_state=56, test_size=0.1)

# Convert all of our data into torch tensors, the required datatype for our model

train_inputs = torch.tensor(train_inputs, dtype=torch.long)
validation_inputs = torch.tensor(validation_inputs, dtype=torch.long)
train_labels = torch.tensor(train_labels, dtype=torch.long)
validation_labels = torch.tensor(validation_labels, dtype=torch.long)
train_masks = torch.tensor(train_masks, dtype=torch.long)
validation_masks = torch.tensor(validation_masks, dtype=torch.long)

# Select a batch size for training. For fine-tuning with XLNet, the authors recommend a batch size of 32, 48, or 128. We will use 32 here to avoid memory issues.

# Create an iterator of our data with torch DataLoader. This helps save on memory during training because, unlike a for loop,
# with an iterator the entire dataset does not need to be loaded into memory

train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)

validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)

# Standard BERT/XLNet fine-tuning recipe: no weight decay on bias and
# LayerNorm (gamma/beta) parameters.
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.01},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.0}
]

# This variable contains all of the hyperparemeter information our training loop needs
optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5)


def flat_accuracy(preds, labels):
    """Return the fraction of rows where argmax(preds) matches *labels*.

    :param preds: 2-D array of logits/probabilities, one row per example.
    :param labels: 1-D (or flattenable) array of gold class indices.
    """
    gold = labels.flatten()
    hits = np.argmax(preds, axis=1).flatten() == gold
    return np.sum(hits) / len(gold)


# Store our loss and accuracy for plotting
train_loss_set = []
best_accuracy = 0.0

print('start train...', flush=True)
# trange is a tqdm wrapper around the normal python range
for epoch in trange(epochs, desc="Epoch"):

    # Training

    # Set our model to training mode (as opposed to evaluation mode)
    model.train()

    # Tracking variables
    tr_loss = 0
    nb_tr_examples, nb_tr_steps = 0, 0

    # Train the data for one epoch
    for step, batch in enumerate(train_dataloader):
        # Add batch to GPU
        batch = tuple(t.to(device) for t in batch)
        # Unpack the inputs from our dataloader
        b_input_ids, b_input_mask, b_labels = batch
        # Clear out the gradients (by default they accumulate)
        optimizer.zero_grad()
        # Forward pass; passing labels makes the model return the loss first.
        outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
        loss = outputs[0]
        logits = outputs[1]
        train_loss_set.append(loss.item())
        # Backward pass
        loss.backward()
        # Update parameters and take a step using the computed gradient
        optimizer.step()

        # Update tracking variables
        tr_loss += loss.item()
        nb_tr_examples += b_input_ids.size(0)
        nb_tr_steps += 1

    print("Train loss: {}".format(tr_loss / nb_tr_steps), flush=True)

    # Validation
    # Put model in evaluation mode to evaluate loss on the validation set
    model.eval()

    # Tracking variables
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0

    # Evaluate data for one epoch
    for batch in validation_dataloader:
        # Add batch to GPU
        batch = tuple(t.to(device) for t in batch)
        # Unpack the inputs from our dataloader
        b_input_ids, b_input_mask, b_labels = batch
        # Telling the model not to compute or store gradients, saving memory and speeding up validation
        with torch.no_grad():
            # Forward pass, calculate logit predictions
            output = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
            logits = output[0]

        # Move logits and labels to CPU
        logits = logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()

        tmp_eval_accuracy = flat_accuracy(logits, label_ids)

        # NOTE(review): this averages per-batch accuracy; the last batch may
        # be smaller, which slightly biases the mean.
        eval_accuracy += tmp_eval_accuracy
        nb_eval_steps += 1

    eval_accuracy_mean = eval_accuracy / nb_eval_steps
    print("Validation Accuracy: {}".format(eval_accuracy_mean), flush=True)
    if eval_accuracy_mean > best_accuracy:
        best_accuracy = eval_accuracy_mean
        # Save the best model so far (one checkpoint per improving epoch).
        torch.save(model.state_dict(), directory_path + '/pytorch_model_{}.bin'.format(epoch))

# Test the model
# NOTE(review): this "test" pass re-uses validation_dataloader, so it reports
# validation accuracy again rather than accuracy on a held-out test set.
# The model is still in eval mode from the last validation pass.
with torch.no_grad():
    correct = 0
    total = 0
    for i, batch in enumerate(validation_dataloader):
        batch = tuple(t.to(device) for t in batch)
        # Unpack the inputs from our dataloader
        b_input_ids, b_input_mask, b_labels = batch
        # Forward pass
        outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
        # print (outputs)
        prediction = torch.argmax(outputs[0], dim=1)
        total += b_labels.size(0)
        correct += (prediction == b_labels).sum().item()

# NOTE(review): 'vla' in the message below looks like a typo for 'val'.
print('Test Accuracy of the model on vla data is: {} %'.format(100 * correct / total), flush=True)
