import re
import os
import time
import tarfile
import random
import argparse
import numpy as np
from functools import partial
import paddle
from paddle.io import Dataset, DataLoader
from paddle.metric import Accuracy

from paddlenlp.datasets import MapDataset, load_dataset
from paddlenlp.data import Tuple,Pad,Stack
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.transformers.xlnet.tokenizer import XLNetTokenizer
from paddlenlp.transformers.xlnet.modeling import XLNetPretrainedModel, XLNetForSequenceClassification

def set_seed(arg):
    """Seed Python, NumPy and Paddle RNGs from arg.seed for reproducibility."""
    for seeder in (random.seed, np.random.seed, paddle.seed):
        seeder(arg.seed)

class Config:
    """Hyper-parameters and runtime settings for XLNet fine-tuning."""

    def __init__(self):
        self.task_name = 'sst-2'
        self.model_name_or_path = 'xlnet-base-cased'
        self.output_dir = './tmp'
        self.max_seq_len = 128
        self.batch_size = 32
        self.learning_rate = 2e-5
        self.weight_decay = 0.0
        self.adam_epsilon = 1e-8
        self.max_grad_norm = 1.0
        self.num_train_epochs = 3
        # NOTE(review): max_step=1 caps training at a single optimizer step
        # (the scheduler setup uses it when > 0). Set to -1 to train for
        # num_train_epochs full epochs -- confirm 1 is intentional.
        self.max_step = 1
        self.logging_step = 100
        # Backward-compatible alias: the training loop's speed log reads
        # args.logging_steps (plural), which previously did not exist.
        self.logging_steps = self.logging_step
        self.save_step = 500
        self.seed = 50
        self.device = 'gpu'
        self.warmup_steps = 0
        self.warmup_proportion = 0.1

args = Config()

class IMDBDataset(Dataset):
    """Map-style dataset over the aclImdb tarball.

    Each item is a dict {'sentence': str, 'label': int}, where label 1 is a
    positive review and 0 a negative one.
    """

    # Default archive location on AI Studio.
    DATA_PATH = '/home/aistudio/data/data14926/aclImdb_v1.tar.gz'

    def __init__(self, is_training=True, data_path=None):
        self.data = self.load_imdb(is_training, data_path or self.DATA_PATH)

    def __getitem__(self, idx):
        return self.data[idx]

    def __len__(self):
        return len(self.data)

    def load_imdb(self, is_training, data_path=DATA_PATH):
        """Read labelled reviews from the tar archive in a single pass.

        The original implementation re-opened and re-scanned the whole
        archive once per label; this version opens it once and matches both
        label patterns per member.
        """
        split = 'train' if is_training else 'test'
        # Raw strings so `\.` matches a literal dot without a DeprecationWarning.
        patterns = [
            (1, re.compile(r'aclImdb/%s/pos/.*\.txt$' % split)),
            (0, re.compile(r'aclImdb/%s/neg/.*\.txt$' % split)),
        ]
        data_set = []
        with tarfile.open(data_path) as tarf:
            for member in tarf:
                for sentence_label, pattern in patterns:
                    if pattern.match(member.name):
                        sentence = tarf.extractfile(member).read().decode()
                        data_set.append({'sentence': sentence, 'label': sentence_label})
                        break
        return data_set

trainset = IMDBDataset(is_training=True)
testset = IMDBDataset(is_training=False)

# Supply label_list explicitly: IMDBDataset does not expose one, and
# downstream code relies on train_ds.label_list both for the label dtype
# (int64 vs float32) and for num_classes = len(train_ds.label_list).
# Without it MapDataset defaults label_list to None and len(None) crashes.
train_ds = MapDataset(trainset, label_list=[0, 1])
test_ds = MapDataset(testset, label_list=[0, 1])

def convert_example(
        example,
        tokenizer,
        label_list,
        max_seq_len,
        is_test=False
):
    """Tokenize one example into model input arrays.

    Args:
        example (dict): {'sentence', 'label'} for single-sentence tasks or
            {'sentence1', 'sentence2', 'label'} for sentence-pair tasks;
            the 'label' key is absent when is_test=True.
        tokenizer: Tokenizer callable returning a dict with 'input_ids',
            'token_type_ids' and 'attention_mask'.
        label_list: Truthy for classification (labels become int64); falsy
            for regression (labels become float32).
        max_seq_len (int): Maximum sequence length passed to the tokenizer.
        is_test (bool): When True, no label is produced or returned.

    Returns:
        (input_ids, token_type_ids, attention_mask) plus a trailing
        numpy label array when is_test is False.
    """
    if not is_test:
        label_dtype = 'int64' if label_list else 'float32'
        label = np.array([example['label']], dtype=label_dtype)

    # Single-sentence examples have 2 keys in training ('sentence','label')
    # and 1 key at test time ('sentence'); both cases satisfy
    # int(is_test) + len(example) == 2. Anything else is a sentence pair.
    if int(is_test) + len(example) == 2:
        example_encode = tokenizer(
            example['sentence'],
            max_seq_len=max_seq_len,
            return_attention_mask=True
        )
    else:
        example_encode = tokenizer(
            example['sentence1'],
            text_pair=example['sentence2'],
            max_seq_len=max_seq_len,
            return_attention_mask=True
        )

    encoded = (
        example_encode['input_ids'],
        example_encode['token_type_ids'],
        example_encode['attention_mask'],
    )
    # Fixed: the original `else` was missing its colon (SyntaxError).
    return encoded if is_test else encoded + (label,)

tokenizer = XLNetTokenizer.from_pretrained(args.model_name_or_path)

# Partial binding of everything but the example itself. The `or [0, 1]`
# fallback keeps labels int64 for CrossEntropyLoss even if the MapDataset
# was built without an explicit label_list (label_list would be None).
trans_func = partial(
    convert_example,
    tokenizer=tokenizer,
    label_list=train_ds.label_list or [0, 1],
    max_seq_len=args.max_seq_len
)

def create_dataloader(
        dataset,
        mode='train',
        batch_size=1,
        trans_fnc=None,
        batchify=None
):
    """Build a DataLoader over `dataset`.

    Args:
        dataset: A MapDataset (or compatible) of raw examples.
        mode (str): 'train' gets a shuffling DistributedBatchSampler;
            anything else gets a sequential BatchSampler.
        batch_size (int): Samples per batch.
        trans_fnc: Optional per-example transform (tokenization).
        batchify: Collate function producing padded batch arrays.

    Fixes vs. the original: the mode check compared against the misspelled
    'tran' (training never shuffled); trans_fnc was accepted but never
    applied; and batch_size was passed alongside batch_sampler, which
    paddle.io.DataLoader rejects.
    """
    if trans_fnc is not None:
        # Tokenize each example before batching.
        dataset = dataset.map(trans_fnc)
    if mode == 'train':
        sampler = paddle.io.DistributedBatchSampler(
            dataset=dataset, batch_size=batch_size, shuffle=True
        )
    else:
        sampler = paddle.io.BatchSampler(
            dataset=dataset, batch_size=batch_size, shuffle=False
        )
    # batch_size is owned by the sampler; DataLoader must not receive both.
    return DataLoader(
        dataset=dataset,
        batch_sampler=sampler,
        collate_fn=batchify,
        return_list=True
    )

def batchify(samples, fn=Tuple(
    # XLNet pads on the left, hence pad_right=False for all three inputs.
    Pad(axis=0, pad_val=tokenizer.pad_token_id, pad_right=False),       # input_ids
    Pad(axis=0, pad_val=tokenizer.pad_token_type_id, pad_right=False),  # token_type_ids
    Pad(axis=0, pad_val=0, pad_right=False),                            # attention_mask (was mislabeled input_ids)
    Stack(dtype='int64' if train_ds.label_list else 'float32')          # label
)):
    """Collate converted examples into padded batch arrays (PEP 8: a named
    def instead of a lambda bound to a name)."""
    return [data for data in fn(samples)]

# Shuffled training batches and sequential evaluation batches, both
# tokenized by trans_func and left-padded by batchify.
train_dataloader = create_dataloader(
    dataset=train_ds, mode='train', batch_size=args.batch_size,
    trans_fnc=trans_func, batchify=batchify,
)

dev_dataloader = create_dataloader(
    dataset=test_ds, mode='dev', batch_size=args.batch_size,
    trans_fnc=trans_func, batchify=batchify,
)

# Seed and device are set BEFORE model construction so that classifier-head
# initialization is reproducible and lands on the requested device.
set_seed(args)
paddle.set_device(args.device)

# None-safe: fall back to binary classification if the MapDataset was built
# without an explicit label_list (len(None) would raise TypeError).
num_classes = len(train_ds.label_list or [0, 1])
model = XLNetForSequenceClassification.from_pretrained(args.model_name_or_path, num_classes=num_classes)

if paddle.distributed.get_world_size() > 1:
    paddle.distributed.init_parallel_env()
    model = paddle.DataParallel(model)

from math import ceil

# Schedule length: an explicit max_step > 0 caps training; otherwise run
# num_train_epochs full passes over the training set.
if args.max_step > 0:
    num_training_steps = args.max_step
    num_train_epochs = ceil(num_training_steps / len(train_dataloader))
else:
    num_training_steps = len(train_dataloader) * args.num_train_epochs
    num_train_epochs = args.num_train_epochs

# An int counts warmup steps; a float in (0, 1) is a warmup proportion --
# LinearDecayWithWarmup accepts either. Fixed: the original computed
# `warmup` but never passed it, so no warmup was ever applied.
warmup = args.warmup_steps if args.warmup_steps > 0 else args.warmup_proportion
lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps, warmup)

# AdamW with global-norm gradient clipping; weight decay is applied to
# every parameter except biases and LayerNorm weights.
clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=args.max_grad_norm)
decay_params = [
    p.name for n, p in model.named_parameters()
    if not any(nd in n for nd in ['bias', 'layer_norm'])
]
optimizer = paddle.optimizer.AdamW(
    learning_rate=lr_scheduler,
    beta1=0.9,
    beta2=0.999,
    epsilon=args.adam_epsilon,
    parameters=model.parameters(),
    grad_clip=clip,
    weight_decay=args.weight_decay,
    apply_decay_param_fun=lambda x: x in decay_params
)
@paddle.no_grad()
def evaluate(model, loss_fct, metric, data_loader):
    """Run one evaluation pass and print mean loss and accuracy.

    Resets the metric first and restores model.train() before returning so
    the caller's training loop can continue.
    """
    model.eval()
    metric.reset()
    losses = []
    for batch in data_loader:
        input_ids, token_type_ids, attention_mask, labels = batch
        logits = model(input_ids, token_type_ids, attention_mask)
        loss = loss_fct(logits, labels)
        losses.append(loss.detach().numpy())
        # Fixed typo: metric.comput -> metric.compute.
        correct = metric.compute(logits, labels)
        metric.update(correct)
    acc = metric.accumulate()
    # Fixed: the format string has two placeholders but the original gave
    # it only one %-argument (TypeError) and passed acc to print() instead.
    print('eval loss %.5f, acc: %.5f' % (np.average(losses), acc))

    model.train()

def train():
    """Fine-tuning loop with periodic logging, evaluation and checkpointing.

    Uses the module-level model, optimizer, lr_scheduler, dataloaders and
    args; stops after num_training_steps optimizer steps.
    """
    metric = Accuracy()
    # Classification (a label list exists) uses cross-entropy; otherwise
    # the task is treated as regression with MSE.
    loss_fct = paddle.nn.loss.CrossEntropyLoss() if train_ds.label_list else paddle.nn.loss.MSELoss()
    global_step = 0
    tic_train = time.time()
    model.train()
    for epoch in range(num_train_epochs):
        for step, batch in enumerate(train_dataloader):
            global_step += 1
            input_ids, token_type_ids, attention_mask, labels = batch
            logits = model(input_ids, token_type_ids, attention_mask)
            loss = loss_fct(logits, labels)
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            optimizer.clear_grad()

            if global_step % args.logging_step == 0:
                print(
                    "global step %d/%d, epoch: %d, batch: %d, rank_id: %s, loss: %f, lr: %.10f, speed: %.4f step/s"
                    % (global_step, num_training_steps, epoch, step,
                       paddle.distributed.get_rank(), loss, optimizer.get_lr(),
                       # Fixed: Config defines logging_step (singular); the
                       # original read args.logging_steps and crashed here.
                       args.logging_step / (time.time() - tic_train)))
                tic_train = time.time()

            if global_step % args.save_step == 0 or global_step == num_training_steps:
                tic_eval = time.time()
                evaluate(model, loss_fct, metric, dev_dataloader)
                print("eval done total : %s s" % (time.time() - tic_eval))

                # Only rank 0 (or a single-process run) writes checkpoints.
                if (not paddle.distributed.get_world_size() > 1) or paddle.distributed.get_rank() == 0:
                    output_dir = os.path.join(args.output_dir, '%s-ft_model_%d' % (args.task_name, global_step))
                    # exist_ok avoids the check-then-create race.
                    os.makedirs(output_dir, exist_ok=True)
                    # Unwrap DataParallel before saving.
                    model_to_save = model._layers if isinstance(model, paddle.DataParallel) else model
                    model_to_save.save_pretrained(output_dir)
                if global_step == num_training_steps:
                    # NOTE(review): exit(0) kills the whole process once the
                    # step budget is reached -- use `return` instead if any
                    # code is expected to run after train().
                    exit(0)
                tic_train = time.time()
