from torch.utils.data import Dataset
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers.optimization import get_linear_schedule_with_warmup

from transformers.optimization import AdamW
from tqdm import tqdm, trange

import pandas as pd
import torch
import math
import data
import os


def preprocess_data(data):
    """Convert one (source, target) sentence pair into padded token-id tensors.

    ``data`` is a 5-tuple:
        data[0]  source sentence (str), e.g. a Chinese sentence to correct
        data[1]  target (corrected) sentence (str)
        data[2]  tokenizer for the source side
        data[3]  tokenizer for the target side
        data[4]  config object; here args.max_seq_length = 128, args.silent = False

    :param data: the tuple described above
    :return: pair of 1-D LongTensors ``(source_ids, target_ids)``, each padded
             or truncated to ``args.max_seq_length``
    """
    src_text, trg_text, src_tokenizer, trg_tokenizer, args = data

    # Both sides share the same encoding options.
    encode_kwargs = dict(
        max_length=args.max_seq_length,
        padding="max_length",
        return_tensors="pt",
        truncation=True,
    )

    src_ids = src_tokenizer.encode(src_text, **encode_kwargs)
    trg_ids = trg_tokenizer.encode(trg_text, **encode_kwargs)

    return (src_ids.flatten(), trg_ids.flatten())


class Seq2SeqDataset(Dataset):
    """Dataset that tokenizes every (input_text, target_text) pair up front
    and serves the cached tensor pairs by index.

    ``data`` is expected to expose ``data["input_text"]`` and
    ``data["target_text"]`` (e.g. a pandas DataFrame's columns).
    """

    def __init__(self, encoder_tokenizer, decoder_tokenizer, args, data):
        # Bundle each sentence pair with the tokenizers and config so that
        # preprocess_data receives a single self-contained work item.
        work_items = [
            (src, trg, encoder_tokenizer, decoder_tokenizer, args)
            for src, trg in zip(data["input_text"], data["target_text"])
        ]
        # Eagerly tokenize everything; progress bar hidden when args.silent.
        self.examples = [
            preprocess_data(item)
            for item in tqdm(work_items, disable=args.silent)
        ]

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, index):
        return self.examples[index]


class Config:
    """Training hyper-parameters; stands in for an argparse namespace."""

    def __init__(self):
        self.max_seq_length = 128
        # Set True to hide tqdm progress bars.
        self.silent = False
        self.train_batch_size = 32
        self.num_train_epochs = 40
        self.weight_decay = 0.0
        self.warmup_ratio = 0.06
        self.learning_rate = 4e-05
        self.adam_epsilon = 1e-08
        self.device = 'cpu'
        self.gradient_accumulation_steps = 1
        self.max_grad_norm = 1
        self.evaluate_during_training_steps = 1
        # BUG FIX: train() reads these two attributes (logging frequency and
        # whether to evaluate during training) but they were never defined,
        # raising AttributeError on the first optimizer step.
        self.logging_steps = 50
        self.evaluate_during_training = False


def create_training_progress_scores():
    """Return an empty metrics log: one empty list per tracked quantity."""
    metric_names = ("global_step", "eval_loss", "train_loss")
    return {name: [] for name in metric_names}


def _get_inputs_dict(device, batch, decoder_tokenizer):
    labels = batch[1]
    labels_masked = labels.clone()
    labels_masked[labels_masked == decoder_tokenizer.pad_token_id] = -100

    inputs = {
        "input_ids": batch[0].to(device),
        "decoder_input_ids": labels.to(device),
        "labels": labels_masked.to(device),
    }

    return inputs


def train(model, args, train_dataset, decoder_tokenizer, output_dir, show_running_loss=True, eval_data=None,
          verbose=True):
    """Run the training loop for an encoder-decoder model.

    :param model: seq2seq model; ``model(**inputs)`` must return the loss first.
    :param args: config object (see Config) holding the hyper-parameters.
    :param train_dataset: Seq2SeqDataset of pre-tokenized sentence pairs.
    :param decoder_tokenizer: provides ``pad_token_id`` for label masking.
    :param output_dir: output directory (currently unused in this loop).
    :param show_running_loss: kept for interface compatibility.
    :param eval_data: pandas DataFrame for evaluation (hook not implemented yet).
    :param verbose: kept for interface compatibility.
    :return: None
    """
    device = args.device

    # Dataloader with per-epoch random shuffling.
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(
        train_dataset,
        sampler=train_sampler,
        batch_size=args.train_batch_size,
    )

    # BUG FIX: total optimizer steps = (batches per epoch // accumulation) * epochs.
    # The old code *divided* the batch count by the epoch count, which made the
    # warmup/decay schedule finish after a tiny fraction of training.
    t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    args.warmup_steps = math.ceil(t_total * args.warmup_ratio)

    # Standard BERT recipe: no weight decay for biases and LayerNorm weights.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]

    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)

    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )

    # Tolerate configs that predate these optional attributes.
    logging_steps = getattr(args, "logging_steps", 0)
    evaluate_during_training = getattr(args, "evaluate_during_training", False)

    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0

    model.zero_grad()

    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.silent, mininterval=0)
    epoch_number = 0
    best_eval_metric = None
    early_stopping_counter = 0
    steps_trained_in_current_epoch = 0
    epochs_trained = 0

    training_progress_scores = create_training_progress_scores()

    for current_epoch in train_iterator:
        model.train()
        # Skip epochs already completed (checkpoint-resume support).
        if epochs_trained > 0:
            epochs_trained -= 1
            continue
        train_iterator.set_description(f"Epoch {epoch_number + 1} of {args.num_train_epochs}")

        batch_iterator = tqdm(
            train_dataloader,
            desc=f"Running Epoch {epoch_number} of {args.num_train_epochs}",
            disable=args.silent,
            mininterval=0,
        )
        for step, batch in enumerate(batch_iterator):
            # Skip batches already seen when resuming mid-epoch.
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue

            inputs = _get_inputs_dict(device, batch, decoder_tokenizer)

            outputs = model(**inputs)
            loss = outputs[0]

            current_loss = loss.item()

            # Show the running loss on the progress bar.
            batch_iterator.set_description(
                f"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}"
            )

            # BUG FIX: average the loss over accumulation steps so the
            # effective gradient matches a larger batch (no-op when
            # gradient_accumulation_steps == 1).
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            loss.backward()
            tr_loss += loss.item()

            if (step + 1) % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

                optimizer.step()

                scheduler.step()  # update learning-rate schedule
                model.zero_grad()
                global_step += 1
                if logging_steps > 0 and global_step % logging_steps == 0:
                    logging_loss = tr_loss
                    print({
                        "Training loss": current_loss,
                        "lr": scheduler.get_last_lr()[0],
                        "global_step": global_step,
                    })

                    if evaluate_during_training and (
                            args.evaluate_during_training_steps > 0
                            and global_step % args.evaluate_during_training_steps == 0
                    ):
                        # Evaluation hook not implemented yet.
                        pass

        # BUG FIX: the epoch counter was never advanced, so every progress
        # description reported epoch 0.
        epoch_number += 1


def eval_model(eval_data, output_dir=None, verbose=True, silent=False):
    """Prepare tokenizers and config for evaluation.

    NOTE(review): this function is an unfinished stub — it builds the
    tokenizers and config but never runs an evaluation or returns anything.

    :param eval_data: pandas DataFrame with 'input_text'/'target_text' columns.
    :param output_dir: model directory; defaults to 'output/model_sighan'.
    :param verbose: unused so far.
    :param silent: unused so far.
    :return: None
    """
    # BUG FIX: BertTokenizer was previously imported only inside the
    # __main__ guard, so calling eval_model from another module raised
    # NameError. Import it locally (transformers is already a file dependency).
    from transformers import BertTokenizer

    if not output_dir:
        output_dir = 'output/model_sighan'

    decoder_tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    encoder_tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    args = Config()





if __name__ == '__main__':
    from transformers import BertConfig, BertModel, BertTokenizer, EncoderDecoderModel

    # Tie two Chinese BERTs into an encoder-decoder (decoder gets cross-attention).
    model = EncoderDecoderModel.from_encoder_decoder_pretrained('bert-base-chinese', 'bert-base-chinese', config=None)

    decoder_tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    encoder_tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    encoder_config = model.config.encoder
    decoder_config = model.config.decoder

    train_path = 'data/train.txt'
    eval_path = 'data/test.txt'
    use_segment = False
    from _02 import load_bert_data

    src_trg_lines = load_bert_data(train_path, use_segment)
    # BUG FIX: the evaluation set was previously loaded from train_path,
    # so the "evaluation" data was just a copy of the training data.
    eval_lines = load_bert_data(eval_path, use_segment)

    train_df = pd.DataFrame(src_trg_lines, columns=['input_text', 'target_text'])
    eval_df = pd.DataFrame(eval_lines, columns=['input_text', 'target_text'])

    args = Config()
    train_dataset = Seq2SeqDataset(encoder_tokenizer, decoder_tokenizer, args, train_df)

    output_dir = 'output'
    # BUG FIX: the old call passed decoder_tokenizer as the 8th positional
    # argument, which landed in the `verbose` parameter; use keywords so
    # every argument binds to the intended parameter.
    train(model, args, train_dataset, decoder_tokenizer, output_dir,
          show_running_loss=True, eval_data=eval_df, verbose=True)

# Notes:
#   disable=args.silent evaluates to False here, so progress bars are shown.
#   The tokenization length comes from args.max_seq_length.
