import joblib

from paddlenlp.transformers import FunnelTokenizer
from paddlenlp.transformers.funnel.modeling import  FunnelForQuestionAnswering as PDImplemBertModel
from paddlenlp.transformers.funnel.modeling_old import FunnelForQuestionAnswering as HGImplemBertModel

from transformers import FunnelTokenizerFast as HGCTRLTokenizer
from paddlenlp.transformers.funnel.tokenizer import FunnelTokenizerFast as PDCTRLTokenizer
import itertools
from paddlenlp.datasets import load_dataset

# Tokenizer under test: PaddleNLP's fast Funnel tokenizer for the xlarge checkpoint.
tokenizer = PDCTRLTokenizer.from_pretrained("funnel-transformer/xlarge")

import paddle
import torch
import numpy as np
import time

# Pin execution to CPU for this debug/comparison run.
paddle.set_device("cpu")

train_ds = load_dataset('squad', splits='train_v2')
# Local checkpoint directory; both implementations load from the same weights.
model_str="data/funnel-transformer/xlarge"
# Re-seed before each construction so any randomly-initialized parameters match
# between the two models. All dropout rates are zeroed so forward passes are
# deterministic and directly comparable.
paddle.seed(1234)
old_model = HGImplemBertModel.from_pretrained(model_str, hidden_dropout=0.0, attention_dropout=0.0, activation_dropout=0.0)
paddle.seed(1234)
new_model = PDImplemBertModel.from_pretrained(model_str ,     hidden_dropout=0.0,  attention_dropout=0.0,  activation_dropout=0.0)

# Sanity check: every parameter tensor in the two state dicts must be close.
not_match_count=0
# Hoist the state_dict lookups out of the loop — each state_dict() call
# typically reconstructs the whole mapping, and the original fetched both
# dicts twice per parameter.
old_state_dict = old_model.state_dict()
new_state_dict = new_model.state_dict()
for key in old_state_dict:
    old_val = old_state_dict[key]
    new_val = new_state_dict[key]
    if not paddle.allclose(old_val, new_val):
        print("not match value:", key, old_val.mean(), new_val.mean())
        not_match_count += 1

print("not match count:", not_match_count)

# Switch both models to training mode; dropout probabilities were set to 0.0
# at construction time, so forward passes remain deterministic regardless.
old_model.train()
new_model.train()
# NOTE(review): no optimizer.step() is ever called below — these optimizers
# appear to exist only so clear_grad() can reset gradients between batches.
optimizer_old = paddle.optimizer.AdamW(parameters=old_model.parameters())
optimizer_new = paddle.optimizer.AdamW(parameters=new_model.parameters())





def prepare_train_features(examples, tokenizer, args):
    """Convert raw SQuAD examples into tokenized training features.

    Tokenizes question/context pairs with a sliding-window stride (one long
    example may yield several overlapping features), then computes token-level
    start/end answer positions for each feature. Features whose span does not
    contain the answer, and unanswerable examples, are labeled with the CLS
    token index.

    Args:
        examples: list of dicts with 'context', 'question', 'answers' and
            'answer_starts' keys (character-level answer spans).
        tokenizer: tokenizer producing 'offset_mapping' and
            'overflow_to_sample' fields.
        args: parsed arguments; only `doc_stride` and `max_seq_length` are used.

    Returns:
        The list of tokenized features, each augmented in place with
        'start_positions' and 'end_positions'.
    """
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    # NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. The main difference is
    # that HuggingFace uses ArrowTable as basic data structure, while we use a list of dictionaries instead.
    contexts = [examples[i]['context'] for i in range(len(examples))]
    questions = [examples[i]['question'] for i in range(len(examples))]

    tokenized_examples = tokenizer(
        questions,
        contexts,
        stride=args.doc_stride,
        max_seq_len=args.max_seq_length)

    # Label each feature with token-level answer positions.
    for i, tokenized_example in enumerate(tokenized_examples):
        # We will label impossible answers with the index of the CLS token.
        input_ids = tokenized_example["input_ids"]
        cls_index = input_ids.index(tokenizer.cls_token_id)

        # The offset mappings give us a map from token to character position in the original context. This will
        # help us compute the start_positions and end_positions.
        offsets = tokenized_example['offset_mapping']

        # NOTE(review): token_type_ids are used as a question(0)/context(1)
        # indicator here — assumes the tokenizer marks context tokens with 1.
        sequence_ids = tokenized_example['token_type_ids']

        # One example can give several spans; this is the index of the example containing this span of text.
        sample_index = tokenized_example['overflow_to_sample']
        answers = examples[sample_index]['answers']
        answer_starts = examples[sample_index]['answer_starts']

        # If no answers are given, set the cls_index as answer.
        if len(answer_starts) == 0:
            tokenized_examples[i]["start_positions"] = cls_index
            tokenized_examples[i]["end_positions"] = cls_index
        else:
            # Start/end character index of the answer in the text.
            start_char = answer_starts[0]
            end_char = start_char + len(answers[0])

            # First token belonging to the context within the current span.
            token_start_index = 0
            while sequence_ids[token_start_index] != 1:
                token_start_index += 1

            # Last token belonging to the context within the current span.
            token_end_index = len(input_ids) - 1
            while sequence_ids[token_end_index] != 1:
                token_end_index -= 1
            # Minus one more to reach actual text
            token_end_index -= 1

            # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
            if not (offsets[token_start_index][0] <= start_char and
                    offsets[token_end_index][1] >= end_char):
                tokenized_examples[i]["start_positions"] = cls_index
                tokenized_examples[i]["end_positions"] = cls_index
            else:
                # Otherwise move token_start_index and token_end_index to the two ends of the answer.
                # Note: we could go after the last offset if the answer is the last word (edge case);
                # the bounds check on token_start_index guards against that.
                while token_start_index < len(offsets) and offsets[
                        token_start_index][0] <= start_char:
                    token_start_index += 1
                tokenized_examples[i]["start_positions"] = token_start_index - 1
                while offsets[token_end_index][1] >= end_char:
                    token_end_index -= 1
                tokenized_examples[i]["end_positions"] = token_end_index + 1

    return tokenized_examples

from functools import partial
import argparse
def parse_args():
    """Build and parse the command-line arguments for SQuAD training/prediction.

    Returns:
        argparse.Namespace with all options below, using their defaults when
        not supplied on the command line.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    add = parser.add_argument

    # Data paths.
    add("--train_file", type=str, required=False, default=None,
        help="Train data path.")
    add("--predict_file", type=str, required=False, default=None,
        help="Predict data path.")

    # Model selection and output location.
    add("--model_type", default="funnel", type=str,
        help="Type of pre-trained model.")
    add("--model_name_or_path", default="funnel-transformer/xlarge", type=str,
        help="Path to pre-trained model or shortcut name of model.")
    add("--output_dir", default="outputs", type=str,
        help="The output directory where the model predictions and checkpoints will be written. "
        "Default as `outputs`")

    # Tokenization / batching.
    add("--max_seq_length", default=512, type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.")
    add("--batch_size", default=2, type=int,
        help="Batch size per GPU/CPU for training.")

    # Optimizer hyper-parameters.
    add("--learning_rate", default=1e-5, type=float,
        help="The initial learning rate for Adam.")
    add("--weight_decay", default=0.01, type=float,
        help="Weight decay if we apply some.")
    add("--adam_epsilon", default=1e-6, type=float,
        help="Epsilon for Adam optimizer.")
    add("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")

    # Training schedule and bookkeeping.
    add("--num_train_epochs", default=2, type=int,
        help="Total number of training epochs to perform.")
    add("--max_steps", default=-1, type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    add("--warmup_proportion", default=0.1, type=float,
        help="Proportion of training steps to perform linear learning rate warmup for.")
    add("--logging_steps", type=int, default=500,
        help="Log every X updates steps.")
    add("--save_steps", type=int, default=500,
        help="Save checkpoint every X updates steps.")
    add("--seed", type=int, default=42, help="random seed for initialization")
    add("--device", choices=['cpu', 'gpu'], default="gpu",
        help="Select which device to train model, defaults to gpu.")

    # SQuAD-specific options.
    add("--doc_stride", type=int, default=128,
        help="When splitting up a long document into chunks, how much stride to take between chunks.")
    add("--n_best_size", type=int, default=20,
        help="The total number of n-best predictions to generate in the nbest_predictions.json output file.")
    add("--null_score_diff_threshold", type=float, default=0.0,
        help="If null_score - best_non_null is greater than the threshold predict null.")
    add("--max_query_length", type=int, default=64, help="Max query length.")
    add("--max_answer_length", type=int, default=30, help="Max answer length.")
    # NOTE: store_false => defaults to True; passing the flag turns lower-casing off.
    add("--do_lower_case", action='store_false',
        help="Whether to lower case the input text. Should be True for uncased models and False for cased models.")
    add("--verbose", action='store_true',
        help="Whether to output verbose log.")
    add("--version_2_with_negative", action='store_true',
        help="If true, the SQuAD examples contain some that do not have an answer. If using squad v2.0, it should be set true.")
    add("--do_train", action='store_true',
        help="Whether to train the model.")
    add("--do_predict", action='store_true', help="Whether to predict.")

    # Learning-rate schedule details.
    add("--scheduler_type", default="linear", type=str, help="scheduler_type.")
    add("--layer_lr_decay", default=0.8, type=float, help="layer_lr_decay")

    return parser.parse_args()

args = parse_args()
# NOTE(review): the map() return value is discarded, so this relies on
# MapDataset.map transforming train_ds in place — confirm against paddlenlp.
train_ds.map(partial(
    prepare_train_features, tokenizer=tokenizer, args=args),
    batched=True)
# shuffle=False keeps batch order deterministic so both models see identical data.
train_batch_sampler = paddle.io.DistributedBatchSampler(
    train_ds, batch_size=args.batch_size, shuffle=False)
from paddlenlp.data import Pad, Stack, Dict
# Collate function: pad the variable-length sequence fields, stack the
# scalar start/end labels into int64 tensors.
train_batchify_fn = lambda samples, fn=Dict({
    "input_ids": Pad(axis=0, pad_val=tokenizer.pad_token_id),
    "token_type_ids": Pad(axis=0, pad_val=tokenizer.pad_token_type_id),
    'attention_mask': Pad(axis=0, pad_val=tokenizer.pad_token_type_id),
    "start_positions": Stack(dtype="int64"),
    "end_positions": Stack(dtype="int64")
}): fn(samples)
from paddle.io import DataLoader
train_data_loader = DataLoader(
    dataset=train_ds,
    batch_sampler=train_batch_sampler,
    collate_fn=train_batchify_fn,
    num_workers=0,
    return_list=True)
import tqdm
# Progress-bar wrapper yielding (step, batch) pairs for the comparison loop.
descriptor = tqdm.tqdm(enumerate(train_data_loader))

class CrossEntropyLossForSQuAD(paddle.nn.Layer):
    """Average of start- and end-position cross-entropy for extractive QA."""

    def __init__(self):
        super(CrossEntropyLossForSQuAD, self).__init__()

    def forward(self, y, label):
        """Return the mean of the start- and end-logit cross-entropy losses.

        Args:
            y: tuple of (start_logits, end_logits).
            label: tuple of (start_position, end_position) index tensors.
        """
        logits_start, logits_end = y
        pos_start, pos_end = label
        # cross_entropy expects labels with a trailing axis of size 1.
        pos_start = paddle.unsqueeze(pos_start, axis=-1)
        pos_end = paddle.unsqueeze(pos_end, axis=-1)
        loss_start = paddle.nn.functional.cross_entropy(
            input=logits_start, label=pos_start)
        loss_end = paddle.nn.functional.cross_entropy(
            input=logits_end, label=pos_end)
        return (loss_start + loss_end) / 2

criterion = CrossEntropyLossForSQuAD()
# Compare the two implementations batch by batch: the forward loss values must
# match, and so must the gradients on the word-embedding table. Mismatching
# batches are dumped to disk for offline debugging.
for step, batch in descriptor:

    input_ids, token_type_ids, attention_mask, start_positions, end_positions = batch

    old_model_outputs = old_model(
        input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)

    new_model_outputs = new_model(
        input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)

    old_loss = criterion(old_model_outputs, (start_positions, end_positions))
    new_loss = criterion(new_model_outputs, (start_positions, end_positions))
    old_loss.mean().backward()
    new_loss.mean().backward()

    # The embedding table may live under a `.funnel` attribute (task model) or
    # directly on the object (bare backbone); probe the wrapped path first.
    # Catch only AttributeError — a bare `except:` would also swallow
    # KeyboardInterrupt and hide unrelated bugs.
    try:
        paddle_grad = new_model.funnel.embeddings.word_embeddings.weight.gradient()
    except AttributeError:
        paddle_grad = new_model.embeddings.word_embeddings.weight.gradient()

    try:
        hg_grad = old_model.funnel.embeddings.word_embeddings.weight.gradient()
    except AttributeError:
        hg_grad = old_model.embeddings.word_embeddings.weight.gradient()
    print(step)
    if not paddle.allclose(old_loss, new_loss):
        print("not match!", batch)
        joblib.dump(batch, "debug_example.joblib")
        break
    if not np.allclose(hg_grad, paddle_grad):
        print("not match!", batch)
        joblib.dump(batch, "debug_example_grad.joblib")
        # np.where(cond) returns a tuple of index arrays. Check whether any
        # element exceeded the tolerance via the first array's size: the
        # original `np.sum(idx) > 0` was wrong — it sums the indices
        # themselves and misses a mismatch located only at position 0.
        idx = np.where(np.abs(hg_grad - paddle_grad) > 1e-4)
        if idx[0].size > 0:
            print(hg_grad[idx][:5])
            print(paddle_grad[idx][:5])
    # Reset accumulated gradients before the next comparison batch; no
    # optimizer.step() is taken anywhere in this loop.
    optimizer_old.clear_grad()
    optimizer_new.clear_grad()