import joblib

from paddlenlp.transformers import FunnelTokenizer
from paddlenlp.transformers.funnel.modeling import  FunnelForQuestionAnswering as PDImplemBertModel
from paddlenlp.transformers.funnel.modeling_old import FunnelForQuestionAnswering as PTImplemBertModel

from transformers import FunnelTokenizerFast as HGCTRLTokenizer
from paddlenlp.transformers.funnel.tokenizer import FunnelTokenizerFast as PDCTRLTokenizer
import itertools
from paddlenlp.datasets import load_dataset

# Tokenizers for the same checkpoint: HuggingFace as reference, PaddleNLP under test.
hg_tokenizer = HGCTRLTokenizer.from_pretrained("funnel-transformer/xlarge")
pd_tokenizer = PDCTRLTokenizer.from_pretrained("funnel-transformer/xlarge")

import paddle
import torch
import numpy as np
import time

# Force CPU so both implementations run on identical hardware/precision.
paddle.set_device("cpu")

# SQuAD v2 training split drives the per-example comparison loop below.
train_ds = load_dataset("squad", splits="train_v2")

# Load both implementations from the same weights, with every dropout
# disabled so forward/backward passes are deterministic and comparable.
model_str = "data/funnel-transformer/bad_model"
_no_dropout = dict(hidden_dropout=0.0, attention_dropout=0.0, activation_dropout=0.0)
old_model = PTImplemBertModel.from_pretrained(model_str, **_no_dropout)
new_model = PDImplemBertModel.from_pretrained(model_str, **_no_dropout)

# Alternative checkpoint used during earlier debugging:
# old_model = PTImplemBertModel.from_pretrained("glue/qnli/qnli_ft_model_best2", **_no_dropout)
# new_model = PDImplemBertModel.from_pretrained("glue/qnli/qnli_ft_model_best2", **_no_dropout)

# Train mode + one optimizer per model; gradients are inspected (not applied)
# inside the loop, and the optimizers are used only to clear them.
old_model.train()
new_model.train()
optimizer_old = paddle.optimizer.AdamW(parameters=old_model.parameters())
optimizer_new = paddle.optimizer.AdamW(parameters=new_model.parameters())
# Per-example parity check: tokenization must match exactly; model outputs
# and word-embedding gradients must match numerically. On a mismatch the
# offending example is dumped for replay.
for i, example in enumerate(train_ds):
    # example = joblib.load("debug_example.joblib")  # replay a saved failure
    # Same encoding request to both tokenizers; note HF calls the length
    # limit `max_length` while PaddleNLP calls it `max_seq_len`.
    hg_output = hg_tokenizer(
        example["question"], text_pair=example["context"],
        return_attention_mask=True, stride=128, max_length=512,
        return_token_type_ids=True)
    pd_output = pd_tokenizer(
        example["question"], text_pair=example["context"],
        return_attention_mask=True, stride=128, max_seq_len=512,
        return_token_type_ids=True)
    # Tokenizer parity: ids, segments and masks must agree exactly.
    assert hg_output["input_ids"] == pd_output["input_ids"]
    assert hg_output["token_type_ids"] == pd_output["token_type_ids"]
    assert hg_output["attention_mask"] == pd_output["attention_mask"]

    # Round-trip decodes (currently unasserted; kept for manual inspection).
    pd_decode = pd_tokenizer.convert_tokens_to_string(
        pd_tokenizer.convert_ids_to_tokens(pd_output["input_ids"]))
    hg_decode = hg_tokenizer.convert_tokens_to_string(
        hg_tokenizer.convert_ids_to_tokens(hg_output["input_ids"]))
    # assert pd_decode == hg_decode

    inputs = paddle.to_tensor(hg_output["input_ids"])
    token_type_ids = paddle.to_tensor(hg_output["token_type_ids"])
    attention_mask = paddle.to_tensor(hg_output["attention_mask"])

    # Forward + backward through both implementations on a batch of one.
    old_model_outputs = old_model(
        inputs.view(1, -1),
        attention_mask=attention_mask.view(1, -1),
        token_type_ids=token_type_ids.view(1, -1))[0]
    old_model_outputs.mean().backward()

    new_model_outputs = new_model(
        inputs.view(1, -1),
        attention_mask=attention_mask.view(1, -1),
        token_type_ids=token_type_ids.view(1, -1))[0]
    new_model_outputs.mean().backward()

    # Word-embedding gradient of the new model; the attribute path depends on
    # whether the backbone is wrapped in a `.funnel` submodule. Only an
    # attribute-path miss is expected here, so catch AttributeError — the
    # original bare `except:` would have masked unrelated failures.
    try:
        paddle_grad = new_model.funnel.embeddings.word_embeddings.weight.gradient()
    except AttributeError:
        paddle_grad = new_model.embeddings.word_embeddings.weight.gradient()

    # NOTE(review): the original try/except here had byte-identical bodies in
    # both branches, so the fallback was dead code — a plain access is equivalent.
    hg_grad = old_model.model.embeddings.word_embeddings.weight.gradient()

    print(i)
    # Output parity: dump the example and stop on the first divergence.
    if not paddle.allclose(old_model_outputs, new_model_outputs):
        print("not match!", example)
        joblib.dump(example, "debug_example.joblib")
        break
    # Gradient parity: dump the example and show the first few offenders.
    if not np.allclose(hg_grad, paddle_grad):
        print("not match!", example)
        joblib.dump(example, "debug_example_grad.joblib")
        idx = np.where(np.abs(hg_grad - paddle_grad) > 1e-4)
        # `np.where` returns a tuple of index arrays; the original
        # `np.sum(idx) > 0` summed index VALUES and reads as "no mismatch"
        # when all offenders sit at index 0. Count hits instead.
        if idx[0].size > 0:
            print(hg_grad[idx][:5])
            print(paddle_grad[idx][:5])
    # Gradients are only compared, never applied; reset for the next example.
    optimizer_old.clear_grad()
    optimizer_new.clear_grad()
