import argparse
import os
from functools import partial

import deepspeed
import torch
import torch.utils.data as Data
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding
# from megatron import mpu
import mpu
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# Command-line arguments for fine-tuning; DeepSpeed appends its own flags
# (--deepspeed, --deepspeed_config, ...) via add_config_arguments, so they
# must not be declared manually here.
parser = argparse.ArgumentParser(
    add_help=True, description='DeepSpeed BERT fine-tuning on GLUE/MRPC')
parser.add_argument('--model_name_or_path', default="bert-base-uncased", type=str,
                    help='HuggingFace model id or local checkpoint directory')
parser.add_argument('--batch_size', default=4, type=int,
                    help='per-process mini-batch size for train/dev loaders')
parser.add_argument('--num_epochs', default=10, type=int,
                    help='number of passes over the training set')
parser.add_argument('--save_interval', default=100, type=int,
                    help='save a checkpoint every N optimizer steps')
parser.add_argument('--save_dir', default="./save_model/", type=str,
                    help='directory for DeepSpeed checkpoints')
parser.add_argument('--local_rank', default=0, type=int,
                    help='local rank injected by the distributed launcher')

parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args()



raw_datasets = load_dataset("glue", "mrpc")

# num_labels must be the number of classes of the "label" column, NOT the
# number of dataset columns: `num_columns["train"]` is 4 for MRPC
# (sentence1, sentence2, label, idx), which silently built a 4-way head
# for a binary task.
num_labels = raw_datasets["train"].features["label"].num_classes
model = AutoModelForSequenceClassification.from_pretrained(
    args.model_name_or_path, num_labels=num_labels)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)

# Tokenize sentence pairs; padding is intentionally left to the collator
# below so each batch is padded exactly once, to a single fixed length.
tokenized_datasets = raw_datasets.map(
    lambda data: tokenizer(data["sentence1"], data["sentence2"]), batched=True)

# Wrap the model in a DeepSpeed engine; the engine owns the optimizer,
# fp16 loss scaling, and gradient accumulation from the ds_config file.
model, optimizer, _, _ = deepspeed.initialize(args=args, model=model,
                                              model_parameters=model.parameters())

# Pads every batch to a fixed 512 tokens so shapes are static across steps.
data_collator = DataCollatorWithPadding(tokenizer=tokenizer,
                                        padding="max_length", max_length=512)


def collate_fn(data, data_collator):
    """Strip each example down to the keys the model consumes, then
    delegate batching/padding to the HuggingFace data collator.

    `data` is a list of tokenized dataset rows; extra columns such as the
    raw sentences or `idx` are dropped before collation.
    """
    wanted = ('input_ids', 'token_type_ids', 'attention_mask', 'label')
    batch = [{key: example[key] for key in wanted} for example in data]
    return data_collator(batch)


# Bind the HF collator into the collate callable once, up front.
collate_fn_partial = partial(collate_fn, data_collator=data_collator)


def _build_loader(split_name, shuffle):
    # Single factory so both loaders share batch size and collation.
    return Data.DataLoader(
        tokenized_datasets[split_name],
        shuffle=shuffle,
        collate_fn=collate_fn_partial,
        batch_size=args.batch_size,
    )


train_dataloader = _build_loader("train", shuffle=True)
dev_dataloader = _build_loader("validation", shuffle=False)

# Optionally resume the model from a checkpoint:
# _, client_sd = model.load_checkpoint(args.load_dir, args.ckpt_id)
# step = client_sd['step']

# Training loop. `model` is a DeepSpeed engine: it owns backward (loss
# scaling) and the optimizer step (gradient accumulation), so the raw
# `optimizer.step()` the original called would have bypassed both.
for epoch in range(args.num_epochs):
    for step, batchdata in enumerate(train_dataloader):
        # `**` unpacks the BatchEncoding so input_ids / attention_mask /
        # token_type_ids / labels are passed as keyword arguments; .to()
        # moves every tensor in the batch to the engine's device.
        output = model(**batchdata.to(model.device))
        loss = output.loss
        model.backward(loss)
        # Engine step (not optimizer.step()): applies the update and
        # zeroes gradients according to the DeepSpeed config.
        model.step()

        # Save every `save_interval` steps. The original condition was
        # inverted (`step % interval` is truthy on NON-multiples), and
        # used loss.item() as the checkpoint tag; use a stable step tag.
        if step % args.save_interval == 0:
            ckpt_id = f"epoch{epoch}_step{step}"
            model.save_checkpoint(args.save_dir, ckpt_id)

# _, client_sd = model_engine.load_checkpoint(args.load_dir, args.ckpt_id)
# step = client_sd['step']


# Initialize the DeepSpeed inference engine
# Wrap the trained model for inference: kernel injection, fp16, and tensor
# parallelism across mp_size GPUs.
ds_engine = deepspeed.init_inference(
    model,
    mp_size=2,
    dtype=torch.half,
    # `pre_load_checkpoint` / `checkpoint_json` are never declared by the
    # argument parser above, so plain attribute access raised
    # AttributeError; getattr with defaults keeps the original intent
    # (no checkpoint JSON unless explicitly provided).
    checkpoint=(None if getattr(args, 'pre_load_checkpoint', True)
                else getattr(args, 'checkpoint_json', None)),
    replace_with_kernel_inject=True)
model = ds_engine.module

# A raw Python string cannot be fed to a BERT module directly — encode it
# first and forward the resulting tensors.
encoded = tokenizer('Input String', return_tensors='pt').to(model.device)
output = model(**encoded)

# deepspeed testbert/testBERT2.py --deepspeed_config testbert/ds_config.json