from dataclasses import dataclass, field
from typing import Dict, Optional

import torch
from datasets import Dataset, load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, TrainingArguments, \
    DataCollatorWithPadding

from trl import DPOTrainer

import os
from datasets import load_dataset
import transformers
from transformers import Trainer, TrainingArguments
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import BitsAndBytesConfig
from peft import (
    LoraConfig,
    get_peft_model,
    prepare_model_for_kbit_training,
    set_peft_model_state_dict,
)
import torch

from baseconf import BASE_DISK

### Define the DPO policy model (the model being optimized).
# Alternative: a 4-bit-quantized internlm-chat-7b-8k policy model.
# tokenizer = AutoTokenizer.from_pretrained("./internlm-chat-7b-8k/",trust_remote_code=True)
# model = AutoModelForCausalLM.from_pretrained("./internlm-chat-7b-8k/",
#                                              trust_remote_code=True,
#                                              quantization_config=BitsAndBytesConfig(
#                                                  load_in_4bit=True,
#                                                  bnb_4bit_compute_dtype=torch.bfloat16,
#                                                  bnb_4bit_use_double_quant=True,
#                                                  bnb_4bit_quant_type='nf4'
#                                              ),
#                                              device_map="auto")

# Raw string: this Windows path contains backslashes, which in a normal string
# literal trigger "invalid escape sequence" warnings and would silently corrupt
# the path if a segment ever started with a valid escape (\n, \t, ...).
MODEL_PATH = BASE_DISK + r":\model_path\gpt2-dialogbot-base-chinese"
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH)
# This tokenizer lacks an EOS token; reuse [SEP] as end-of-sequence so that
# generation/truncation logic downstream has a valid eos_token_id.
tokenizer.eos_token_id = tokenizer.sep_token_id
# model = prepare_model_for_kbit_training(model)
### 所有的线性layer都装配上lora
# import bitsandbytes as bnb
# def find_all_linear_names(model):
#     #cls = bnb.nn.Linear8bitLt
#     cls = bnb.nn.Linear4bit
#     lora_module_names = set()
#     for name, module in model.named_modules():
#         if isinstance(module, cls):
#             names = name.split('.')
#             lora_module_names.add(names[0] if len(names) == 1 else names[-1])
#
#
#     if 'lm_head' in lora_module_names: # needed for 16-bit
#         lora_module_names.remove('lm_head')
#     return list(lora_module_names)
# modules = find_all_linear_names(model)

# print(modules)
# config = LoraConfig(
#     r=8,
#     lora_alpha=16,
#     lora_dropout=0.05,
#     bias="none",
#     target_modules=modules,
#     task_type="CAUSAL_LM",
# )


# model = get_peft_model(model, config)


###定义参考模型
model_ref = AutoModelForCausalLM.from_pretrained(BASE_DISK + ":\model_path\gpt2-dialogbot-base-chinese",  ###替换成你的模型
                                                 # trust_remote_code=True,
                                                 # quantization_config=BitsAndBytesConfig(
                                                 #     load_in_4bit=True,
                                                 #     bnb_4bit_compute_dtype=torch.bfloat16,
                                                 #     bnb_4bit_use_double_quant=True,
                                                 #     bnb_4bit_quant_type='nf4'
                                                 # ),
                                                 # device_map="auto"
                                                 )

### Prepare the training data.
traindataset = load_dataset("json", data_files="./data/harmless_base_cn_train.jsonl")

# traindataset = load_dataset("json", data_files="./data/harmless_base_cn_test.jsonl")

# Hold out 2000 examples for evaluation; fixed seed keeps the split reproducible.
split_ds = traindataset["train"].train_test_split(test_size=2000, shuffle=True, seed=42)
train_data, val_data = split_ds["train"], split_ds["test"]


def extract_anthropic_prompt(prompt_and_response):
    """Flatten a multi-turn conversation into one Anthropic-HH-style prompt.

    Each turn is rendered as ``"\\n\\n<role>: <text>"``, producing e.g.
    ``"\\n\\nHuman: ...\\n\\nAssistant: ..."``.

    Args:
        prompt_and_response: iterable of dicts with ``"role"`` and ``"text"`` keys.

    Returns:
        The concatenated prompt string ("" for an empty conversation).
    """
    # str.join builds the result in one pass instead of quadratic `+=` concatenation.
    return "".join(
        "\n\n" + turn["role"] + ": " + turn["text"] for turn in prompt_and_response
    )


def get_hh(dataset, split: str, sanity_check: bool = False, silent: bool = False, cache_dir: str = None) -> Dataset:
    """Convert a harmless-base-style dataset to the format DPOTrainer expects.

    The returned dataset has the structure:
    {
        'prompt': List[str],
        'chosen': List[str],
        'rejected': List[str],
    }

    Prompts should be structured as follows:
      \n\nHuman: <prompt>\n\nAssistant:
    Multiple turns are allowed, but the prompt should always start with \n\nHuman: and end with \n\nAssistant:.

    Args:
        dataset: source dataset with "context", "chosen", "rejected" columns.
        split: dataset split label (currently unused; kept for API compatibility).
        sanity_check: if True, keep at most 1000 rows for a quick smoke test.
        silent: unused; kept for API compatibility.
        cache_dir: unused; kept for API compatibility.

    Returns:
        The transformed Dataset with the "context" column removed.
    """
    if sanity_check:
        dataset = dataset.select(range(min(len(dataset), 1000)))

    def split_prompt_and_responses(sample) -> Dict[str, str]:
        # Flatten the multi-turn context, then append the responder's role tag
        # (e.g. "Assistant:") so chosen/rejected hold only the reply text.
        prompt = extract_anthropic_prompt(sample["context"])
        return {
            "prompt": prompt.strip() + "\n\n" + sample["chosen"]["role"] + ":",
            "chosen": sample["chosen"]["text"].strip(),
            "rejected": sample["rejected"]["text"].strip(),
        }

    # map() applies the transform row-by-row; the raw "context" column is no
    # longer needed afterwards.
    return dataset.map(split_prompt_and_responses).remove_columns("context")


# Convert both splits to DPO prompt/chosen/rejected format.
# Flip sanity_check to True for a quick 1000-row smoke test.
train_dataset = get_hh(train_data, "train", sanity_check=False)
eval_dataset = get_hh(val_data, "test", sanity_check=False)



### DPO training arguments.
training_args = TrainingArguments(
    per_device_train_batch_size=1,
    # NOTE(review): in transformers, max_steps takes precedence over
    # num_train_epochs below — training stops after 200 optimizer steps.
    # Confirm that is intended.
    max_steps=200,
    # DPOTrainer consumes the raw prompt/chosen/rejected columns itself,
    # so they must not be stripped by the Trainer.
    remove_unused_columns=False,
    gradient_accumulation_steps=2,
    learning_rate=3e-4,
    evaluation_strategy="steps",
    output_dir="./test",
    num_train_epochs=300,
    # report_to="tensorboard"
    run_name="dpo_llama2",
)

### DPO trainer: `model` is optimized, `model_ref` stays frozen as the reference.
dpo_trainer = DPOTrainer(
    model,
    model_ref,
    args=training_args,
    # Strength of the implicit KL penalty toward the reference model.
    beta=0.1,
    # train_dataloader=
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    tokenizer=tokenizer,
    max_length=300,
    max_prompt_length=300,
    max_target_length=300,

)
### Run training.
dpo_trainer.train()
### Save the final checkpoint (written to training_args.output_dir).
dpo_trainer.save_model()

# 7. save
# output_dir = os.path.join(script_args.output_dir, "final_checkpoint")
# dpo_trainer.model.save_pretrained(output_dir)