import argparse
from dataclasses import dataclass
from typing import Optional, List, Any, Union, Dict

import evaluate
import numpy as np
import torch
from datasets import load_dataset, Dataset
from peft import LoraConfig, TaskType, get_peft_model
from torch import nn
from torch.optim import optimizer
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification, AutoTokenizer, PreTrainedTokenizerBase
from transformers.utils import PaddingStrategy
from sklearn.metrics import accuracy_score, f1_score

# Command-line configuration for reward-model training.
# NOTE(review): help strings are placeholders ('lujing') — consider real descriptions.
parser = argparse.ArgumentParser(add_help=True, description='lijing')
parser.add_argument('--model_name_or_path', default="/model_path/roberta-base", type=str, help='lujing')
parser.add_argument('--reward_dataset_name_or_path', default="/datasets_path/lvwerra/stack-exchange-paired/data/reward", type=str, help='lujing')
parser.add_argument('--evaluation_dataset_name_or_path', default="/datasets_path/lvwerra/stack-exchange-paired/data/evaluation", type=str, help='lujing')
parser.add_argument('--device', default="cuda" if torch.cuda.is_available() else "cpu", type=str, help='lujing')
parser.add_argument('--batch_size', default=4, type=int, help='lujing')
parser.add_argument('--num_epochs', default=100, type=int, help='lujing')
parser.add_argument('--save_interval', default=100, type=int, help='lujing')
parser.add_argument('--save_dir', default="./save_model/", type=str, help='lujing')
parser.add_argument('--local_rank', default=0, type=int, help='lujing')
parser.add_argument('--text_column', default="Tweet text", type=str, help='lujing')
parser.add_argument('--label_column', default="text_label", type=str, help='lujing')
parser.add_argument('--max_length', default=512, type=int, help='lujing')
parser.add_argument('--lora_alpha', default=16, type=int, help='lujing')
# BUG FIX: dropout is a float; type=int would raise on e.g. "--lora_dropout 0.1".
parser.add_argument('--lora_dropout', default=0.1, type=float, help='lujing')

args = parser.parse_args()

# NOTE(review): these hard-coded Windows paths override whatever was passed on
# the command line — remove (or guard) them for non-local runs.
args.model_name_or_path = "E:\\model_path\\roberta-base"
args.reward_dataset_name_or_path = "E:\\datasets_path\\lvwerra\\stack-exchange-paired\\data\\reward"
args.evaluation_dataset_name_or_path = "E:\\datasets_path\\lvwerra\\stack-exchange-paired\\data\\evaluation"

# Both splits are stored as parquet shards; load_dataset("parquet", ...) reads
# every parquet file under data_dir into a single "train" split.
train_dataset = load_dataset("parquet",
                             split="train",
                             data_dir=args.reward_dataset_name_or_path)

eval_dataset = load_dataset("parquet",
                            split="train",
                            data_dir=args.evaluation_dataset_name_or_path)


# LoRA adapter configuration for the sequence-classification (reward) head.
peft_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,
    inference_mode=False,
    r=8,               # low-rank adapter dimension
    lora_alpha=32,     # NOTE(review): ignores args.lora_alpha (default 16) — confirm intended
    lora_dropout=0.1,  # NOTE(review): ignores args.lora_dropout — confirm intended
)

# Single-logit classifier: the scalar output is used as the reward score.
model = AutoModelForSequenceClassification.from_pretrained(
    args.model_name_or_path, num_labels=1, torch_dtype=torch.bfloat16
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()

tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
# Some base models (e.g. gpt2) have no official pad token; reuse EOS for padding.
# (FIX: this assignment was duplicated in the original.)
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.eos_token_id

# Remember the raw column names so map() can drop them after tokenization.
original_columns = train_dataset.column_names


# Turn the dataset into pairs of post + summaries, where text_j is the preferred
# question + answer and text_k is the other. Then tokenize the dataset.
def preprocess_function(examples):
    """Tokenize each (question, preferred answer, rejected answer) triple.

    Produces parallel "_j" (preferred) and "_k" (rejected) token sequences so
    the reward model can be trained to score j above k.
    """
    out = {key: [] for key in
           ("input_ids_j", "attention_mask_j", "input_ids_k", "attention_mask_k")}
    triples = zip(examples["question"], examples["response_j"], examples["response_k"])
    for question, preferred, rejected in triples:
        # Encode both candidate answers with an identical prompt template.
        for suffix, answer in (("j", preferred), ("k", rejected)):
            encoded = tokenizer("Question: " + question + "\n\nAnswer: " + answer, truncation=True)
            out["input_ids_" + suffix].append(encoded["input_ids"])
            out["attention_mask_" + suffix].append(encoded["attention_mask"])
    return out

# Local debugging: keep only a slice of each split to speed things up.
train_dataset = Dataset.from_dict(train_dataset[:10000])
eval_dataset = Dataset.from_dict(eval_dataset[:1000])

num_proc = 8  # Can adjust to be higher if you have more processors.


def _within_max_length(example):
    # Keep only pairs where BOTH tokenized sequences fit the model's budget.
    return (len(example["input_ids_j"]) <= args.max_length
            and len(example["input_ids_k"]) <= args.max_length)


# Preprocess both splits and filter out QAs longer than args.max_length.
train_dataset = train_dataset.map(
    preprocess_function,
    batched=True,
    # num_proc=num_proc,
    remove_columns=original_columns,
)
train_dataset = train_dataset.filter(_within_max_length)

eval_dataset = eval_dataset.map(
    preprocess_function,
    batched=True,
    # num_proc=num_proc,
    remove_columns=original_columns,
)
eval_dataset = eval_dataset.filter(_within_max_length)


# We need to define a special data collator that batches the data in our j vs k format.
@dataclass
class RewardDataCollatorWithPadding:
    """Pad the preferred ("j") and rejected ("k") sequences into two batches."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    return_tensors: str = "pt"

    def _pad(self, side_features: List[Dict[str, Any]]) -> Dict[str, Any]:
        # Shared padding call for either side of the preference pair.
        return self.tokenizer.pad(
            side_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors=self.return_tensors,
        )

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
        batch_j = self._pad([
            {"input_ids": f["input_ids_j"], "attention_mask": f["attention_mask_j"]}
            for f in features
        ])
        batch_k = self._pad([
            {"input_ids": f["input_ids_k"], "attention_mask": f["attention_mask_k"]}
            for f in features
        ])
        return {
            "input_ids_j": batch_j["input_ids"],
            "attention_mask_j": batch_j["attention_mask"],
            "input_ids_k": batch_k["input_ids"],
            "attention_mask_k": batch_k["attention_mask"]

        }

# BUG FIX: collate_fn must be an *instance* of the collator — the original
# passed the class itself, so DataLoader would call the dataclass constructor
# with the batch's feature list and crash.
reward_collator = RewardDataCollatorWithPadding(tokenizer=tokenizer, max_length=args.max_length)

train_dataloader = DataLoader(
    train_dataset, shuffle=True, collate_fn=reward_collator, batch_size=args.batch_size, pin_memory=True
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=reward_collator, batch_size=args.batch_size,
                             pin_memory=True)


# Define the metric that we'll use for validation.
accuracy = evaluate.load("accuracy")


def compute_metrics(eval_pred):
    """Pairwise-ranking accuracy: the fraction of pairs with reward_j > reward_k.

    ``eval_pred`` is ``(predictions, labels)`` where ``predictions`` stacks
    ``[rewards_j, rewards_k]`` along axis 0. The references are ignored because
    the "correct" choice is always index 0 (the preferred answer).
    """
    predictions, _ = eval_pred
    # Here, predictions is rewards_j and rewards_k.
    # We want to see how much of the time rewards_j > rewards_k:
    # argmax over axis 0 yields 0 where rewards_j wins, else 1.
    predictions = np.argmax(predictions, axis=0)
    # The preferred response always carries label 0.
    labels = np.zeros(predictions.shape)
    # FIX: the original computed accuracy_score twice and discarded the first
    # result; a plain numpy mean gives the same value without sklearn.
    return float(np.mean(predictions == labels))


# BUG FIX: the original never instantiated an optimizer — it imported the
# torch.optim.optimizer *module* (L11) and called .zero_grad()/.step() on it,
# which raises AttributeError. Build a real optimizer over the trainable
# (LoRA) parameters; lr is hard-coded for now since no --lr argument exists.
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)

for epoch in range(args.num_epochs):
    model.train()
    total_loss = 0
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch = {k: v.to(model.device) for k, v in batch.items()}

        # Score the preferred (j) and rejected (k) responses with the same model.
        output_j = model(input_ids=batch["input_ids_j"], attention_mask=batch["attention_mask_j"])
        output_k = model(input_ids=batch["input_ids_k"], attention_mask=batch["attention_mask_k"])
        rewards_j = output_j[0]
        rewards_k = output_k[0]
        # Pairwise ranking loss: push the preferred reward above the rejected one.
        loss = -nn.functional.logsigmoid(rewards_j - rewards_k).mean()

        total_loss += loss.detach().float()
        # Clear gradients, backpropagate, then update.
        optimizer.zero_grad()
        # BUG FIX: Tensor.backward() takes an optional *gradient* tensor, not the
        # loss; the original `loss.backward(loss)` scaled gradients by the loss value.
        loss.backward()
        optimizer.step()

    # total_loss was accumulated but never reported in the original; log it.
    print(f"epoch {epoch}: mean train loss = {total_loss / max(len(train_dataloader), 1):.4f}")

# NOTE(review): "reword_model" looks like a typo for "reward_model" — confirm
# with downstream consumers before renaming the path.
model.save_pretrained("./reword_model/_peft_last_checkpoint")