import json
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModel, AutoTokenizer, OPTForSequenceClassification


def save_model(model, tokenizer, output_dir, sub_folder=""):
    """Save model weights, config, and tokenizer vocabulary in HF format.

    Writes ``config.json`` + ``pytorch_model.bin`` + the tokenizer's vocab
    files, i.e. the layout Hugging Face ``from_pretrained`` expects, so the
    saved directory can be reloaded directly.

    Args:
        model: module to save; a DataParallel/DDP wrapper is unwrapped first.
        tokenizer: tokenizer whose vocabulary files are written alongside.
        output_dir: base directory for the checkpoint.
        sub_folder: optional subdirectory under ``output_dir`` (created if
            missing).
    """
    # Unwrap DataParallel/DistributedDataParallel so we save raw module weights.
    model_to_save = model.module if hasattr(model, 'module') else model
    CONFIG_NAME = "config.json"
    WEIGHTS_NAME = "pytorch_model.bin"
    output_dir = os.path.join(output_dir, sub_folder)
    os.makedirs(output_dir, exist_ok=True)
    output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
    output_config_file = os.path.join(output_dir, CONFIG_NAME)
    # Weights, then config, then vocabulary — everything from_pretrained needs.
    save_dict = model_to_save.state_dict()
    torch.save(save_dict, output_model_file)
    model_to_save.config.to_json_file(output_config_file)
    tokenizer.save_vocabulary(output_dir)


class OPTTokenLevelClassifier(nn.Module):
    """OPT (or GPT-Neo(X)) backbone with a bias-free per-token linear head.

    ``forward`` mean-pools the per-token logits over non-padded positions to
    produce a sentence-level classification. ``forward_value`` exposes
    per-token scalar scores (positive-class minus negative-class logit) for
    use as a reward model.
    """

    def __init__(self, opt_model_name="facebook/opt-125m", num_labels=2):
        super().__init__()
        self.opt = AutoModel.from_pretrained(opt_model_name)
        self.config = self.opt.config
        self.num_labels = num_labels

        # Classifier applied independently at every token position
        if hasattr(self.config, "word_embed_proj_dim"):
            # `OPT` models use word_embed_proj_dim as final output
            # https://github.com/huggingface/transformers/blob/main/src/transformers/models/opt/modeling_opt.py#L497
            self.token_classifier = nn.Linear(self.config.word_embed_proj_dim,
                                              num_labels,
                                              bias=False)
        else:
            # `gpt-neo(x)` models use `hidden_size` attribute names instead of `n_embd``
            self.config.n_embd = self.config.hidden_size if hasattr(
                self.config, "hidden_size") else self.config.n_embd
            self.token_classifier = nn.Linear(self.config.n_embd, num_labels, bias=False)
        # NOTE(review): hard-coded 2 is OPT's EOS token id; the training script
        # sets pad_token = eos_token so PAD also becomes id 2 — confirm this
        # still holds if a different tokenizer/model is used.
        self.PAD_ID = 2
        # Weighting layer for aggregating token predictions (currently unused)
        # self.aggregator = nn.Linear(self.config.hidden_size, 1)

    def forward(self,
                input_ids=None,
                attention_mask=None,
                labels=None):
        """Sentence-level classification via masked mean of per-token logits.

        Args:
            input_ids: [batch_size, seq_len] token ids.
            attention_mask: [batch_size, seq_len]; 1 = real token, 0 = padding.
            labels: optional [batch_size] class indices for cross-entropy loss.

        Returns:
            dict with "loss" (None when labels is None), "logits"
            [batch_size, num_labels], and "token_logits"
            [batch_size, seq_len, num_labels].
        """
        # Full transformer outputs
        outputs = self.opt(
            input_ids=input_ids,
            attention_mask=attention_mask,
            return_dict=True
        )
        # Hidden states for every token: [batch_size, seq_len, hidden_size]
        sequence_output = outputs.last_hidden_state

        # Per-token classification logits: [batch_size, seq_len, num_labels]
        token_logits = self.token_classifier(sequence_output)

        def masked_mean(token_logits, attention_mask):
            """
            Mask out padded positions and average the logits over valid tokens.

            Args:
                token_logits: [batch_size, seq_len, num_labels]
                attention_mask: [batch_size, seq_len]

            Returns:
                avg_logits: [batch_size, num_labels]
            """
            # Broadcast the attention mask over the label dimension
            mask = attention_mask.unsqueeze(-1).expand_as(token_logits).float()

            # Zero out logits at padded positions
            masked_logits = token_logits * mask

            # Sum over the valid part of the sequence
            sum_logits = masked_logits.sum(dim=1)

            # Number of valid tokens per example (per label column)
            valid_token_counts = mask.sum(dim=1)

            # Guard against division by zero for all-padding rows
            valid_token_counts = valid_token_counts.clamp(min=1e-9)

            # Mean over valid tokens only
            avg_logits = sum_logits / valid_token_counts

            return avg_logits

        # Aggregate into sentence-level logits: [batch_size, num_labels]
        sentence_logits = masked_mean(token_logits, attention_mask)
        # Compute the loss only when labels are provided
        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(sentence_logits, labels)

        return {
            "loss": loss,
            "logits": sentence_logits,
            "token_logits": token_logits,
        }

    def forward_value(self,
                      input_ids=None,
                      attention_mask=None,
                      past_key_values=None,
                      position_ids=None,
                      head_mask=None,
                      inputs_embeds=None,
                      return_value_only=False,
                      prompt_length=0,
                      use_cache=False):
        """Reward-model scoring: per-token values plus an end-of-answer score.

        Each token's raw value is its positive-class logit minus its
        negative-class logit. Answer-position values are then replaced
        in place by the running mean of the raw scores over the answer so
        far; the "chosen end score" is the value at the last answer token
        (just before the first PAD following the prompt).

        Args:
            return_value_only: if True, return only the raw per-token values.
            prompt_length: number of leading prompt tokens; must be > 1 when
                end scores are requested.

        Returns:
            values [batch_size, seq_len] when return_value_only, otherwise a
            dict with "values", "chosen_end_scores" [batch_size], "ends_pos"
            (list of end indices), and "attentions".
        """
        # NOTE(review): debug print left in — fires on every call
        print("imdb reward model")
        if self.config.model_type == "llama":
            # llama backbones are called without head_mask
            kwargs = dict()
        else:
            kwargs = dict(head_mask=head_mask)
        # Full transformer outputs (attentions requested for inspection)
        outputs = self.opt(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=True,
            **kwargs
        )
        attentions = outputs.attentions
        # Hidden states for every token: [batch_size, seq_len, hidden_size]
        sequence_output = outputs.last_hidden_state

        # Per-token classification logits: [batch_size, seq_len, num_labels]
        # (note the ReLU on hidden states here, unlike `forward`)
        token_logits = self.token_classifier(F.relu(sequence_output))
        values = token_logits
        # values = F.relu(values)
        # for i in range(values.shape[0]):
        #     print(values[i,-1])
        # Scalar score per token: positive-class minus negative-class logit
        values = values[:, :, 1] - values[:, :, 0]
        # values = values[:, :, 1]
        values_backup = values.clone()
        if return_value_only:
            return values
        else:
            # [0 0 0 0 prompt, answer, 0 0 0 0 ] for step 3, we have padding at the beginning
            # [prompt, answer, 0, 0, 0, 0] this is normal
            assert prompt_length > 1, "prompt_length must be greater than 1 to help select the end score"
            bs = values.size(0)
            seq_len = input_ids.shape[1]
            chosen_end_scores = [
            ]  # we use this name for consistency with the original forward function
            ends_pos = []
            for i in range(bs):
                input_id = input_ids[i]
                value = values[i]
                # print("value_before", value)
                # print("value_before_no", value[prompt_length:])
                # First PAD after the prompt marks the end of the answer
                c_inds = (input_id[prompt_length:] == self.PAD_ID).nonzero()
                # here we only use the answer part of the sequence so we do not need to care about the padding at the beginning
                c_ind = c_inds[0].item() + prompt_length if len(
                    c_inds) > 0 else seq_len
                # Smooth in place: value[j] becomes the mean of the raw scores
                # over answer positions prompt_length..j (values_backup keeps
                # the unsmoothed copy, so later iterations read clean data).
                for j in range(prompt_length + 1, c_ind):
                    value[j] = values_backup[i][prompt_length:j + 1].sum() / (j - prompt_length + 1)
                #     value[j] = values_backup[i][c_ind - 1] if value[j] > values_backup[i][c_ind - 1] else value[j]
                #     value[j] = 0 if value[j] < 0 else value[j]
                #     if(j == c_ind - 1):
                #         value[j] = values_backup[i][c_ind - 1]
                # print("value", value[prompt_length:])
                # print("value_after", value[prompt_length:])
                chosen_end_scores.append(value[c_ind - 1])
                ends_pos.append(c_ind)
            # values = values[:, :, 1] - values[:, :, 0]
            return {
                "values": values,
                "chosen_end_scores": torch.stack(chosen_end_scores),
                "ends_pos": ends_pos,
                "attentions": attentions,
            }


if __name__ == "__main__":

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # (1) Load the dataset from local CSV files.
    # NOTE(review): "train" maps to IMDB_test.csv and "test" to
    # IMDB_train.csv — this looks swapped; confirm it is intentional.
    data_files = {"train": "IMDB_test.csv",
                  "test": "IMDB_train.csv"}
    dataset = load_dataset(
        "csv",
        data_dir="/root/autodl-tmp/DeepSpeedExamples/applications/fcrlhf/DeepSpeed-Chat/datasets/IMDb_movie_reviews",
        data_files=data_files
    )


    # (2) Preprocessing function
    def preprocess_function(examples):
        # Assumes the CSV columns are named "text" and "label";
        # change here if the column names differ.
        return {
            # "text": [text.replace("<br />", " ") for text in examples["text"]],  # strip HTML tags
            "text": examples["text"],  # passed through unchanged (HTML cleaning disabled)
            "label": examples["label"]
        }


    # Apply preprocessing across the whole dataset (batched for speed)
    dataset = dataset.map(preprocess_function, batched=True)

    # (3) Wrap an HF split as a PyTorch Dataset
    class IMDBDataset(torch.utils.data.Dataset):
        """Tokenizes IMDB texts on the fly and yields model-ready tensors."""

        def __init__(self, dataset, tokenizer, max_length=512):
            self.texts = dataset["text"]
            self.labels = dataset["label"]
            self.tokenizer = tokenizer
            self.max_length = max_length

        def __len__(self):
            return len(self.texts)

        def __getitem__(self, idx):
            encoded = self.tokenizer(
                self.texts[idx],
                max_length=self.max_length,
                padding="max_length",
                truncation=True,
                return_tensors="pt",
            )
            # squeeze() drops the leading batch dim added by return_tensors="pt"
            return {
                "input_ids": encoded["input_ids"].squeeze(),  # [seq_len]
                "attention_mask": encoded["attention_mask"].squeeze(),
                "labels": torch.tensor(self.labels[idx], dtype=torch.long),
            }


    # (4) Initialize tokenizer, datasets, model, and optimizer
    tokenizer = AutoTokenizer.from_pretrained(
        "/root/autodl-tmp/DeepSpeedExamples/applications/fcrlhf/DeepSpeed-Chat/models/opt-350m")
    tokenizer.pad_token = tokenizer.eos_token  # reuse EOS as the pad token

    # Build train / validation splits
    train_dataset = IMDBDataset(dataset["train"], tokenizer)
    val_dataset = IMDBDataset(dataset["test"], tokenizer)

    # NOTE(review): val_loader is constructed but never used — no evaluation pass
    train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=8)
    # Model and optimizer
    model = OPTTokenLevelClassifier(
        opt_model_name="/root/autodl-tmp/DeepSpeedExamples/applications/fcrlhf/DeepSpeed-Chat/models/opt-350m").to(
        device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
    print("Start training...")
    # Training loop (single epoch)
    for epoch in range(1):
        model.train()
        total_loss = 0
        progress_bar = tqdm(train_loader, desc=f"Epoch {epoch + 1}")
        for batch in progress_bar:
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)
            labels = batch["labels"].to(device)

            outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
            loss = outputs["loss"]
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            total_loss += loss.item()
            avg_loss = total_loss / (progress_bar.n + 1)  # running average loss so far

            progress_bar.set_postfix(loss=avg_loss)  # show the average loss on the bar

        print(f"Epoch {epoch + 1} | Avg Loss: {total_loss / len(train_loader):.4f}")
    print("training complete!")

    save_path = "/root/autodl-tmp/DeepSpeedExamples/applications/fcrlhf/DeepSpeed-Chat/tests/output"

    # NOTE(review): os is imported here (late) although save_model above also
    # uses it — consider moving this import to the top of the file.
    import os

    # Create the output directory
    os.makedirs(save_path, exist_ok=True)

    save_model(model, tokenizer, save_path)

    print(f"Model saved to {save_path}")