import os
import platform
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from datasets import load_from_disk, load_dataset
from transformers import AdamW
from transformers import T5Tokenizer, T5ForConditionalGeneration
# from logging_util import get_logger
from rouge import Rouge
from transformers.utils import ExplicitEnum
import pandas as pd
from datetime import datetime


# Select GPU when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Name of the current operating system ("Windows", "Linux", ...).
os_name = platform.system()
# logger = get_logger(model_name='mengzi-t5-base')

# Resolve model and dataset paths per platform.
if os_name == "Windows":
    model_dir = r'C:\Users\23248\PycharmProjects\stance\StanceDetectionLab\lab1\model\mengzi-t5-base'
    tot_data_dir = r'C:\Users\23248\PycharmProjects\stance\StanceDetectionLab\data\tot_data'
    test_data_dir = r'C:\Users\23248\PycharmProjects\stance\StanceDetectionLab\data\test_data'
    train_data_dir = r'C:\Users\23248\PycharmProjects\stance\StanceDetectionLab\data\train_data'
    print("当前执行环境是 Windows...")
elif os_name == "Linux":
    model_dir = r'/data/wangzihao/model/mengzi-t5-base'
    # BUG FIX: this was assigned to `tot_data`, inconsistent with the Windows
    # branch's `tot_data_dir` — `tot_data_dir` would be undefined on Linux.
    tot_data_dir = r'/data/wangzihao/stance/StanceDetectionLab/lab1/data/tot_data'
    test_data_dir = r'/data/wangzihao/stance/StanceDetectionLab/lab1/data/test_data'
    train_data_dir = r'/data/wangzihao/stance/StanceDetectionLab/lab1/data/train_data'
    print("当前执行环境是 Linux...")
else:
    raise ValueError("当前执行环境不是 Windows 也不是 Linux")


def get_collate_fn(tokenizer):
    """Build a DataLoader collate_fn bound to the given tokenizer.

    The returned function takes a batch of (content, target) pairs,
    prepends the task prompt to each content string, and batch-tokenizes
    both sides into padded tensors.
    """
    def collate_fn(batch):
        prompted_sources = []
        raw_targets = []
        for source_text, target_text in batch:
            # Prefix every source with the task prompt.
            prompted_sources.append("观点生成: \n" + source_text)
            raw_targets.append(target_text)
        # Tokenizer special ids:
        # 0 -> <pad>, 1 -> </s>, 2 -> <unk>
        encoded_inputs = tokenizer(prompted_sources, max_length=384,
                                   truncation=True, return_tensors='pt',
                                   padding=True)
        encoded_targets = tokenizer(text_target=raw_targets, max_length=64,
                                    truncation=True, return_tensors='pt',
                                    padding=True)
        return encoded_inputs, encoded_targets

    return collate_fn

# Model training
def train(epochs, model, loader):
    """Fine-tune `model` on batches from `loader` and save the result.

    Args:
        epochs: number of passes over `loader`.
        model: module whose forward(inputs, labels) returns a scalar loss.
        loader: iterable yielding (inputs, labels) batch pairs, where both
            elements support `.to(device)`.

    Side effects:
        Saves the whole trained model object to ./output/meng_zi_t5_sft.pt.
    """
    # Resolve the device locally (same computation as the module-level
    # `device`) so the function does not depend on module globals.
    run_device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(run_device)

    # BUG FIX: transformers.AdamW is deprecated and removed in recent
    # transformers releases; torch.optim.AdamW is the supported replacement.
    # weight_decay=0.0 matches the old transformers.AdamW default (torch's
    # default would otherwise be 0.01 and change training behavior).
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5, weight_decay=0.0)

    model.train()
    for epoch in range(epochs):
        for step, (inputs, labels) in enumerate(loader):
            inputs = inputs.to(run_device)
            labels = labels.to(run_device)
            # Forward pass: the model returns the training loss.
            loss = model(inputs, labels)

            # Backward pass and parameter update.
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            if step % 10 == 0:
                # .item() extracts the Python float for safe formatting.
                print(f'epoch = {epoch}, step = {step}, loss = {loss.item():.4f}')

    os.makedirs('./output', exist_ok=True)
    # NOTE: saves the entire model object (not just a state_dict), as
    # expected by test()'s torch.load().
    torch.save(model, './output/meng_zi_t5_sft.pt')


def test():
    """Evaluate the saved model on the test split and export results to Excel.

    Relies on module globals: `tokenizer`, `device`, the `Dataset` class,
    `get_collate_fn`, and the checkpoint written by train().
    """
    # 1. Load the fine-tuned model saved by train().
    model_load = torch.load('output/meng_zi_t5_sft.pt').to(device)
    model_load.eval()

    # 2. Build the test DataLoader. `Dataset` here is the local class defined
    #    below, which shadows torch.utils.data.Dataset.
    test_loader = DataLoader(dataset=Dataset('test'),
                             batch_size=32,
                             collate_fn=get_collate_fn(tokenizer=tokenizer),
                             shuffle=False,
                             drop_last=True)

    # Collected (prediction, reference) rows.
    results = []

    for step, (inputs, labels) in enumerate(test_loader):
        if step == 2:  # only run two batches; adjust as needed
            break
        with torch.no_grad():
            # No labels -> the model's generate() branch returns decoded text.
            decode_preds = model_load(inputs.to(device))
            decode_labels = tokenizer.batch_decode(labels['input_ids'].to(device), skip_special_tokens=True)

            # Pair each prediction with its reference label.
            for pred, label in zip(decode_preds, decode_labels):
                results.append({
                    "预测结果": pred,
                    "真实标签": label
                })

    # Save results to a timestamped Excel file.
    df = pd.DataFrame(results)
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"model_output_{timestamp}.xlsx"
    df.to_excel(filename, index=False)
    # BUG FIX: report the actual (timestamped) filename instead of the
    # hard-coded "model_output.xlsx", which never matched the file written.
    print(f"模型评估结果已保存到 {filename}")


class MengZiT5Model(nn.Module):
    """Wrapper around mengzi-t5-base.

    forward() returns the training loss when `labels` is given, and a list
    of decoded generated strings otherwise. Decoding relies on the
    module-global `tokenizer`.
    """

    def __init__(self):
        super().__init__()
        # Load the pretrained T5 weights from the local model directory.
        self.model = T5ForConditionalGeneration.from_pretrained(model_dir)

    def forward(self, inputs, labels=None):
        # 1. Encoder inputs (a tokenizer batch: input_ids + attention_mask).
        input_ids = inputs['input_ids']
        attention_mask = inputs['attention_mask']

        if labels is not None:
            # 2. Decoder target ids and their padding mask.
            train_labels = labels['input_ids'].contiguous()
            train_labels_mask = labels['attention_mask']

            # 3. Build decoder inputs by right-shifting the labels; position 0
            #    stays 0, which is T5's pad/decoder-start token id.
            decoder_input_ids = train_labels.new_zeros(train_labels.shape)
            decoder_input_ids[..., 1:] = train_labels[..., :-1].clone()

            decoder_attention_mask = train_labels_mask.new_zeros(train_labels_mask.shape)
            decoder_attention_mask[..., 1:] = train_labels_mask[..., :-1].clone()
            decoder_attention_mask[..., 0] = 1

            # BUG FIX: per the HF convention, label positions set to -100 are
            # ignored by the loss. Without this, the model is also trained to
            # predict <pad> tokens after </s>.
            loss_labels = train_labels.masked_fill(train_labels_mask == 0, -100)

            # 4. Run the model in teacher-forcing mode.
            outputs = self.model(input_ids=input_ids
                                 , attention_mask=attention_mask
                                 , decoder_input_ids=decoder_input_ids
                                 , decoder_attention_mask=decoder_attention_mask
                                 , labels=loss_labels)
            # 5. Return the training loss.
            return outputs.loss
        else:
            # Generation branch.
            # BUG FIX: pass attention_mask so padded input positions are not
            # attended to during generation.
            summary_ids = self.model.generate(input_ids
                                              , attention_mask=attention_mask
                                              , num_beams=4  # beam search
                                              , no_repeat_ngram_size=2  # avoid repeated n-grams
                                              , min_length=10  # length limits
                                              , max_length=64
                                              , early_stopping=True
            )
            # Decode ids back to text; summary_ids.shape = [bs, length].
            # NOTE(review): relies on the module-global `tokenizer`.
            outputs = tokenizer.batch_decode(summary_ids, skip_special_tokens=True)
            return outputs


class Dataset(Dataset):
    """Dataset of (content, target) string pairs loaded from disk.

    NOTE: the class name intentionally shadows its base class
    torch.utils.data.Dataset; callers elsewhere construct it as
    Dataset('train') / Dataset('test').
    """

    def __init__(self, split, num=None):
        """Load the requested split from its on-disk HF dataset directory.

        Args:
            split: 'train' or 'test'.
            num: optional cap — keep only the first `num` examples.

        Raises:
            ValueError: if `split` is not 'train' or 'test'.
            FileNotFoundError: if the dataset directory is missing.
        """
        try:
            if split == 'train':
                dataset = load_from_disk(train_data_dir)
            elif split == 'test':
                dataset = load_from_disk(test_data_dir)
            else:
                raise ValueError("split 必须是 'train' 或 'test'")

            # BUG FIX: `if num:` silently ignored num=0; check for None
            # explicitly so any requested size is honored.
            if num is not None:
                dataset = dataset.select(range(num))

            # Drop overly long examples.
            # NOTE(review): this measures raw character length of 'content',
            # not token count — confirm that is the intended limit.
            def filter_long_text(example):
                return len(example['content']) <= 512 - 2

            dataset = dataset.filter(filter_long_text)

            self.dataset = dataset

        except FileNotFoundError as e:
            print(f"Dataset not found: {e.filename}")
            raise

    def __len__(self):
        """Number of examples after filtering."""
        return len(self.dataset)

    def __getitem__(self, idx):
        """Return the (content, target) pair at `idx`."""
        content = self.dataset[idx]['content']
        title = self.dataset[idx]['target']
        return content, title

# Copied from transformers\generation\configuration_utils.py
# NOTE(review): verbatim copy of a transformers-internal enum, presumably
# kept here for reference; it is not used by the visible code. Member order
# and values must stay in sync with upstream if it is ever compared against
# transformers' own GenerationMode.
class GenerationMode(ExplicitEnum):
    """
    Possible generation modes, downstream of the [`~generation.GenerationMixin.generate`] method.
    """
    # Non-beam methods
    CONTRASTIVE_SEARCH = "contrastive_search"
    GREEDY_SEARCH = "greedy_search"
    SAMPLE = "sample"
    ASSISTED_GENERATION = "assisted_generation"
    # Beam methods
    BEAM_SEARCH = "beam_search"
    BEAM_SAMPLE = "beam_sample"
    CONSTRAINED_BEAM_SEARCH = "constrained_beam_search"
    GROUP_BEAM_SEARCH = "group_beam_search"



if __name__ == '__main__':
    # 1. Load the tokenizer. NOTE: `tokenizer` must stay module-global —
    #    test() and MengZiT5Model.forward read it by name.
    tokenizer = T5Tokenizer.from_pretrained(model_dir, legacy=False)

    # 2. Build the training DataLoader.
    train_loader = DataLoader(dataset=Dataset('train'),
                              batch_size=16,
                              collate_fn=get_collate_fn(tokenizer=tokenizer),
                              shuffle=True,
                              drop_last=True)


    # 3. Create the model (loads pretrained weights from model_dir).
    model = MengZiT5Model()

    # 4. Train, then evaluate with the saved checkpoint.
    train(epochs=30, model=model, loader=train_loader)

    test()  # model evaluation

    # 5. Example of single-text inference (kept for reference).
    # NOTE(review): this sample uses the prompt "目标生成: \n" while the
    # collate_fn uses "观点生成: \n" — confirm which prompt is intended
    # before reviving this code.
    # text = """目标生成: \n在经历了那段惊心动魄但又充满人情味的艰难时刻后，32岁的埃里克森时隔1100天再次为国征战欧洲杯，而且奉献了进球。
    # 丹麦队对垒斯洛文尼亚，这场热度并不算高的小组赛首轮争夺因为一个人的出现得到了外界的关注，他就是埃里克森。
    # 曼联中场在在第17分钟的进球帮助祖国球队取得了领先，他也在经历上届欧洲杯的心脏骤停的遭遇之后，实现了“王者归来”。
    # 尽管这次破门遗憾没能帮助丹麦队最终获得胜利，但绰号“爱神”的埃里克森依然得到了全场乃至全世界球迷的祝福。
    # """
    # inputs = tokenizer(text, return_tensors='pt')
    # model_load = torch.load('output/meng_zi_t5_sft.pt')
    # model_load.eval()
    # print('观点内容：\n', model_load(inputs))
