# NOTE: removed a fully commented-out duplicate of the module below (dead code);
# the live implementation follows.
import numpy as np
import jieba
from transformers import BertTokenizer, logging
from torch.utils import data
from config import *
from rouge import Rouge
import json

# 屏蔽模型警告
logging.set_verbosity_error()

# 加载分词器
tokenizer = BertTokenizer.from_pretrained(BART_MODEL)


# 数据集定义
class SummarizationDataset(data.Dataset):
    def __init__(self, type='train'):
        super().__init__()
        if type == 'train':
            sample_path = TRAIN_SAMPLE_PATH
        elif type == 'val':  # 验证集
            sample_path = VAL_SAMPLE_PATH
        elif type == 'test':
            sample_path = TEST_SAMPLE_PATH
        else:
            raise ValueError("Invalid dataset type. Choose from 'train', 'val', 'test'.")

        # 加载 JSON 格式的数据
        with open(sample_path, 'r', encoding='utf-8') as f:
            self.data = json.load(f)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        source_text = self.data[index]['content']
        summary_text = self.data[index]['title']

        # 编码源文本
        source_encoding = tokenizer(
            source_text,
            max_length=MAX_CONTENT_LEN,
            padding="max_length",
            truncation=True,
            return_tensors="pt"
        )

        # 编码目标文本
        summary_encoding = tokenizer(
            summary_text,
            max_length=MAX_SUMMARY_LEN,
            padding="max_length",
            truncation=True,
            return_tensors="pt"
        )

        return {
            'input_ids': source_encoding['input_ids'].squeeze(0),
            'attention_mask': source_encoding['attention_mask'].squeeze(0),
            'labels': summary_encoding['input_ids'].squeeze(0),
        }


# 评估函数
def evaluate(preds, refs):
    # 解码预测结果和真实值
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    refs = np.where(refs != -100, refs, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(refs, skip_special_tokens=True)

    decoded_preds = [preds.replace(" ", "") for preds in decoded_preds]
    decoded_labels = [labels.replace(" ", "") for labels in decoded_labels]

    # 使用 jieba 对解码结果分词
    tokenized_preds = [" ".join(jieba.lcut(pred)) for pred in decoded_preds]
    tokenized_labels = [" ".join(jieba.lcut(label)) for label in decoded_labels]

    # 初始化 ROUGE 计算器
    rouge = Rouge()
    # 计算分词后的 ROUGE 分数
    scores = rouge.get_scores(tokenized_preds, tokenized_labels, avg=True)

    # 提取并格式化结果
    rouge_scores = {
        'rouge-1': scores['rouge-1']['f'] * 100,
        'rouge-2': scores['rouge-2']['f'] * 100,
        'rouge-l': scores['rouge-l']['f'] * 100,
    }
    return rouge_scores


# 测试代码
if __name__ == '__main__':
    # 测试数据集加载
    dataset = SummarizationDataset(type='test')
    loader = data.DataLoader(dataset, batch_size=2)  # 每批次加载 2 条数据
    print(f"Number of batches: {len(loader)}")

    # 打印第一批数据
    for batch in loader:
        print("First Batch:")
        print("Input IDs:", batch['input_ids'])
        print("Attention Mask:", batch['attention_mask'])
        print("Labels:", batch['labels'])
        break

    # 测试评估函数
    pred_token_ids = [
        [101, 2769, 2637, 1045, 741, 102]  # 模型预测的摘要
    ]
    true_token_ids = [
        [101, 2769, 2637, 1741, 2500, 2600, 2800, 2900, 102]
    ]

    # 评估 ROUGE
    rouge_scores = evaluate(pred_token_ids, true_token_ids)
    print("ROUGE Scores:", rouge_scores)
