import torch
import torch.nn as nn
from transformers import BartForConditionalGeneration, BertTokenizer
import warnings
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.utils import *

warnings.filterwarnings("ignore", category=UserWarning, message="TypedStorage is deprecated.*")

# Local path to the pretrained Chinese BART checkpoint directory (tokenizer + base weights).
BART_MODEL = r'D:\Program Files (x86)\PycharmProject\BartSummarization\model\bart_zh'
# Fine-tuned state-dict files, keyed by the model name callers pass in.
MODEL_LIST = {
    'BART_WEIBO': r'D:\Program Files (x86)\MODEL\best_weibo.pth',
    'BART_NLPCC': r'D:\Program Files (x86)\MODEL\best_nlpcc.pth',
}
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
MAX_CONTENT_LEN = 500  # maximum token length of the input text
MAX_SUMMARY_LEN = 52  # maximum token length of the generated summary
# Load the shared tokenizer from the BART checkpoint directory
tokenizer = BertTokenizer.from_pretrained(BART_MODEL)


class SumModel(nn.Module):
    """Thin ``nn.Module`` wrapper around a Chinese BART seq2seq model."""

    def __init__(self):
        super().__init__()
        # Underlying HF model, initialized from the local checkpoint directory.
        self.bart = BartForConditionalGeneration.from_pretrained(BART_MODEL)

    def forward(self, input_ids, attention_mask, labels=None):
        """Teacher-forced forward pass.

        Returns the HF output object; it carries ``.loss`` when ``labels``
        are supplied.
        """
        return self.bart(
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=labels,
        )

    def generate(self, input_ids, attention_mask, max_length=MAX_SUMMARY_LEN):
        """Autoregressively decode summary token ids for the given batch."""
        summary_ids = self.bart.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            max_length=max_length,
        )
        return summary_ids

def generate_bart(input_texts, model_name):
    """Generate abstractive summaries for one or more Chinese texts.

    Args:
        input_texts: a single string or a list of strings to summarize.
        model_name: key into ``MODEL_LIST`` selecting the fine-tuned checkpoint.

    Returns:
        A list of summary strings (one per input text), or ``[]`` on any error.
    """
    try:
        # Validate the model name before touching the filesystem.
        if model_name not in MODEL_LIST:
            print(f"模型名称 {model_name} 不在可用模型列表中，请检查。")
            return []

        model_dir = MODEL_LIST[model_name]

        # Build the model and load the fine-tuned weights.
        # map_location ensures a checkpoint saved on a CUDA machine still
        # loads on a CPU-only host (torch.load without it raises there).
        model = SumModel()
        state_dict = torch.load(model_dir, map_location=DEVICE)
        model.load_state_dict(state_dict)
        model = model.to(DEVICE)
        model.eval()  # inference mode: disables dropout etc.

        # Accept a bare string as a convenience.
        if not isinstance(input_texts, list):
            input_texts = [input_texts]

        # Tokenize with fixed-length padding and truncation.
        inputs = tokenizer(
            input_texts,
            max_length=MAX_CONTENT_LEN,
            padding="max_length",
            truncation=True,
            return_tensors="pt"
        )
        input_ids = inputs.input_ids.to(DEVICE)
        attention_mask = inputs.attention_mask.to(DEVICE)

        # Decode without tracking gradients to save memory.
        with torch.no_grad():
            outputs = model.generate(input_ids, attention_mask=attention_mask, max_length=MAX_SUMMARY_LEN)

        # Convert ids back to text; the BERT-style tokenizer space-separates
        # Chinese characters, so strip the spaces.
        output_str = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        return [s.replace(" ", "") for s in output_str]

    except FileNotFoundError as e:
        print(f"文件未找到错误: {e}，请检查模型文件路径是否正确。")
    except Exception as e:
        print(f"发生未知错误: {e}")
    return []

# Batch evaluation over a JSON test file.
def test_bart(file, model_name):
    """Evaluate a fine-tuned checkpoint on a test set.

    Writes generated/reference summary pairs to ``summary_results.txt`` and
    returns the metrics dict produced by ``evaluate()`` with an added
    ``loss_avg`` entry (mean teacher-forced loss over batches).

    Args:
        file: path to the JSON test set consumed by ``SummarizationDataset``.
        model_name: key into ``MODEL_LIST`` selecting the checkpoint to load.
    """
    model_dir = MODEL_LIST[model_name]
    test_dataset = SummarizationDataset(tokenizer, file, MAX_CONTENT_LEN, MAX_SUMMARY_LEN)
    test_loader = data.DataLoader(test_dataset, batch_size=20, shuffle=False)

    # Load the model; map_location lets a CUDA-saved checkpoint load on
    # a CPU-only machine (torch.load without it raises there).
    model = SumModel()
    state_dict = torch.load(model_dir, map_location=DEVICE)
    model.load_state_dict(state_dict)
    model = model.to(DEVICE)
    model.eval()  # inference mode

    test_preds, test_labels = [], []
    total_loss = 0
    # One output file collects all generated/reference pairs across batches.
    with open('summary_results.txt', 'w', encoding='utf-8') as f, torch.no_grad():
        for i, test_batch in enumerate(test_loader):
            # 'input_ids' replaces the original local name 'input',
            # which shadowed the builtin.
            input_ids = test_batch['input_ids'].to(DEVICE)
            attention_mask = test_batch['attention_mask'].to(DEVICE)
            labels = test_batch['labels'].to(DEVICE)

            # Teacher-forced pass for the loss metric.
            outputs = model(input_ids, attention_mask, labels)
            loss = outputs.loss
            total_loss += loss.item()
            print('>> batch:', i + 1, 'loss:', round(loss.item(), 5))

            # Free decoding for the text-quality metrics.
            test_generated = model.generate(input_ids=input_ids, attention_mask=attention_mask, max_length=MAX_SUMMARY_LEN)
            test_preds.extend(test_generated.cpu().numpy())
            test_labels.extend(labels.cpu().numpy())

            # NOTE(review): decoding `labels` directly assumes the dataset does
            # not use -100 as an ignore index — confirm in SummarizationDataset.
            generated_texts = tokenizer.batch_decode(test_generated, skip_special_tokens=True)
            reference_texts = tokenizer.batch_decode(labels, skip_special_tokens=True)

            # The tokenizer space-separates Chinese characters; strip spaces.
            generated_texts = [t.replace(" ", "") for t in generated_texts]
            reference_texts = [t.replace(" ", "") for t in reference_texts]
            for gen_text, ref_text in zip(generated_texts, reference_texts):
                f.write(f"{model_name}生成: {gen_text}\n")
                f.write(f"参考摘要: {ref_text}\n\n")

    test_report = evaluate(tokenizer, test_preds, test_labels)
    test_report['loss_avg'] = round(total_loss / len(test_loader), 4)
    return test_report

