import os

# Disable oneDNN optimizations in TensorFlow (suppresses minor numeric-difference
# warnings; not needed for this workload).
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
# Route Hugging Face downloads through the hf-mirror.com mirror endpoint.
# NOTE: both variables must be set BEFORE importing transformers below.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

import pymupdf as fitz
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch

# Optional working-directory override (kept for reference, intentionally disabled).
# os.chdir('C://Users//dell//Desktop//dlp_hw2//ieee_Quan')

# Load the pretrained T5 model and tokenizer (downloaded via the mirror set above).
model_name = "t5-small"  # alternatives: "t5-base", "t5-large", "t5-3b", "t5-11b", ...
# NOTE(review): the two progress messages below are swapped relative to the
# operations they announce — the first print precedes the tokenizer load and the
# second precedes the model load.
print(f"正在加载预训练模型: {model_name}")
tokenizer = T5Tokenizer.from_pretrained(model_name)
print(f"正在加载分词器: {model_name}")
model = T5ForConditionalGeneration.from_pretrained(model_name)


# 找出工作目录下的所有PDF文件
def find_all_pdf(root_dir='.//ieee_Quan'):
    """Recursively collect the paths of all PDF files under *root_dir*.

    Args:
        root_dir: Directory to search. Defaults to the original hard-coded
            './/ieee_Quan' so existing callers are unaffected.

    Returns:
        list[str]: Full paths of every PDF found (empty if the directory
        does not exist — os.walk simply yields nothing).
    """
    pdf_list = []
    for root, _dirs, files in os.walk(root_dir):
        for file_name in files:
            # Case-insensitive match so files named '.PDF' are not silently skipped.
            if file_name.lower().endswith('.pdf'):
                pdf_list.append(os.path.join(root, file_name))
    return pdf_list


def extract_text_from_pdf(pdf_path):
    """Extract the full plain text of a PDF file.

    Args:
        pdf_path: Path to the PDF file.

    Returns:
        str: Concatenated text of all pages, or "" if the file could not
        be opened (a message is printed in that case).
    """
    try:
        doc = fitz.open(pdf_path)
    except FileNotFoundError:
        print(f"文件未找到: {pdf_path}")
        return ""
    except Exception as e:
        print(f"打开文件时出错: {e}")
        return ""
    print(f"正在提取文本: {pdf_path}")
    try:
        # Join page texts in one pass (avoids quadratic '+=' growth on
        # long documents).
        return "".join(doc.load_page(page_num).get_text()
                       for page_num in range(len(doc)))
    finally:
        # Always release the document handle — the original leaked it.
        doc.close()

def generate_summary(text, max_length=150, min_length=40, num_beams=4):
    """Generate an abstractive summary of *text* with the module-level T5 model.

    Args:
        text: Source text to summarize (truncated to 1024 tokens).
        max_length: Maximum length of the generated summary (in tokens).
        min_length: Minimum length of the generated summary (in tokens).
        num_beams: Beam-search width.

    Returns:
        str: The decoded summary with special tokens stripped.
    """
    # T5 expects a task prefix; "summarize:" selects summarization.
    input_text = f"summarize: {text}"

    # Encode the input, truncating to the model's practical context window.
    inputs = tokenizer(input_text, return_tensors="pt", max_length=1024,
                       truncation=True)

    # Inference only — disable gradient tracking to save memory and time.
    # Also pass the attention mask explicitly so padding/truncation is
    # handled correctly by generate().
    with torch.no_grad():
        summary_ids = model.generate(inputs["input_ids"],
                                     attention_mask=inputs["attention_mask"],
                                     max_length=max_length,
                                     min_length=min_length,
                                     length_penalty=2.0,
                                     num_beams=num_beams,
                                     early_stopping=True)

    # Decode the best beam back into plain text.
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)

    return summary

def my_summary(pdf_path):
    """Summarize one PDF and append the result to summary.txt.

    Prints progress messages; if no text can be extracted, reports that
    and writes nothing.
    """
    print("正在提取文本...")
    extracted = extract_text_from_pdf(pdf_path)
    print("OK")

    # Guard clause: nothing to summarize if extraction produced no text.
    if not extracted:
        print("无法提取文本，无法生成总结。")
        return

    result = generate_summary(extracted)
    # Append this document's summary to the shared output file.
    with open("summary.txt", "a", encoding="utf-8") as out_file:
        out_file.write(f"PDF文件: {pdf_path}\n")
        out_file.write(result)
        out_file.write("\n\n")
    print("Summary:")
    print(result)

class PDFSummary():
    """Drives the batch workflow: find every PDF, then summarize each one."""

    def __init__(self):
        # Paths of discovered PDFs; populated by pdf_summary().
        self.pdf_lib = []
        # Currently unused placeholder for a single-file path.
        self.pdf_path = ''

    def pdf_summary(self):
        """Locate all PDFs via find_all_pdf() and summarize each in turn."""
        self.pdf_lib = find_all_pdf()
        for current_path in self.pdf_lib:
            my_summary(current_path)
    



if __name__ == "__main__":
    # print("开始运行...")
    # pdf_path = 'A BERT base model for the analysis of Electronic Health Records from diabetic patients.pdf'
    # # pdf_path = '000.pdf'
    # my_summary(pdf_path)
    pdfsummary=PDFSummary()
    pdfsummary.pdf_summary()