import os
from pathlib import Path
from xid import XID
import shutil
import sys
PROJECT_PATH = Path(__file__).parent.parent.absolute()
sys.path.append(str(PROJECT_PATH))

from langchain_community.document_loaders import UnstructuredFileLoader
from transformers import T5ForConditionalGeneration, T5Tokenizer
from src.summary_database import save_doc_summary, query_all_doc_summary_wrapper

# Public API: only the file-upload summarization wrapper is exported.
__all__ = ['summarize_wraper']

# Lazily-initialized T5 model; populated on the first call to summarize().
model = None

def load_file(filepath):
    """Extract and return the text content of the document at *filepath*.

    Uses UnstructuredFileLoader, which handles multiple document formats
    (docx, pdf, txt, ...), and returns the page content of the first
    loaded document.
    """
    documents = UnstructuredFileLoader(filepath).load()
    return documents[0].page_content

def summarize_wraper(file):
    """Ingest an uploaded file: summarize it, archive a copy, persist the
    summary, then return every stored summary.

    Parameters
    ----------
    file : object or None
        Uploaded-file handle exposing a ``.name`` attribute holding the
        on-disk path (e.g. a Gradio temp file). When ``None``, nothing is
        ingested and the current summaries are returned unchanged.

    Returns
    -------
    The result of ``query_all_doc_summary_wrapper()`` (all stored summaries).
    """
    if file is not None:
        content = load_file(filepath=file.name)
        # Renamed from ``id`` to avoid shadowing the builtin; XID yields a
        # unique key used as the archive directory name.
        doc_id = XID().string()
        dst = PROJECT_PATH / 'filedb' / doc_id
        copy_file(file.name, dst)
        # BUG FIX: was ``Path(file)`` — passing the upload handle itself to
        # Path raises TypeError for non-PathLike objects. The actual path
        # lives in ``file.name``, as used everywhere else in this function.
        path = Path(file.name)
        summary = summarize(content)
        save_doc_summary(id=doc_id, doc_name=path.name, doc_summary=summary)
    return query_all_doc_summary_wrapper()

def copy_file(src, dst):
    """Best-effort copy of the file *src* into the directory *dst*.

    The destination directory is created (with parents) if missing. Copy
    failures are reported to stdout instead of raised, so document ingestion
    can proceed even when archiving fails.
    """
    Path(dst).mkdir(parents=True, exist_ok=True)
    try:
        shutil.copy2(src, dst)
    except Exception as exc:
        print(f"复制文件 {src} 时发生错误: {exc}")

def summarize(content):
    """Return an abstractive summary of *content* using a multilingual
    (en/ru/zh) T5 checkpoint.

    The model and tokenizer are loaded lazily on the first call and cached
    in module-level globals, so later calls reuse them.
    """
    global model, tokenizer
    if model is None:
        # First call: fetch checkpoint and its matching tokenizer once.
        model_name = 'utrobinmv/t5_summary_en_ru_zh_base_2048'
        model = T5ForConditionalGeneration.from_pretrained(model_name)
        tokenizer = T5Tokenizer.from_pretrained(model_name)
    # The task prefix selects this checkpoint's "big summary" mode.
    encoded = tokenizer('summary big: ' + content, return_tensors="pt")
    output_ids = model.generate(**encoded)
    return tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]

if __name__ == '__main__':
    # Smoke test: summarize a sample .docx from the project's tmp folder
    # ("What management foundations are needed for dynamic knowledge management").
    summarized_content = summarize(load_file(PROJECT_PATH / "tmp/开展动态知识管理需要奠定哪些管理基础.docx"))
    print(f"summarized_content: {summarized_content}")