# env

import dotenv

dotenv.load_dotenv()

import warnings

# Silence FutureWarning / DeprecationWarning noise emitted at import time by
# the ML libraries loaded below (langchain, transformers, etc.).
warnings.simplefilter("ignore", FutureWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)

# fast api init

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# CORS middleware so browser/desktop front-ends can call this API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=[
        # "http://localhost:81",
        # "http://localhost:3000",
        # "http://localhost:1420",
        "*"
    ],  # allowed origins — NOTE(review): "*" combined with allow_credentials=True is rejected by browsers; confirm intent
    allow_credentials=True,
    allow_methods=["*"],  # allow every HTTP method
    allow_headers=["*"],  # allow every request header
)
# langchain

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain_core.prompts import PromptTemplate

# Import the models defined in models.py
from models import embeddings, llm

vectorstore = None  # Chroma vector store; (re)assigned by load_pdf_and_create_qa_chain
qa_chain = None  # RetrievalQA chain built over the vector store

# %% Book processing

import pytesseract  # OCR fallback for scanned (image-only) PDF pages

# The Tesseract binary must be installed separately (see the tesseract-ocr
# releases) and either put on PATH...

# ...or pointed to explicitly here (Windows install path):
pytesseract.pytesseract.tesseract_cmd = r'D:\applications\TesseractOCR5\tesseract.exe'

import os


# Find all PDF (and EPUB) files under a directory
def find_pdf_files(directory):
    """Recursively collect all .pdf and .epub files under *directory*.

    Args:
        directory: root directory to walk.

    Returns:
        list[str]: full paths of every matching file. Despite the name, the
        result includes .epub files as well, since process_file() handles both.
    """
    pdf_files = []
    for root, _, files in os.walk(directory):
        for file in files:
            # str.endswith accepts a tuple of suffixes, so one test replaces
            # the original duplicated if/elif branches.
            if file.endswith(('.pdf', '.epub')):
                pdf_files.append(os.path.join(root, file))
    return pdf_files


import fitz
from PIL import Image
from langchain.schema import Document


# Read a PDF
def process_pdf_func(pdf_path):
    """Extract text from a PDF, falling back to OCR for image-only pages.

    Args:
        pdf_path: path of the .pdf file to read.

    Returns:
        list[Document]: one langchain Document per page that yielded text.
    """
    documents = []
    # Context-manage the fitz Document so the file handle is released even if
    # a page raises (the original never closed it).
    with fitz.open(pdf_path) as doc:
        for page in doc:
            # First try the embedded text layer.
            text = page.get_text()

            # Empty text layer -> likely a scanned page: rasterize and OCR it.
            if not text.strip():
                print('start OCR')
                # Render the page to an image for Tesseract.
                pix = page.get_pixmap()
                img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
                # text = pytesseract.image_to_string(img)
                # lang selects the OCR language model; combinations such as
                # 'chi_sim+jpn+eng' are also accepted.
                text = pytesseract.image_to_string(img, lang='chi_sim')
                print(f'new text: {text}')

            if text.strip():
                documents.append(Document(page_content=text))
                print(f'new text: {text}')
                # print(f'new text: {text[:5]}')
    return documents


from ebooklib import epub
import ebooklib
from bs4 import BeautifulSoup


# Read an EPUB
def process_epub(epub_path):
    """Parse an EPUB and return its chapters as langchain Documents."""
    book = epub.read_epub(epub_path)
    docs = []
    for chapter in book.get_items():
        # Skip non-content items (images, styles, navigation, ...).
        if chapter.get_type() != ebooklib.ITEM_DOCUMENT:
            continue
        # Decode the chapter's XHTML payload, then strip the markup down to
        # plain text with BeautifulSoup.
        markup = chapter.get_body_content().decode('utf-8')
        plain = BeautifulSoup(markup, 'html.parser').get_text()
        docs.append(Document(page_content=plain))
        print(plain)
    return docs


# Top-level dispatcher for both file formats
def process_file(file_path):
    """Dispatch *file_path* to the matching loader based on its extension.

    Args:
        file_path: path of a .pdf or .epub file.

    Returns:
        list[Document]: the result of process_pdf_func / process_epub.

    Raises:
        ValueError: if the extension is neither .pdf nor .epub.
    """
    # Lower-case the suffix so files such as "BOOK.PDF" are accepted too.
    suffix = os.path.splitext(file_path)[1].lower()
    if suffix == '.pdf':
        return process_pdf_func(file_path)
    if suffix == '.epub':
        return process_epub(file_path)
    # Include the offending path so the failure is actionable in the log.
    raise ValueError(f"Unsupported file format: {file_path}")


# ----------------------------------------------------------------------------------
# Load a book and create the langchain QA chain
import shutil


def load_pdf_and_create_qa_chain(file_path: str, sub_path: str = ''):
    """Embed one book into the Chroma store and (re)build the global QA chain.

    Reads the file (PDF/EPUB), splits it into overlapping chunks, embeds the
    chunks batch-by-batch into a persistent Chroma DB under ``db[/sub_path]``,
    rebuilds the module-level ``qa_chain``, then copies the source file into
    ``over[/sub_path]`` as a "processed" marker.

    Args:
        file_path: path of the .pdf/.epub to ingest.
        sub_path: optional sub-folder name used to keep different knowledge
            areas in separate Chroma databases.
    """
    print('enter load and process')
    # load
    global vectorstore, qa_chain

    # Read the book into per-page / per-chapter Documents.
    documents = process_file(file_path)
    print('load file documents over!')

    # Split into overlapping chunks sized for the embedding model.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    # text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
    # Split the document contents.
    split_docs = text_splitter.split_documents(documents)
    print('split docs over!')

    # Target database directory.
    db_path = "db"
    # Optional sub-folder: lets different knowledge domains keep their
    # embedding data in separate db/<sub> directories.
    if sub_path:
        db_path = os.path.join(db_path, sub_path)
    # Create the directory (if it does not exist).
    os.makedirs(db_path, exist_ok=True)

    # Save document vectors to Chroma database
    try:
        # Embed in batches to bound per-call work.
        batch_size = 50
        print(f"Recommended batch size: {batch_size}")
        # Walk split_docs 50 documents at a time.
        for i in range(0, len(split_docs), batch_size):
            print(f'start batch-{i} persist! total: {len(split_docs)}')
            # Documents of the current batch.
            batch_docs = split_docs[i:i + batch_size]
            # NOTE(review): this re-instantiates Chroma for every batch; the
            # batches still accumulate because they share persist_directory,
            # but creating the store once and calling add_documents() would be
            # cheaper — confirm before changing.
            vectorstore = Chroma.from_documents(batch_docs, embeddings, persist_directory=db_path)
            vectorstore.persist()  # flush this batch to disk
            print(f'batch-{i} persisted! total: {len(split_docs)}')
        print('Vectorstore persist over!')
    except Exception as e:
        # Best-effort: log and bail out, leaving any previous chain intact.
        print(f'Error saving vectors: {e}')
        return

    # Rebuild the global QA chain over the freshly persisted store.
    qa_chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=vectorstore.as_retriever())
    print(f'embedding {file_path} success')

    # Move the processed PDF to the 'over/sub_path' directory
    over_path = os.path.join("over", sub_path)
    os.makedirs(over_path, exist_ok=True)
    # NOTE(review): shutil.copy leaves the original in place even though the
    # log below says "Moved" — confirm whether a real move was intended.
    shutil.copy(file_path, over_path)
    print(f'Moved {file_path} to {over_path}')


# %% pdf api
from fastapi import UploadFile, File, Form


# Upload a single book and index it immediately.
@app.post("/upload_pdf/{sub_path}")
async def upload_pdf(sub_path: str, file: UploadFile = File(...)):
    """Receive an uploaded PDF/EPUB, save it under files/<sub_path>, embed it.

    Args:
        sub_path: knowledge area; selects both the files/ sub-folder the
            upload is saved to and the db/ sub-folder it is embedded into.
        file: the uploaded document.

    Returns:
        dict: a simple status message.
    """
    # Create the per-area directory (if it does not exist).
    directory = f"files/{sub_path}"
    os.makedirs(directory, exist_ok=True)

    # Persist the upload to disk so the file-based loaders can open it.
    pdf_path = os.path.join(directory, f"temp_{file.filename}")
    with open(pdf_path, "wb") as f:
        content = await file.read()
        f.write(content)

    # Bug fix: sub_path was previously dropped here, so uploads for a specific
    # area were embedded into the root "db" store instead of db/<sub_path>
    # (inconsistent with the process_pdf endpoint below).
    load_pdf_and_create_qa_chain(pdf_path, sub_path)

    return {"message": "PDF uploaded and processed successfully.", "res": "ok"}


# Batch-index every book already sitting under files/<sub_path>.
@app.post("/process_pdf/{sub_path}")
async def process_pdf(sub_path: str):
    """Embed all PDF/EPUB files found recursively under files/<sub_path>.

    Returns:
        dict: a simple status message.
    """
    # Recursively find all PDF/EPUB files for this area.
    pdf_files = find_pdf_files(f"files/{sub_path}")
    print(pdf_files)
    # Plain loop instead of the original side-effect-only list comprehension
    # (the built list was discarded).
    for pdf_path in pdf_files:
        load_pdf_and_create_qa_chain(pdf_path, sub_path)

    return {"message": "PDF uploaded and processed successfully.", "res": "ok"}


# %% Image understanding (captioning + OCR)
from io import BytesIO

from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer, MarianMTModel, MarianTokenizer

# Image-captioning model (ViT encoder + GPT-2 decoder), cached locally under
# models/huggingface/. Loaded eagerly at import time, so the first server
# start downloads the weights.
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning",
                                                  cache_dir='models/huggingface/')
processor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning",
                                              cache_dir='models/huggingface/')
tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning",
                                          cache_dir='models/huggingface/')


def translate(text: str):
    """Translate English *text* to Chinese with the Helsinki-NLP MarianMT model.

    The tokenizer and model are loaded lazily on first use and cached on the
    function object — the original reloaded both from disk on every call.
    Local names are also distinct from the module-level captioning
    ``model``/``tokenizer`` globals that the original shadowed.

    Args:
        text: English source text.

    Returns:
        str: the Chinese translation.
    """
    # English -> Chinese translation model.
    model_name = "Helsinki-NLP/opus-mt-en-zh"

    # One-time lazy load of tokenizer + model.
    if not hasattr(translate, "_cached"):
        mt_tokenizer = MarianTokenizer.from_pretrained(
            pretrained_model_name_or_path=model_name,
            cache_dir='models/huggingface'
        )
        mt_model = MarianMTModel.from_pretrained(
            pretrained_model_name_or_path=model_name,
            cache_dir='models/huggingface'
        )
        translate._cached = (mt_tokenizer, mt_model)
    mt_tokenizer, mt_model = translate._cached

    # Encode the source text. Calling the tokenizer directly replaces the
    # deprecated prepare_seq2seq_batch() helper (removed in transformers v5).
    encoded_text = mt_tokenizer([text], return_tensors="pt", padding=True)

    # Generate the translation.
    translated = mt_model.generate(**encoded_text)

    # Decode the result back to a string.
    translated_text = mt_tokenizer.decode(translated[0], skip_special_tokens=True)

    print(translated_text)
    return translated_text


@app.post("/generate-caption/")
async def generate_caption(file: UploadFile = File(...), prompt: str = Form("")):
    """Caption an uploaded image (ViT-GPT2 caption translated to Chinese) and OCR it."""
    try:
        # Load the upload into a PIL image.
        raw = await file.read()
        image = Image.open(BytesIO(raw)).convert("RGB")

        # Preprocess for the vision encoder.
        pixel_values = processor(images=image, return_tensors="pt").pixel_values

        # Generate and decode the English caption.
        outputs = model.generate(pixel_values)
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # OCR the image for any embedded text.
        # text = pytesseract.image_to_string(image, lang='chi_sim+eng')
        text = pytesseract.image_to_string(image, lang='chi_sim')

        return {"caption": f"图像理解: {translate(generated_text)}  图像OCR: {text}"}
    except Exception as e:
        # Report the failure to the client instead of a 500.
        return {"error": str(e)}


# %% WebSocket chat endpoint
import re
from models import tavily_tool, search_agent
from models import wikipedia_search, web_search_tool
from langchain.agents import initialize_agent, AgentType
from fastapi import WebSocket, WebSocketDisconnect


@app.websocket("/ws/ask/{area}")
async def websocket_endpoint(websocket: WebSocket, area: str):
    """Interactive RAG chat over a WebSocket.

    Per connection: opens the Chroma store for *area* (db/<area>, or the root
    db when area is empty), builds a RetrievalQA chain plus a web-search
    agent, then loops forever: receive a question, combine the rolling
    conversation context and web-search results into a prompt, answer via the
    RAG chain, and send the reply back over the socket.
    """
    await websocket.accept()
    context = []  # rolling conversation history for this connection
    max_context_length = 1024  # cap on the joined context string (chars)

    # Open the per-area Chroma vector database.
    chroma_db = Chroma(persist_directory=f"db/{area}" if area else "db", embedding_function=embeddings)

    # Create RetrievalQA chain
    qa_chain2 = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=chroma_db.as_retriever(
            search_type="mmr",
            search_kwargs={'k': 3, 'lambda_mult': 0.75}
        ),
        return_source_documents=True  # include retrieved docs for inspection
    )

    # Initialize web search tool (agent)
    agent_chain = initialize_agent(
        [web_search_tool],
        # [tavily_tool],
        search_agent,
        agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )

    # Define templates based on area
    templates = {
        'knowledge': (
            "You are a professor with expertise in multiple academic disciplines. "
            "Based on the following context and the user's academic question, "
            "provide an authoritative and professional answer. "
            "If unable to answer the user's question based on background knowledge, ask follow-up questions related "
            "to the background knowledge, limited to three questions."
            # "Context: {context}\nQuestion: {question}\nAnswer(in chinese):"
            """
            你是一个专业的知识助手，请根据以下背景信息回答用户的问题。请尽量引用提供的上下文，如果无法找到答案，请说明原因。

            背景信息：
            {context}
            
            问题：{question}
            回答（用中文）：
            """
        ),
        # other areas could be added here, e.g. language: {"..."}
    }

    # Default to the 'knowledge' prompt; a different area key selects its own prompt.
    template = templates.get(area, templates['knowledge'])

    # Define PromptTemplate
    prompt_template = PromptTemplate(template=template, input_variables=["context", "question"])

    try:
        while True:
            data = await websocket.receive_text()

            context.append(f"用户输入：{data}")
            print(f'用户输入: {data}')

            # Truncate context if it exceeds the maximum length
            context_str = "\n".join(context)
            if len(context_str) > max_context_length:
                # Keep only the last 1024 chars (guards against exceeding the
                # model's input token limit).
                context_str = context_str[-max_context_length:]

            # Format the prompt with (further truncated) context.
            formatted_prompt = prompt_template.format(
                context=context_str[-800:],
                question=data
            )

            # Perform a web search if necessary
            # search_results = await perform_async_web_search(data)

            search_results = (agent_chain.run(data))
            print(f'搜索结果: \n{search_results}')
            combined_prompt = (
                f"{formatted_prompt}\n网络搜索结果: {search_results[-1200:]}"
            )
            print(f"combined prompt: \n{combined_prompt}")

            # Process the prompt using RetrievalQA chain (RAG)
            response = qa_chain2(combined_prompt)
            model_reply = response['result']

            # Some models echo the question and prefix the answer with
            # "Helpful Answer:"; extract just the answer when that happens.
            # Usually this regex does not match and the raw reply is used.
            match = re.search(r'Helpful Answer:\s*(.*)', model_reply, re.DOTALL)

            if match:
                helpful_answer = match.group(1).strip()
                context.append(f"ai回复：{helpful_answer}")
                await websocket.send_text(helpful_answer)
            else:
                context.append(f"ai回复：{model_reply}")
                await websocket.send_text(model_reply)

    except WebSocketDisconnect:
        print("Client disconnected")
    except Exception as e:
        # NOTE(review): if the socket is already closed when a generic error
        # lands here, send_text will raise again — confirm this is acceptable.
        error_message = f"An error occurred: {str(e)}"
        print(error_message)
        await websocket.send_text(error_message)


# %% start server
import uvicorn

if __name__ == '__main__':
    # Development server; reload=True restarts on source changes.
    uvicorn.run(app='app:app', host="127.0.0.1", port=8000, reload=True)
