# app.py
import os
import uuid
import tempfile
import streamlit as st
import chromadb
from chromadb.config import Settings
from dotenv import load_dotenv
from pdfminer.high_level import extract_text
from docx import Document
from openai import OpenAI

# --- Environment initialization ---
load_dotenv()


# --- Utility class definitions ---

class OpenAIClient:
    """Thin wrapper around an OpenAI-compatible endpoint (DashScope).

    Exposes batched text embeddings and a single-turn chat completion.
    """

    def __init__(self):
        # Security fix: read the API key from the environment (populated by
        # load_dotenv() at module import) instead of hard-coding a secret
        # into source control.
        self.client = OpenAI(
            api_key=os.getenv("DASHSCOPE_API_KEY"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )

    def get_embeddings(self, texts, model="text-embedding-v3"):
        """Return one embedding vector per input text.

        Requests are sent in batches of 10 — presumably the provider's
        per-request input limit (NOTE(review): confirm against the
        DashScope docs). Any API error is logged, then re-raised.
        """
        try:
            batch_size = 10
            all_embeddings = []
            for start in range(0, len(texts), batch_size):
                batch = texts[start:start + batch_size]
                response = self.client.embeddings.create(input=batch, model=model)
                all_embeddings.extend(item.embedding for item in response.data)
            return all_embeddings
        except Exception as e:
            print("Embedding error:", e)
            raise

    def ask(self, prompt, model="qwen-turbo"):
        """Send *prompt* as a single user message and return the reply text.

        temperature=0 keeps answers deterministic for the same prompt.
        """
        response = self.client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            temperature=0
        )
        return response.choices[0].message.content


class VectorDB:
    """Minimal ChromaDB wrapper: store document chunks and query by vector."""

    def __init__(self, embedding_fn):
        """*embedding_fn* maps a list of strings to a list of vectors."""
        self.client = chromadb.Client(Settings(allow_reset=True))
        self.collection = self.client.get_or_create_collection(name="rag_collection")
        self.embedding_fn = embedding_fn

    def add_documents(self, documents, metadata=None):
        """Embed and store *documents*, attaching *metadata* to every chunk.

        Empty input is a no-op. Fix: the original always sent
        ``[metadata] * len(documents)``, producing a list of ``None``
        entries when no metadata was given, which Chroma rejects; we now
        omit metadatas entirely in that case.
        """
        if not documents:
            return
        self.collection.add(
            embeddings=self.embedding_fn(documents),
            documents=documents,
            metadatas=[metadata] * len(documents) if metadata is not None else None,
            ids=[str(uuid.uuid4()) for _ in documents]
        )

    def search(self, query, top_n=3, filter=None):
        """Return the *top_n* nearest chunks for *query*.

        *filter* is an optional Chroma ``where`` clause, e.g.
        ``{"source": filename}``. (The name shadows the builtin but is
        kept because callers pass it by keyword.)
        """
        query_embedding = self.embedding_fn([query])
        return self.collection.query(
            query_embeddings=query_embedding,
            n_results=top_n,
            where=filter
        )


def extract_texts(file):
    """Extract plain text from an uploaded PDF or DOCX file.

    Args:
        file: file-like object with ``name`` and ``read()`` (e.g. a
            Streamlit UploadedFile).

    Returns:
        The extracted text as a single string.

    Raises:
        ValueError: if the extension is neither .pdf nor .docx
            (matched case-insensitively).
    """
    name = file.name.lower()
    if name.endswith(".pdf"):
        # pdfminer wants a real path. Close the temp file before reading it
        # (reopening an open NamedTemporaryFile fails on Windows) and delete
        # it afterwards — the original leaked one temp file per upload.
        tmp_path = None
        try:
            with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
                tmp_file.write(file.read())
                tmp_path = tmp_file.name
            return extract_text(tmp_path)
        finally:
            if tmp_path is not None:
                os.unlink(tmp_path)
    if name.endswith(".docx"):
        # python-docx accepts file-like objects directly.
        doc = Document(file)
        return '\n'.join(para.text for para in doc.paragraphs)
    raise ValueError("Unsupported file format!")


def split_text(text, max_length=400):
    """Greedily group newline-separated paragraphs into chunks.

    Paragraphs are appended to the current chunk while its length stays
    within *max_length*; a paragraph longer than *max_length* becomes a
    chunk of its own. Fix: the original appended an empty-string chunk
    when the very first paragraph was oversized (and returned ``[""]``
    for empty input) — empty chunks are now dropped, since they would be
    sent to the embedding API downstream.

    Returns:
        List of non-empty, stripped chunk strings (possibly empty list).
    """
    chunks = []
    current = ""
    for para in text.split('\n'):
        if len(current) + len(para) <= max_length:
            current += para + "\n"
        else:
            if current.strip():
                chunks.append(current.strip())
            current = para + "\n"
    if current.strip():
        chunks.append(current.strip())
    return chunks


def build_prompt(info, query):
    """Assemble the QA prompt: fixed system instructions followed by the
    retrieved context and the user's question."""
    template = (
        "\n"
        "你是一个知识问答机器人。\n"
        "请基于以下已知内容，回答用户提出的问题。\n"
        "如果无法根据内容回答，请回复\"我无法回答您的问题\"。\n"
        "\n"
        "已知内容:\n"
        "{info}\n"
        "\n"
        "问题:\n"
        "{query}\n"
    )
    return template.format(info=info, query=query)


# --- Application bootstrap ---
# Shared LLM/embedding client reused across reruns of this script.
llm_client = OpenAIClient()

# Streamlit page configuration (must run before any other st.* call).
st.set_page_config(
    page_title="🧠 高级版 RAG 问答机器人",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Custom CSS injected as raw HTML to style inputs, buttons and the chat
# bubbles (.user-question / .bot-answer, used by the history section below).
# The CSS comments inside the string are part of the runtime literal.
st.markdown("""
<style>
/* 主体背景色 */
body {
    background-color: #f0f2f6;
}
/* 输入框美化 */
input, textarea {
    background-color: #ffffff;
    border-radius: 8px;
}
/* 按钮美化 */
.stButton>button {
    background-color: #4CAF50;
    color: white;
    border-radius: 8px;
    height: 3em;
    font-weight: bold;
}
/* 聊天气泡样式 */
.user-question {
    background-color: #062039;
    padding: 10px;
    border-radius: 10px;
    margin-bottom: 5px;
}
.bot-answer {
    background-color: #262730;
    padding: 10px;
    border-radius: 10px;
    margin-bottom: 15px;
}
</style>
""", unsafe_allow_html=True)

st.title("🤖 RAG问答机器人")
st.caption("作者：何双新 ｜ RAG：RAG问答机器人")

# --- Session-state initialization (persists across Streamlit reruns) ---
if "vector_db" not in st.session_state:
    # One in-memory vector store per browser session.
    st.session_state.vector_db = VectorDB(embedding_fn=llm_client.get_embeddings)
if "uploaded_files" not in st.session_state:
    # Maps filename -> {"summary": str, "chunks": list[str]}.
    st.session_state.uploaded_files = {}
if "chat_history" not in st.session_state:
    # List of (question, answer) tuples, oldest first.
    st.session_state.chat_history = []

# --- Page layout ---

# File-upload section: each new PDF/DOCX is extracted, chunked, embedded
# into the vector store, and summarized once per session.
with st.container():
    st.header("📤 上传文档")
    uploaded_files = st.file_uploader(
        "上传 PDF 或 DOCX 文档，可多选",
        type=["pdf", "docx"],
        accept_multiple_files=True
    )

    if uploaded_files:
        with st.spinner("正在处理文档...请稍候"):
            for file in uploaded_files:
                # Skip files already processed in this session.
                if file.name not in st.session_state.uploaded_files:
                    texts = extract_texts(file)
                    chunks = split_text(texts)

                    # Tag every chunk with its source filename so searches
                    # can later be filtered to a single document.
                    st.session_state.vector_db.add_documents(
                        chunks,
                        metadata={"source": file.name}
                    )

                    # Ask the LLM for a Chinese summary of the full text.
                    # NOTE(review): the entire document goes into one prompt;
                    # very large files may exceed the model context — confirm.
                    summary_prompt = build_prompt(info=texts, query="请用中文总结这份文档的主要内容。")
                    summary = llm_client.ask(summary_prompt)

                    st.session_state.uploaded_files[file.name] = {
                        "summary": summary,
                        "chunks": chunks
                    }
            st.success("✅ 文档处理完成！")

# Show previously uploaded documents with their LLM-generated summaries.
with st.expander("📚 已上传文档与总结", expanded=True):
    if st.session_state.uploaded_files:
        for filename, fileinfo in st.session_state.uploaded_files.items():
            # Bug fix: the subheader previously printed the literal text
            # "(unknown)" instead of the document's filename.
            st.subheader(f"📄 {filename}")
            st.markdown(f"> ✍️ **总结：** {fileinfo['summary']}")
            st.markdown(f"> 📑 **片段数：** {len(fileinfo['chunks'])}")
    else:
        st.info("暂无上传文档。")

# Question input section: free-text question plus an optional scope filter.
st.header("💬 问答区")
cols = st.columns([4, 1])

with cols[0]:
    user_query = st.text_input("请输入你的问题：")
with cols[1]:
    target_doc = st.selectbox(
        "选择提问范围",
        ["所有文档"] + list(st.session_state.uploaded_files.keys())
    )

# On submit: retrieve the most relevant chunks, build the prompt, ask the LLM.
if st.button("🚀 提交问题"):
    if user_query.strip():
        with st.spinner("机器人思考中..."):
            if target_doc == "所有文档":
                results = st.session_state.vector_db.search(user_query, top_n=3)
            else:
                results = st.session_state.vector_db.search(
                    user_query, top_n=3, filter={"source": target_doc}
                )

            # Chroma returns one list of documents per query. Bug fix: the
            # original checked the outer list, which is truthy even with no
            # hits ([[]]), and interpolated the raw Python list repr into
            # the prompt; join the hits into readable context instead.
            doc_lists = results.get('documents') or []
            hits = doc_lists[0] if doc_lists else []
            info = "\n\n".join(hits) if hits else "无相关内容"

            prompt = build_prompt(info=info, query=user_query)
            answer = llm_client.ask(prompt)

            st.session_state.chat_history.append((user_query, answer))

            st.success("✅ 回答生成完成！")
    else:
        st.warning("⚠️ 请先输入你的问题！")

# --- Chat history, rendered most-recent exchange first ---
st.header("🕓 对话记录")
for turn, (question, answer) in enumerate(reversed(st.session_state.chat_history), 1):
    st.markdown(f"<div class='user-question'>**Q{turn}:** {question}</div>", unsafe_allow_html=True)
    st.markdown(f"<div class='bot-answer'>**A{turn}:** {answer}</div>", unsafe_allow_html=True)

# st.caption("安徽智加数字科技有限公司 · 技术学习组出品 🚀")
