import os
import logging
from flask import Flask, render_template, request, jsonify, Response
from flask_cors import CORS
import boto3
from dotenv import load_dotenv
from langchain_community.document_loaders import DirectoryLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma  # 修改1：替换FAISS为Chroma
import json

import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from LLM.awsllm import AwsLLM
from vecstore.vecstore import VecStore  # 新增导入
from Embeddings.FlagEmbeddingsWrapper import FlagEmbeddingsWrapper

# Load environment variables (e.g. AWS credentials) from a local .env file.
load_dotenv()

app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the front-end

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)




# Directory containing the raw knowledge-base documents.
LOCAL_KB_DIR = "local_knowledge"
# On-disk location of the Chroma vector store (replaced the earlier FAISS store).
# NOTE(review): not referenced elsewhere in this file — presumably consumed by
# VecStore internally; verify against vecstore/vecstore.py.
VECTOR_STORE_PATH = "vectorstore/chroma"
# Maximum number of characters of retrieved context injected into the prompt.
MAX_CONTEXT_LENGTH = 4096

# Embedding model wrapper handed to the vector store for similarity search.
embeddings = FlagEmbeddingsWrapper()

# AWS LLM client exposing a streaming conversation interface (converse_stream).
aws_llm = AwsLLM()

@app.route("/")
def home():
    """Serve the single-page chat UI."""
    return render_template("index.html")

# Initialise the vector store and load the persisted index. A failure here is
# fatal: the /chat endpoint cannot retrieve context without it, so abort early.
# (The duplicate re-definitions of LOCAL_KB_DIR / MAX_CONTEXT_LENGTH that used
# to sit here were removed — both constants are defined once near the top.)
vec_store = VecStore(embeddings=embeddings)
try:
    vec_store.initialize()
    vectorstore = vec_store.load()
except Exception as e:
    # exc_info=True records the full traceback, not just str(e).
    logger.critical("系统启动失败: %s", e, exc_info=True)
    # sys.exit instead of the exit() builtin, which is provided by the `site`
    # module for interactive use and is not guaranteed to exist.
    sys.exit(1)

# 修改相似度搜索调用
@app.route('/chat', methods=['POST'])
def chat():
    """RAG chat endpoint: retrieve relevant knowledge, then stream an LLM answer.

    Expects a JSON body ``{"message": "..."}``. Responds with a
    Server-Sent-Events stream of ``data: {"message": chunk, "sources": [...]}``
    frames; retrieval failures degrade to answering without context.
    """
    try:
        # silent=True makes get_json() return None instead of raising on a
        # non-JSON body, so malformed requests hit the 400 below, not a 500.
        data = request.get_json(silent=True) or {}
        user_message = data.get("message", "").strip()

        if not user_message:
            return jsonify({"message": "消息不能为空"}), 400

        logger.info("用户输入: %s", user_message)

        try:
            # Top-3 most similar chunks from the Chroma-backed store.
            docs = vec_store.similarity_search(user_message, k=3)
            # Concatenate the retrieved chunks, truncated to the prompt budget.
            context = "\n".join(d.page_content for d in docs)[:MAX_CONTEXT_LENGTH]
            sources = [{
                # .get avoids a KeyError when a document lacks "source" metadata.
                "title": os.path.basename(d.metadata.get("source", "")),
                "excerpt": d.page_content[:100] + "..."
            } for d in docs]
        except Exception as e:
            # Retrieval failure is non-fatal: fall back to an empty context.
            # logger.exception also captures the traceback.
            logger.exception("知识库检索失败: %s", e)
            context = ""
            sources = []

        def generate_stream():
            """Yield SSE-framed JSON chunks from the streaming LLM response."""
            try:
                conversation = [{
                    "role": "user",
                    "content": f"请根据以下上下文用中文回答问题：\n<context>\n{context}\n</context>\n问题：{user_message}"
                }]

                for chunk in aws_llm.converse_stream(conversation):
                    yield f"data: {json.dumps({'message': chunk, 'sources': sources})}\n\n"

            except Exception as e:
                logger.exception("流式响应生成失败: %s", e)
                # The stream has already started, so surface the error in-band.
                yield f"data: {json.dumps({'error': '生成响应失败: ' + str(e)})}\n\n"

        return Response(generate_stream(), mimetype='text/event-stream')

    except Exception as e:
        logger.exception("请求处理失败: %s", e)
        return jsonify({"message": f"请求处理失败: {str(e)}"}), 500

if __name__ == "__main__":
    # Development entrypoint: bind on all interfaces, port 5000, debug off.
    app.run(host="0.0.0.0", port=5000, debug=False)