from flask import Flask, render_template, request, jsonify
from werkzeug.utils import secure_filename
from docx import Document
import chromadb
import os
import requests
from sentence_transformers import SentenceTransformer
import logging
from dotenv import load_dotenv
from sentence_transformers import CrossEncoder

# Load environment variables from a local .env file (API key, proxy settings).
load_dotenv()

# API key for the SiliconFlow chat-completions endpoint.
siliconflow_api_key = os.getenv('SILICONFLOW_API_KEY')

# Fail fast at startup: refuse to run when the key is absent or implausibly
# short, so every later LLM call does not just 401.
if not siliconflow_api_key:
    raise ValueError("API密钥未设置")
if len(siliconflow_api_key) < 20:
    raise ValueError("API密钥无效")

app = Flask(__name__, static_folder='static', static_url_path='/static')
# Expose the zip() builtin to Jinja templates (parallel iteration in HTML).
app.jinja_env.globals.update(zip=zip)
app.config['UPLOAD_FOLDER'] = 'uploads'
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # 16MB max-limit

# Attach a stream handler with timestamped, leveled log records.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
app.logger.addHandler(handler)
app.logger.setLevel(logging.INFO)

# Make sure the upload directory exists before any file is saved into it.
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)

# Mirror proxy settings from the environment back into os.environ so that
# downstream HTTP clients (requests, model downloads) pick them up.
# NOTE(review): the len(...) > 18 threshold looks like an ad-hoc sanity check
# for a full "http://host:port" URL — confirm the intended cutoff.
http_proxy = os.getenv('HTTP_PROXY')
if http_proxy and len(http_proxy) > 18:
    os.environ['HTTP_PROXY'] = http_proxy
    app.logger.info(f'使用HTTP代理: {http_proxy}')
else:
    # Explicitly blank the variable so a short/garbage value is never used.
    os.environ['HTTP_PROXY'] = ''
    app.logger.info('未使用HTTP代理')

https_proxy = os.getenv('HTTPS_PROXY')
if https_proxy and len(https_proxy) > 18:
    os.environ['HTTPS_PROXY'] = https_proxy
    app.logger.info(f'使用HTTPS代理: {https_proxy}')
else:
    os.environ['HTTPS_PROXY'] = ''
    app.logger.info('未使用HTTPS代理')

 
# Load the BAAI/bge-m3 embedding model (vectorises documents and queries).
app.logger.info('开始加载BAAI/bge-m3嵌入模型...')
model = SentenceTransformer('BAAI/bge-m3')
app.logger.info('BAAI/bge-m3嵌入模型加载成功')

# Load the BAAI/bge-reranker-v2-m3 cross-encoder (reranks retrieved hits).
# The original log line truncated the model name ("...v2-m") — fixed.
app.logger.info('BAAI/bge-reranker-v2-m3重排模型开始加载')
reranker = CrossEncoder('BAAI/bge-reranker-v2-m3')
app.logger.info('BAAI/bge-reranker-v2-m3重排模型加载完成')

# Embedding-function adapter so ChromaDB can call the SentenceTransformer model.
from chromadb import EmbeddingFunction

class MyEmbeddingFunction(EmbeddingFunction):
    """Wraps the global bge-m3 SentenceTransformer as a Chroma embedding function.

    The parameter must be named ``input`` to satisfy Chroma's
    EmbeddingFunction call signature.
    """

    def __call__(self, input: list):
        # Encode the whole batch in a single call (one forward pass instead
        # of one per text) and convert each row to a plain Python list.
        return [vector.tolist() for vector in model.encode(input)]

embedding_function = MyEmbeddingFunction()


# 初始化ChromaDB客户端
chroma_client = chromadb.PersistentClient(path="./chroma_db")

# 创建或获取集合
collection = chroma_client.get_or_create_collection(name="documents", embedding_function=embedding_function)

def process_docx(file_path):
    """Extract the non-empty paragraph texts from a .docx file.

    Args:
        file_path: Path to a .docx document on disk.

    Returns:
        list[str]: Stripped text of each non-empty paragraph, in document order.
    """
    doc = Document(file_path)
    # Strip each paragraph once and keep only non-blank results
    # (the original called .strip() twice per paragraph).
    stripped = (paragraph.text.strip() for paragraph in doc.paragraphs)
    return [text for text in stripped if text]

@app.route('/')
def index():
    """Render the landing page (templates/index.html)."""
    return render_template('index.html')

@app.route('/upload_page')
def upload_page():
    """Render the document-upload page (templates/upload.html)."""
    return render_template('upload.html')

# Logging was already configured right after app creation; unconditionally
# attaching a second StreamHandler here made every log record be emitted
# twice.  Keep the level setting, but only attach a handler if none exists.
app.logger.setLevel(logging.INFO)
if not app.logger.handlers:
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    app.logger.addHandler(handler)

@app.route('/upload', methods=['POST'])
def upload_file():
    """Accept a .docx upload, split it into paragraphs and index them in Chroma.

    Returns JSON: 200 on success, 400 for client errors (no file, wrong type,
    empty document), 500 on processing failure.  The uploaded file is always
    deleted afterwards; only its text lives on in the vector store.
    """
    app.logger.info('收到文件上传请求')
    if 'file' not in request.files:
        return jsonify({'error': '没有文件被上传'}), 400

    file = request.files['file']
    if file.filename == '':
        return jsonify({'error': '没有选择文件'}), 400

    # Case-insensitive extension check so ".DOCX" uploads are accepted too.
    if not file.filename.lower().endswith('.docx'):
        return jsonify({'error': '只支持.docx文件'}), 400

    filename = secure_filename(file.filename)
    file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    file.save(file_path)

    try:
        # Extract paragraph texts from the document.
        text_content = process_docx(file_path)
        app.logger.info(f'成功处理文档内容，段落数: {len(text_content)}')

        if not text_content:
            return jsonify({'error': '文档内容为空'}), 400

        # Prefix ids with the sanitized filename so paragraphs from different
        # documents never collide (a constant prefix made every upload reuse
        # the same ids).
        collection.add(
            documents=text_content,
            ids=[f"{filename}_{i}" for i in range(len(text_content))]
        )
        app.logger.info(f'文件已存入知识库: {filename}')
        return jsonify({'success': True, 'message': '文件上传成功'}), 200

    except Exception as e:
        app.logger.error(f'文件处理失败: {str(e)}')
        return jsonify({'error': str(e)}), 500

    finally:
        # Best-effort cleanup of the temporary upload.
        if os.path.exists(file_path):
            os.remove(file_path)

@app.route('/search', methods=['POST'])
def search():
    """Answer a query via retrieve (Chroma) -> rerank (bge-reranker) -> LLM.

    Expects JSON {"query": str, "context": str|null}.  Returns the reranked
    knowledge-base hits plus the LLM's answer, or 400/500 JSON errors.
    """
    # get_json(silent=True) avoids a 500 when the body is missing/not JSON.
    payload = request.get_json(silent=True) or {}
    query = payload.get('query')
    context_from_frontend = payload.get('context')  # optional frontend context
    if not query:
        return jsonify({'error': '缺少查询内容'}), 400
    app.logger.info(f'收到搜索请求: "{query}"，前端上下文: "{context_from_frontend}"')

    # Stage 1: vector recall of candidate paragraphs from the knowledge base.
    initial_results = collection.query(
        query_texts=[query],
        n_results=10
    )
    candidates = initial_results['documents'][0]

    if candidates:
        # Stage 2: cross-encoder rerank; keep the TOP_K highest-scoring hits.
        # (The original comment said 5 but the code kept 8 — 8 is what runs.)
        TOP_K = 8
        pairs = [[query, doc] for doc in candidates]
        scores = reranker.predict(pairs)
        best = scores.argsort()[::-1][:TOP_K]
        results = {
            'documents': [[candidates[i] for i in best]],
            'ids': [[initial_results['ids'][0][i] for i in best]]
        }
    else:
        # Empty knowledge base: skip the reranker (it cannot score zero pairs).
        results = {'documents': [[]], 'ids': [[]]}
    context = '\n'.join(results['documents'][0])
    app.logger.info(f'返回知识库结果数: {len(results["documents"][0])}')

    try:
        msg = {
            'model': 'deepseek-ai/DeepSeek-R1-Distill-Qwen-7B',
            'messages': [{
                'role': 'user',
                'content': f'基于以下知识库内容：{context}\n前端上下文: {context_from_frontend}\n\n请回答：{query}'
            }]
        }
        app.logger.info("msg %s", msg)
        # A timeout keeps a stalled upstream from hanging this request forever.
        response = requests.post(
            'https://api.siliconflow.cn/v1/chat/completions',
            headers={
                'Authorization': 'Bearer ' + siliconflow_api_key,
                'Content-Type': 'application/json'
            },
            json=msg,
            timeout=120
        )
        response.raise_for_status()

        llm_response = response.json()['choices'][0]['message']['content']
        # Fixed mislabeled log line: this logs the LLM reply, not the request.
        app.logger.info("llm_response %s", llm_response)

    except Exception as e:
        app.logger.error(f'大模型处理失败: {str(e)}')
        return jsonify({'error': f'大模型处理失败：{str(e)}'}), 500

    return jsonify({
        'success': True,
        'knowledge_results': results,
        'llm_response': llm_response
    })

# Import additional route handlers from the local `route` module.
from route import register_routes

# The extra routes share the same Chroma collection as this module.
register_routes(app, collection)

if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # fine for development, must not be enabled in production.
    app.run(debug=True)