from flask import Flask, request, jsonify,Response
from flask_cors import CORS
import requests
import asyncio
import os
import logging
from typing import Dict, Any, Optional, List, Tuple
from werkzeug.utils import secure_filename
from io import BytesIO
from datetime import datetime
from zoneinfo import ZoneInfo
from minio import Minio
from minio.error import S3Error
from thcUtils import crypt
from db_handler import DatabaseHandler
from config import Config
import json
import subprocess
import configparser
from fastapi.responses import StreamingResponse
from datetime import datetime
import httpx
import asyncio
from models.dify import dify_blueprint  
import time
from gpu_client import PDFImageProcessor


# Configure logging for the whole service
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Flask(__name__)
CORS(app)
# Register the dify.py blueprint under the /python URL prefix
app.register_blueprint(dify_blueprint, url_prefix='/python')

# Initialize the database handler (connection settings come from Config)
db_handler = DatabaseHandler(**Config.DATABASE_CONFIG)

# Initialize the MinIO object-storage client
minio_client = Minio(
    Config.MINIO_CONFIG['endpoint'],
    access_key=Config.MINIO_CONFIG['access_key'],
    secret_key=Config.MINIO_CONFIG['secret_key'],
    secure=Config.MINIO_CONFIG['secure']
)

class APIError(Exception):
    """Application-level API failure carrying an HTTP status code.

    Attributes:
        message: human-readable error description.
        status_code: HTTP status to surface to the client (defaults to 500).
    """

    def __init__(self, message: str, status_code: int = 500):
        super().__init__(message)
        self.message = message
        self.status_code = status_code

async def api_call(url: str, method: str = 'POST', **kwargs) -> Tuple[Dict, int, Dict]:
    """Run a blocking `requests` call on a worker thread and normalize the result.

    Args:
        url: API endpoint URL
        method: HTTP method (default POST)
        **kwargs: extra arguments forwarded to requests.request (json=, files=, headers=, ...)

    Returns:
        Tuple[Dict, int, Dict]: (response payload, status code, response headers)

    Raises:
        APIError: on non-2xx responses (with the upstream status code) or any
            transport-level failure (wrapped, status 500).
    """
    try:
        # asyncio.to_thread keeps the event loop free while requests blocks.
        response = await asyncio.to_thread(requests.request, method, url, **kwargs)

        try:
            response_data = response.json()
        except json.JSONDecodeError:
            # Upstream returned a non-JSON body; keep a uniform dict shape.
            response_data = {"error": "Invalid JSON response"}

        if response.status_code not in [200, 201]:
            logger.error(f"API call failed: {response.text}")
            raise APIError(f"API call failed: {response.text}", response.status_code)

        # Guard against a literal JSON `null` body so callers can always .get()
        if response_data is None:
            response_data = {"error": "Empty response"}

        return response_data, response.status_code, response.headers
    except APIError:
        # BUGFIX: previously the broad handler below re-wrapped this APIError
        # with a default status of 500, losing the real upstream status code.
        raise
    except Exception as e:
        logger.error(f"API call error: {e}")
        raise APIError(str(e))

def get_file_from_minio(bucket_name: str, file_name: str) -> Optional[BytesIO]:
    """Fetch an object from MinIO as an in-memory stream.

    Args:
        bucket_name: bucket name
        file_name: object key

    Returns:
        Optional[BytesIO]: the file contents, or None when retrieval fails.
    """
    # BUGFIX: `response` must be predeclared — previously, if get_object raised,
    # the finally block hit an unbound name and masked the original S3Error.
    response = None
    try:
        response = minio_client.get_object(bucket_name, file_name)
        return BytesIO(response.read())
    except S3Error as e:
        logger.error(f"MinIO error: {e}")
        return None
    finally:
        # Always release the HTTP connection back to the pool when one was opened.
        if response is not None:
            response.close()
            response.release_conn()

def encrypt_password(data):
    """Encrypt the 'password' field of *data* in place and return the same dict."""
    plaintext = data['password']
    data['password'] = crypt(plaintext)  # thcUtils.crypt performs the encryption
    return data

async def setup_user_environment(authorization_token: str, user_id: str, ragflow_user_id: str) -> Dict[str, str]:
    """Provision a user's RAGFlow environment (model config, knowledge base, etc.).

    Flow: if the user already has an API key, reuse the stored knowledge base.
    Otherwise register three LLM endpoints (chat / rerank / embedding), set the
    tenant's default models, mint a new API token, then create (or recover) the
    user's personal knowledge base and persist both IDs.

    Args:
        authorization_token: authorization token from the RAGFlow login response
        user_id: business-side user ID
        ragflow_user_id: RAGFlow-side user ID (used as the tenant_id)

    Returns:
        Dict[str, str]: dict with "knowledge_id" and "api_key"

    Raises:
        APIError: raised when any provisioning step fails
    """
    # Pick the config file by environment
    # NOTE(review): hard-coded to the dev config file — presumably this should
    # switch on an environment variable; confirm before deploying elsewhere.
    config_file = 'config_dev.ini'
    config = configparser.ConfigParser()
    config.read(config_file)
    # Target endpoint URLs from the config file
    TARGET_SET_TENANT_INFO_URL = config['DEFAULT']['TARGET_SET_TENANT_INFO_URL']
    TARGET_NEW_TOKEN_URL = config['DEFAULT']['TARGET_NEW_TOKEN_URL']
    TARGET_DATASETS_URL = config['DEFAULT']['TARGET_DATASETS_URL']
    
    # TODO: handle the inconsistent states where only the API key or only the
    # knowledge base exists (currently assumed to exist or be absent together).
    # Check whether the user already has an API key
    api_key = db_handler.get_user_api_key(user_id)
    if api_key:
        logger.info(f"Using existing API key for user {user_id}")
        knowledge_id = db_handler.get_user_knowledge_info(user_id)["knowledge_id"]
        return {"knowledge_id": knowledge_id, "api_key": api_key}
    
    logger.info(f"Setting up new environment for user {user_id}")
    headers = {'Authorization': authorization_token}
    
    # Configure the chat LLM
    # SECURITY(review): API key and internal service IPs are hard-coded here;
    # these should be moved into configuration / secret storage.
    first_add_llm_data = {
        "model_type": "chat", 
        "llm_name": "qwen25-72b", 
        "api_base": "http://192.168.184.157:8010/v1", 
        "max_tokens": 64000, 
        "llm_factory": "OpenAI-API-Compatible",
        "api_key": "sk-5XvbIRBE3dtc5MVH374e4dF7Cf6a4f6cAa219e03E28c883b"
    }
    
    # Configure the rerank model
    second_add_llm_data = {
        "model_type": "rerank",
        "llm_name": "bge-reranker-base",
        "api_base": "http://192.168.180.74:18000/v1",
        "max_tokens": 512,
        "llm_factory": "Xinference"
    }
    
    # Configure the embedding model
    third_add_llm_data = {
        "model_type": "embedding",
        "llm_name": "custom-embedding",
        "api_base": "http://192.168.180.74:18000/v1",
        "max_tokens": 512,
        "llm_factory": "Xinference"
    }
    
    # Register the chat LLM via the add_llm endpoint
    first_response, first_status_code, _ = await api_call(
        'http://192.168.184.227/v1/llm/add_llm',
        method='POST',
        json=first_add_llm_data,
        headers=headers
    )
    
    if first_status_code not in [200, 201]:
        raise APIError(f"First add_llm failed: {first_response.get('message', '')}", first_status_code)
    
    # Register the rerank model via the add_llm endpoint
    second_response, second_status_code, _ = await api_call(
        'http://192.168.184.227/v1/llm/add_llm',
        method='POST',
        json=second_add_llm_data,
        headers=headers
    )
    
    if second_status_code not in [200, 201]:
        raise APIError(f"Second add_llm failed: {second_response.get('message', '')}", second_status_code)
    
    # Register the embedding model via the add_llm endpoint
    third_response, third_status_code, _ = await api_call(
        'http://192.168.184.227/v1/llm/add_llm',
        method='POST',
        json=third_add_llm_data,
        headers=headers
    )
    
    if third_status_code not in [200, 201]:
        raise APIError(f"Third add_llm failed: {third_response.get('message', '')}", third_status_code)
    
    # Tenant defaults: which model IDs to use for each role
    tenant_info = {
        "tenant_id": ragflow_user_id,
        "name": user_id,
        "llm_id": "qwen25-72b___OpenAI-API@OpenAI-API-Compatible",
        "embd_id": "custom-embedding@Xinference",
        "img2txt_id": "",
        "asr_id": "",
        "rerank_id": "bge-reranker-base@Xinference",
        "tts_id": None
    }
    
    # Apply the tenant configuration
    tenant_response, tenant_status_code, _ = await api_call(
        TARGET_SET_TENANT_INFO_URL,
        method='POST',
        json=tenant_info,
        headers=headers
    )
    
    if tenant_status_code not in [200, 201]:
        raise APIError(f"Set tenant info failed: {tenant_response.get('message', '')}", tenant_status_code)
    
    # Mint a new API token for this user
    new_token_response, new_token_status_code, _ = await api_call(
        TARGET_NEW_TOKEN_URL,
        method='POST',
        headers=headers
    )
    
    if new_token_status_code not in [200, 201] or new_token_response.get('code') != 0:
        raise APIError(f"New token request failed: {new_token_response.get('message', '')}", new_token_status_code)
    
    new_token = new_token_response['data']['token']
    db_handler.update_api_key(new_token, user_id)
    
    # Create the personal knowledge base
    dataset_data = {
        "name": "个人知识库",
        "language": "Chinese",
        "parser_config": {"chunk_token_num": 1024}
    }
    
    response, _, _ = await api_call(
        TARGET_DATASETS_URL,
        method='POST',
        json=dataset_data,
        headers={'Authorization': f'Bearer {new_token}'}
    )
    # Code 102 means a knowledge base with this name already exists for the account.
    if response['code'] == 102:
        # Try to recover the existing knowledge-base ID from our own database
        knowledge_info = db_handler.get_user_knowledge_info(user_id)
        if knowledge_info and knowledge_info.get("knowledge_id"):
            knowledge_id = knowledge_info["knowledge_id"]
        else:
            # No stored ID: retry creation under suffixed names (up to 3 attempts)
            attempts = 0
            max_attempts = 3
            while attempts < max_attempts:
                attempts += 1
                # Generate a fresh knowledge-base name for this attempt
                dataset_name = f"个人知识库_{attempts}"
                dataset_data = {
                    "name": dataset_name,
                    "language": "Chinese",
                    "parser_config": {"chunk_token_num": 1024}
                }
                
                response, _, _ = await api_call(
                    TARGET_DATASETS_URL,
                    method='POST',
                    json=dataset_data,
                    headers={'Authorization': f'Bearer {new_token}'}
                )
                
                if response['code'] == 0:
                    knowledge_id = response['data']['id']
                    break
                elif response['code'] == 102 and attempts < max_attempts:
                    logger.info(f"尝试创建知识库 {dataset_name} 失败，尝试使用新名称")
                else:
                    raise APIError(f"Failed to create dataset: {response.get('message', '')}", 500)
    else:
        knowledge_id = response['data']['id']
    
    # Persist the knowledge-base ID for subsequent fast-path lookups
    db_handler.update_knowledge_id(user_id,knowledge_id)
    
    return {"knowledge_id": knowledge_id, "api_key": new_token}

@app.route('/python/get_knowledgeid_by_userid', methods=['POST'])
async def get_knowledgeid_by_userid():
    """Return the user's knowledge-base ID, provisioning the account if needed.

    1. First try to read the user's knowledge-base info from the database.
    2. If absent, run the register -> login -> environment-setup flow.
    3. Return the knowledge-base ID and related info.

    Form fields:
        userid: business user ID (surrounding curly braces are stripped)
        account: account name used to derive the registration email

    Returns:
        JSON payload with the knowledge info on success, or an error object
        with a matching HTTP status code.
    """
    try:
        # Pick the config file by environment
        # NOTE(review): hard-coded to the dev config file; confirm intended.
        config_file = 'config_dev.ini'
        config = configparser.ConfigParser()
        config.read(config_file)
        # Target endpoint URLs
        TARGET_REGISTER_URL = config['DEFAULT']['TARGET_REGISTER_URL']
        TARGET_LOGIN_URL = config['DEFAULT']['TARGET_LOGIN_URL']

        # Validate required form parameters
        userid = request.form.get('userid')
        if not userid:
            raise APIError("User ID is required", 400)
        account = request.form.get('account')
        if not account:
            raise APIError("account is required", 400)
        userid = userid.replace("{", "").replace("}", "")
        logger.info(f"Processing request for user: {userid}")

        # Fast path: the knowledge base is already provisioned
        return_data = db_handler.get_user_knowledge_info(userid)
        if return_data is not None:
            logger.info(f"Found existing knowledge base for user: {userid}")
            return jsonify(return_data), 200

        # Slow path: register, log in, then create the knowledge base
        logger.info(f"Starting registration process for user: {userid}")

        # Registration payload (fixed default password; encrypted below)
        data = {
            "email": f"{account}@swj.com",
            "nickname": userid,
            "password": "Aa_123qwe"
        }

        try:
            # Encrypt the password before sending it upstream
            encrypted_data = encrypt_password(data)
            logger.debug(f"Encrypted registration data: {encrypted_data}")

            # Call the registration API
            response_data, status_code, _ = await api_call(
                TARGET_REGISTER_URL,
                json=encrypted_data
            )
            logger.info(response_data)
            # Accept both fresh registrations and "already registered" (body code 103)
            if status_code in [200, 201] or (response_data.get('code') == 103 and "already registered" in response_data.get('message', '')):
                # BUGFIX: the log previously compared the HTTP status code with 103,
                # but "already registered" is signalled by the body's `code` field.
                already_registered = response_data.get('code') == 103
                logger.info(f"User {userid} {'already registered' if already_registered else 'successfully registered'}")
                db_handler.register_user(userid)

                # Log in with the same (encrypted) credentials
                login_response_data, login_status_code, login_headers = await api_call(
                    TARGET_LOGIN_URL,
                    json={
                        'email': encrypted_data['email'],
                        'password': encrypted_data['password']
                    }
                )

                if login_status_code not in [200, 201]:
                    raise APIError(f"Login failed: {login_response_data.get('message', '')}", login_status_code)

                # The auth token is carried in the login response headers
                authorization = login_headers.get('Authorization')
                if not authorization:
                    logger.error("Failed to get authorization from login response headers")
                    return jsonify({"message": "官方账号不可执行"}), 403

                logger.info("Successfully obtained authorization from login response")

                ragflow_userid = login_response_data.get('data', {}).get('id')
                if not ragflow_userid:
                    raise APIError("Failed to get RAGFlow user ID", 500)

                # Provision models / tenant / knowledge base with the new authorization
                result = await setup_user_environment(authorization, userid, ragflow_userid)

                return jsonify(result), 200
            else:
                raise APIError(f"Registration failed: {response_data.get('message', '')}", status_code)

        except requests.exceptions.RequestException as e:
            logger.error(f"Request error: {e}")
            raise APIError(f"Request error: {str(e)}", 500)

    except APIError as e:
        logger.error(f"API Error: {e.message}")
        return jsonify({"error": e.message}), e.status_code
    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        return jsonify({"error": "Internal server error"}), 500

import asyncio

@app.route('/python/custom_upload', methods=['POST'])
async def custom_upload():
    """Kick off processing for one or more knowledge files.

    Expects form fields: dataset_id, doc_id (comma-separated file IDs),
    api_key, and userid (form or query string). Marks the files as
    "processing", fans out per-file work concurrently, and returns the
    per-file results.
    """
    try:
        dataset_id = request.form.get('dataset_id')
        raw_file_ids = request.form.get('doc_id')
        api_key = request.form.get('api_key')
        userid = request.form.get('userid') or request.args.get('userid')

        # All four parameters are mandatory
        if not (dataset_id and raw_file_ids and api_key and userid):
            return jsonify({'error': 'Missing required parameters'}), 400

        file_ids = [part.strip() for part in raw_file_ids.split(',') if part.strip()]
        if not file_ids:
            return jsonify({'error': 'Invalid file IDs'}), 400

        # Mark every file as "processing" before the fan-out
        await asyncio.to_thread(update_file_status, file_ids, 2)

        # Process all files concurrently
        tasks = [
            process_single_doc(fid, dataset_id, userid, api_key)
            for fid in file_ids
        ]
        results = await asyncio.gather(*tasks)

        # Aggregate response
        return jsonify({'results': results}), 200

    except Exception as e:
        logger.error(f"Unexpected error: {str(e)}")
        return jsonify({'error': '服务器错误', 'details': str(e)}), 500

async def process_single_doc(file_id, dataset_id, userid, api_key):
    """Process one knowledge file end to end.

    Steps: look up the file in the database, fetch it from MinIO, upload it to
    RAGFlow, wait for chunking to finish, run GPU post-processing, attach file
    metadata, record the knowledge-base ID, and update the file status.

    File-status codes (as used here and in custom_upload): 3 on success,
    5 on any failure — presumably 2=processing, 3=done, 5=failed; confirm
    against the knowledge_file schema.

    Returns:
        dict: per-file result — either {'file_id', 'success', 'doc_id',
        'chunks', 'doc_info'} or {'file_id', 'error', ...}.
    """
    try:
        # Look up where the file lives in MinIO
        with db_handler.get_connection() as conn:
            with conn.cursor() as cursor:
                cursor.execute(
                    "SELECT bucket_name, file_name FROM knowledge_file WHERE id = %s",
                    (file_id,)
                )
                row = cursor.fetchone()
        if not row:
            await asyncio.to_thread(update_file_status, [file_id], 5)
            return {'file_id': file_id, 'error': '数据库未找到文件'}

        bucket_name, file_name = row
        file_stream = get_file_from_minio(bucket_name, file_name)
        if not file_stream:
            await asyncio.to_thread(update_file_status, [file_id], 5)
            return {'file_id': file_id, 'error': 'MinIO 获取失败'}

        # Upload to RAGFlow (rewind first — the stream may have been read)
        file_stream.seek(0)
        files_for_upload = [('file', (file_name, file_stream))]
        additional_response_json, status_code = await call_additional_upload(userid, files_for_upload, dataset_id, api_key)

        if status_code not in [200, 201]:
            await asyncio.to_thread(update_file_status, [file_id], 5)
            return {'file_id': file_id, 'error': '上传 RAGFlow 失败', 'status': status_code}

        # Single-file upload: take the first (only) returned document ID
        doc_id = additional_response_json.get("chunks_response", {}).get("document_ids", [None])[0]
        if not doc_id:
            await asyncio.to_thread(update_file_status, [file_id], 5)
            return {'file_id': file_id, 'error': '未返回 document_id'}

        # Poll until RAGFlow finishes chunking (or times out)
        headers = {"Authorization": f"Bearer {api_key}"}
        if not await check_chunks_status(Config.RAGFLOW_CONFIG['address'], dataset_id, doc_id, headers):
            await asyncio.to_thread(update_file_status, [file_id], 5)
            return {'file_id': file_id, 'error': 'RAGFlow 处理超时'}

        # Hand the raw PDF to the GPU client for post-processing
        # NOTE(review): this is a blocking requests call inside an async function;
        # it will stall the event loop for the duration of the upload.
        file_stream.seek(0)
        gpu_url = f"http://{Config.GPU_CLIENT_CONFIG['address']}:{Config.GPU_CLIENT_CONFIG['port']}/upload_file"
        gpu_resp = requests.post(
            gpu_url,
            files=[('file', (file_name, file_stream, 'application/pdf'))],
            data={'dataset_id': dataset_id, 'document_id': doc_id},
            headers=headers
        )
        gpu_resp.raise_for_status()
        gpu_result = gpu_resp.json()
        if not gpu_result.get('success'):
            await asyncio.to_thread(update_file_status, [file_id], 5)
            return {'file_id': file_id, 'error': 'GPU 客户端处理失败', 'gpu_result': gpu_result}

        # Collect the finished chunks and document info
        chunks, doc_info = await get_all_chunk(
            Config.RAGFLOW_CONFIG['address'], dataset_id, doc_id, headers)

        # Attach file metadata (name + timestamps, Asia/Shanghai local time);
        # failure here is non-fatal and only logged.
        current_time = datetime.now(ZoneInfo("Asia/Shanghai")).strftime('%Y-%m-%d %H:%M:%S')
        meta_data = {"name": file_name, "create_time": current_time, "modify_time": current_time}
        try:
            meta_payload = {"meta": json.dumps(meta_data, ensure_ascii=False), "doc_id": doc_id}
            meta_resp = requests.post("http://192.168.184.227/v1/document/set_meta", json=meta_payload, headers=headers)
            meta_resp.raise_for_status()
        except Exception as e:
            logger.warning(f"[{file_id}] 元数据上传失败: {str(e)}")

        # Record the knowledge-base ID on the file row; failure is non-fatal.
        try:
            with db_handler.get_connection() as conn:
                with conn.cursor() as cursor:
                    cursor.execute("UPDATE knowledge_file SET knowledge_base_id = %s WHERE id = %s", (dataset_id, file_id))
                conn.commit()
        except Exception as e:
            logger.warning(f"[{file_id}] 更新 knowledge_base_id 失败: {str(e)}")

        # Mark the file as successfully processed
        await asyncio.to_thread(update_file_status, [file_id], 3)
        return {
            'file_id': file_id,
            'success': True,
            'doc_id': doc_id,
            'chunks': chunks,
            'doc_info': doc_info
        }

    except Exception as e:
        logger.error(f"[{file_id}] 异常: {str(e)}")
        await asyncio.to_thread(update_file_status, [file_id], 5)
        return {'file_id': file_id, 'error': str(e)}

def update_file_status(file_ids, status):
    """Set file_status on the given knowledge_file rows.

    Errors are logged rather than raised so that a status-update failure
    never aborts the surrounding processing pipeline.
    """
    try:
        with db_handler.get_connection() as conn:
            with conn.cursor() as cursor:
                marks = ','.join('%s' for _ in file_ids)
                cursor.execute(
                    f"UPDATE knowledge_file SET file_status = %s WHERE id IN ({marks})",
                    [status, *file_ids],
                )
            conn.commit()
    except Exception as exc:
        logger.error(f"状态更新失败: {str(exc)}")


async def get_all_chunk(address: str, dataset_id: str, document_id: str, headers: Dict[str, str], page_size: int = 30) -> Tuple[List[Dict], Dict]:
    """Fetch all chunks for a document, page by page.

    Args:
        address: RAGFlow server address (host[:port])
        dataset_id: dataset (knowledge base) ID
        document_id: document ID
        headers: request headers (must carry Authorization)
        page_size: chunks requested per page

    Returns:
        Tuple[List[Dict], Dict]: (chunk list, document info dict)
    """
    logger.info(f"开始获取切片信息...")
    chunks = []
    doc_info = {}
    page = 1
    while True:
        logger.info(f"正在获取第 {page} 页切片...")
        url = f"http://{address}/api/v1/datasets/{dataset_id}/documents/{document_id}/chunks"
        params = {
            'page': page,
            'page_size': page_size,
        }
        # BUGFIX: `params` was built but never sent, so every request fetched the
        # server-default (first) page and the pagination below never advanced.
        response = requests.get(url, headers=headers, params=params)
        if response.status_code != 200:
            logger.error(f"Error fetching chunks: {response.text}")
            break
        data = response.json()
        page_chunks = data.get('data', {}).get('chunks', [])
        chunks.extend(page_chunks)

        # Document info rides along with every page of the response
        doc_info = data.get('data', {}).get('doc', {})
        logger.info(f"文档信息: {doc_info}")

        # Stop paging as soon as the document is no longer RUNNING
        doc_status = doc_info.get('status')
        doc_run_status = doc_info.get('run')
        logger.info(f"文档状态: {doc_status}, 运行状态: {doc_run_status}")
        
        if doc_run_status != "RUNNING":
            logger.info("文档状态: %s", doc_run_status)
            logger.info("文档处理已完成")
            break
        
        # A short page means we've reached the last one
        if len(page_chunks) < page_size:
            logger.info("已获取所有切片")
            break
        page += 1
            
    return chunks, doc_info

async def check_chunks_status(address: str, dataset_id: str, document_id: str, headers: Dict[str, str], max_retries: int = 360, retry_interval: int = 10) -> bool:
    """Poll until every chunk of a document has content.

    Args:
        address: RAGFlow server address
        dataset_id: dataset (knowledge base) ID
        document_id: document ID
        headers: request headers (must carry Authorization)
        max_retries: maximum polling attempts (default 360, i.e. about 1 hour)
        retry_interval: seconds between attempts (default 10)

    Returns:
        bool: True when all chunks are processed; False on failure or timeout.
    """
    for _attempt in range(max_retries):
        try:
            chunks, doc_info = await get_all_chunk(address, dataset_id, document_id, headers)

            if not chunks:
                logger.error("未找到切片")

                # No chunks yet — inspect the document's processing state
                doc_status = doc_info.get('status')
                doc_run_status = doc_info.get('run')
                progress = doc_info.get('progress')
                progress_msg = doc_info.get('progress_msg')

                logger.info(f"文档状态: {doc_status}, 运行状态: {doc_run_status}, 进度: {progress}, 进度信息: {progress_msg}")

                if doc_run_status != "RUNNING":
                    logger.error("文档处理失败")
                    return False
                logger.info("文档仍在处理中，等待...")
                await asyncio.sleep(retry_interval)
                continue

            # Done once every chunk carries non-empty content
            if all(chunk.get('content') for chunk in chunks):
                logger.info(f"文档 {document_id} 的所有切片已处理完成")
                return True

            await asyncio.sleep(retry_interval)

        except Exception as e:
            logger.error(f"检查切片状态失败: {e}")
            return False

    logger.error(f"文档 {document_id} 的切片处理超时")
    return False

async def call_additional_upload(userid: str, files_to_send: List[Tuple], dataset_id: str, api_key: str) -> Tuple[Dict, int]:
    """Upload files to a RAGFlow dataset and trigger chunk parsing.

    Args:
        userid: user ID (currently unused; kept for interface compatibility)
        files_to_send: list of ('file', (name, stream)) tuples for the upload
        dataset_id: dataset (knowledge base) ID
        api_key: RAGFlow API key

    Returns:
        Tuple[Dict, int]: (response payload, status code). The payload is
        always a plain dict so callers can safely call .get() on it.
    """
    auth_headers = {"Authorization": f"Bearer {api_key}"}

    # Upload the files to the dataset
    response, status_code, _ = await api_call(
        f"http://{Config.RAGFLOW_CONFIG['address']}/api/v1/datasets/{dataset_id}/documents",
        method='POST',
        files=files_to_send,
        headers=auth_headers
    )

    if status_code not in [200, 201]:
        # BUGFIX: previously returned a Flask jsonify() Response here, breaking
        # the declared Tuple[Dict, int] contract that callers rely on.
        # (Defensive only: api_call raises APIError on non-2xx before this.)
        return {'error': 'update failed', 'details': response.get('message', '')}, status_code

    # Collect the IDs of the documents RAGFlow created
    document_ids = []
    for doc in response.get('data', []):
        if 'id' in doc:
            document_ids.append(doc['id'])
        else:
            logger.warning(f"无效文档数据: {doc}")

    if not document_ids:
        return {'error': '所有文件上传失败', 'details': response}, 500

    # Trigger chunk parsing for the uploaded documents
    chunk_data = {"document_ids": document_ids}
    chunks_response, chunks_status_code, _ = await api_call(
        f"http://{Config.RAGFLOW_CONFIG['address']}/api/v1/datasets/{dataset_id}/chunks",
        method='POST',
        json=chunk_data,
        headers=auth_headers
    )

    # Replaced stray debug print() calls with proper logging
    logger.info(f"chunks_response: {chunks_response}, chunks_status_code: {chunks_status_code}")

    if chunks_status_code not in [200, 201]:
        return {'error': 'Chunks creation failed', 'details': chunks_response.get('message', '')}, chunks_status_code

    return_data = {
        "knowledge": dataset_id,
        "document_ids": document_ids,
    }

    return {
        'message': 'success',
        'chunks_response': return_data
    }, chunks_status_code



DIFY_URL = "http://192.168.184.18:8011/v1/chat-messages"

@app.route("/python/water-message", methods=["POST"])
def proxy_dify_stream():
    """Transparently proxy a chat request to Dify and stream the SSE reply back."""
    # Drop the Host header so the upstream sees its own host, not ours
    forward_headers = {
        name: value
        for name, value in request.headers
        if name.lower() != "host"
    }
    payload = request.get_data()

    def relay():
        # Stream the upstream body chunk-by-chunk without buffering it all
        with requests.post(DIFY_URL, data=payload, headers=forward_headers, stream=True) as upstream:
            for piece in upstream.iter_content(chunk_size=None):
                if piece:
                    yield piece

    return Response(relay(), content_type='text/event-stream')

if __name__ == '__main__':
    # Development entry point: listen on all interfaces, port 9998;
    # debug mode is driven by Config.DEBUG.
    app.run(
        host='0.0.0.0',
        port=9998,
        debug=Config.DEBUG
    ) 
