import logging
import asyncio
import traceback
from fastapi import Depends, HTTPException, Request
from apscheduler.schedulers.background import BackgroundScheduler
from app.controllers.category_controller import add_category
from sqlalchemy.orm import Session
from app.controllers.index_controller import get_ai_index_list, add_documents,update_ai_index,add_index
from app.controllers.upload_controller import get_ai_upload_file_list,apply_file_upload_lease,update_ai_upload_file_record
from app.models.new_upload_model import upload_file
from app.database.db_setup import get_db
from app.controllers.new_upload_controller import add_file
from contextlib import contextmanager
import time
# Logging configuration: this module runs as background jobs, so INFO-level
# records are the primary visibility into what the scheduler is doing.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Context manager bridging the FastAPI-style generator dependency `get_db`
# into plain `with` usage for scheduled jobs (which run outside DI).
@contextmanager
def get_db_context():
    """Yield a database session obtained from the ``get_db`` generator.

    ``get_db`` is a generator dependency: its first ``yield`` produces the
    session and its teardown code runs when the generator is resumed or
    closed.  Closing the generator in ``finally`` guarantees that teardown
    (i.e. the session cleanup) always runs, even if the caller raises.
    """
    db_gen = get_db()  # keep the generator and the session in separate names
    try:
        yield next(db_gen)  # first yielded value is the session object
    finally:
        # Equivalent to exhausting the generator with next(): raises
        # GeneratorExit at its yield point so get_db's cleanup executes.
        db_gen.close()

# 添加增加知识库定时任务
async def add_indexs():
    logger.info("定时任务正在执行...")
    try:
        with get_db_context() as db:
            lists = await get_ai_index_list(db)  # 假设 get_ai_index_list 需要传入数据库会话
            for list_obj in lists:
                logger.info(f"list_obj 方法的返回结果: {list_obj}")
                try:
                    name=list_obj.get('name')
                    # 给阿里云创建知识库类别
                    add_result = add_category(
                        category_name=name
                    )
                    print(f"add_category 方法的返回结果: {add_result}")

                    if "error" in add_result:
                        return build_response(100, '失败', '名字已存在')

                    # 从返回结果中获取 body 部分
                    body = add_result.get("body")
                    if body is None:
                        return build_response(100, '失败', "返回结果有误")

                    # 从 body 中获取 Data 部分
                    data = body.get("Data")
                    if data is None:
                        return build_response(100, '失败', "返回结果有误")

                    category_id = data.get("CategoryId")
                    print(f"获取到的 CategoryId: {category_id}")

                    if not category_id:
                        return build_response(100, '失败', "返回结果有误")
                    # 给阿里云创建知识库
                    add_result = add_index(
                        name=name,
                        category_id=category_id,
                    )
                    print(f"add_result 方法的返回结果: {add_result}")

                    if "error" in add_result:
                        return build_response(100, '失败', '名字已存在')

                    # 从返回结果中获取 body 部分
                    body = add_result.get("body")
                    if body is None:
                        return build_response(100, '失败', "返回结果有误")

                    # 从 body 中获取 Data 部分
                    data = body.get("Data")
                    if data is None:
                        return build_response(100, '失败', "返回结果有误")

                    IndexId = data.get("Id")
                    print(f"获取到的 CategoryId: {IndexId}")

                    if not IndexId:
                        return build_response(100, '失败', "返回结果有误")

                    # 给阿里云创建知识库
                    add_result = await add_documents(
                        IndexId=IndexId,
                        category_id=category_id,
                    )
                    logger.info(f"add_result 方法的返回结果: {add_result}")
                    res = update_ai_index(db, list_obj.get('id'), category_id, IndexId)
                    logger.info(f"update_ai_index 返回参数: {res}")
                except Exception as e:
                    tb = traceback.format_exc()
                    logger.error(f"调用 add_documents 时出错，IndexId: {list_obj.IndexId}，错误信息: {e}\n{tb}")
            logger.info("定时任务执行成功")
    except Exception as e:
        tb = traceback.format_exc()
        logger.error(f"定时任务add_indexs执行出错: {e}\n{tb}")
    finally:
        db.close()  # 确保数据库会话被关闭
# Scheduled job: upload pending local files to Aliyun Bailian and register
# them, then mark the local upload records as done.
async def add_uplode():
    """Periodic task that processes pending upload-file records.

    For each record returned by ``get_ai_upload_file_list``:
      1. apply for an upload lease (``apply_file_upload_lease``) and extract
         the lease id, pre-signed URL and required headers;
      2. upload the file bytes to the pre-signed URL (``upload_file``);
      3. register the uploaded file (``add_file``) and extract FileId/Parser;
      4. persist the result locally via ``update_ai_upload_file_record``.

    Every record is handled independently: any failure is logged and the
    loop continues with the next record.
    """
    logger.info("定时任务正在执行...")
    try:
        with get_db_context() as db:
            lists = await get_ai_upload_file_list(db)
            logger.info(f"lists 方法的返回结果: {lists}")
            for list_obj in lists:
                logger.info(f"list_obj 方法的返回结果: {list_obj}")
                try:
                    # Step 1: ask Aliyun for an upload lease for this file.
                    lease_result = apply_file_upload_lease(list_obj.get('CategoryId'), list_obj.get('file_name'), list_obj.get('size_in_bytes'), list_obj.get('md5'))
                    if "error" in lease_result:
                        logger.error(f"apply_file_upload_lease 失败，错误信息: {lease_result['error']}")
                        continue
                    logger.info(f"apply_file_upload_lease 成功，返回结果: {lease_result}")

                    # Unwrap body -> Data from the lease result.
                    body = lease_result.get("body")
                    if body is None:
                        logger.error("apply_file_upload_lease 返回结果中 body 为空")
                        continue

                    data = body.get("Data")
                    if data is None:
                        logger.error("apply_file_upload_lease 返回结果中 Data 为空")
                        continue

                    FileUploadLeaseId = data.get("FileUploadLeaseId")
                    logger.info(f"获取到的 FileUploadLeaseId: {FileUploadLeaseId}")
                    if not FileUploadLeaseId:
                        logger.error("apply_file_upload_lease 返回结果中 FileUploadLeaseId 为空")
                        continue

                    # The lease's Param section carries the pre-signed URL and
                    # the exact headers the upload request must send.
                    param = data.get("Param")
                    if param is None:
                        logger.error("apply_file_upload_lease 返回结果中 Param 为空")
                        continue

                    headers = param.get("Headers")
                    if headers is None:
                        logger.error("apply_file_upload_lease 返回结果中 Headers 为空")
                        continue
                    logger.info(f"获取到的 headers: {headers}")

                    extra = headers.get("X-bailian-extra")
                    logger.info(f"获取到的 extra: {extra}")
                    if not extra:
                        logger.error("apply_file_upload_lease 返回结果中 X-bailian-extra 为空")
                        continue
                    content_type = headers.get("Content-Type")
                    if not content_type:
                        logger.error("apply_file_upload_lease 返回结果中 Content-Type 为空")
                        continue
                    pre_signed_url = param.get("Url")
                    if not pre_signed_url:
                        logger.error("apply_file_upload_lease 返回结果中 Url 为空")
                        continue

                    # Step 2: push the file bytes to the pre-signed URL.
                    res = upload_file(pre_signed_url, list_obj.get('absolute_file_path'), extra, content_type)
                    if not res:
                        # Previously a silent no-op; log so failed uploads are visible.
                        logger.error(f"upload_file 失败，文件: {list_obj.get('absolute_file_path')}")
                        continue

                    # Step 3: register the uploaded file with Aliyun.
                    add_res = add_file(FileUploadLeaseId, 'DASHSCOPE_DOCMIND', list_obj.get('CategoryId'), list_obj.get('file_name'))
                    if "error" in add_res:
                        logger.error(f"add_file 失败，错误信息: {add_res['error']}")
                        continue
                    logger.info(f"add_file 成功，返回结果: {add_res}")

                    body = add_res.get("body")
                    if body is None:
                        logger.error("add_file 返回结果中 body 为空")
                        continue

                    data = body.get("Data")
                    if data is None:
                        logger.error("add_file 返回结果中 Data 为空")
                        continue

                    FileId = data.get("FileId")
                    logger.info(f"获取到的 FileId: {FileId}")
                    if not FileId:
                        logger.error("add_file 返回结果中 FileId 为空")
                        continue
                    Parser = data.get("Parser")
                    logger.info(f"获取到的 Parser: {Parser}")
                    if not Parser:
                        logger.error("add_file 返回结果中 Parser 为空")
                        continue

                    # Step 4: mark the local record as uploaded (status=1)
                    # with the remote ids and the completion timestamp.
                    set_time = int(time.time())
                    res = update_ai_upload_file_record(db, list_obj.get('id'), FileId, Parser, 1, set_time)
                    logger.info(f"update_ai_upload_file_record 返回参数: {res}")
                except Exception as e:
                    tb = traceback.format_exc()
                    logger.error(f"定时任务add_uplode执行出错: {e}\n{tb}")
                    continue
            logger.info("定时任务执行成功")
    except Exception as e:
        tb = traceback.format_exc()
        logger.error(f"定时任务add_uplode执行出错: {e}\n{tb}")
    # No finally/db.close() here: get_db_context already guarantees cleanup,
    # and `db` would be unbound if the context manager itself failed.
# Scheduler wiring: both jobs are coroutines, but BackgroundScheduler runs
# plain callables in worker threads, so each job is wrapped in a small
# synchronous driver that spins up an event loop via asyncio.run.
scheduler = BackgroundScheduler()

def _run_add_indexs():
    # Synchronous adapter so the scheduler thread can execute the coroutine.
    asyncio.run(add_indexs())

def _run_add_uplode():
    # Synchronous adapter so the scheduler thread can execute the coroutine.
    asyncio.run(add_uplode())

# Both jobs fire on a fixed 1-minute interval.
scheduler.add_job(func=_run_add_indexs, trigger="interval", minutes=1)
scheduler.add_job(func=_run_add_uplode, trigger="interval", minutes=1)

def start_scheduler():
    """Start the background scheduler; call once at application startup."""
    scheduler.start()