# Standard library
import importlib
import inspect
import logging
import os
import sys
import traceback
from datetime import datetime
from io import BytesIO
from typing import List, Tuple

# Third-party
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from fastapi import HTTPException, Request, UploadFile
from tortoise.contrib.pydantic import PydanticModel, pydantic_model_creator

# Local application
from excel.sysJobExcel import SysJobExcel
from model.sysJobModel import SysJob
from schema.sysJobLogSchema import SysJobLogCreate
from schema.sysJobSchema import PageParams, SysJobChange, SysJobCreate, SysJobUpdate
from service.sysJobLogService import SysJobLogService


class PydanticConfig:
    """Pydantic (v1-style) config: expose snake_case fields under camelCase aliases."""

    @staticmethod
    def alias_generator(field_name):
        # "create_by" -> "createBy": keep the first segment as-is,
        # capitalize every following segment.
        head, *rest = field_name.split('_')
        return head + ''.join(part.capitalize() for part in rest)

    # Accept payloads keyed by either the field name or its alias.
    allow_population_by_field_name = True


# Read-model serializer for SysJob rows; PydanticConfig gives it camelCase aliases.
SysJob_Pydantic = pydantic_model_creator(SysJob, name="SysJob", config_class=PydanticConfig)

# Configure logging.
# NOTE(review): basicConfig here configures the root logger at import time,
# which affects the whole process — confirm this is intentional.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class SysJobService:
    """Service layer for the scheduled-job table (SysJob).

    Provides CRUD, Excel import/export, ad-hoc execution, and keeps the
    application's APScheduler instance in sync with database changes.
    All methods are static; the scheduler is taken from ``request.app.state``.

    Status conventions observed in this module:
        status:     '0' = active row, '2' = soft-deleted row.
        job_status: '0' = running, '1' = paused.
    """

    # ------------------------------------------------------------------
    # Private helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _parse_invoke_target(invoke_target: str) -> Tuple[str, str, List[str]]:
        """Split an invoke target such as ``mod.func('a', 'b')`` into parts.

        Args:
            invoke_target: ``module.method`` optionally followed by a
                quoted, comma-separated argument list in parentheses.

        Returns:
            (module_name, method_name, params) where *params* are the raw
            string arguments with surrounding quotes/spaces stripped.
            An empty or absent argument list yields ``[]``.
            (The previous inline parsing turned ``mod.func()`` into a single
            empty-string argument — fixed here.)
        """
        call_parts = invoke_target.replace(')', '').split('(')
        dotted = call_parts[0].split('.')
        module_name, method_name = dotted[0], dotted[1]
        params: List[str] = []
        if len(call_parts) > 1 and call_parts[1].strip():
            params = [param.strip("' ") for param in call_parts[1].split(',')]
        return module_name, method_name, params

    @staticmethod
    def _import_task_module(module_name: str):
        """Import *module_name*, making the project's ``task`` directory importable.

        Raises:
            ImportError: with a user-facing message when the module is missing.
        """
        project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        task_dir = os.path.join(project_root, 'task')
        if task_dir not in sys.path:
            sys.path.append(task_dir)
        try:
            return importlib.import_module(module_name)
        except ImportError as e:
            raise ImportError(f"无法导入模块 {module_name}，请确保文件位于task目录下: {str(e)}")

    @staticmethod
    def _resolve_method(invoke_target: str):
        """Resolve an invoke target string to ``(callable, params)``."""
        module_name, method_name, params = SysJobService._parse_invoke_target(invoke_target)
        module = SysJobService._import_task_module(module_name)
        return getattr(module, method_name), params

    @staticmethod
    def _remove_if_scheduled(scheduler: AsyncIOScheduler, job_id_str: str) -> bool:
        """Remove the job from the scheduler if registered; return True if removed."""
        # get_job() is a direct lookup — cheaper than scanning get_jobs().
        if scheduler.get_job(job_id_str) is not None:
            scheduler.remove_job(job_id_str)
            return True
        return False

    @staticmethod
    async def _apply_scheduler_changes(scheduler: AsyncIOScheduler, job_id: int,
                                       old_status: str, update_dict: dict) -> None:
        """Reconcile the scheduler with a just-saved job update.

        Shared by ``update`` and ``change_status`` (previously duplicated).
        """
        job_id_str = str(job_id)
        if old_status == '1' and update_dict.get("job_status") == '0':
            # Resumed: register with the scheduler.
            await SysJobService.start_scheduled_job(job_id, scheduler)
        elif old_status == '0' and update_dict.get("job_status") == '1':
            # Paused: take it off the scheduler.
            SysJobService._remove_if_scheduled(scheduler, job_id_str)
        elif old_status == '0' and any(
                key in update_dict
                for key in ("cron_expression", "invoke_target",
                            "concurrent", "misfire_policy")):
            # Still running but scheduling inputs changed: re-register.
            SysJobService._remove_if_scheduled(scheduler, job_id_str)
            await SysJobService.start_scheduled_job(job_id, scheduler)

    # ------------------------------------------------------------------
    # CRUD
    # ------------------------------------------------------------------

    @staticmethod
    async def create(data: SysJobCreate, creator: str, request: Request = None) -> SysJob:
        """Create a new job record and, unless created paused, schedule it.

        Args:
            data: Creation payload.
            creator: Username recorded in ``create_by``.
            request: Optional request used to reach ``app.state.scheduler``.

        Returns:
            The created record serialized via ``SysJob_Pydantic``.
        """
        data_dict = data.dict(exclude_unset=True)
        data_dict["create_by"] = creator
        data_dict["create_time"] = datetime.now()
        data_dict["status"] = '0'

        obj = await SysJob.create(**data_dict)
        # job_status '1' means the job starts paused — don't schedule it.
        if (request and hasattr(request.app.state, 'scheduler')
                and data_dict.get("job_status") != '1'):
            try:
                await SysJobService.start_scheduled_job(
                    obj.job_id, request.app.state.scheduler)
            except Exception as e:
                logger.error(f"Failed to schedule new job {obj.job_id}: {str(e)}")

        return await SysJob_Pydantic.from_tortoise_orm(obj)

    @staticmethod
    async def get_by_id(id: int) -> SysJob:
        """Return the active job with the given ID.

        Raises:
            HTTPException: 404 if no active (status '0') row exists.
        """
        obj = await SysJob.get_or_none(job_id=id, status='0')
        if not obj:
            raise HTTPException(status_code=404, detail="定时任务调度表不存在")
        return await SysJob_Pydantic.from_tortoise_orm(obj)

    @staticmethod
    async def update(id: int, data: SysJobUpdate, updater: str, request: Request = None) -> SysJob:
        """Update a job record and keep the scheduler in sync with the change.

        Raises:
            HTTPException: 404 if no active row exists.
        """
        obj = await SysJob.get_or_none(job_id=id, status='0')
        if not obj:
            raise HTTPException(status_code=404, detail="定时任务调度表不存在")

        # Remember the pre-update run status to detect pause/resume transitions.
        old_status = obj.job_status

        update_dict = data.dict(exclude_unset=True)
        update_dict["update_by"] = updater
        update_dict["update_time"] = datetime.now()

        await obj.update_from_dict(update_dict).save()

        if request and hasattr(request.app.state, 'scheduler'):
            try:
                await SysJobService._apply_scheduler_changes(
                    request.app.state.scheduler, id, old_status, update_dict)
            except Exception as e:
                logger.error(f"Failed to update scheduler for job {id}: {str(e)}")
        return await SysJob_Pydantic.from_tortoise_orm(obj)

    @staticmethod
    async def delete(ids: List[int], updater: str, request: Request = None) -> int:
        """Soft-delete jobs (status -> '2') and unschedule them.

        Returns:
            Number of rows updated.
        """
        update_data = {
            "status": '2',
            "update_by": updater,
            "update_time": datetime.now()
        }
        count = await SysJob.filter(job_id__in=ids, status='0').update(**update_data)

        if request and hasattr(request.app.state, 'scheduler'):
            scheduler = request.app.state.scheduler
            for job_id in ids:
                # Per-job try so one failure doesn't stop the remaining removals.
                try:
                    if SysJobService._remove_if_scheduled(scheduler, str(job_id)):
                        logger.info(f"Removed scheduled job {job_id}")
                except Exception as e:
                    logger.error(f"Failed to remove scheduled jobs: {str(e)}")

        return count

    @staticmethod
    async def get_page(params: PageParams) -> Tuple[List[SysJob], int]:
        """Return one page of active jobs plus the total match count.

        Every supplied text parameter is applied as a case-insensitive
        substring (``icontains``) filter; results are newest-first.
        """
        query = SysJob.filter(status='0')

        # Optional fuzzy filters, applied only when the parameter is truthy.
        fuzzy_fields = (
            "job_name", "job_group", "invoke_target", "cron_expression",
            "misfire_policy", "concurrent", "job_status", "status",
            "create_by", "update_by", "remark",
        )
        for field in fuzzy_fields:
            value = getattr(params, field, None)
            if value:
                query = query.filter(**{f"{field}__icontains": value})

        total = await query.count()

        records = await (query
                         .offset((params.page_num - 1) * params.page_size)
                         .limit(params.page_size)
                         .order_by("-create_time"))

        return [await SysJob_Pydantic.from_tortoise_orm(record) for record in records], total

    # ------------------------------------------------------------------
    # Excel import/export
    # ------------------------------------------------------------------

    @staticmethod
    async def export_excel() -> BytesIO:
        """Export all active jobs to an Excel workbook."""
        records = await SysJob.filter(status='0').all()
        return await SysJobExcel.export_data(records)

    @staticmethod
    def get_import_template() -> BytesIO:
        """Return the blank Excel import template."""
        return SysJobExcel.get_import_template()

    @staticmethod
    async def import_data(file: UploadFile, creator: str) -> Tuple[int, List[str]]:
        """Import jobs from an uploaded Excel file.

        Returns:
            (success_count, error_msgs): rows imported, and one message per
            failed row ("第N行: ...", 1-based row index).

        Raises:
            HTTPException: 400 when the workbook cannot be parsed.
        """
        content = await file.read()
        file_bytes = BytesIO(content)

        try:
            data_list = SysJobExcel.parse_import_data(file_bytes)
        except ValueError as e:
            # Parser-reported validation problem: surface its message.
            raise HTTPException(status_code=400, detail=str(e))
        except Exception:
            raise HTTPException(status_code=400, detail="Excel文件格式错误")

        success_count = 0
        error_msgs = []

        for index, item_data in enumerate(data_list, start=1):
            try:
                item_data['create_by'] = creator
                item_data['create_time'] = datetime.now()
                item_data['status'] = '0'

                await SysJob.create(**item_data)
                success_count += 1

            except Exception as e:
                error_msgs.append(f"第{index}行: {str(e)}")

        return success_count, error_msgs

    # ------------------------------------------------------------------
    # Execution & scheduling
    # ------------------------------------------------------------------

    @staticmethod
    async def execute_job(job_id: int) -> dict:
        """Execute a job once, immediately, and record a SysJobLog entry.

        Args:
            job_id: The ID of the job to execute.

        Returns:
            dict with "status" "success"/"failed" plus result or error text.

        Raises:
            HTTPException: 404 if the job does not exist.
        """
        job = await SysJob.get_or_none(job_id=job_id, status='0')
        if not job:
            raise HTTPException(status_code=404, detail="定时任务不存在")

        start_time = datetime.now()
        log_data = {
            "job_name": job.job_name,
            "job_group": job.job_group,
            "invoke_target": job.invoke_target,
            "job_message": "开始执行任务",
            "start_time": start_time
        }

        try:
            method, params = SysJobService._resolve_method(job.invoke_target)
            result = method(*params)
            if inspect.isawaitable(result):
                # Support coroutine task functions as well as plain callables.
                result = await result

            log_data.update({
                "exec_status": "0",
                "job_message": f"任务执行成功: {str(result)}",
                "exception_info": None
            })
            return {
                "job_id": job_id,
                "job_name": job.job_name,
                "status": "success",
                "result": str(result)
            }

        except Exception as e:
            error_trace = traceback.format_exc()
            log_data.update({
                "exec_status": "1",
                "job_message": "任务执行失败",
                "exception_info": f"{str(e)}\n{error_trace}"
            })
            logger.error(f"Job {job_id} execution failed: {str(e)}\n{error_trace}")
            return {
                "job_id": job_id,
                "job_name": job.job_name,
                "status": "failed",
                "error": str(e)
            }

        finally:
            # Best-effort execution log; never mask the job outcome.
            log_data["end_time"] = datetime.now()
            try:
                await SysJobLogService.create(SysJobLogCreate(**log_data), "system")
            except Exception as log_error:
                logger.error(f"Failed to create job log: {str(log_error)}")

    @staticmethod
    async def start_scheduled_job(job_id: int, scheduler: AsyncIOScheduler) -> dict:
        """Register a job with APScheduler using its 6-field cron expression.

        Args:
            job_id: The ID of the job to schedule.
            scheduler: The application's AsyncIO scheduler.

        Returns:
            dict describing the outcome ("scheduled" or "scheduling_failed").

        Raises:
            HTTPException: 404 if the job does not exist, 400 if it is paused.
        """
        job = await SysJob.get_or_none(job_id=job_id, status='0')
        if not job:
            raise HTTPException(status_code=404, detail="定时任务不存在")
        if job.job_status == '1':
            raise HTTPException(status_code=400, detail="任务已暂停")

        try:
            method, params = SysJobService._resolve_method(job.invoke_target)

            # Quartz-style '?' is not understood by APScheduler; treat as '*'.
            cron_parts = ['*' if part == '?' else part
                          for part in job.cron_expression.split()]

            # Replace any previous registration for this job.
            job_id_str = str(job_id)
            SysJobService._remove_if_scheduled(scheduler, job_id_str)

            # misfire_policy: '1' = 60s grace, '2' = no limit, else 10s grace.
            misfire_grace_time = {'1': 60, '2': None}.get(job.misfire_policy, 10)

            # Expression field order: second minute hour day month day_of_week.
            trigger = CronTrigger(
                second=cron_parts[0],
                minute=cron_parts[1],
                hour=cron_parts[2],
                day=cron_parts[3],
                month=cron_parts[4],
                day_of_week=cron_parts[5]
            )

            add_kwargs = {
                "id": job_id_str,
                "misfire_grace_time": misfire_grace_time,
                "args": [job_id, method] + params,
            }
            if job.concurrent == '1':
                # '1' = concurrency disallowed: at most one running instance.
                add_kwargs["max_instances"] = 1
            # Static wrapper (not a bound method) keeps the job serializable.
            scheduler.add_job(SysJobService.job_wrapper, trigger, **add_kwargs)

            return {
                "job_id": job_id,
                "job_name": job.job_name,
                "status": "scheduled",
                "cron_expression": job.cron_expression
            }

        except Exception as e:
            return {
                "job_id": job_id,
                "job_name": job.job_name,
                "status": "scheduling_failed",
                "error": str(e)
            }

    @staticmethod
    async def change_status(data: SysJobChange, updater: str, request: Request = None) -> PydanticModel:
        """Change a job's run status and reconcile the scheduler accordingly.

        Raises:
            HTTPException: 404 if no active row exists.
        """
        job_id = data.job_id
        obj = await SysJob.get_or_none(job_id=job_id, status='0')
        if not obj:
            raise HTTPException(status_code=404, detail="定时任务调度表不存在")

        old_status = obj.job_status

        update_dict = data.dict(exclude_unset=True)
        update_dict["update_by"] = updater
        update_dict["update_time"] = datetime.now()

        await obj.update_from_dict(update_dict).save()

        if request and hasattr(request.app.state, 'scheduler'):
            try:
                await SysJobService._apply_scheduler_changes(
                    request.app.state.scheduler, job_id, old_status, update_dict)
            except Exception as e:
                logger.error(f"Failed to update scheduler for job {job_id}: {str(e)}")

        return await SysJob_Pydantic.from_tortoise_orm(obj)

    @staticmethod
    async def job_wrapper(job_id: int, method, *args):
        """Run a scheduled job's target callable and log its execution.

        Args:
            job_id: The ID of the job being executed.
            method: The resolved task callable.
            *args: Positional arguments for the callable.
        """
        start_time = datetime.now()
        job = await SysJob.get(job_id=job_id)

        log_data = {
            "job_name": job.job_name,
            "job_group": job.job_group,
            "invoke_target": job.invoke_target,
            "job_message": "开始执行任务",
            "start_time": start_time
        }

        try:
            result = method(*args)
            if inspect.isawaitable(result):
                # Support async task functions scheduled on the event loop.
                result = await result

            log_data.update({
                "exec_status": "0",
                "job_message": f"任务执行成功: {str(result)}",
                "exception_info": None
            })

        except Exception as e:
            error_trace = traceback.format_exc()
            log_data.update({
                "exec_status": "1",
                "job_message": "任务执行失败",
                "exception_info": f"{str(e)}\n{error_trace}"
            })
            logger.error(f"Job {job_id} execution failed: {str(e)}\n{error_trace}")
            # Re-raise so APScheduler records the run as failed.
            raise

        finally:
            # Best-effort execution log; never mask the job outcome.
            log_data["end_time"] = datetime.now()
            try:
                await SysJobLogService.create(SysJobLogCreate(**log_data), "system")
            except Exception as log_error:
                logger.error(f"Failed to create job log: {str(log_error)}")

