# app/services/job_service.py

import os
from collections import deque
from datetime import datetime
from typing import Optional, List
from zoneinfo import ZoneInfo

import docker

from app.core.config import settings
from app.crud.crud_spider import crud_spider  # Assuming this exists and is correct
from app.crud.crud_job import crud_job
from app.db.models import JobStatus, JobType, Spider  # Assuming these exist and are correct
from app.db.session import SessionLocal

class JobService:
    """Run, monitor, and stop spider jobs as one-off Docker containers.

    Responsibilities:
      * ``run_spider_job`` — launch a spider's code in a container, track the
        job lifecycle in the DB, and persist the container logs to a file.
      * ``get_job_logs`` — read a job's saved log file (optionally tailing).
      * ``stop_job`` — stop a running job's container and finalize its status.

    If the Docker daemon is unreachable at construction time,
    ``self.docker_client`` is ``None`` and job execution/stopping is refused.
    """

    def __init__(self):
        """Create and ping a Docker client; degrade gracefully if unavailable."""
        try:
            self.docker_client = docker.from_env()
            # Ping now so a down daemon is detected at startup, not mid-job.
            self.docker_client.ping()
            print("Docker client initialized successfully.")
        except Exception as e:
            print(f"Error initializing Docker client: {e}")
            # Sentinel checked by run_spider_job/stop_job before any Docker call.
            self.docker_client = None

    @staticmethod
    def _now() -> datetime:
        """Current timezone-aware time in the configured timezone.

        Uses stdlib ``zoneinfo`` — the original code called ``pytz.timezone``
        without ever importing pytz, which raised ``NameError`` at runtime.
        """
        return datetime.now(ZoneInfo(settings.TIMEZONE))

    @staticmethod
    def _fail_job(db, job, message: str) -> None:
        """Mark *job* as FAILED with *message*, stamp completion time, commit."""
        crud_job.update(db, db_obj=job, obj_in={
            "status": JobStatus.FAILED,
            "error_message": message,
            "completed_at": JobService._now(),
        })
        db.commit()

    def run_spider_job(self, spider_id: str, job_id: str) -> None:
        """Run a spider job inside a Docker container.

        Lifecycle: validates spider code path, marks the job RUNNING, launches
        the container, waits for completion, saves logs to
        ``<LOGS_DIR>/jobs/<spider_id>/job_<job_id>.log``, then records
        COMPLETED or FAILED with the container's exit code.

        Args:
            spider_id: ID of the spider whose code should be executed.
            job_id: ID of the job row tracking this execution.
        """
        if not self.docker_client:
            print(f"Docker client not available. Cannot run job {job_id}.")
            # Record the failure so the job doesn't stay queued forever.
            db = None
            try:
                db = SessionLocal()
                job = crud_job.get(db, id=job_id)
                if job:
                    self._fail_job(db, job, "Docker client not available.")
            finally:
                if db:
                    db.close()
            return

        print(f"Starting execution for job {job_id} (Spider: {spider_id})")
        db = None
        job = None
        try:
            db = SessionLocal()
            spider = crud_spider.get(db, id=spider_id)
            job = crud_job.get(db, id=job_id)

            # Validate the spider's code path before spending a container on it.
            if not spider or not spider.code_path or not os.path.exists(spider.code_path):
                error_msg = (
                    f"Spider {spider_id} code not found or path invalid: "
                    f"{spider.code_path if spider else 'Spider not found'}"
                )
                print(error_msg)
                if job:
                    self._fail_job(db, job, error_msg)
                return

            if not job:
                print(f"Job {job_id} not found in database. Aborting.")
                return

            # Transition to RUNNING before the container starts.
            crud_job.update(db, db_obj=job, obj_in={
                "status": JobStatus.RUNNING,
                "started_at": self._now(),
            })

            # Logs are grouped per spider: LOGS_DIR/jobs/<spider_id>/job_<id>.log
            log_dir = os.path.join(settings.LOGS_DIR, "jobs", spider_id)
            os.makedirs(log_dir, exist_ok=True)
            log_path = os.path.join(log_dir, f"job_{job_id}.log")

            crud_job.update(db, db_obj=job, obj_in={"log_path": log_path})
            db.commit()  # Persist RUNNING status and log path together.

            self._execute_container(db, job, job_id, spider.code_path, log_path)

        except Exception as e:
            # Errors outside container execution (DB access, filesystem checks).
            error_msg_outer = f"运行 Job Service 时发生错误: {str(e)}"
            print(error_msg_outer)
            if db and job:  # Only update if both were successfully fetched.
                try:
                    self._fail_job(db, job, error_msg_outer)
                except Exception as db_update_err:
                    print(f"Failed to update job status after outer error: {db_update_err}")
        finally:
            if db:
                db.close()
                print(f"Database session closed for job {job_id}.")

    def _execute_container(self, db, job, job_id: str, code_path: str, log_path: str) -> None:
        """Run the job's container to completion, persist logs, record the result.

        The container mounts the spider's directory read-only at ``/app`` and
        is always removed in the ``finally`` block, after logs are fetched.
        """
        spider_dir = os.path.dirname(code_path)
        spider_filename = os.path.basename(code_path)
        container = None
        try:
            print(f"Running container for job {job_id} with image {settings.DOCKER_BASE_IMAGE}")
            container = self.docker_client.containers.run(
                settings.DOCKER_BASE_IMAGE,
                command=f"python /app/{spider_filename}",
                volumes={
                    spider_dir: {'bind': '/app', 'mode': 'ro'},  # read-only code mount
                },
                network=settings.DOCKER_NETWORK,
                detach=True,
                remove=False,  # keep container until logs are fetched; removed in finally
            )
            print(f"Container {container.id[:12]} started for job {job_id}.")

            # Record the container ID so stop_job can find it.
            crud_job.update(db, db_obj=job, obj_in={"container_id": container.id})
            db.commit()

            # Block until the container exits.
            result = container.wait()
            exit_code = result.get('StatusCode', -1)  # -1 if Docker omits the code
            print(f"Container {container.id[:12]} finished with status code {exit_code}.")

            # Fetch logs BEFORE the container is removed in finally.
            try:
                logs = container.logs(stdout=True, stderr=True).decode('utf-8', errors='replace')
                print(f"Retrieved {len(logs)} bytes of logs for container {container.id[:12]}.")
            except Exception as log_err:
                print(f"Error retrieving logs for container {container.id[:12]}: {log_err}")
                logs = f"Error retrieving logs: {log_err}"

            # Persist logs to the file get_job_logs reads from.
            try:
                with open(log_path, "w", encoding="utf-8") as f:
                    f.write(logs)
                print(f"Logs saved to {log_path}")
            except Exception as file_err:
                print(f"Error saving logs to {log_path}: {file_err}")

            # Final status: COMPLETED on exit code 0, FAILED otherwise.
            final_status_data = {
                "exit_code": exit_code,
                "completed_at": self._now(),
            }
            if exit_code == 0:
                final_status_data["status"] = JobStatus.COMPLETED
                print(f"Job {job_id} completed successfully.")
            else:
                final_status_data["status"] = JobStatus.FAILED
                final_status_data["error_message"] = f"爬虫运行失败，退出代码: {exit_code}. 查看日志获取详情。"
                print(f"Job {job_id} failed with exit code {exit_code}.")

            crud_job.update(db, db_obj=job, obj_in=final_status_data)
            db.commit()

        except docker.errors.ImageNotFound:
            error_msg = f"Docker image not found: {settings.DOCKER_BASE_IMAGE}"
            print(error_msg)
            if job:
                self._fail_job(db, job, error_msg)
        except docker.errors.APIError as api_err:
            error_msg = f"Docker API error: {api_err}"
            print(error_msg)
            if job:
                self._fail_job(db, job, error_msg)
        except Exception as run_err:
            error_msg = f"运行爬虫容器时发生未知错误: {str(run_err)}"
            print(error_msg)
            if job:
                self._fail_job(db, job, error_msg)
        finally:
            # Always clean up the container (logs were already saved above).
            if container:
                try:
                    container.remove()
                    print(f"Container {container.id[:12]} removed.")
                except docker.errors.NotFound:
                    print(f"Container {container.id[:12]} already removed.")
                except Exception as remove_err:
                    print(f"Error removing container {container.id[:12]}: {remove_err}")

    def get_job_logs(self, job_id: str, limit_lines: Optional[int] = None) -> str:
        """Return a job's saved log file contents.

        Args:
            job_id: ID of the job whose logs to read.
            limit_lines: if a positive int, return only the last N lines.

        Returns:
            The log text, or a human-readable error string when the job, log
            path, or log file is unavailable.
        """
        db = None
        try:
            db = SessionLocal()
            job = crud_job.get(db, id=job_id)

            if not job or not job.log_path:
                print(f"Job {job_id} not found or log path is missing.")
                return "日志路径不可用或任务不存在"

            if not os.path.exists(job.log_path):
                print(f"Log file not found at {job.log_path}")
                return "日志文件不存在"

            try:
                with open(job.log_path, "r", encoding="utf-8") as f:
                    if limit_lines and limit_lines > 0:
                        # deque(maxlen=N) keeps only the last N lines in one
                        # pass without loading the whole file into memory.
                        return "".join(deque(f, maxlen=limit_lines))
                    return f.read()
            except Exception as e:
                print(f"Error reading log file {job.log_path}: {e}")
                return f"读取日志文件时出错: {e}"

        except Exception as e:
            print(f"Error getting job logs for {job_id}: {e}")
            return f"获取日志时发生错误: {e}"
        finally:
            if db:
                db.close()

    def stop_job(self, job_id: str) -> bool:
        """Stop a job by stopping its Docker container.

        Args:
            job_id: ID of the job to stop.

        Returns:
            True if the container was stopped (or the job is already in a
            final state); False if it could not be stopped.
        """
        if not self.docker_client:
            print(f"Docker client not available. Cannot stop job {job_id}.")
            return False

        db = None
        try:
            db = SessionLocal()
            job = crud_job.get(db, id=job_id)

            if not job or not job.container_id:
                print(f"Job {job_id} not found or no container ID associated.")
                return False

            if job.status in (JobStatus.COMPLETED, JobStatus.FAILED, JobStatus.STOPPED):
                print(f"Job {job_id} is already in a final state ({job.status}). Not stopping.")
                return True  # Already finished; nothing to stop.

            try:
                container = self.docker_client.containers.get(job.container_id)
                print(f"Attempting to stop container {container.id[:12]} for job {job_id}...")
                # Configured grace period before Docker kills the container.
                container.stop(timeout=settings.STOP_TIMEOUT or 10)
                print(f"Container {container.id[:12]} stopped.")

                crud_job.update(db, db_obj=job, obj_in={
                    "status": JobStatus.STOPPED,
                    "completed_at": self._now(),
                    "error_message": "任务被手动停止。",
                })
                db.commit()
                return True
            except docker.errors.NotFound:
                print(f"Container {job.container_id} not found. Updating job status.")
                # The container vanished; if the job still looked active,
                # FAILED is more accurate than STOPPED.
                if job.status in (JobStatus.RUNNING, JobStatus.QUEUED):
                    self._fail_job(db, job, "尝试停止时发现容器不存在。")
                return False  # Nothing was actually stopped.
            except Exception as e:
                print(f"Error stopping container {job.container_id} for job {job_id}: {e}")
                self._fail_job(db, job, f"停止任务时发生错误: {e}")
                return False
        except Exception as e:
            print(f"Error in stop_job service for {job_id}: {e}")
            return False
        finally:
            if db:
                db.close()

# Module-level singleton: importers share one JobService (and thus one Docker
# client, created in __init__) instead of constructing their own per call.
job_service = JobService()