import re
import functools
import concurrent.futures
import pymysql.err
from ..models.task import Task
import threading
from ..logger import logger
from datetime import datetime, timedelta
from ..db_conn_pool import conn_pool
from ..models.task import Task, TaskStatus
from ..config import ConfigHandler
from ..exceptions import TaskStopException
from selenium.common.exceptions import TimeoutException
from urllib.parse import urlparse  # 导入 urlparse 函数
from ..spiders.seller_link_spider import SellerLinkSpider
from ..spiders.product_link_spider import ProductLinkSpider
from ..config import config


class CrawlService:
    """Schedules crawl tasks.

    Persists tasks through the ``Task`` model, keeps a bounded local work
    list (at most ``entry_cnt`` entries), and executes tasks on a thread
    pool driven by a repeating one-shot timer (``start_timer`` -> ``run``).
    """

    def __init__(self):
        # Maximum number of tasks held in the local work list at once.
        self.entry_cnt = 5
        self.local_task_list = []
        self.max_workers = 3
        self.executor = None
        self.timer = None
        self.running = False  # service running flag (was assigned twice before)
        self.lock = threading.Lock()  # guards running flag / local task list
        # Spider instances keyed by class name; task.func_name selects one.
        self.spider_pool = {
            SellerLinkSpider.__name__: SellerLinkSpider(),
            ProductLinkSpider.__name__: ProductLinkSpider()
        }

    def init(self):
        """Read the pool size from config and create the worker thread pool."""
        self.config = config
        self.max_workers = config.get_crawl_config().get('max_workers', self.max_workers)
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers)

    def close(self):
        """Stop the scheduler and shut the thread pool down without waiting."""
        with self.lock:
            self.running = False
        if self.timer:
            self.timer.cancel()
        # Guard: executor is still None if init() was never called.
        if self.executor:
            self.executor.shutdown(wait=False)
        logger.info('服务正在优雅退出...')

    def add_task(self, task_id, func_name, max_exec_times, next_exec_time, link):
        """Persist a new task; return the Task on success, None on failure.

        Duplicate-key and other DB errors are logged and swallowed on
        purpose (best-effort insert), matching the original behavior.
        """
        conn = conn_pool.get_connection()
        try:
            task = Task.add_task(conn, task_id, func_name, max_exec_times, next_exec_time, link)
            conn.commit()
            return task
        except pymysql.err.IntegrityError as e:
            conn.rollback()
            logger.error(f'唯一性约束冲突，任务 {task_id} 已存在: {e}')
        except Exception as e:
            conn.rollback()
            logger.error(f'添加任务 {task_id} 时发生未知错误: {e}')
        finally:
            conn.close()

    def get_tasks_to_execute(self):
        """Fetch up to ``entry_cnt`` due tasks from the database."""
        conn = conn_pool.get_connection()
        try:
            # No except clause: the previous `except ... raise e` only
            # rewrote the traceback without adding handling.
            return Task.get_tasks_to_execute(conn, self.entry_cnt)
        finally:
            conn.close()

    def update_local_task_list(self):
        """Merge running tasks with newly due ones, capped at ``entry_cnt``.

        Running tasks keep their slots; new tasks fill the remainder and get
        their ``last_fetched_at`` stamped in the database.
        """
        running_tasks = self.local_task_list.copy()
        tasks = self.get_tasks_to_execute()
        if not tasks:
            return
        conn = conn_pool.get_connection()
        try:
            current_time = datetime.now()
            # Compute the merged, capped list once (was built twice before).
            all_tasks = (running_tasks + tasks)[:self.entry_cnt]
            new_tasks = all_tasks[len(running_tasks):]
            for task in new_tasks:
                task.last_fetched_at = current_time
                Task.update_last_fetched_at(conn, task)
            conn.commit()
            self.local_task_list = all_tasks
        except Exception:
            conn.rollback()
            raise
        finally:
            conn.close()

    def crawl(self, spider_name, url):
        """Register two paged crawl tasks (pg=1 and pg=2) for ``url``.

        The task id is the first path segment of the URL. Returns the task
        id, or the error text when refreshing the local task list failed.
        """
        parsed_url = urlparse(url)
        path_parts = parsed_url.path.strip('/').split('/')
        task_id = path_parts[0]

        func_name = spider_name
        max_exec_times = self.config.get_crawl_config().get('max_exec_times', 3)
        next_exec_time = datetime.now()

        # Force page-1 and page-2 variants of the URL.
        pattern = r'(pg=)\d+'
        if re.search(pattern, url):
            url_1 = re.sub(pattern, 'pg=1', url)
            url_2 = re.sub(pattern, 'pg=2', url)
        else:
            # NOTE(review): assumes the URL has no other query string yet.
            url_1 = url + '?pg=1'
            url_2 = url + '?pg=2'

        try:
            self.add_task(f"{task_id}-1", func_name, max_exec_times, next_exec_time, url_1)
            self.add_task(f"{task_id}-2", func_name, max_exec_times, next_exec_time, url_2)
        except Exception as e:
            logger.error(f'添加任务 {task_id} 时数据库出错: {e}', exc_info=True)
        try:
            self.update_local_task_list()
        except Exception as e:
            logger.error(f'更新本地任务列表时数据库出错: {e}', exc_info=True)
            return f'{e}'
        return task_id

    def start_service(self, background=True):
        """Start the scheduler loop in a (by default daemon) thread."""
        with self.lock:
            self.running = True
        background_thread = threading.Thread(target=self.run, daemon=background)
        background_thread.start()

    def run(self):
        """One scheduler tick: submit due tasks to the pool, then re-arm
        the timer for the next tick."""
        try:
            try:
                # Periodic exit check.
                if not self.running:
                    return
                # Tasks still in flight: skip this tick, wait for the next.
                if self.active_task_cnt > 1:
                    logger.info(f'本地还有任务在运行: {self.active_task_cnt}')
                    self.start_timer()
                    return
                self.update_local_task_list()
                conn = conn_pool.get_connection()
                try:
                    for task in self.local_task_list:
                        task_name = f"{task.func_name}_{task.task_id}"
                        Task.update_last_fetched_at(conn, task)
                        self.executor.submit(functools.partial(self._execute_task, task_name=task_name), task)
                        logger.info(f'任务 {task.task_id} 已提交到线程池')
                    conn.commit()
                except Exception as e:  # fix: `e` was previously unbound here
                    conn.rollback()
                    logger.error(f'更新任务时数据库出错: {e}', exc_info=True)
                finally:
                    conn.close()
            except Exception as e:
                logger.error(f'执行任务时出错: {e}', exc_info=True)
            self.start_timer()
        except (KeyboardInterrupt, Exception) as e:
            if isinstance(e, KeyboardInterrupt):
                logger.info("接收到中断信号，开始优雅退出...")
            else:
                logger.error(f"发生未捕获异常: {e}", exc_info=True)
            # fix: self.shutdown() did not exist; the method is close().
            self.close()

    def start_timer(self):
        """(Re)arm the one-shot timer that triggers the next run()."""
        if self.timer:
            self.timer.cancel()
            logger.debug('定时器已取消，重新设置')
        sched_interval = self.config.get_crawl_config().get('sched_interval', 60)
        self.timer = threading.Timer(sched_interval, self.run)
        self.timer.daemon = True  # don't block interpreter exit
        trigger_time = (datetime.now() + timedelta(seconds=sched_interval)).strftime('%Y-%m-%d %H:%M:%S')
        logger.debug(f'定时器已设置，下次触发时间为: {trigger_time}')
        self.timer.start()

    @property
    def active_task_cnt(self):
        # fix: use the shared instance lock; a per-call Lock() guarded nothing.
        with self.lock:
            return len(self.local_task_list)

    def run_now(self):
        """Trigger a scheduler tick immediately (unless the pool is full)."""
        if self.active_task_cnt >= self.max_workers:
            logger.info('线程池已满，跳过执行')
            return
        if self.timer:
            self.timer.cancel()
            logger.info('定时器已取消，立即执行任务')
        timer = threading.Timer(0, self.run)
        timer.start()

    def _execute_task(self, task: "Task", **kwargs):
        """Run the spider for ``task``, record the outcome, and drop the
        task from the local list.

        Returns True/False for success/failure, None when skipped because
        the service is stopping.
        """
        if not self.running:
            logger.info(f'任务已停止, 跳过执行: {task.task_id}')
            return
        conn = conn_pool.get_connection()
        try:
            spider = self.spider_pool.get(task.func_name)
            if not spider:
                logger.error(f'找不到名为 {task.func_name} 的爬虫')
                self.remove_task(task)
                return False
            success = True
            try:
                spider.crawl(task.link)
            except TaskStopException as e:
                # A deliberate stop still counts as a successful run.
                logger.error(f'爬取 {task.link} 时遇到任务停止异常: {e}')
                success = True
                conn.rollback()
            except TimeoutException:
                logger.error(f'爬取 {task.link} 时遇到超时异常')
                success = False
                conn.rollback()
            except Exception as e:
                logger.error(f'爬虫执行出错 {task.task_id}: {e}', exc_info=True)
                success = False
                conn.rollback()
            try:
                Task.update_task_status(conn, task, success)
                conn.commit()
            finally:
                # Drop the task even if the status update raised, so it
                # cannot occupy a local slot forever.
                self.remove_task(task)
            logger.info(f'任务 {task.task_id} 执行完成，状态: {success}')
            return success
        except Exception as e:
            logger.error(f'执行任务 {task.task_id} 时发生未捕获异常: {e}', exc_info=True)
            return False
        finally:
            # fix: the connection previously leaked on the no-spider path
            # and when Task.update_task_status itself raised.
            conn.close()

    def remove_task(self, task: "Task"):
        """Remove a task from the local work list if present."""
        if task in self.local_task_list:
            self.local_task_list.remove(task)

    def restart_tasks(self, task_ids):
        """Reset the given tasks to PENDING with zero exec count so the
        scheduler picks them up again. Re-raises on DB errors."""
        conn = conn_pool.get_connection()
        try:
            for task_id in task_ids:
                task = Task.get_task(conn, task_id)
                if task:
                    task.status = TaskStatus.PENDING
                    task.exec_times = 0
                    task.next_exec_time = datetime.now()
                    Task.restart_task(conn, task)
            conn.commit()
        except Exception as e:
            conn.rollback()
            logger.error(f'批量重启任务时出错: {e}', exc_info=True)
            raise
        finally:
            conn.close()
        logger.info(f'批量重启任务 {task_ids} 成功')


# Module-level singleton; importers share this one service instance.
crawl_svc = CrawlService()