import asyncio
import contextlib
import json
import queue
import threading
import time
from abc import ABC
from concurrent.futures import as_completed
from typing import Any, Generator, AsyncGenerator

import requests
from DrissionPage._base.chromium import Chromium
from DrissionPage._pages.chromium_tab import ChromiumTab
from DrissionPage._pages.session_page import SessionPage
from fake_useragent import UserAgent
from sqlalchemy.dialects.mysql import insert
from sqlalchemy.exc import OperationalError
from config.db import get_session
from config.get_db import get_db
from util.mylog import my_logger
from urllib3 import Retry
from requests.adapters import HTTPAdapter

# Per-thread storage for the lazily-created requests.Session (see _create_session).
thread_local = threading.local()
# Shared HTTP adapter: large connection pool plus retry-with-backoff on 5xx responses.
adapter = HTTPAdapter(pool_connections=50, pool_maxsize=100, max_retries=Retry(total=5, backoff_factor=5, status_forcelist=[500, 502, 503, 504]))
class AbstractCrawlerHandler(ABC):
    """Shared plumbing for concrete crawler handlers.

    Provides helpers for draining thread-pool / asyncio futures, fetching
    pages with DrissionPage (optionally through a local proxy pool), and
    batched insert / update / upsert operations against MySQL.
    """

    def __init__(self, browser: Chromium):
        self.browser = browser
        # Serializes browser.new_tab() calls, which are not thread-safe
        # (see fetch_tab).
        self.lock = threading.Lock()

    @staticmethod
    def process_futures(futures: list) -> tuple[list, list]:
        """Drain ``concurrent.futures`` futures as they complete.

        Each future must resolve to ``(is_success, result)``: on success
        ``result`` is a list of items (flattened into the first return
        value), on failure it is a single failed model (appended to the
        second).

        :param futures: futures submitted to an executor.
        :return: ``(success_result, fail_result)``.
        """
        success_result = []
        fail_result = []
        task_cnt = len(futures)
        for idx, future in enumerate(as_completed(futures), start=1):
            try:
                is_success, result = future.result()
                if is_success:
                    success_result.extend(result)
                else:
                    my_logger.warning(f"任务失败: {result}")
                    fail_result.append(result)
                my_logger.debug(f"任务执行进度: {idx} / {task_cnt}")
            except Exception as e:
                my_logger.error(f"Error execute task: {e}")
        return success_result, fail_result

    @staticmethod
    def process_produce_futures(futures: list) -> None:
        """Wait for fire-and-forget futures, logging progress and errors.

        Results are discarded, so nothing is returned (the previous
        ``(list, list)`` annotation was incorrect).
        """
        task_cnt = len(futures)
        for idx, future in enumerate(as_completed(futures), start=1):
            try:
                future.result()
                my_logger.debug(f"任务执行进度: {idx} / {task_cnt}")
            except Exception as e:
                my_logger.error(f"Error execute task: {e}")

    @staticmethod
    async def get_result(futures: list) -> tuple[list, list]:
        """Aggregate already-awaited ``(success_list, fail_item)`` pairs.

        :param futures: completed results, each a 2-tuple of
            ``(successful items, failure payload)``.
        :return: ``(success_result, fail_result)``.
        """
        success_result = []
        fail_result = []
        task_cnt = len(futures)
        for idx, future in enumerate(futures, start=1):
            try:
                success_task, fail_task = future
                success_result.extend(success_task)
                fail_result.append(fail_task)
                my_logger.debug(f"任务执行进度: {idx} / {task_cnt}")
            except Exception as e:
                my_logger.error(f"Error execute task: {e}")
        return success_result, fail_result

    @staticmethod
    async def process_async_futures(futures: list) -> tuple[list, list]:
        """Async counterpart of :meth:`process_futures`.

        Awaits awaitables as they complete; each must resolve to
        ``(is_success, result)``.  Uses the project logger (the previous
        version fell back to ``print``, bypassing log configuration).
        """
        success_result = []
        fail_result = []
        task_cnt = len(futures)
        for idx, future in enumerate(asyncio.as_completed(futures), start=1):
            try:
                is_success, result = await future
                if is_success:
                    success_result.extend(result)
                else:
                    my_logger.warning(f"任务失败: {result}")
                    fail_result.append(result)
                my_logger.debug(f"任务执行进度: {idx} / {task_cnt}")
            except Exception as e:
                my_logger.error(f"Error execute task: {e}")
        return success_result, fail_result

    def _create_session(self):
        """Return this thread's pooled ``requests.Session`` (created on first use)."""
        if not hasattr(thread_local, "session"):
            session = requests.Session()
            # Share the module-level pooled/retrying adapter on both schemes.
            session.mount('http://', adapter)
            session.mount('https://', adapter)
            thread_local.session = session
        return thread_local.session

    @staticmethod
    @contextlib.contextmanager
    def fetch_page(url) -> Generator[SessionPage, Any, None]:
        """Fetch *url* into a ``SessionPage`` and yield it.

        The page is always closed on exit, even when the request or the
        caller's ``with`` body raises.
        """
        page = SessionPage()
        try:
            page.set.user_agent(UserAgent().random)
            page.set.retry_times(3)
            page.set.retry_interval(30)
            page.set.timeout(60)
            proxy = AbstractCrawlerHandler.get_proxy()
            if proxy:
                # NOTE(review): only the http scheme is proxied — confirm
                # whether https requests should also go through the proxy.
                page.get(url, proxies={"http": "http://{}".format(proxy)})
            else:
                page.get(url)
            yield page
        finally:
            page.close()

    @staticmethod
    @contextlib.asynccontextmanager
    async def async_fetch_page(url) -> AsyncGenerator[SessionPage, Any]:
        """Async variant of :meth:`fetch_page` (no proxy; request runs in a thread)."""
        page = SessionPage()
        try:
            page.set.user_agent(UserAgent().random)
            page.set.retry_times(3)
            page.set.retry_interval(30)
            page.set.timeout(60)
            # SessionPage.get is blocking; keep the event loop responsive.
            await asyncio.to_thread(page.get, url)
            yield page
        finally:
            page.close()

    @staticmethod
    @contextlib.contextmanager
    def fetch_data(url) -> Generator[Any, Any, None]:
        """Fetch *url* and yield its body parsed as JSON.

        Yields the decoded object on success, or ``''`` when decoding fails
        (the old annotation claimed ``str``, but ``json.loads`` results are
        yielded too).  Request errors propagate.  The page is always closed,
        including when the caller raises inside the ``with`` block — the old
        version leaked the page in that case and could even yield twice.
        """
        page = SessionPage()
        try:
            page.set.user_agent(UserAgent().random)
            page.set.retry_times(3)
            page.set.retry_interval(30)
            page.set.timeout(60)
            proxy = AbstractCrawlerHandler.get_proxy()
            if proxy:
                page.get(url, proxies={"http": "http://{}".format(proxy)})
            else:
                page.get(url)
            try:
                data = json.loads(page.html)
            except Exception as e:
                my_logger.error(f"Error fetch_data task: {e}")
                data = ''
            yield data
        finally:
            page.close()

    @contextlib.contextmanager
    def fetch_tab(self, url) -> Generator[ChromiumTab, Any, None]:
        """Open *url* in a new browser tab, yield it, and close it on exit."""
        # new_tab() is not thread-safe, so tab creation happens under the lock;
        # the caller may then use the tab without holding it.
        with self.lock:
            tab = self.browser.new_tab(url)
        try:
            yield tab
        finally:
            tab.close()

    @staticmethod
    def _is_lock_timeout(e) -> bool:
        """True when *e* wraps MySQL error 1205 (lock wait timeout).

        SQLAlchemy's ``OperationalError`` carries the DBAPI error in ``e.orig``;
        the numeric code lives in ``e.orig.args[0]``, not ``e.args[0]``.
        """
        orig = getattr(e, "orig", None)
        if orig is not None and getattr(orig, "args", None):
            return orig.args[0] == 1205
        return bool(e.args) and e.args[0] == 1205

    @staticmethod
    def bulk_insert(mapper, data: list):
        """Bulk-insert mapped rows, retrying once on a MySQL lock-wait timeout.

        All failures are logged and swallowed (best-effort contract preserved).
        Error handling now runs *inside* the session context — the old version
        touched ``session`` after the ``with`` block had closed it.
        """
        try:
            with get_session() as session:
                try:
                    session.bulk_insert_mappings(mapper, data)
                    session.commit()
                except OperationalError as e:
                    if not AbstractCrawlerHandler._is_lock_timeout(e):
                        raise
                    my_logger.warning("重试...")
                    # The failed transaction must be rolled back before
                    # retrying on the same session.
                    session.rollback()
                    session.bulk_insert_mappings(mapper, data)
                    session.commit()
                except Exception as e:
                    my_logger.error(f"Error bulk_insert task: {e}")
                    session.rollback()
        except Exception as e:
            my_logger.error(f"Error bulk_insert task: {e}")

    @staticmethod
    def bulk_update(mapper, data: list):
        """Bulk-update mapped rows, retrying once on a MySQL lock-wait timeout.

        Mirrors :meth:`bulk_insert`: failures are logged and swallowed, and all
        session use stays inside the session context.
        """
        try:
            with get_session() as session:
                try:
                    session.bulk_update_mappings(mapper, data)
                    session.commit()
                except OperationalError as e:
                    if not AbstractCrawlerHandler._is_lock_timeout(e):
                        raise
                    my_logger.warning("重试...")
                    session.rollback()
                    session.bulk_update_mappings(mapper, data)
                    session.commit()
                except Exception as e:
                    my_logger.error(f"Error bulk_update task: {e}")
                    session.rollback()
        except Exception as e:
            my_logger.error(f"Error bulk_update task: {e}")

    @staticmethod
    def bulk_save_or_update(model, data_list: list, unique_keys: list, batch_size=1000):
        """Upsert *data_list* in batches via ``INSERT ... ON DUPLICATE KEY UPDATE``.

        :param model: SQLModel class (fields read from ``model.model_fields``).
        :param data_list: model instances to persist.
        :param unique_keys: columns excluded from the UPDATE clause (the
            caller's list is no longer mutated; ``id``/``create_time``/
            ``update_time`` are excluded implicitly).
        :param batch_size: rows per INSERT statement / commit.

        Retries the whole operation up to 3 times on ``OperationalError``.
        The old version reused ``i`` as both batch offset and retry counter,
        so a failure after the first batch aborted immediately.
        """
        if not data_list:
            return
        skip_cols = set(unique_keys) | {"id", "create_time", "update_time"}
        attempts = 0
        while True:
            try:
                with get_session() as session:
                    columns = list(model.model_fields.keys())
                    for start in range(0, len(data_list), batch_size):
                        batch = data_list[start:start + batch_size]
                        data = [{col: getattr(obj, col) for col in columns} for obj in batch]
                        stmt = insert(model).values(data)
                        # Update every column except primary/unique/bookkeeping ones.
                        update_dict = {col: getattr(stmt.inserted, col) for col in columns if col not in skip_cols}
                        stmt = stmt.on_duplicate_key_update(**update_dict)
                        session.exec(stmt)
                        session.commit()  # commit once per batch
                return
            except OperationalError as e:
                attempts += 1
                if attempts > 3:
                    return
                my_logger.warning(f"操作错误，尝试重连 ({attempts}/{3})... 错误: {e}")
                time.sleep(10)
            except Exception as e:
                my_logger.error(f"Error bulk_save_or_update task: {e}")
                return

    @staticmethod
    async def async_bulk_save_or_update(model, data_list: list, unique_keys: list, batch_size=1000):
        """Async variant of :meth:`bulk_save_or_update`.

        Same contract and retry policy; sleeps with ``asyncio.sleep`` so the
        event loop is not blocked (the old version called ``time.sleep``),
        and keeps the retry counter separate from the batch offset.
        """
        if not data_list:
            return
        skip_cols = set(unique_keys) | {"id", "create_time", "update_time"}
        attempts = 0
        while True:
            try:
                columns = list(model.model_fields.keys())
                for start in range(0, len(data_list), batch_size):
                    # One session per batch, obtained from the async dependency.
                    async for session in get_db():
                        batch = data_list[start:start + batch_size]
                        data = [{col: getattr(obj, col) for col in columns} for obj in batch]
                        stmt = insert(model).values(data)
                        update_dict = {col: getattr(stmt.inserted, col) for col in columns if col not in skip_cols}
                        stmt = stmt.on_duplicate_key_update(**update_dict)
                        await session.exec(stmt)
                        await session.commit()  # commit once per batch
                return
            except OperationalError as e:
                attempts += 1
                if attempts > 3:
                    return
                my_logger.warning(f"操作错误，尝试重连 ({attempts}/{3})... 错误: {e}")
                await asyncio.sleep(10)
            except Exception as e:
                my_logger.error(f"Error bulk_save_or_update task: {e}")
                return

    @staticmethod
    def get_proxy():
        """Fetch a proxy address from the local proxy pool, or ``None`` on failure."""
        try:
            return requests.get('http://127.0.0.1:5011/get?type=https', timeout=3).json()['proxy']
        except Exception as e:
            my_logger.warning(f"Error get_proxy task: {e}")
            return None

    @staticmethod
    def process_tasks(task_queue, executor, task_function):
        """Drain *task_queue*, submitting each item to *executor* via *task_function*.

        :param task_queue: queue of argument tuples for *task_function*.
        :param executor: executor used to run the tasks concurrently.
        :param task_function: callable invoked as ``task_function(*args)``.
        :return: ``(success_tasks, failed_tasks)`` as produced by
            :meth:`process_futures`.
        """
        futures = []
        while not task_queue.empty():
            try:
                args = task_queue.get_nowait()
                future = executor.submit(task_function, *args)
                futures.append(future)
            except queue.Empty:
                # Another consumer drained the queue between empty() and get_nowait().
                break
        success_tasks, failed_tasks = AbstractCrawlerHandler.process_futures(futures)
        return success_tasks, failed_tasks