import json
import re
import os
import threading
import time
import requests
from bs4 import BeautifulSoup

import redis
import logging

import inspect
import importlib

import concurrent.futures

from typing import List, Tuple

from .model import ProcessResult, db_session, CustomDict

from utils import *
from utils import htmlprocessor
from urllib.parse import urlparse, urljoin

from rich.logging import RichHandler

# Silence logging coming from requests' underlying urllib3 transport.
logging.getLogger("urllib3").propagate = False

# Route all records through rich's handler for colourised console output.
FORMAT = "%(message)s"
logging.basicConfig(
    level="DEBUG", format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]
)

# Registry of all discovered WebWorker subclasses (populated at import time
# by the discovery loop at the bottom of this module).
worker_list = []


def parser_assign(url):
    """
    Dispatch a URL to every registered worker whose rule matches it.

    A worker's ``regex_url`` is either a regex string (matched with
    ``re.search``) or a callable that receives the URL and returns a truthy
    value when the worker should handle it.

    :param url: the URL to dispatch
    :return: list of matching WebWorker classes (possibly empty)
    """
    parsers = []
    for parser in worker_list:
        rule = parser.regex_url
        if isinstance(rule, str):
            if re.search(rule, url):
                parsers.append(parser)
        # BUG FIX: the original condition was
        # ``callable(rule) and callable(rule)`` — it tested callability twice
        # and never consulted the rule, so every callable rule matched every
        # URL. The callable must be invoked with the URL.
        elif callable(rule) and rule(url):
            parsers.append(parser)
    return parsers


class WebParser:
    def __init__(self, mark_id,
                 max_depth=1,
                 already_urls=None,
                 queue_urls=None,
                 ignore_static=True,
                 search_whitelist=(),
                 search_blacklist=(),
                 redis_host='127.0.0.1',
                 redis_port=6379,
                 redis_db=0,
                 max_workers=5, ):
        """
        Page-processing coordinator.

        Pops URLs from a Redis-backed queue and dispatches each one to every
        matching ``WebWorker`` on a bounded thread pool.

        :param mark_id: ID used to namespace this crawl's Redis keys
        :param max_depth: maximum parse depth; deeper queue entries are dropped
        :param already_urls: URLs already parsed, to prevent re-parsing; when
                             provided, replaces the set previously stored in Redis
        :param queue_urls: URLs queued for parsing; if None, the queue recorded
                           in Redis is resumed as-is
        :param ignore_static: whether to ignore static resources
        :param search_whitelist: whitelist for following links downwards
        :param search_blacklist: blacklist for following links downwards
        :param redis_host: redis host
        :param redis_port: redis port
        :param redis_db: redis db
        """
        self.mark_id = mark_id
        self.max_depth = max_depth
        self.already_urls = already_urls

        self.ignore_static = ignore_static
        self.search_whitelist = search_whitelist
        self.search_blacklist = search_blacklist

        # Initialise the Redis connection
        self.redis = redis.Redis(host=redis_host, port=redis_port, db=redis_db)
        self.queue_urls = queue_urls

        # Redis key names for the pending queue and the already-visited set
        self.queue_id = 'WebLexGuard:queue-' + mark_id
        self.already_urls_id = 'WebLexGuard:already-' + mark_id

        # If the caller supplied queue contents, discard the old queue first
        if self.queue_urls is not None:
            self.redis.delete(self.queue_id)
            for url in self.queue_urls:
                self.queue_push(url=url,
                                forward_urls=[],
                                depth=0)

        # If the caller supplied already_urls, use those and clear the old data
        if self.already_urls is not None:
            self.redis.delete(self.already_urls_id)
            for url in self.already_urls:
                self.set_already_url(url=url)

        # Reset every worker slot's published status to "idle"
        for i in range(max_workers):
            self.process_set(i, '-', '空闲等待任务中', code=0)

        # Thread-pool bookkeeping: lazily assigned per-thread worker ids
        self.thread_local = threading.local()
        self.thread_id_counter = 0
        self.thread_id_lock = threading.Lock()

        # Thread pool plus a semaphore capping in-flight submissions
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
        self.semaphore = threading.Semaphore(max_workers)

    def get_current_worker_id(self):
        """Return a stable id for the calling pool thread.

        Ids are handed out lazily on first call; the shared counter is
        guarded by a lock (``thread_local`` itself is per-thread, so the
        hasattr check needs no locking).
        """
        if not hasattr(self.thread_local, 'worker_id'):
            with self.thread_id_lock:
                self.thread_local.worker_id = self.thread_id_counter
                self.thread_id_counter += 1
        return self.thread_local.worker_id

    def process_set(self, index: int, url: str, status: str, note='', code=1):
        """Publish a worker slot's current status to Redis as a JSON blob."""
        self.redis.set('WebLexGuard:process-' + self.mark_id + ':' + str(index), json.dumps({
            'url': url,
            'code': code,
            'status': status,
            'note': note
        }))

    def queue_push(self, url: str, forward_urls: list, depth: int):
        """Append a URL (with its referrer chain and depth) to the Redis queue."""
        queue_rpush(self.redis, self.queue_id, url, forward_urls, depth)

    def queue_pop(self) -> dict | None:
        """Pop the next queue entry, or None when the queue is empty."""
        return queue_lpop(self.redis, self.queue_id)

    def set_already_url(self, url):
        """Record a URL in the Redis visited-set."""
        self.redis.sadd(self.already_urls_id, url)

    def is_already_url(self, url) -> bool:
        """Return True if the URL is already in the Redis visited-set."""
        return self.redis.sismember(self.already_urls_id, url) == 1

    def process(self):
        """
        Fully automatic processing: finds a matching handler for each queued URL.

        Runs forever, polling the Redis queue once per second when idle and
        fanning matching workers out onto the thread pool.

        :return:
        """
        # Main dispatch loop

        while True:
            data = self.queue_pop()

            # Queue empty — back off briefly before polling again
            if data is None:
                time.sleep(1)
                continue

            url, forward_urls, depth = data['url'], data['forward_urls'], data['depth']

            # Too deep — drop the entry
            if depth > self.max_depth:
                continue

            # Already visited — skip
            if self.is_already_url(url):
                continue

            # Look up every worker able to handle this URL
            logging.info(f"正在为 {url} 寻找匹配处理器......")

            workers = parser_assign(url=url)
            if workers:
                # Blocks here once max_workers jobs are already in flight
                self.semaphore.acquire(blocking=True)
                future = self.executor.submit(self.work, workers,
                                              url,
                                              forward_urls,
                                              depth,
                                              self.search_whitelist,
                                              self.search_blacklist
                                              )
                future.add_done_callback(lambda f: self.semaphore.release())
            else:
                logging.warning(f"{url} 没有合适的处理器......")

    def work(self, workers, url, forward_urls, depth, search_whitelist, search_blacklist):
        """Run every matching worker for one URL on the current pool thread.

        Workers are chained: each receives the previous worker's ``deliver``
        context object, so shared artefacts can be passed along the chain.
        """
        worker_id = self.get_current_worker_id()

        # Mark as visited up-front (before processing) to avoid duplicates
        self.set_already_url(url)

        deliver = CustomDict()
        for worker in workers:
            try:
                deliver = worker(url, worker_id, self.mark_id,
                                 forward_urls,
                                 depth,
                                 search_whitelist,
                                 search_blacklist,
                                 self.redis,
                                 deliver).deliver
            except Exception as e:
                logging.error(f"子处理器处理 {url} 时出现异常。")
                logging.exception(e)




class WebWorker:
    NAME = "通用处理器"
    # Matching rule: a regex string by default (matched with re.search in
    # parser_assign); may instead be a callable that receives the URL.
    regex_url = '.*'

    def __init__(self, url,
                 worker_id,
                 mark_id,
                 forward_url,
                 depth,
                 search_whitelist,
                 search_blacklist,
                 redis_,
                 deliver):
        """
        Page worker; this base class implements the main processing flow:
        run ``process()``, persist its results, and enqueue any new links.

        :param url: the URL being processed
        :param worker_id: id of the pool thread running this worker
        :param mark_id: crawl id used to namespace Redis keys
        :param forward_url: chain of referrer URLs that led to this one
        :param depth: current crawl depth
        :param search_whitelist: whitelist for collecting further links
        :param search_blacklist: blacklist for collecting further links
        :param redis_: shared Redis connection
        :param deliver: context object carried across the workers for this URL
        """

        # Current Redis connection and namespaced key names
        self.redis = redis_
        self.mark_id = mark_id
        self.queue_id = 'WebLexGuard:queue-' + mark_id
        self.already_urls_id = 'WebLexGuard:already-' + mark_id

        # Basic data
        self.worker_id = worker_id
        self.url = url
        self.depth = depth
        self.forward_url = forward_url
        self.search_whitelist = search_whitelist
        self.search_blacklist = search_blacklist

        self.db_session = db_session()

        # Data handed from one worker to the next for the same URL
        self.deliver = deliver

        self.process_set(self.url, self.NAME + "正在处理该链接......")

        # process() must return pairs of (results, new links), in the shape
        # [([ProcessResult, ...], [url, ...]), ...]
        for process_result, new_link in self.process():

            # Persist processing results to the database
            if process_result:
                for result in process_result:  # type: ProcessResult
                    # NOTE(review): only invalid results are written to the
                    # database — confirm valid ones are meant to be skipped.
                    if not result.valid:
                        max_try = 3
                        while max_try > 0:
                            try:
                                self.db_session.add(result.to_sqlalchemy())
                                break
                            except BaseException as e:
                                # On failure, rebuild the session and retry (up to 3 times)
                                logging.error("写入数据库出错，重试：" + str(e))
                                self.db_session.close()
                                self.db_session = db_session()
                                max_try -= 1

                self.db_session.commit()

            if new_link:
                # Push newly discovered links back onto the queue
                for link in new_link:
                    self.queue_push(url=link,
                                    forward_urls=self.forward_url + [url],
                                    depth=self.depth + 1)
        self.db_session.close()
        self.process_set('-', '空闲等待任务中', code=0)

    def summary_process(self, valid, reason, url, note=""):
        """Build a ProcessResult for *url* and log it at the matching level."""

        pr = ProcessResult(
            valid=valid,
            reason=reason,
            url=url,
            forward_urls=self.forward_url + [url],
            depth=self.depth + 1,
            note=note
        )

        if not valid:
            logging.warning(f"检测链接 {url} 时，发现：\n{pr}")
        else:
            logging.debug(f"链接 {url} 有效：\n{pr}")

        return pr

    def process(self) -> List[Tuple[List[ProcessResult], List[str]]]:
        """Actual processing hook, meant to be overridden by subclasses.

        Returns pairs of (processing results, newly found links); the base
        implementation does nothing.
        """
        return [([], [])]

    def process_set(self, url: str, status: str, note='', code=1, index=None):
        """Publish a worker slot's status to Redis (defaults to own slot)."""
        if index is None:
            index = self.worker_id
        self.redis.set('WebLexGuard:process-' + self.mark_id + ':' + str(index), json.dumps({
            'url': url,
            'code': code,
            'status': status,
            'note': note
        }))

    def queue_push(self, url: str, forward_urls: list, depth: int):
        """Append a URL (with referrer chain and depth) to the Redis queue."""
        queue_rpush(self.redis, self.queue_id, url, forward_urls, depth)

    def queue_pop(self) -> dict | None:
        """Pop the next queue entry, or None when the queue is empty."""
        return queue_lpop(self.redis, self.queue_id)

    def set_already_url(self, url):
        """Record a URL in the Redis visited-set."""
        self.redis.sadd(self.already_urls_id, url)

    def is_already_url(self, url) -> bool:
        """Return True if the URL is already in the Redis visited-set."""
        return self.redis.sismember(self.already_urls_id, url) == 1

    def search_all_links(self, url, soup):
        """Collect links from *soup*, filtered by the white/blacklists."""

        return htmlprocessor.get_links(soup, url,
                                       whitelist=self.search_whitelist,
                                       blacklist=self.search_blacklist)

    def get_normal(self, need_soup=True, need_links=True):
        """
        Fetch the commonly used page objects: response, soup and links.

        Values are cached on ``self.deliver`` so later calls (and subsequent
        workers for the same URL) reuse them. Returns a
        (response, soup, links) tuple; soup/links are None when not requested
        or not derivable. Returns None implicitly if the request fails.
        """
        # NOTE(review): assumes CustomDict yields None for attributes that
        # were never set — confirm against its implementation.
        try:
            if self.deliver.response is None:
                self.deliver.response = requests.get(self.url)

            response = self.deliver.response

            if self.deliver.soup is None and need_soup:
                # Only parse (X)HTML payloads; anything else yields no soup
                if 'text/html' in response.headers.get('Content-Type', '') or \
                        'application/xhtml+xml' in response.headers.get('Content-Type', ''):
                    self.deliver.soup = BeautifulSoup(response.content.decode(), 'lxml')
                else:
                    self.deliver.soup = None

            soup = self.deliver.soup

            if self.deliver.links is None and need_links:
                if soup:
                    self.deliver.links = self.search_all_links(self.url, soup)
                else:
                    self.deliver.links = None

            links = self.deliver.links

            # Hide artefacts the caller did not ask for
            if not need_soup:
                soup = None

            if not need_links:
                links = None

            return response, soup, links
        except Exception as e:
            logging.error(f'尝试获取服务器内容 {self.url} 失败。')
            logging.exception(e)


# Discover every WebWorker subclass defined by the sibling modules of this
# package and register it in ``worker_list``.
# NOTE(review): the path is relative to the process working directory —
# running from anywhere other than the project root will miss the plugins.
for filename in os.listdir('./core/webparser'):
    if filename.endswith('.py') and filename not in ('__init__.py', 'model.py'):
        module = importlib.import_module(__name__ + "." + filename[:-len(".py")])

        for attr_name in dir(module):
            attr = getattr(module, attr_name)
            # Register concrete subclasses only: skip the WebWorker base
            # itself and avoid duplicates when a class is re-exported by
            # several modules.  (Renamed the loop variable from ``object``,
            # which shadowed the builtin.)
            if inspect.isclass(attr) and \
                    issubclass(attr, WebWorker) and \
                    attr is not WebWorker and \
                    attr not in worker_list:
                worker_list.append(attr)
