import multiprocessing
import os
import httpx
import time
import re
from bs4 import BeautifulSoup
from lib.sqlite_ext import DBApiExt
from lib.process_queue import ProcessQueue
from lib import http_fetch

from lib.log import DBG, INFO, ERROR, EXCEPTION


class FetchQueueItem(object):
    """One unit of download work: a URL plus the name of the parse
    callback and the extra parameters carried through the pipeline.

    Instances travel across process boundaries on a multiprocessing
    queue, so they hold only plain, picklable attributes.
    """

    # Keys recognized inside ``params``.
    PARAMS_TYPE_KEY = "type"
    PARAMS_SAVE_DIR_KEY = "save_dir"

    def __init__(self, url, callback, params):
        self.url, self.callback, self.params = url, callback, params


class ParseQueueItem(object):
    """One unit of parse work: fetched HTML together with its origin URL,
    the name of the callback method that should parse it, and the params
    attached when the fetch was queued.

    Picklable by construction so it can cross process boundaries.
    """

    def __init__(self, url, callback, html, params):
        self.url, self.callback = url, callback
        self.html, self.params = html, params


class WebSiteDb(object):
    """Database schema for the crawler.

    Defines a ``class`` (site category) table and a ``resource`` table
    keyed by ``(id, class_id)``, plus a helper that creates both tables
    in a sqlite database file.
    """

    # Resource categories
    RES_TYPE_VIDEO = "video"
    RES_TYPE_IMAGE = "image"
    RES_TYPE_TEXT = "text"
    RES_TYPE_bt = "bt"  # NOTE: lowercase name kept for backward compatibility

    # Site category table
    TABLE_CLASS_NAME = "class"
    TABLE_CLASS_STRUCT = """
            id TEXT PRIMARY KEY,
            name TEXT NOT NULL,
            res_type TEXT NOT NULL,
            url TEXT NOT NULL,
            page_count INTEGER,
            res_count INTEGER
            """

    # Resource table.
    # BUG FIX: a comma was missing between the PRIMARY KEY and FOREIGN KEY
    # table constraints; SQLite only tolerates that as a legacy parser
    # quirk, and any other SQL engine rejects it outright.
    TABLE_RESOURCE_NAME = "resource"
    TABLE_RESOURCE_STRUCT = """
            id TEXT NOT NULL,
            class_id TEXT NOT NULL,
            name TEXT NOT NULL,
            url TEXT,
            res_type TEXT,
            head_image TEXT,
            text TEXT,
            images TEXT,
            video TEXT,
            label1 TEXT,
            label2 TEXT,
            label3 TEXT,
            parse INTEGER,
            finish INTEGER,
            PRIMARY KEY (id, class_id),
            foreign key(class_id) references class (id)
            """

    @classmethod
    def create_table(cls, db):
        """Create both tables (if absent) in the sqlite file ``db``.

        :param db: path of the sqlite database file
        """
        dbapi = DBApiExt.new_dbapi(db)
        try:
            DBApiExt.create_table(dbapi, cls.TABLE_CLASS_NAME, cls.TABLE_CLASS_STRUCT)
            DBApiExt.create_table(dbapi, cls.TABLE_RESOURCE_NAME, cls.TABLE_RESOURCE_STRUCT)
        finally:
            # Always release the connection, even if a CREATE fails.
            DBApiExt.close_dbapi(dbapi)


class WebSiteBase(object):
    """Multiprocess crawler skeleton.

    One fetch process downloads pages concurrently (async HTTP) and pushes
    their HTML onto ``parse_queue``; parse processes dispatch each item to
    the method named by ``item.callback`` to extract records into sqlite;
    an echo process periodically reports progress.  Subclasses override
    the ``_...`` hook methods and the class attributes below.
    """

    INDEX_URL = None       # site root, prefixed to relative resource links
    FETCH_TASK_COUNT = 10  # concurrent fetch tasks inside the fetch process
    DB = "sqlite.db"       # sqlite database file path
    SAVE_DIR = ""          # directory where fetched pages are saved

    def __init__(self):
        # fetch_queue carries FetchQueueItem, parse_queue carries
        # ParseQueueItem; error_queue receives ParseQueueItem instances
        # whose callback reported failure.
        self.fetch_queue = multiprocessing.Queue()
        self.parse_queue = multiprocessing.Queue()
        self.error_queue = multiprocessing.Queue()
        WebSiteDb.create_table(self.DB)
        self.import_class_request()

    def stop(self):
        """Close all inter-process queues."""
        for queue in (self.fetch_queue, self.parse_queue, self.error_queue):
            if queue:
                queue.close()

    def import_class_request(self):
        """Seed the class table (only when it is empty) and enqueue every
        class index page for download."""
        class_record_data = self.get_class_record_data()
        DBG(f"======={class_record_data}======")

        dbapi = DBApiExt.new_dbapi(self.DB)
        try:
            count = DBApiExt.count_record(dbapi=dbapi, table_name=WebSiteDb.TABLE_CLASS_NAME)
            if count == 0:
                DBApiExt.insert_record(
                    dbapi=dbapi,
                    table_name=WebSiteDb.TABLE_CLASS_NAME,
                    ins_datas=class_record_data
                )
        finally:
            # BUG FIX: the connection used to be closed only inside the
            # ``count == 0`` branch, leaking it on every later run.
            DBApiExt.close_dbapi(dbapi=dbapi)

        # Enqueue the download of each class index (first) page.
        for record in class_record_data:
            classid = record["id"]
            save_path = os.path.join(self.SAVE_DIR, "class", f"{classid}_1.html")
            self.fetch_queue.put(FetchQueueItem(
                url=record["url"],
                callback="parse_class_index_page",
                params={"id": classid, "save_path": save_path}
            ))

    def get_class_sub_page_url(self, class_index_url, page_num):
        """Hook: build the URL of page ``page_num`` of a class listing.

        :param class_index_url: URL of the class index (first) page
        :param page_num: 1-based page number
        :return: the page URL ("" by default; subclasses must override)
        """
        return ""

    @classmethod
    def _class_config(cls):
        """Hook: static category configuration for the site.

        :return: list of dicts such as ``[{"id": ..., "name": ...}]``
        """
        return []

    def get_class_record_data(self):
        """Return the category records used to seed the class table.

        :return: list of record dicts from :meth:`_class_config`
        """
        return self.__class__._class_config()

    def parse_class_index_page(self, from_url, html_text, dbapi, params):
        """Parse a class index page: persist page/resource counts, enqueue
        the remaining listing pages, then parse the first page itself.

        :param from_url: URL the HTML came from
        :param html_text: HTML content to parse
        :param dbapi: DB handle owned by the calling parse process
        :param params: params attached when the fetch was queued
        :return: True on success, False on error (``parse_work`` then
                 pushes the item onto the error queue)
        """
        try:
            DBG(f"解析 url: {from_url}, size:{len(html_text)}")
            page_count, res_count = self._parse_class_index_page(from_url, html_text)
            if page_count > 0:
                class_id = params.get('id')
                DBApiExt.update_record(
                    dbapi=dbapi,
                    table_name=WebSiteDb.TABLE_CLASS_NAME,
                    update_data={"page_count": page_count, "res_count": res_count},
                    where_data={"id": class_id}
                )

                DBG(f"共{page_count}页, 共{res_count}条数据")
                # Pages 2..page_count; page 1 is the index page we already hold.
                for page_num in range(2, page_count + 1):
                    page_url = self.get_class_sub_page_url(class_index_url=from_url, page_num=page_num)
                    save_path = os.path.join(self.SAVE_DIR, "class", f"{class_id}_{page_num}.html")
                    self.fetch_queue.put(FetchQueueItem(
                        url=page_url,
                        callback="parse_class_page",
                        params={"id": class_id, "save_path": save_path}
                    ))
            else:
                ERROR(f"未找到总页数: {page_count}, {res_count}")

            # The index page doubles as the first listing page.
            return self.parse_class_page(from_url=from_url, html_text=html_text, dbapi=dbapi, params=params)

        except Exception as exc:
            ERROR(f"解析class index page 异常: {exc}")
            # BUG FIX: callbacks previously returned None, which made
            # parse_work treat every item as a failure; report explicitly.
            return False

    def _parse_class_index_page(self, from_url, html_text):
        """Hook: extract (page_count, res_count) from a class index page.

        :param from_url: URL the HTML came from
        :param html_text: HTML content to parse
        :return: (page_count, res_count); (0, 0) means "not found"
        """
        return 0, 0

    def parse_class_page(self, from_url, html_text, dbapi, params):
        """Parse one listing page: insert records that are not yet in the
        resource table and enqueue their detail pages for download.

        :return: True on success, False on error
        """
        try:
            ins_datas = self._parse_class_page(from_url, html_text, params)
            # Keep only records not already present in the resource table.
            ins_datas = [
                rec for rec in ins_datas
                if not DBApiExt.exist_record(
                    dbapi=dbapi,
                    table_name=WebSiteDb.TABLE_RESOURCE_NAME,
                    where_data={"id": rec["id"], "class_id": rec["class_id"]}
                )
            ]
            DBG(f"包含了{len(ins_datas)}个新资源")
            if ins_datas:
                DBApiExt.insert_record(
                    dbapi=dbapi,
                    table_name=WebSiteDb.TABLE_RESOURCE_NAME,
                    ins_datas=ins_datas
                )

                for ins_data in ins_datas:
                    resid = ins_data['id']
                    # Record URLs are site-relative; prefix the site root.
                    url = f"{self.INDEX_URL}{ins_data['url']}"
                    save_path = os.path.join(self.SAVE_DIR, "res", f"{resid}.html")
                    self.fetch_queue.put(FetchQueueItem(
                        url=url,
                        callback="parse_res_page",
                        params={"id": resid, "save_path": save_path}
                    ))
            return True

        except Exception as exc:
            ERROR(f"解析class page 异常: {exc}， {params}")
            return False

    def _parse_class_page(self, from_url, html_text, params):
        """Extract resource records from one listing page's HTML.

        :param from_url: URL the HTML came from
        :param html_text: HTML content to parse
        :param params: params attached when the fetch was queued
        :return: list of record dicts ready for insertion (may be empty)
        """
        ins_datas = []
        try:
            DBG(f"解析 url: {from_url}, size:{len(html_text)}")
            class_id = params.get("id")
            soup = BeautifulSoup(html_text, features="html.parser")
            # Tags that each wrap one resource entry (link + thumbnail).
            a_list_content = self._parse_class_page_a_list_content(soup=soup)
            for a_content in a_list_content:
                ins_data = self.for_class_page_a_list(a_content=a_content, class_id=class_id)
                if ins_data:
                    ins_datas.append(ins_data)

            DBG(f"解析出了{len(ins_datas)}个资源")
        except Exception as exc:
            ERROR(f"解析class page 异常: {exc}")

        return ins_datas

    def _parse_class_page_a_list_content(self, soup):
        """Hook: return the tags that each contain one resource entry
        (an ``<a>`` link and an ``<img>`` thumbnail)."""
        return []

    def for_class_page_a_list(self, a_content, class_id):
        """Build one resource record from a listing entry.

        :param a_content: tag containing the entry's ``<a>`` and ``<img>``
        :param class_id: category the entry belongs to
        :return: record dict, or None when no resource id could be parsed
        """
        a = a_content.find("a")
        link_hover = a.attrs["href"]
        # Strip quote characters so the title is safe to embed later.
        title = a.attrs["title"].replace("\"", "").replace("\'", "")
        img = a_content.find("img")
        head_img_src = img.attrs["src"]
        res_id = self._get_res_id_from_href(link_hover)
        if res_id is None:
            return None
        return {
            "id": res_id,
            "class_id": class_id,
            "name": title,
            "res_type": WebSiteDb.RES_TYPE_VIDEO,
            "head_image": head_img_src,
            "url": link_hover,
            "parse": 0
        }

    def _get_res_id_from_href(self, href):
        """Extract the numeric resource id from a ``video_detail/<id>``
        style href.

        :return: the id as a string, or None (logged) when absent
        """
        match = re.search(r'video_detail/(\d+)', href)
        if match:
            return match.group(1)
        ERROR(f"未解析出资源ID: {href}")
        return None

    def parse_res_page(self, from_url, html_text, dbapi, params):
        """Parse a resource detail page and persist the extracted fields.

        :return: True when the page parsed to a dict and was written,
                 False otherwise
        """
        update_data = self._parse_res_page(from_url, html_text, params)
        if not isinstance(update_data, dict):
            return False

        update_data["parse"] = 1  # mark the resource as parsed
        DBApiExt.update_record(
            dbapi=dbapi,
            table_name=WebSiteDb.TABLE_RESOURCE_NAME,
            update_data=update_data,
            where_data={"id": update_data["id"]}
        )
        return True

    def _parse_res_page(self, from_url, html_text, params):
        """Hook: extract the update dict for one resource detail page.

        :return: dict of column updates (must include "id"), or None
        """
        return None

    async def do_process_fetch(self, fetch_item: "FetchQueueItem"):
        """Fetch one URL.  Text responses go to the parse queue; binary
        (stream) downloads invoke the callback with the save path."""
        url = fetch_item.url
        callback_str = fetch_item.callback
        params = fetch_item.params
        if isinstance(params, dict):
            stream = bool(params.get("stream"))
            save_path = params.get("save_path")
        else:
            stream = False
            save_path = None

        content = await http_fetch.async_fetch(url=url, save_type=("wb" if stream else "w"), save_path=save_path)
        if content is None:
            # Fetch failed; nothing to hand downstream.
            pass
        elif not stream:
            # Text content: hand the HTML to the parse processes.
            self.parse_queue.put(ParseQueueItem(
                url=url,
                html=content,
                callback=fetch_item.callback,
                params=fetch_item.params
            ))
        else:
            # Binary file: notify the callback with url and saved location.
            if callback_str:
                callback = getattr(self, callback_str, None)
                if callable(callback):
                    callback(url, save_path)

    def fetch_work(self, name, alias, db):
        """Entry point of the fetch process: drain ``fetch_queue`` with up
        to ``FETCH_TASK_COUNT`` concurrent async downloads."""
        pid = os.getpid()
        DBG(f"加载网页进程启动({name}): {alias}: {pid}")

        pqt = ProcessQueue(
            queue=self.fetch_queue,
            queue_params={"name": name, "alias": alias, "db": db},
            task_count=self.FETCH_TASK_COUNT,
            async_do_process=self.do_process_fetch
        )
        pqt.run()

    def parse_work(self, name, alias, db):
        """Entry point of a parse process: pop ParseQueueItem objects and
        dispatch each to the method named by ``item.callback``.  Exits
        after the queue has stayed empty for ~60 seconds."""
        pid = os.getpid()
        DBG(f"解析进程启动({name}): {alias}: {db}: {pid}")
        dbapi = DBApiExt.new_dbapi(db)
        empty_count = 0

        while empty_count <= 60:
            if self.parse_queue.empty():
                time.sleep(1)
                empty_count += 1
                DBG(f"解析队列空了-->({empty_count})")
                continue
            try:
                empty_count = 0
                parse_item: ParseQueueItem = self.parse_queue.get()
                callback_str = parse_item.callback
                # BUG FIX: a bare getattr raised AttributeError for unknown
                # callbacks, making the "callback not found" branch dead.
                callback = getattr(self, callback_str, None)
                if callable(callback):
                    is_success = callback(parse_item.url, parse_item.html, dbapi, parse_item.params)
                    if not is_success:
                        # Keep failed items for inspection / retry.
                        self.error_queue.put(parse_item)
                else:
                    ERROR(f"callback not found: {callback_str}")
            except Exception as exc:
                ERROR(f"解析异常33: {exc}")

    def echo_work(self, name, alias, db):
        """Entry point of the monitor process: periodically log the total
        resource count until every queue stays empty for ~60 seconds."""
        pid = os.getpid()
        DBG(f"回显进程启动({name}): {alias}: {pid}")
        dbapi = DBApiExt.new_dbapi(db)
        empty_count = 0
        while empty_count <= 60:
            if self.parse_queue.empty() and self.fetch_queue.empty() and self.error_queue.empty():
                empty_count += 1
                DBG(f"所有队列空了-->({empty_count})")
                time.sleep(1)
                continue
            empty_count = 0
            time.sleep(10)
            count = DBApiExt.count_record(
                dbapi=dbapi,
                table_name=WebSiteDb.TABLE_RESOURCE_NAME
            )
            DBG(f"资源总数: {count}")

    def run(self):
        """Start the fetch/parse/echo worker processes, wait for them all
        to finish, then close the queues."""
        p_params = [
            {"target": self.fetch_work, "params": {"name": "fetch", "alias": "请求网页", "db": self.DB}},
            {"target": self.parse_work, "params": {"name": "parse", "alias": "解析进程1", "db": self.DB}},
            {"target": self.parse_work, "params": {"name": "parse", "alias": "解析进程2", "db": self.DB}},
            {"target": self.echo_work, "params": {"name": "echo", "alias": "回显进程", "db": self.DB}},
        ]

        processes = []
        for spec in p_params:
            p = multiprocessing.Process(target=spec["target"], kwargs=spec["params"])
            processes.append(p)
            p.start()

        for p in processes:
            p.join()

        self.stop()
