import re, os
from website.website_base import WebSiteBase, FetchQueueItem, WebSiteDb
from bs4 import BeautifulSoup
from lib.log import DBG, INFO, ERROR, EXCEPTION
from lib.sqlite_ext import DBApiExt


class Luncaosp(WebSiteBase):
    """Scraper for the luncaosp video site.

    Walks category index pages, extracts one record per video card
    (title, cover image, detail-page URL) and resolves the playable
    video URL from each detail page.  Fetch scheduling and persistence
    are handled by :class:`WebSiteBase` / :class:`WebSiteDb`.
    """
    INDEX_URL = "https://luncaosp2gt.top"
    # Paths are resolved against the process working directory, so the
    # crawler is expected to be launched from the project root.
    cmdpath = os.path.abspath(os.getcwd())
    DB = os.path.join(cmdpath, "website", "luncaosp", "luncaosp.db")
    SAVE_DIR = os.path.join(cmdpath, "website", "luncaosp", "page")

    def __init__(self):
        super().__init__()

    @classmethod
    def _class_config(cls):
        """Return the static category list: site category id + display name."""
        return [
            {"id": "118", "name": "国产"},
            {"id": "119", "name": "日韩"},
            {"id": "120", "name": "欧美"},
            {"id": "121", "name": "动漫"},
            {"id": "122", "name": "高清资源"}
        ]

    def get_class_record_data(self):
        """Build a DB-ready record for every configured category.

        :return: list of dicts with id/url/name/res_type and zeroed
                 counters; ``page_count``/``res_count`` are filled in
                 later by :meth:`_parse_class_index_page`.
        """
        return [
            {
                "id": cfg["id"],
                "url": f"{self.INDEX_URL}/video_list/{cfg['id']}/1/index.html",
                "name": cfg["name"],
                "res_type": WebSiteDb.RES_TYPE_VIDEO,
                "page_count": 0,
                "res_count": 0,
            }
            for cfg in self._class_config()
        ]

    def get_class_sub_page_url(self, class_index_url, page_num):
        """Derive the URL of page *page_num* from a category's first-page URL.

        :param class_index_url: first-page URL, e.g.
            ".../video_list/118/1/index.html"
        :param page_num: page number to substitute into the URL
        :return: URL of the requested page
        """
        return re.sub(r"/([0-9]+)/index\.html", f"/{page_num}/index.html", class_index_url)

    def _parse_class_index_page(self, from_url, html_text):
        """Extract pagination info from a category index page.

        :param from_url: URL the HTML came from (used for logging only)
        :param html_text: raw HTML of the index page
        :return: ``(page_count, res_count)`` — ``(0, 0)`` when nothing
                 could be parsed
        """
        try:
            DBG(f"解析 url: {from_url}, size:{len(html_text)}")
            res_count = 0
            page_count = 0

            # The total resource count appears in the page as e.g. "共1234部".
            match1 = re.search(r'共(\d+)部', html_text)
            if match1 and match1.group(1):
                res_count = int(match1.group(1))

            # The last <option> of the page-selector dropdown carries the
            # total page count as its visible text.
            pattern = re.compile(r'<option value="(.*?)">(.*?)</option>', re.DOTALL)
            matches = pattern.findall(html_text)
            # Guard against pages without a selector: the original code
            # indexed matches[-1] unconditionally, and the resulting
            # IndexError silently discarded the already-parsed res_count.
            if matches:
                number = matches[-1][1].strip()
                if number:
                    page_count = int(number)

            return page_count, res_count

        except Exception as exc:
            # Best-effort parser: never propagate, just report zeroes.
            ERROR(f"解析class index page 异常: {exc}")
            return 0, 0

    def _parse_class_page_a_list_content(self, soup):
        """Return the per-video card elements found on a category page."""
        return soup.select(".space-sm")

    def for_class_page_a_list(self, a_content, class_id):
        """Convert one video card element into a resource record.

        :param a_content: BeautifulSoup element for a single video card
        :param class_id: id of the category the card belongs to
        :return: record dict, or ``None`` when no resource id could be
                 parsed from the link
        """
        div = a_content.select_one(".video-item-title")
        a = div.find("a")
        link_hover = a.attrs["href"]

        # Strip quote characters from the title (presumably to keep
        # downstream storage/markup safe — TODO confirm against WebSiteDb).
        title = a.contents[0]
        title = title.replace("\"", "").replace("\'", "")

        img = a_content.find("img")
        head_img_src = img.attrs["src"]

        res_id = self._get_res_id_from_href(link_hover)
        if res_id is None:
            return None
        return {
            "id": res_id,
            "class_id": class_id,
            "name": title,
            "res_type": WebSiteDb.RES_TYPE_VIDEO,
            "head_image": head_img_src,
            "url": link_hover,
            "parse": 0,
        }

    def _get_res_id_from_href(self, href):
        """Pull the numeric resource id out of a detail-page href.

        :param href: link of the form ".../video_detail/<id>..."
        :return: id as a string, or ``None`` (logged) when absent
        """
        match = re.search(r'video_detail/(\d+)', href)
        if match:
            return match.group(1)
        ERROR(f"未解析出资源ID: {href}")
        return None

    def _parse_res_page(self, html_text, params):
        """Extract the playable video URL from a resource detail page.

        :param html_text: raw HTML of the detail page
        :param params: fetch-queue params; the resource id is read from
            ``params["id"]``
        :return: ``{"id": ..., "video": ...}`` or ``None`` when the
                 player data could not be found
        """
        try:
            # The player config is embedded as inline JS:
            #   player_data={..."url":"<video url>"...}
            result = re.search(r'player_data=\{(.+?)\"url\":\"(.+?)\"', html_text)
            res_id = params.get("id")
            if result:
                return {"id": res_id, "video": result.group(2)}
            return None

        except Exception as exc:
            # Best-effort parser: never propagate, just report failure.
            ERROR(f"解析资源播放页异常: {exc}")
            return None

    def save_image_callback(self, from_url, save_path):
        """Post-download hook for cover images; nothing to do for this site."""
        pass

    def run(self):
        """Delegate the crawl loop to the base class."""
        super().run()
