import re, os
from website.website_base import WebSiteBase, FetchQueueItem, WebSiteDb
from bs4 import BeautifulSoup
from lib.log import DBG, INFO, ERROR, EXCEPTION
from lib.sqlite_ext import DBApiExt
from website.homeweb.homeweb import Homeweb


class Ziluoli(Homeweb):
    """Site scraper for ziluoli9.skin, built on the Homeweb crawl pipeline.

    Class attributes:
        INDEX_URL: site root used to build category/page URLs.
        DB: absolute path of this site's SQLite database file.
        SAVE_DIR: directory where fetched pages are saved.
    """
    INDEX_URL = "https://www.ziluoli9.skin"
    # NOTE(review): paths are anchored at the process CWD at import time —
    # running from a different directory changes DB/SAVE_DIR; confirm intended.
    cmdpath = os.path.abspath(os.getcwd())
    DB = os.path.join(cmdpath, "website", "ziluoli", "ziluoli.db")
    SAVE_DIR = os.path.join(cmdpath, "website", "ziluoli", "page")

    def __init__(self):
        super().__init__()

    @classmethod
    def _class_config(cls):
        """Return the category descriptors for this site.

        :return: list of dicts with keys "name" (display name) and
                 "id" (the site's numeric category id as a string).
        """
        class_list = [
            {"name": "女神学生", "id": "21"},
            {"name": "美女直播", "id": "22"},
            {"name": "人妻系列", "id": "23"},
            {"name": "强奸乱伦", "id": "24"},
            {"name": "自拍偷拍", "id": "25"},
            {"name": "制服诱惑", "id": "26"},
            {"name": "巨乳系列", "id": "27"},
            {"name": "自慰系列", "id": "28"},
            {"name": "国产视频", "id": "29"},
            {"name": "无码视频", "id": "30"},
            {"name": "有码视频", "id": "31"},
            {"name": "中文字幕", "id": "32"},
            {"name": "日韩精品", "id": "33"},
            {"name": "欧美精品", "id": "34"},
            {"name": "动漫精品", "id": "35"},
            {"name": "三级伦理", "id": "36"}
        ]

        return class_list

    def _parse_class_page_a_list_content(self, soup):
        """Select the per-resource ".item" nodes from a parsed category page.

        :param soup: BeautifulSoup document of a category listing page
        :return: list of Tag elements, one per listed resource
        """
        return soup.select(".item")

    def for_class_page_a_list(self, a_content, class_id):
        """Build a DB record dict from one ".item" listing node.

        :param a_content: Tag for one ".item" entry; must contain an <a>
                          (href/title) and an <img> with "data-original".
        :param class_id: category id the entry belongs to
        :return: record dict for the resource table, or None when no
                 resource id can be extracted from the link.
        """
        a = a_content.find("a")
        link_hover = a.attrs["href"]
        # Strip quote characters from the title so it is safe to embed later.
        title = a.attrs["title"].replace("\"", "").replace("\'", "")
        img = a_content.find("img")
        # Lazy-loaded images keep the real URL in "data-original", not "src".
        head_img_src = img.attrs["data-original"]
        res_id = self._get_res_id_from_href(link_hover)
        if res_id is None:
            return None
        return {
            "id": res_id,
            "class_id": class_id,
            "name": title,
            "res_type": WebSiteDb.RES_TYPE_VIDEO,
            "head_image": head_img_src,
            "url": link_hover,
            "parse": 0
        }

    def _parse_class_index_page(self, from_url, html_text):
        """Extract pagination info from a category index page.

        :param from_url: URL the HTML came from (used for logging only)
        :param html_text: raw HTML text to parse
        :return: tuple (page_count, res_count); (0, 0) when nothing parses
        """
        try:
            DBG(f"解析 url: {from_url}, size:{len(html_text)}")
            # Banner variants seen on the site:
            #   "共34752条数据,当前1/2172页"
            #   "共45371条数据&nbsp;当前:1/2161页"
            res_count = 0
            page_count = 0
            match1 = re.search(r'共(\d+)条数据', html_text)
            if match1:
                # group(1) of (\d+) is always a non-empty digit string here.
                res_count = int(match1.group(1))

            # The last "pagelink_b" anchor carries the highest page number.
            pattern = re.compile(r'<a class="pagelink_b"(.*?)page/(\d+)(.*?)</a>', re.DOTALL)
            matches = pattern.findall(html_text)
            # Guard the empty case: a page without pagination links is normal
            # (single-page category) — previously this raised IndexError and
            # was mis-logged as an error by the except below.
            if matches:
                number = matches[-1][1].strip()
                if number:
                    page_count = int(number)

            return page_count, res_count

        except Exception as exc:
            # Any other parse failure: log and report "nothing found".
            ERROR(f"解析class index page 异常: {exc}")
            return 0, 0
