import re, os
from website.website_base import WebSiteBase, FetchQueueItem, WebSiteDb
from bs4 import BeautifulSoup
from lib.log import DBG, INFO, ERROR, EXCEPTION
from website.homeweb.homeweb import Homeweb


class Yxg(Homeweb):
    """
    Scraper for "https://www.yxg5.homes/cn/home/web/ 欲仙阁".

    Inherits the fetch/parse pipeline from ``Homeweb``; this subclass only
    supplies site-specific configuration and page-parsing hooks.
    """
    INDEX_URL = "https://www.yxg5.homes"
    # Paths are resolved against the current working directory at import time,
    # so the process must be started from the project root.
    cmdpath = os.path.abspath(os.getcwd())
    DB = os.path.join(cmdpath, "website", "yxg", "yxg.db")
    SAVE_DIR = os.path.join(cmdpath, "website", "yxg", "page")

    def __init__(self):
        super().__init__()

    @classmethod
    def _class_config(cls):
        """Return the site's category list: display name + site category id."""
        return [
            {"name": "熟母少妇", "id": "20"},
            {"name": "网红直播", "id": "21"},
            {"name": "自拍偷拍", "id": "22"},
            {"name": "强奸乱伦", "id": "23"},
            {"name": "高清国产", "id": "24"},
            {"name": "韩国专区", "id": "25"},
            {"name": "日本有码", "id": "26"},
            {"name": "日本无码", "id": "27"},
            {"name": "欧美情色", "id": "28"},
            {"name": "动漫卡通", "id": "29"},
            {"name": "三级伦理", "id": "30"},
        ]

    def _parse_class_page_a_list_content(self, soup):
        """Return the per-item nodes (``.portfolio-item``) of a category page."""
        return soup.select(".portfolio-item")

    def _parse_class_index_page(self, from_url, html_text):
        """
        Parse a category index page and extract pagination totals.

        The pagination footer reads like "共34752条数据,当前1/2172页"; the
        "尾页" (last page) link's href encodes the total page count, e.g.
        ``.../2172.html``.

        :param from_url: URL the HTML came from (used for logging only)
        :param html_text: raw HTML to parse
        :return: ``(page_count, res_count)`` tuple; ``(0, 0)`` when the
            pagination block is missing or cannot be parsed
        """
        try:
            DBG(f"解析class首页 url: {from_url}, size:{len(html_text)}")
            page_count = 0
            res_count = 0

            soup = BeautifulSoup(html_text, features="html.parser")
            content = soup.select_one(".news-feed-btn")
            if content is None:
                # No pagination block on this page — nothing to count.
                return 0, 0
            for a in content.find_all("a"):
                if a.text == "尾页":
                    href = a.attrs["href"]
                    # Escape the dot so only a literal ".html" suffix matches
                    # (the original r'(\d+).html' matched any character there).
                    match = re.search(r'(\d+)\.html', href)
                    if match:
                        page_count = int(match.group(1))
                        # Each listing page shows 60 items — TODO confirm
                        # against the site's actual page size.
                        res_count = page_count * 60
                    break

            return page_count, res_count

        except Exception as exc:
            # Best-effort parse: log and report "no pages" instead of raising.
            ERROR(f"解析class index page 异常: {exc}")
            return 0, 0

