# -*- coding: utf-8 -*-


import os
import re
import sys
import json
import js2py
import requests
from urllib.parse import quote
from prettytable import PrettyTable
# 设置标题行颜色 (需安装 colorama)
from colorama import Fore
from bs4 import BeautifulSoup, ResultSet, Tag
from transformers.modeling_gguf_pytorch_utils import read_field


def parse_selector(rule: str) -> tuple:
    """
    Parse a selector rule into its element type, name and optional index.

    :param rule: rule string such as "tag.a:0" or "class.btn:2"
    :return: (elem_type, selector, index); index is None when the rule
        carries no ":n" suffix
    :raises ValueError: if the rule matches none of the known formats
    """
    # One pattern per supported element type; group 1 captures the selector
    # name, group 2 the optional numeric index after ":".
    type_map = {
        "tag": r"^tag\.([^\s:]+):?(\d*)$",
        "class": r"^class\.([^\s:]+):?(\d*)$",
        "id": r"^id\.([^\s:]+):?(\d*)$"
    }

    for elem_type, pattern in type_map.items():
        match = re.match(pattern, rule)
        if match:
            selector = match.group(1)
            index = int(match.group(2)) if match.group(2) else None
            return (elem_type, selector, index)

    raise ValueError(f"Invalid selector rule: {rule}")

class BookSourceParser:
    """Locate elements in an HTML document using composite selector rules."""

    def __init__(self, html: str):
        # Parse the document once; every lookup walks this tree.
        self.soup = BeautifulSoup(html, "html.parser")

    def _parse_rule(self, rule: str) -> list:
        """Split a composite rule such as "class.grid@tag.a:0" into
        (elem_type, selector, index) steps."""
        return [parse_selector(step) for step in rule.split("@")]

    def find(self, rule: str):
        """Apply each rule step to the results of the previous step and
        return the surviving elements."""
        matches = [self.soup]
        for elem_type, selector, index in self._parse_rule(rule):
            next_matches = []
            for node in matches:
                hits = self._find_children(node, elem_type, selector)
                if index is not None:
                    # Keep only the indexed hit; out-of-range drops the node.
                    hits = [hits[index]] if index < len(hits) else []
                next_matches.extend(hits)
            matches = next_matches
        return matches

    def _find_children(self, parent: Tag, elem_type: str, selector: str):
        """Dispatch one step to the matching BeautifulSoup lookup."""
        lookups = {
            "tag": lambda: parent.find_all(selector),
            "class": lambda: parent.find_all(class_=selector),
            "id": lambda: parent.find_all(id=selector),
        }
        handler = lookups.get(elem_type)
        return handler() if handler else []

# ------------------------- Rule-parsing functions -------------------------
def parse_selector_rule_old(rule_str):
    """
    Parse a combined selector rule string.

    Example input: "class.grid@tag.tr!0", meaning:
    - parent selector .grid
    - child element tag tr
    - exclude the first element

    :param rule_str: raw rule text
    :return: (parent_selector, child_selector, exclude_index);
        exclude_index is None when no "!" marker is present
    """
    # Split off the exclusion marker.
    if '!' in rule_str:
        rule_part, exclude_str = rule_str.rsplit('!', 1)
        exclude_index = int(exclude_str)
    else:
        rule_part = rule_str
        exclude_index = None

    # Split parent and child selector parts.
    parts = rule_part.split('@')
    parent_selector = ""
    child_selector = ""

    # Parent selector only exists when the rule contains "@".
    if len(parts) > 1:
        parent_rules = parts[0].split('.')
        if parent_rules[0] == 'class':
            # Joins multiple class names into one CSS selector.
            parent_selector = f'.{" ".join(parent_rules[1:])}'

    # Child selector. Guarded: a rule without a "type.name" pair previously
    # raised IndexError, and a leftover debug print referenced parent_rules
    # before assignment whenever the rule had no "@".
    child_rule = parts[-1].split('.')
    if len(child_rule) > 1:
        if child_rule[0] == 'tag':
            child_selector = child_rule[1]
        elif child_rule[0] == 'class':
            child_selector = f'.{child_rule[1]}'

    return parent_selector.strip(), child_selector.strip(), exclude_index


def parse_selector_rule(rule_str: str) -> tuple:
    """
    Parse an extended selector rule. Supports:
    - typed selectors (tag/class/id)
    - index selection (e.g. "tag.a.0")
    - exclusion marker ("!")
    - extraction marker ("@text", "@href", "@src")

    Examples:
        "tag.a.0@text"       -> text of the first <a> tag
        "class.grid!1@tag.tr" -> skip one .grid element, then find <tr>

    :param rule_str: raw rule text
    :return: a 3-tuple:
        (
            {"type": "tag", "selector": "a", "index": 0},  # main selector
            "text",   # extraction type (text/href/src) or None
            1         # exclusion count or None
        )
    :raises ValueError: if the main selector does not match <type>.<name>

    Note: the previous annotation referenced typing.Tuple/Union without
    importing typing, which raised NameError at module import; the plain
    "tuple" annotation matches parse_selector above.
    """
    # Split off the extraction marker ("@text" / "@href" / "@src").
    extract_type = None
    if "@" in rule_str:
        parts = rule_str.split("@")
        rule_part, extract_part = parts[0], parts[1]
        if extract_part in ["text", "href", "src"]:
            extract_type = extract_part
    else:
        rule_part = rule_str

    # Split off the exclusion marker ("!<n>").
    exclude_index = None
    if "!" in rule_part:
        rule_part, exclude_str = rule_part.rsplit("!", 1)
        exclude_index = int(exclude_str)

    # Parse the main selector: <type>.<name>[.<index>]
    selector_pattern = re.compile(r"(tag|class|id)\.([\w-]+)(?:\.(\d+))?")
    match = selector_pattern.match(rule_part)

    if not match:
        raise ValueError(f"Invalid rule format: {rule_part}")

    selector_type = match.group(1)
    selector_value = match.group(2)
    index = int(match.group(3)) if match.group(3) else None

    return (
        {"type": selector_type, "selector": selector_value, "index": index},
        extract_type,
        exclude_index
    )

def parse_rule_value(rule_str):
    """
    Parse a single rule string. Supported formats:
    - selector path:         "selector@step1@step2"
    - regex filter:          "selector@step##regex_pattern"
    - JavaScript processing: "selector@step<js>js_code</js>"
    - mixed:                 "selector@step##regex<js>js_code</js>"

    :param rule_str: raw rule text
    :return: dict with keys "selector" (str), "steps" (list of str),
        "regex" (str or None), "js" (str or None)
    """
    rule = {
        "selector": "",
        "steps": [],
        "regex": None,
        "js": None
    }

    # Extract and remove the embedded JavaScript block, if any.
    js_match = re.search(r'<js>(.*?)</js>', rule_str, re.DOTALL)
    if js_match:
        rule["js"] = js_match.group(1).strip()
        rule_str = rule_str.replace(js_match.group(0), "")

    # Split off the regex filter.
    if "##" in rule_str:
        selector_part, regex_part = rule_str.split("##", 1)
        rule["regex"] = regex_part.strip()
    else:
        selector_part = rule_str

    # The first "@"-separated token is the selector; the rest are
    # processing steps. str.split always yields at least one element,
    # so no length guard is needed.
    parts = selector_part.split("@")
    rule["selector"] = parts[0].strip()
    rule["steps"] = [p.strip() for p in parts[1:] if p.strip()]

    return rule


# ------------------- Apply parsing steps to HTML elements -------------------
def apply_rule_steps(element, steps):
    """
    Apply a sequence of parsing steps to an HTML element.

    Supported steps:
    - "tag.<name>": descend to the first child element with that tag
    - "text":       extract the stripped text content
    - "href":       read the "href" attribute

    :param element: BeautifulSoup element (or any object exposing
        find/get_text/get)
    :param steps: iterable of step strings
    :return: the final element/value, or None when a "tag." lookup
        found nothing (previously the next step then raised
        AttributeError on None)
    """
    result = element
    for step in steps:
        # A failed lookup yields None; stop instead of crashing.
        if result is None:
            return None
        if step.startswith('tag.'):
            tag_name = step.split('.')[1]
            result = result.find(tag_name)
        elif step == 'text':
            result = result.get_text(strip=True)
        elif step == 'href':
            result = result.get('href')
    return result






# ------------------------- Book-info extraction -------------------------
def fetch_book_list(search_url_rules, rule_search, keyword="玄幻"):
    """
    Crawler logic for a single book-source search.

    :param search_url_rules: the source's search-URL template; "{{key}}"
        is substituted with the GBK-encoded, URL-quoted keyword
    :param rule_search: the source's ruleSearch parsing rules (dict)
    :param keyword: search keyword
    :return: list of book-info dicts (capped at 20), or [] on any failure
    """

    """优化后的书籍搜索函数"""
    try:
        # ================== Request phase ==================
        encoded_keyword = quote(keyword.encode('gbk'))
        search_url = search_url_rules.replace("{{key}}", encoded_keyword)
        # NOTE(review): `if 1:` is a debug override — the URL built above is
        # discarded and this hard-coded endpoint is always used instead.
        # Confirm whether this was meant to be temporary.
        if 1:
            search_url = 'https://www.ishubao.org/modules/article/search.php'
            print("请求地址：", search_url)

            headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
            "x-cache-status": "EXPIRED",
            }

            # response = requests.get(search_url, headers=headers, timeout=10)
            # response.encoding = 'utf-8'  # force utf-8 decoding

            # POST form data; "s" presumably is a site token — verify against the target site.
            post_data = {"s": 12839966820499815668, "entry": 1, "ie": "gbk", "q": encoded_keyword}

            response = requests.post(search_url, headers=headers, timeout=10, data=post_data)
            response.encoding = 'utf-8'  # force utf-8 decoding



        # ================== Rule-parsing phase ==================
        soup = BeautifulSoup(response.text, 'html.parser')


        # Parse the rule that locates the book-list container.
        # NOTE(review): parse_selector_rule returns (dict, extract_type,
        # exclude_index), so parent_selector below is a dict, not a CSS
        # string — select_one(parent_selector) will fail. This unpacking
        # looks written for parse_selector_rule_old; confirm intent.
        booklist_rule = rule_search.get("bookList", "")
        parent_selector, child_selector, exclude_index = parse_selector_rule(booklist_rule)

        # Locate the book-list container
        container = soup.select_one(parent_selector) if parent_selector else soup
        if not container:
            raise ValueError("书籍列表容器定位失败")

        # Debug output for the resolved selectors and row count
        print(f"父级选择器: {parent_selector or 'root'}")
        print(f"子级选择器: {child_selector}")
        print(f"排除前 {len(container.find_all(child_selector))} 个元素")

        # find_all instead of select for performance
        book_rows = container.find_all(child_selector, recursive=False)  # direct children only

        # Apply the exclusion rule
        if exclude_index is not None and len(book_rows) > exclude_index:
            print(f"排除前 {exclude_index + 1} 个元素")
            book_rows = book_rows[exclude_index + 1:]

        print(f"实际获取 {len(book_rows)} 个书籍条目")

        # ================== Extraction phase ==================
        books = []
        field_rules = {
            'name': rule_search.get('name', ''),
            'author': rule_search.get('author', ''),
            'coverUrl': rule_search.get('coverUrl', ''),
            'bookUrl': rule_search.get('bookUrl', ''),
            'lastChapter': rule_search.get('lastChapter', '')
        }

        for row in book_rows:
            book_info = {}

            # Walk each field's parsing rule
            for field, rule_str in field_rules.items():
                if not rule_str:
                    continue
                try:
                    # Parse the composite rule
                    print(row, rule_str)
                    # rule = parse_rule_value(rule_str)
                    # NOTE(review): parse_selector_rule returns a tuple, but
                    # the code below subscripts it with string keys
                    # (rule['selector']) — that raises TypeError, which the
                    # except below converts into a None field. The
                    # commented-out parse_rule_value returns the dict shape
                    # this code expects; confirm which parser was intended.
                    rule = parse_selector_rule(rule_str)
                    # rule = parse_selector(rule_str)
                    print(rule)
                    # Locate the element
                    current_element = row

                    if rule['selector']:
                        current_element = row.select_one(rule['selector'])
                        if not current_element:
                            continue

                    # Apply the processing steps
                    value = apply_rule_steps(current_element, rule['steps'])
                    print(value)
                    # NOTE(review): debug exit — the process terminates here
                    # on the first successfully parsed field.
                    sys.exit()
                    # Regex post-processing
                    if rule['regex'] and value:
                        value = re.sub(rule['regex'], '', value).strip()

                    # # JS post-processing (example: cover-URL generation)
                    if rule['js'] and 'coverUrl' in field:
                        book_id = re.search(r'/(\d+)/', value).group(1)
                        exec_js = f"""
                            var id = {book_id};
                            var iid = parseInt(id/1000);
                            `https://www.ishubao.org/files/article/image/${{iid}}/${{id}}/${{id}}s.jpg`
                            """
                        value = js2py.eval_js(exec_js)

                    book_info[field] = value
                except Exception as e:
                    print(f"字段[{field}]解析失败: {str(e)}")
                    book_info[field] = None
                # NOTE(review): debug break — only the first field is tried.
                break
            # NOTE(review): debug break — only the first row is processed,
            # which also makes the append below unreachable.
            break
            if book_info.get('name'):  # keep only entries that have a name
                books.append(book_info)

        return books[:20]  # cap the number of results

    except Exception as e:
        print(f"书籍搜索失败: {str(e)}")
        return []



# ------------------- Parse the book-source JSON file -------------------
def parse_book_sources(json_file):
    """
    Load a book-source JSON file and normalise each entry.

    :param json_file: path to a JSON file containing a list of sources
    :return: list of dicts with name, group, search/explore URLs and
        parsed ruleBookInfo rules
    """
    with open(json_file, 'r', encoding='utf-8') as fh:
        raw_sources = json.load(fh)

    parsed_sources = []
    for entry in raw_sources:
        # Basic source metadata.
        info = {
            "name": entry.get("bookSourceName", ""),
            "source_group": entry.get("bookSourceGroup", ""),
            "searchUrl": entry.get('searchUrl', ''),
            "exploreUrl": [],
            "ruleBookInfo": entry.get('ruleBookInfo', {}),
            "ruleSearch": entry.get('ruleSearch', {}),
        }

        # exploreUrl may be a JSON-encoded string or an already-parsed value.
        explore_raw = entry.get("exploreUrl", "")
        if isinstance(explore_raw, str):
            try:
                # Prefer the JSON-array form (list of {title, url} objects).
                items = json.loads(explore_raw.replace("\\", ""))
                info["exploreUrl"] = [
                    {"title": it.get("title", ""), "url": it.get("url", "")}
                    for it in items
                ]
            except json.JSONDecodeError:
                # Fall back to newline-separated "title::url" pairs.
                for line in explore_raw.split("\n"):
                    if "::" not in line:
                        continue
                    title, url = line.split("::", 1)
                    info["exploreUrl"].append(
                        {"title": title.strip(), "url": url.strip()}
                    )
        else:
            info["exploreUrl"] = explore_raw

        # String rules get parsed; anything else passes through untouched.
        info["ruleBookInfo"] = {
            key: parse_rule_value(value) if isinstance(value, str) else value
            for key, value in entry.get("ruleBookInfo", {}).items()
        }

        parsed_sources.append(info)

    return parsed_sources

# ------------------------- Parse book sources -------------------------
def parse_data(json_file = "book_source.json"):
    """
    Parse the book-source file and run one search against each source.

    :param json_file: path to the book-source JSON file
    """

    # Parse the book sources
    sources = parse_book_sources(json_file)  # uses the parse_book_sources helper above

    # Print the parse result of the first book source
    i=1
    for parsed_data  in sources:
        print(f"第{i}个 书源名称: {parsed_data['name']}")

        books = fetch_book_list(
            search_url_rules=parsed_data['searchUrl'],
            rule_search=parsed_data['ruleSearch'],
            keyword="古暖暖江尘御"
        )

        # Print the results
        print(f"找到 {len(books)} 本书籍：")
        for book in books:
            print(f"""
                书名：{book.get('name', '未知')}
                作者：{book.get('author', '未知')}
                封面：{book.get('coverUrl', '无')}
            """)
        # NOTE(review): debug exit — the process terminates after the first
        # source, so the increment and break below never execute.
        sys.exit()

        i+=1
        break

# ------------------------- Main program -------------------------
def main():
    """Program entry point: run the parsing pipeline, then terminate."""
    parse_data()
    raise SystemExit


if __name__ == '__main__':
    main()
