# -*- coding: utf-8 -*-


import os
import re
import sys
import json
import requests
from bs4 import BeautifulSoup, ResultSet, Tag
from urllib.parse import quote
from prettytable import PrettyTable
# Colored table title support (requires the colorama package)
from colorama import Fore

# ------------------------- Rule parsing helpers -------------------------
def parse_selector_rule(rule_str):
    """Translate a compound legado-style selector rule into CSS pieces.

    Example rule: ``"class.grid@tag.tr!0"``.

    :param rule_str: rule text, optionally ``parent@child`` with a
        trailing ``!N`` index marker on the child part.
    :return: tuple ``(parent_selector, child_selector, index)`` where the
        selectors are CSS strings usable with BeautifulSoup's ``select``
        and ``index`` is the ``!N`` value (0 when no marker is present).
    """
    def as_css(token):
        # "class.a.b" -> ".a b" (handles multiple classes),
        # "tag.tr" -> "tr"; anything else passes through untouched.
        if token.startswith('class.'):
            return '.' + token.split('.', 1)[1].replace('.', ' ')
        if token.startswith('tag.'):
            return token.split('.', 1)[1]
        return token

    segments = rule_str.split('@')
    child_rule = segments[-1]

    # A parent part exists only when the rule actually contains '@',
    # and only class./tag. prefixes yield a parent selector.
    parent_selector = ''
    if len(segments) > 1 and segments[0].startswith(('class.', 'tag.')):
        parent_selector = as_css(segments[0])

    # The child part may carry a "!N" positional index suffix.
    child_token, *index_part = child_rule.split('!')
    index = int(index_part[0]) if index_part else 0

    return parent_selector, as_css(child_token), index


def parse_rule_value(rule_str):
    """Parse a single extraction rule string into its components.

    Supported notations:
      - selector path:  ``"selector@step1@step2"``
      - regex filter:   ``"selector@step##pattern"``
      - JS post-step:   ``"selector@step<js>code</js>"``
      - combined:       ``"selector@step##pattern<js>code</js>"``

    :param rule_str: raw rule text.
    :return: dict with keys ``selector`` (str), ``steps`` (list of str),
        ``regex`` (str or None) and ``js`` (str or None).
    """
    parsed = {"selector": "", "steps": [], "regex": None, "js": None}

    # Peel off an embedded <js>...</js> block first, if any.
    js_block = re.search(r'<js>(.*?)</js>', rule_str, re.DOTALL)
    if js_block is not None:
        parsed["js"] = js_block.group(1).strip()
        rule_str = rule_str.replace(js_block.group(0), "")

    # Split off the trailing regex filter introduced by "##".
    selector_part, sep, regex_part = rule_str.partition("##")
    if sep:
        parsed["regex"] = regex_part.strip()

    # What remains is "selector@step@step...".
    tokens = selector_part.split("@")
    parsed["selector"] = tokens[0].strip()
    parsed["steps"] = [t.strip() for t in tokens[1:] if t.strip()]

    return parsed


# ------------------------- Apply parse steps to an HTML element -------------------------
def apply_rule_steps(element, steps):
    """Apply a chain of extraction steps to a BeautifulSoup element.

    Supported steps:
      - ``"tag.NAME"``: descend into the first child tag called NAME.
      - ``"tag.NAME.IDX"``: descend into the IDX-th matching child tag
        (the rules in this file use e.g. ``tag.a.1`` for the second link).
      - ``"text"``: collapse to stripped text content.
      - ``"href"``: read the href attribute.

    :param element: a BeautifulSoup Tag (or anything duck-typed like one).
    :param steps: list of step strings from :func:`parse_rule_value`.
    :return: the final value, or None as soon as any step fails to match
        (previously this raised AttributeError on a missed lookup).
    """
    result = element
    for step in steps:
        if result is None:
            # A previous step failed to match; bail out instead of raising.
            return None
        if step.startswith('tag.'):
            parts = step.split('.')
            tag_name = parts[1]
            if len(parts) > 2 and parts[2].isdigit():
                # Numeric suffix selects the n-th match, e.g. "tag.a.1".
                # Previously the suffix was silently ignored and the first
                # match was always returned.
                matches = result.find_all(tag_name)
                idx = int(parts[2])
                result = matches[idx] if idx < len(matches) else None
            else:
                result = result.find(tag_name)
        elif step == 'text':
            result = result.get_text(strip=True)
        elif step == 'href':
            result = result.get('href')
    return result



# ------------------------- Parse the book-source JSON file -------------------------
def parse_book_sources(json_file):
    """Read a legado-style book-source JSON file and normalize every entry.

    For each source this extracts the display name/group and search URL,
    normalizes ``exploreUrl`` (which may be a JSON array serialized into a
    string, newline-separated ``title::url`` pairs, or already a list) and
    parses every string rule in ``ruleBookInfo`` with
    :func:`parse_rule_value`.

    :param json_file: path to the book-source JSON file (UTF-8).
    :return: list of normalized source dicts.
    """
    with open(json_file, 'r', encoding='utf-8') as fh:
        raw_sources = json.load(fh)

    normalized = []
    for src in raw_sources:
        entry = {
            "name": src.get("bookSourceName", ""),
            "source_group": src.get("bookSourceGroup", ""),
            "searchUrl": src.get('searchUrl', ''),
            "exploreUrl": [],
            "ruleBookInfo": {},
            "ruleSearch": src.get('ruleSearch', {}),
        }

        explore_raw = src.get("exploreUrl", "")
        if isinstance(explore_raw, str):
            try:
                # Usually a JSON array serialized into a string; strip the
                # backslash escapes before decoding.
                items = json.loads(explore_raw.replace("\\", ""))
                entry["exploreUrl"] = [
                    {"title": it.get("title", ""), "url": it.get("url", "")}
                    for it in items
                ]
            except json.JSONDecodeError:
                # Fallback: newline-separated "title::url" pairs.
                for line in explore_raw.split("\n"):
                    if "::" in line:
                        title, url = line.split("::", 1)
                        entry["exploreUrl"].append(
                            {"title": title.strip(), "url": url.strip()})
        else:
            # Already structured (e.g. a list) — keep as-is.
            entry["exploreUrl"] = explore_raw

        # String rules get fully parsed; anything else is kept untouched.
        entry["ruleBookInfo"] = {
            key: parse_rule_value(val) if isinstance(val, str) else val
            for key, val in src.get("ruleBookInfo", {}).items()
        }

        normalized.append(entry)

    return normalized


# ------------------------- Book search / info extraction -------------------------
def fetch_book_list(search_url_rules, rule_search, keyword="玄幻"):
    """Search a book source for *keyword* and scrape the result rows.

    Fixes over the previous version: ``book_list`` is now initialized
    (it was referenced without ever being defined -> NameError), the
    leftover debug ``sys.exit()`` calls that killed the whole process are
    removed, and the dead code that indexed ``booklist_rule`` (a string)
    as a dict has been replaced with the tuple returned by
    :func:`parse_selector_rule`.

    :param search_url_rules: search URL template containing ``{{key}}``.
    :param rule_search: the source's ``ruleSearch`` rule dict (bookList,
        name, author, coverUrl, ... entries in legado rule syntax).
    :param keyword: search keyword; GBK-encoded into the URL because the
        target site expects GBK query strings.
    :return: list of dicts with ``name``/``author``/``coverUrl`` keys for
        each result row; empty list when the container is not found.
    """
    # Build the request URL (the site expects a GBK-encoded keyword).
    encoded_keyword = quote(keyword.encode('gbk'))
    search_url = search_url_rules.replace("{{key}}", encoded_keyword)

    # Pretend to be a regular browser.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }

    response = requests.get(search_url, headers=headers)
    response.encoding = 'utf-8'  # NOTE(review): queries are GBK — confirm responses really are UTF-8
    soup = BeautifulSoup(response.text, 'html.parser')

    # Resolve the result rows via the bookList rule, e.g. "class.grid@tag.tr!0".
    booklist_rule = rule_search.get("bookList", "")
    parent_selector, child_selector, index = parse_selector_rule(booklist_rule)

    container = soup
    if parent_selector:
        container = soup.select_one(parent_selector)
        if container is None:
            print(f"未找到容器元素: {parent_selector}")
            return []

    book_rows = container.select(child_selector)

    # "!N" excludes everything up to and including row N (typically the
    # table header row).
    if '!' in booklist_rule:
        book_rows = book_rows[index + 1:]

    book_list = []
    for row in book_rows:
        book_info = {}

        # Book name.
        # NOTE(review): rule selectors like "tag.a.0" are legado syntax,
        # not CSS — select_one() may need a translation step; verify
        # against real rule data.
        if 'name' in rule_search:
            name_rule = parse_rule_value(rule_search['name'])
            name_element = row.select_one(name_rule['selector'])
            if name_element:
                book_info['name'] = apply_rule_steps(name_element, name_rule['steps'])

        # Author.
        if 'author' in rule_search:
            author_rule = parse_rule_value(rule_search['author'])
            author_element = row.select_one(author_rule['selector'])
            if author_element:
                book_info['author'] = apply_rule_steps(author_element, author_rule['steps'])

        # Cover URL — the rule embeds JS that derives the image path from
        # the numeric book id; this is a Python port of that JS.
        if 'coverUrl' in rule_search:
            cover_rule = parse_rule_value(rule_search['coverUrl'])
            if '<js>' in rule_search['coverUrl']:
                link_element = row.select_one(cover_rule['selector'])
                if link_element and 'href' in cover_rule['steps']:
                    book_url = link_element['href']
                    match = re.search(r'/(\d+)/?$', book_url)
                    if match:
                        book_id = match.group(1)
                        iid = int(book_id) // 1000  # image directory bucket
                        book_info['coverUrl'] = (
                            f"https://www.ishubao.org/files/article/image/{iid}/{book_id}/{book_id}s.jpg"
                        )

        # Other fields (kind, lastChapter, ...) not yet implemented.

        book_list.append(book_info)

    return book_list





# ------------------------- Parse book sources -------------------------
def parse_data(json_file = "book_source.json"):
    """Parse the book-source file and run one debug search per source.

    NOTE(review): this is debug scaffolding. The loop prints the first
    source's search results and then calls sys.exit(), so the
    PrettyTable / BookInfoExtractor code further down is unreachable
    (and BookInfoExtractor is not defined anywhere in this file).

    :param json_file: path to the book-source JSON file.
    """

    # Parse the book sources.
    sources = parse_book_sources(json_file)  # uses the parse_book_sources helper defined above

    # Print the parse result for each source (only the first runs, see exit below).
    i=1
    for parsed_data  in sources:
        print(f"第{i}个 书源名称: {parsed_data['name']}")

        books = fetch_book_list(
            search_url_rules=parsed_data['searchUrl'],
            rule_search=parsed_data['ruleSearch'],
            keyword="古暖暖江尘御"
        )

        # Print the search results.
        print(f"找到 {len(books)} 本书籍：")
        for book in books:
            print(f"""
                书名：{book.get('name', '未知')}
                作者：{book.get('author', '未知')}
                封面：{book.get('coverUrl', '无')}
            """)
        # NOTE(review): hard process exit for debugging — everything below never runs.
        sys.exit()


        # Category (explore) list table.
        table_explore = PrettyTable()
        table_explore.field_names = parsed_data["exploreUrl"][0].keys()
        print("\nExploreUrl列表:")
        for item in parsed_data["exploreUrl"]:
            #print(f"- {item['title']}: {item['url']}")
            table_explore.add_row(item.values())
        print(table_explore)

        # Initialize the extractor.
        # NOTE(review): BookInfoExtractor is undefined in this file — would raise NameError if reached.
        extractor = BookInfoExtractor(parsed_data['ruleBookInfo'])
        print("\nRuleBookInfo解析:")
        for key, rule in parsed_data["ruleBookInfo"].items():
            if isinstance(rule, dict):
                print(f"{key}:")
                print(f"  - Selector: {rule.get('selector', '')}")
                print(f"  - Steps: {rule.get('steps', [])}")
                print(f"  - Regex: {rule.get('regex', '')}")
                print(f"  - JS: {rule.get('js', '')}")
            else:
                print(f"{key}: {rule}")
                # Extract info and build a table (dead commented-out sketch below).



            # table_data = []
            # for book in mock_books:
            #     info = extractor.extract_info(book['raw_html'])
            #     table_data.append([
            #         info.get('name', 'N/A'),
            #         info.get('author', 'N/A'),
            #         info.get('kind', 'N/A'),
            #         info.get('lastChapter', 'N/A'),
            #         info.get('intro', '')[:50] + "..."  # 简介截断
            #     ])
            #
            # # 打印表格
            # headers = ["书名", "作者", "分类", "最新章节", "简介"]
            # print(tabulate(table_data, headers=headers, tablefmt="grid", maxcolwidths=20))



        i+=1
        break

# ------------------------- Main program -------------------------
def main():
    """Debug entry point.

    NOTE(review): parse_data() itself ends in sys.exit(), and the explicit
    sys.exit() below guarantees the PrettyTable demo underneath never runs
    — it is kept here as leftover exploration code.
    """
    parse_data()
    sys.exit()


    with open("./book_source.json", 'r', encoding="utf-8") as f:
        book_site_str = f.read()

    book_site_list = json.loads(book_site_str)
    # df = pd.DataFrame(book_site_list)
    # print(df)

    # Initialize the summary table.
    table = PrettyTable()
    table.field_names = ["bookSourceGroup", "bookSourceName", "bookSourceType", "bookSourceUrl", "bookUrlPattern", "exploreUrl"]
    for row in book_site_list:
        # NOTE(review): the triple-quoted literal below is an inline sample
        # record kept for reference, not a docstring; it has no runtime effect.
        """
            {
            "bookSourceComment": "",
            "bookSourceGroup": "🥇小黑网络素材",
            "bookSourceName": "🌸 爱书包",
            "bookSourceType": 0,
            "bookSourceUrl": "https://www.ishubao.org",
            "bookUrlPattern": "https://www.ishubao.org/files/article/html/\\d+/\\d+",
            "concurrentRate": "",
            "coverDecodeJs": "",
            "customOrder": 10000,
            "enabled": true,
            "enabledCookieJar": false,
            "enabledExplore": true,
            "exploreUrl": "[{\"title\":\"玄幻魔法\",\"url\":\"/list/1_{{page}}.html\",\"style\":{\"layout_flexGrow\":0.25,\"layout_flexBasisPercent\":-1}},{\"title\":\"武侠修真\",\"url\":\"/list/2_{{page}}.html\",\"style\":{\"layout_flexGrow\":0.25,\"layout_flexBasisPercent\":-1}},{\"title\":\"都市言情\",\"url\":\"/list/3_{{page}}.html\",\"style\":{\"layout_flexGrow\":0.25,\"layout_flexBasisPercent\":-1}},{\"title\":\"历史军事\",\"url\":\"/list/4_{{page}}.html\",\"style\":{\"layout_flexGrow\":0.25,\"layout_flexBasisPercent\":-1}},{\"title\":\"网游竞技\",\"url\":\"/list/5_{{page}}.html\",\"style\":{\"layout_flexGrow\":0.25,\"layout_flexBasisPercent\":-1}},{\"title\":\"科幻灵异\",\"url\":\"/list/6_{{page}}.html\",\"style\":{\"layout_flexGrow\":0.25,\"layout_flexBasisPercent\":-1}},{\"title\":\"其它类型\",\"url\":\"/list/7_{{page}}.html\",\"style\":{\"layout_flexGrow\":0,\"layout_flexBasisPercent\":0.29}}]",
            "exploreScreen": "",
            "header": "{\n  \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36\"\n}",
            "lastUpdateTime": 1629620763101,
            "loginCheckJs": "",
            "loginUi": "",
            "loginUrl": "{\n  \"url\": \"\"\n}",
            "respondTime": 9223372036854775807,
            "ruleBookInfo": {
                "init": "",
                "author": "class.info@h3@a@text",
                "coverUrl": "class.img@tag.img@src",
                "intro": "id.intro@text",
                "kind": "class.info@tag.p.0@text##更新时间：",
                "lastChapter": "class.info@tag.span@a@text",
                "name": "class.info@tag.h1@text",
                "canReName": "",
                "tocUrl": "",
                "wordCount": ""
            },
            "ruleContent": {
                "content": "id.book_text@html##一秒记住.*免费阅读！",
                "nextContentUrl": "class.book_content_text_next@tag.a.4@href",
                "imageStyle": "",
                "webJs": "",
                "sourceRegex": "",
                "replaceRegex": ""
            },
            "ruleExplore": {
                "author": "class.name@tag.a.1@text",
                "bookList": "id.BookList@li",
                "bookUrl": "class.name@tag.a.0@href",
                "coverUrl": "img@src",
                "intro": "tag.p@text",
                "kind": "tag.dt@tag.em.0@text",
                "lastChapter": "tag.dt@a@text",
                "name": "class.name@tag.a.0@text"
            },
            "ruleReview": {
                "review": ""
            },
            "ruleSearch": {
                "author": "class.odd.1@text",
                "bookList": "class.grid@tag.tr!0",
                "bookUrl": "tag.a.0@href",
                "coverUrl": "tag.a.0@href<js>\nvar id = result.match(/(\\d+)\\/?$/)[1];\nvar iid = parseInt(id/1000);\n'https://www.ishubao.org/files/article/image/'+iid+'/'+id+'/'+id+'s.jpg';\n</js>",
                "intro": "",
                "kind": "class.odd.2@text",
                "lastChapter": "tag.a.1@text",
                "name": "tag.a.0@text",
                "wordCount": "class.even.1@text"
            },
            "ruleToc": {
                "chapterList": "id.chapterlist@tag.li",
                "chapterName": "tag.a@text",
                "chapterUrl": "tag.a@href",
                "isVip": "",
                "isVolume": "",
                "nextTocUrl": "",
                "updateTime": ""
            },
            "searchUrl": "https://www.ishubao.org/modules/article/search.php?s=12839966820499815668&entry=1&ie=gbk&q={{key}}",
            "variableComment": "",
            "weight": 0
        }
        """
        print(json.dumps(row, ensure_ascii=False,indent=4))

        temp_list = [
            row['bookSourceGroup'],
            row['bookSourceName'],
            row['bookSourceType'],
            row['bookSourceUrl'],
            row['bookUrlPattern'],
            row['exploreUrl']
        ]

        table.add_row(temp_list)

        break
        # Set per-column styles.
    table.align["bookSourceGroup"] = "l"
    table.align["bookSourceName"] = "l"
    table.max_width["bookSourceName"] = 30

    # Set overall table style.
    table.align = "l"  # left align (default is center)
    table.valign = "t"  # top align
    table.horizontal_char = "═"  # horizontal rule character
    table.vertical_char = "║"  # vertical rule character
    table.junction_char = "╬"  # junction character

    # Right-align the numeric column.
    table.align["bookSourceType"] = "r"


    table.header = True
    table.header_style = "title"
    table.title = f"{Fore.RED}经典小说列表{Fore.RESET}"  # red title via colorama

    print(table)
    # print()

def get_book_content():
    """Download the hard-coded novel's table of contents and print it.

    Scrapes every ``<li>`` under ``#chapterlist`` on the TOC page and
    collects ``{"name": ..., "url": ...}`` for the first link in each item.
    """
    # TOC page of one specific novel.
    toc_url = "https://www.ishubao.org/files/article/html/233/233085/"

    # Fetch the page with a browser-like User-Agent.
    response = requests.get(toc_url, headers={
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36"})
    soup = BeautifulSoup(response.text, 'html.parser')

    # One entry per list item; items without a link are skipped.
    chapter_list = [
        {"name": anchor.get_text().strip(),
         "url": "https://www.ishubao.org" + anchor["href"]}
        for anchor in (item.find("a") for item in soup.select("#chapterlist li"))
        if anchor
    ]

    print(chapter_list)

if __name__ == '__main__':
    # Script entry point: runs the debug driver (which exits the process).
    main()

    # 15378707120