import asyncio
import json
import os
import re
import sys
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse

from bs4 import BeautifulSoup, Comment
from crawl4ai import AsyncWebCrawler, CacheMode
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy

# NOTE(review): presumably raised for the recursive find_key_in_dict walk over
# deeply nested extraction results — confirm the original motivation.
sys.setrecursionlimit(3000)
# Directory where all crawl output (articles, success/failure logs) is written.
folder_path = 'collection_file'
# Global extraction rules, populated by setRules(); keyed "level_<n>" for the
# JSON-CSS schema and "level_<n>_other" for per-level crawler options.
rules = []


def getDomain():
    """Build the crawl configuration for the Baidu author pages.

    Reads the seed URL list from ``baidu.txt`` next to this module,
    installs the global extraction rules, and returns the crawl settings.

    Returns:
        dict: Keys ``domain`` (base URL), ``urls`` (seed list),
        ``file_path`` (seed file path) and ``level`` (crawl depth).
    """
    seed_file = os.path.join(os.path.dirname(__file__), 'baidu.txt')
    seeds = read_txt(seed_file)
    config = {
        'domain': 'https://author.baidu.com',
        'urls': seeds,
        'file_path': seed_file,
        'level': 2,
    }
    setRules()
    return config


def read_txt(file_path):
    """Read a UTF-8 text file and return its lines, whitespace-stripped.

    Args:
        file_path: Path of the file to read.

    Returns:
        list[str]: Stripped lines on success, or an empty list when the
        file is missing or unreadable. (Previously ``None`` was returned
        implicitly on error, forcing every caller to guard against it.)
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            return [line.strip() for line in file]
    except FileNotFoundError:
        print(f"文件 {file_path} 未找到")
    except Exception as e:
        print(f"读取文件时发生错误: {e}")
    # Error path: an empty list keeps callers' iteration code working.
    return []


def extract_book_list_urls(result, key="book_list_url"):
    book_list_urls = []
    if not result:
        return result

    for book_list_url in find_key_in_dict(result, key):
        if book_list_url:
            book_list_urls.append(book_list_url)

    return book_list_urls


def extract_book_content(results, key="title"):
    if not results:
        return results
    content = ""
    for result in find_key_in_dict(results, key):
        if result:
            content = result
            break
    return content


def find_key_in_dict(d, key):
    """Depth-first search of a nested dict/list structure, yielding every
    value that any dictionary maps to *key*."""
    if isinstance(d, list):
        # Lists are transparent containers: recurse into each element.
        for element in d:
            yield from find_key_in_dict(element, key)
        return
    if not isinstance(d, dict):
        # Scalars cannot contain the key; nothing to yield.
        return
    for current_key, value in d.items():
        if current_key == key:
            yield value
        elif isinstance(value, (dict, list)):
            yield from find_key_in_dict(value, key)


def re_load_url(domain, url, add_params=None, exclude=None):
    """Normalize *url* against ``domain['domain']`` and rewrite its query.

    Relative URLs (no scheme and no netloc) are prefixed with the
    configured domain. Query parameters named in *exclude* are removed,
    then those in *add_params* are set/overwritten.

    Returns:
        str: The rebuilt absolute URL.
    """
    parts = urlparse(url)
    if not (parts.scheme or parts.netloc):
        separator = '' if url.startswith('/') else '/'
        url = f"{domain['domain']}{separator}{url}"
    parts = urlparse(url)
    params = parse_qs(parts.query)
    for name in (exclude or ()):
        params.pop(name, None)
    params.update(add_params or {})
    return urlunparse(parts._replace(query=urlencode(params, doseq=True)))


async def extract_content(url, level=1):
    """Crawl *url* using the extraction rule registered for *level*.

    Looks up ``level_<n>`` (JSON-CSS schema) and ``level_<n>_other``
    (js_code / wait_for / delay options) in the global ``rules``, then
    delegates to ``extract_books_level`` after a short politeness delay.
    """
    global rules
    rule_key = f"level_{level}"
    other_key = f"level_{level}_other"
    rule = rules[rule_key] if rule_key in rules else {}
    extras = rules[other_key] if other_key in rules else {}
    print(f"level:{level}，请求链接:{url}")
    js_code = extras['js_code'] if 'js_code' in extras else {}
    wait_for = extras['wait_for'] if 'wait_for' in extras else ""
    delay_before_return_html = (
        extras['delay_before_return_html']
        if 'delay_before_return_html' in extras
        else None
    )
    # Small fixed delay between requests before hitting the crawler.
    await asyncio.sleep(0.5)
    return await extract_books_level(url, rule, js_code, wait_for, delay_before_return_html)


async def extract_books():
    """Top-level crawl driver.

    Walks the configured levels: intermediate levels harvest article URLs
    (``book_list_url``) for the next pass; the final level extracts
    title/content, cleans the HTML with BeautifulSoup, and appends the
    result to numbered ``baidu_<n>.txt`` files (plus a ``SuccessUrl.txt``
    log) inside ``folder_path``.
    """
    domain = getDomain()
    global rules
    # Level by level: collect the inner URLs first, leave the loop, then
    # crawl all collected URLs on the next pass.
    urls = domain['urls']

    for i in range(1, domain['level'] + 1):
        news_urls = []
        loop_count = 0
        file_suffix = 1
        if not urls:
            print("没有需要爬取的链接")
        for url in urls:
            result = await extract_content(url, i)
            if not result:
                continue
            # Intermediate level: save the discovered URLs for the next level
            print(f"当前level:{i}")
            if i < domain['level']:
                book_list_urls = extract_book_list_urls(result, 'book_list_url')
                print(book_list_urls)
                news_urls = news_urls + book_list_urls
            # Final level: clean the article and write it to disk
            else:
                success_file_path = os.path.join(folder_path, f"SuccessUrl.txt")
                with open(success_file_path, 'a', encoding='utf-8') as file:
                    file.write(f"{url}\n")
                title = extract_book_content(result, 'title')
                # NOTE(review): encode/decode round trip is a no-op for a
                # valid str — possibly a leftover from bytes handling.
                title = title.encode('utf-8')
                title = title.decode('utf-8')
                # Strip characters that are illegal in file names.
                title = re.sub(r'[\\/*?:"<>|]', '', title)
                title = title + "******"
                content = extract_book_content(result, 'content')
                content = content.encode('utf-8')
                content = content.decode('utf-8')
                content = BeautifulSoup(content, 'html.parser')
                # Drop HTML comments.
                for comment in content.findAll(string=lambda text: isinstance(text, Comment)):
                    comment.extract()
                # Remove report buttons and "reposted from" notices.
                for span in content.find_all('span'):
                    if span.get('data-testid') == 'report-btn':
                        span.extract()
                    if '本文转自' in span.get_text():
                        span.extract()
                # Flatten every <div> into an attribute-less <p>.
                for div in content.find_all('div'):
                    div.name = 'p'
                    div.attrs.clear()
                # Extract the cleaned plain text.
                text_content = content.get_text(strip=True)
                # NOTE(review): the original comment claimed "count hanzi
                # with a regex", but this counts ALL characters in the text.
                hanzi_count = len(text_content)
                if hanzi_count < 100:
                    continue
                content = str(content)
                try:
                    loop_count+=1
                    # Roll over to a new output file every 500 articles.
                    if loop_count % 500 == 0:
                        file_suffix += 1
                    # Output path; articles append to baidu_<suffix>.txt.
                    file_path = os.path.join(folder_path, f"baidu_{file_suffix}.txt")
                    # One line per article: title marker then HTML content.
                    with open(file_path, 'a', encoding='utf-8') as file:
                        file.write(f"{title}{content}\n")
                except Exception as e:
                    print(f"写入失败: {e}")
            # break
        urls = []
        if news_urls and i < domain['level']:
            urls = news_urls


async def extract_books_level(url, rule=None, js_code=None, wait_for=None, delay_before_return_html=None,
                              reload=0):
    """Fetch *url* with crawl4ai and return the extracted JSON content.

    Args:
        url: Page to crawl.
        rule: JSON-CSS extraction schema; when falsy no strategy is used.
        js_code: JavaScript snippets to execute in the page.
        wait_for: crawl4ai wait condition evaluated before extraction.
        delay_before_return_html: Extra delay (seconds) before grabbing HTML.
        reload: Internal retry counter; the call retries itself up to twice.

    Returns:
        Parsed extraction result (list/dict), or ``None`` after all retries
        fail — the URL is then appended to ``failedUrl.txt``.
    """
    extraction_strategy = None
    if rule:
        extraction_strategy = JsonCssExtractionStrategy(rule, verbose=True, cache_mode=CacheMode.BYPASS)
    try:
        headers = {}
        async with AsyncWebCrawler(cache_mode=CacheMode.BYPASS, verbose=True, browser_type="chromium", headers=headers) as crawler:
            result = await crawler.arun(
                url=url,
                extraction_strategy=extraction_strategy,
                cache_mode=CacheMode.BYPASS,
                js_code=js_code,
                wait_for=wait_for,
                page_timeout=60000,
                delay_before_return_html=delay_before_return_html,
            )
            # Raise explicitly instead of `assert`, which is stripped
            # under `python -O` and would silently skip the failure check.
            if not result.success:
                raise RuntimeError("Failed to crawl the page")
            return json.loads(result.extracted_content)
    except Exception as e:
        print(e)
        if reload < 2:
            # Retry the same request up to two more times before giving up.
            return await extract_books_level(url, rule, js_code, wait_for, delay_before_return_html, reload + 1)
        # Append mode creates the file when missing, so the previous
        # exists/else branching was redundant.
        file_path = os.path.join(folder_path, "failedUrl.txt")
        with open(file_path, 'a', encoding='utf-8') as file:
            file.write(f"【失败链接】 {url}\n")
        print(f"【失败链接】: {url}")
        return None


def setRules(data=None):
    """Install the global extraction rules.

    Args:
        data: Optional rules mapping. When provided it replaces the
            built-in defaults (previously this parameter was accepted but
            silently ignored — see the commented-out ``rules = data``).

    Returns:
        dict: The rules now in effect (also stored in the global ``rules``).
    """
    global rules
    if data is not None:
        rules = data
        return rules
    rules = {
        # Level 1: the author feed page — collect article URLs.
        'level_1': {
            "name": "Commit Extractor",
            "baseSelector": "div#article",
            "fields": [
                {
                    "name": "book_list",
                    "selector": "div.feed-list > .s-list > .feed-item",
                    "type": "list",
                    "fields": [
                        {
                            "name": "book_list_url",
                            "selector": ".feed-item > div",
                            "type": "attribute",
                            "attribute": "url",
                        }
                    ]
                },
            ],
        },
        # Level 1 crawler options: click the "文章" tab, keep scrolling to
        # trigger lazy loading, and wait until enough feed items exist.
        'level_1_other': {
            'delay_before_return_html': 10,
            'js_code': [
                "setTimeout(() => {document.querySelectorAll('.s-tabs-nav-line .s-tab').forEach(tab => tab.textContent.trim() === '文章' && tab.click());}, 2000);",
                "window.scrollTo(0, document.body.scrollHeight);",
                "document.querySelectorAll('.s-tabs-nav-line .s-tab').forEach(tab => tab.textContent.trim() === '文章' && tab.click());",
                "setInterval(()=>{window.scrollTo(0, document.body.scrollHeight);},1000);"
            ],
            "wait_for": "js:()=>document.querySelectorAll('.feed-item').length > 50",
        },
        # Level 2: the article page itself — extract title and HTML body.
        'level_2': {
            "name": "Commit Extractor",
            "baseSelector": "div#ssr-content",
            "fields": [
                {
                    "name": "book_list",
                    "selector": "div:nth-of-type(2)",
                    "type": "list",
                    "fields": [
                        {
                            "name": "title",
                            "selector": "div#header > div:nth-of-type(1)",
                            "type": "text",
                        },
                        {
                            "name": "content",
                            "selector": 'div[data-testid="article"]',
                            "type": "html",
                        }
                    ]
                },
            ],
        },
        # Level 2 needs no extra crawler options.
        'level_2_other': {
        },
    }
    return rules


if __name__ == '__main__':
    # Entry point: run the full crawl pipeline to completion.
    asyncio.run(extract_books())
