import asyncio
import json
import os
import re
import time
import sys

from crawl4ai import AsyncWebCrawler, CacheMode
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy

# Output directory for the scraped book files and the failed-URL log.
folder_path = 'baidu/collection_file'
# NOTE(review): the only recursion in this file is the retry in
# extract_books_level, which is at most 3 calls deep — this raised limit
# looks unnecessary; confirm before removing.
sys.setrecursionlimit(3000)


def getDomain():
    """Return the crawl-target configuration for the Goodreads popular-lists index.

    Keys: 'url' (site root), 'page' (number of index pages to walk),
    'level' (crawl depth), 'uri' (index path), 'params' (query template
    with a '{page}' placeholder).
    """
    return {
        'url': 'https://www.goodreads.com',
        'page': 100,
        'level': 3,
        'uri': '/list/popular_lists',
        'params': '?page={page}&ref=ls_pl_seeall',
    }


def extract_book_list_urls(result):
    """Collect every non-empty 'book_list_url' href from extraction results.

    :param result: list of dicts produced by JsonCssExtractionStrategy, each
        optionally carrying a 'book_list' list of {'book_list_url': str} dicts
        (level-2 results may instead carry only 'book_list_next')
    :return: flat list of URL strings; [] for falsy/empty input

    Fixes vs. original: falsy input now yields [] instead of echoing the
    input (None) back, and missing 'book_list'/'book_list_url' keys no
    longer raise KeyError. Callers only truth-test and iterate the result,
    so both changes are backward-compatible.
    """
    if not result:
        return []
    return [
        book['book_list_url']
        for item in result
        if item.get('book_list')
        for book in item['book_list']
        if book and book.get('book_list_url')
    ]


async def extract_books():
    """Crawl Goodreads popular lists and archive each book's title/description.

    Three levels: (1) the paginated popular-lists index, (2) each book list
    (following its own numeric pagination), (3) each book's detail page,
    which _save_book writes to a text file under folder_path.
    """
    domain = getDomain()
    rules = getRules()
    async with AsyncWebCrawler(verbose=True, sleep_on_close=True) as crawler:
        for page in range(1, domain['page'] + 1):
            index_url = domain['url'] + domain['uri'] + domain['params'].format(page=page)
            print(index_url)
            result = await extract_books_level(crawler, index_url, rules['level_1'])
            print(result)
            book_list_urls = extract_book_list_urls(result)
            print(book_list_urls)
            if not book_list_urls:
                continue

            for book_list_path in book_list_urls:
                book_list_url = domain['url'] + book_list_path
                print(book_list_url)
                try:
                    next_result = await extract_books_level(crawler, book_list_url, rules['level_2'])
                except Exception as e:
                    print(e)
                    continue
                book_info = extract_book_list_urls(next_result)
                if not book_info:
                    continue
                print(book_info)
                # Text of the last pagination link (e.g. "25"); 0 when absent.
                book_list_next = next((link['book_list_next'] for link in next_result if 'book_list_next' in link), 0)
                print(book_list_next)
                await _scrape_books(crawler, domain, rules, book_info)
                if book_list_next == 0 or book_list_next.isdigit():
                    # BUG FIX: the original rebuilt the page URL from the already
                    # absolute book_list_url (prepending the domain a second time)
                    # and mutated it each iteration, so "?page=N" suffixes kept
                    # accumulating. Build every page URL from the clean base.
                    for extra_page in range(2, int(book_list_next) + 1):
                        page_url = book_list_url + f"?page={extra_page}"
                        print(page_url)
                        try:
                            page_result = await extract_books_level(crawler, page_url, rules['level_2'])
                        except Exception as e:
                            print(e)
                            continue
                        page_books = extract_book_list_urls(page_result)
                        if not page_books:
                            continue
                        print(page_books)
                        await _scrape_books(crawler, domain, rules, page_books)
                else:
                    # Pagination text is not numeric: stop processing this
                    # index page's lists (matches original control flow).
                    break
                await asyncio.sleep(1)
            await asyncio.sleep(1)


async def _scrape_books(crawler, domain, rules, book_paths):
    """Fetch each level-3 book page and persist its title/content to disk.

    :param crawler: the shared AsyncWebCrawler instance
    :param domain: config dict from getDomain (supplies the site root)
    :param rules: schema dict from getRules (uses 'level_3')
    :param book_paths: relative book-page paths, e.g. '/book/show/123'
    """
    for path in book_paths:
        book_info_url = domain['url'] + path
        print(book_info_url)
        try:
            book_result_list = await extract_books_level(crawler, book_info_url, rules['level_3'])
        except Exception as e:
            print(e)
            continue
        if not book_result_list:
            continue
        for book_result in book_result_list:
            _save_book(book_result.get('title'), book_result.get('content'))
        # Be polite: pause between book-page requests.
        await asyncio.sleep(1)


def _save_book(title, content):
    """Write one book to '<folder_path>/<title>.txt' unless it already exists.

    The title is stripped of filesystem-hostile characters; <br> tags in the
    HTML content become paragraph breaks and the whole text is wrapped in <p>.
    """
    if not title or not content:
        # Original code raised AttributeError on missing fields (killing the
        # whole crawl); skip the incomplete record instead.
        return
    title = re.sub(r'[\\/*?:"<>|]', '', title)
    # One-step replacement of <br>/<br/> with a paragraph break (the original
    # took a detour through a __BR_TAG__ placeholder and redundant
    # utf-8 encode/decode round trips).
    content = re.sub(r'<br\s*/?>', '</p><p>', content)
    content = '<p>' + content + '</p>'
    try:
        file_path = os.path.join(folder_path, f"{title}.txt")
        if not os.path.exists(file_path):
            with open(file_path, 'w', encoding='utf-8') as file:
                file.write(f"【标题】 {title}\n")
                file.write(f"【内容】 {content}\n")
    except Exception as e:
        print(f"Error occurred: {e}")


async def extract_books_level(crawler, url, rule, reload=0):
    """Crawl *url*, extract structured data via *rule*, and return it parsed.

    Makes up to 3 total attempts (``reload`` counts attempts already used;
    the parameter is kept for backward compatibility with the original
    recursive signature). On final failure the URL is appended to
    failedUrl.txt and None is returned.

    :param crawler: an AsyncWebCrawler with an ``arun`` coroutine
    :param url: absolute URL to crawl
    :param rule: JsonCssExtractionStrategy schema dict
    :param reload: attempts already consumed (callers pass 0)
    :return: parsed list of extracted dicts, or None after repeated failure
    """
    extraction_strategy = JsonCssExtractionStrategy(rule, verbose=True, cache_mode=CacheMode.BYPASS)
    # Iterative retry replaces the original recursive call: same behavior
    # (up to 3 total attempts) without growing the call stack.
    for _attempt in range(reload, 3):
        try:
            result = await crawler.arun(url=url,
                                        extraction_strategy=extraction_strategy,
                                        cache_mode=CacheMode.BYPASS,
                                        verbose=True
                                        )
            # The original used `assert result.success`, which is silently
            # stripped under `python -O`; raise explicitly instead.
            if not result.success:
                raise RuntimeError("Failed to crawl the page")
            return json.loads(result.extracted_content)
        except Exception:
            continue
    # All attempts failed: record the URL for later inspection.
    file_path = os.path.join(folder_path, "failedUrl.txt")
    with open(file_path, 'a', encoding='utf-8') as file:
        file.write(f"【失败链接】 {url}\n")
    print(f"【失败链接】: {url}")
    return None


def getRules():
    """Return the JsonCssExtractionStrategy schemas for the three crawl levels.

    'level_1' scrapes the popular-lists index, 'level_2' a single book list
    (including its last pagination link), 'level_3' a book detail page.
    """
    popular_lists_index = {
        "name": "Commit Extractor",
        "baseSelector": "div.leftContainer",
        "fields": [
            {
                "name": "book_list",
                "selector": ".listRowsFull > .row > .cell",
                "type": "list",
                "fields": [
                    {
                        "name": "book_list_url",
                        "selector": ".cell > a",
                        "type": "attribute",
                        "attribute": "href",
                    },
                ],
            },
        ],
    }

    single_list_page = {
        "name": "Commit Extractor",
        "baseSelector": "div#all_votes",
        "fields": [
            {
                "name": "book_list",
                "selector": "table.tableList.js-dataTooltip > tbody > tr > td",
                "type": "list",
                "fields": [
                    {
                        "name": "book_list_url",
                        "selector": "td > a.bookTitle",
                        "type": "attribute",
                        "attribute": "href",
                    },
                ],
            },
            {
                # Text of the last numbered pagination link, e.g. "25".
                "name": "book_list_next",
                "selector": ".pagination > a:nth-last-of-type(2)",
                "type": "text",
            },
        ],
    }

    book_detail_page = {
        "name": "Commit Extractor",
        "baseSelector": "div.BookPage__mainContent",
        "fields": [
            {
                "name": "title",
                "selector": "div.BookPageTitleSection > div.BookPageTitleSection__title > h1",
                "type": "text",
            },
            {
                "name": "content",
                "selector": ".BookPageMetadataSection > div.BookPageMetadataSection__description >.TruncatedContent > div > .DetailsLayoutRightParagraph> .DetailsLayoutRightParagraph__widthConstrained > span.Formatted",
                "type": "html",
            },
        ],
    }

    return {
        'level_1': popular_lists_index,
        "level_2": single_list_page,
        "level_3": book_detail_page,
    }


if __name__ == '__main__':
    # Script entry point: run the full three-level crawl to completion.
    asyncio.run(extract_books())
