import asyncio
import json
import os
import re
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse

import requests
from bs4 import BeautifulSoup, Comment
from crawl4ai import AsyncWebCrawler, CacheMode
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy

from loccommon import mysql_operate

# Deep recursion is needed because extract_books_level retries via recursion
# and find_key_in_dict walks arbitrarily nested extraction results.
sys.setrecursionlimit(3000)
# Output directory for success/failure logs and collected article files.
folder_path = 'collection_file'
# Extraction rules, replaced with a dict by setRules() at startup.
rules = []
# Thread pool (NOTE(review): unused in this file — confirm it is used elsewhere)
executor = ThreadPoolExecutor(max_workers=40)


def getDomain():
    """Load the newest unfinished spider task and attach its seed URL list.

    When the task row carries a ``file_path``, the seed URLs are read from
    that file; otherwise they are generated from the task's domain/path/params
    template, one URL per page.  Also installs the task's extraction rules.

    :return: the task dict, augmented with a ``'urls'`` list.
    """
    task = getEnd0Spider()
    if task['file_path']:
        seed_urls = read_txt(task['file_path'])
    else:
        seed_urls = [
            task['domain'] + task['path'] + task['params'].format(page=page)
            for page in range(1, task['page'] + 1)
        ]
    setRules(task['rules'])
    task['urls'] = seed_urls
    return task


def read_txt(file_path):
    """Read a UTF-8 text file and return its lines, whitespace-stripped.

    :param file_path: path of the file to read (one URL per line).
    :return: list of stripped lines; [] when the file is missing or unreadable.
        (Previously None was returned on error, which crashed callers that
        iterate over the result.)
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            return [line.strip() for line in file]
    except FileNotFoundError:
        print(f"文件 {file_path} 未找到")
    except Exception as e:
        print(f"读取文件时发生错误: {e}")
    return []


def getEnd0Spider():
    """Fetch the most recent spider task row that is not yet finished (end=0).

    :return: the first (newest) matching row from ``zbp_py_spider``.
    """
    query = "SELECT * FROM zbp_py_spider where `end`=0 order by id desc limit 1"
    rows = mysql_operate.db.select_db(query)
    return rows[0]


def extract_book_content(results, key="title"):
    """Return the first non-empty value stored under *key* anywhere in *results*.

    :param results: nested dict/list structure produced by the extractors.
    :param key: key to search for recursively.
    :return: the first truthy value found, or "" when *results* is empty or
        holds no truthy value.  (Previously a falsy *results* — e.g. None —
        was echoed back unchanged, which crashed callers doing ``.encode``.)
    """
    if not results:
        return ""
    for value in find_key_in_dict(results, key):
        if value:
            return value
    return ""


def extract_book_list_urls(result, key="book_list_url"):
    """Collect every truthy value stored under *key* anywhere in *result*.

    :param result: nested dict/list structure produced by the extractors.
    :param key: key to search for recursively.
    :return: list of URLs; [] when *result* is empty.  (Previously a falsy
        *result* — e.g. None — was echoed back unchanged, which broke list
        concatenation in callers.)
    """
    if not result:
        return []
    return [url for url in find_key_in_dict(result, key) if url]


def find_key_in_dict(d, key):
    """Yield every value stored under *key*, searching nested dicts and lists
    depth-first in document order."""
    if isinstance(d, list):
        for element in d:
            yield from find_key_in_dict(element, key)
    elif isinstance(d, dict):
        for name, value in d.items():
            if name == key:
                yield value
            elif isinstance(value, (dict, list)):
                yield from find_key_in_dict(value, key)


def re_load_url(domain, url, add_params=None, exclude=None):
    """Rebuild *url* for the next pagination request.

    Site-relative paths are absolutized against ``domain['domain']``, then the
    query parameters named in *exclude* are dropped and *add_params* merged in
    (overwriting existing values).

    :param domain: task dict providing the site root under ``'domain'``.
    :param url: absolute URL or site-relative path.
    :param add_params: mapping of query parameters to set/overwrite.
    :param exclude: iterable of query parameter names to remove first.
    :return: the rebuilt absolute URL string.
    """
    parts = urlparse(url)
    if not (parts.scheme or parts.netloc):
        separator = "" if url.startswith('/') else "/"
        url = domain['domain'] + separator + url

    parts = urlparse(url)
    query = parse_qs(parts.query)
    for name in (exclude or ()):
        query.pop(name, None)
    for name, value in (add_params or {}).items():
        query[name] = value
    return urlunparse(parts._replace(query=urlencode(query, doseq=True)))


async def extract_content(domain, url, level=1):
    """Crawl one URL at the given rule level and return a list of extraction results.

    Looks up the CSS-extraction schema (``level_<n>``) and the per-level options
    (``level_<n>_other``) from the module-global ``rules``, fetches the page via
    plain HTTP (``request == "api"``) or the headless crawler, and — when
    pagination is configured via ``next_page`` — fetches the follow-up pages too.

    :param domain: task dict; ``domain['domain']`` is used when rebuilding URLs.
    :param url: absolute or site-relative URL to crawl.
    :param level: 1-based rule level selecting which schema/options apply.
    :return: list of per-page extraction results (one entry per fetched page).
    """
    global rules
    rule = {}
    if f"level_{level}" in rules:
        rule = rules[f"level_{level}"]
    other_info = {}
    js_code = {}
    wait_for = ""
    delay_before_return_html = None
    total_result = []
    if f"level_{level}_other" in rules:
        other_info = rules[f"level_{level}_other"]

    # Throttle: pause one second between requests.
    await asyncio.sleep(1)
    print(f"请求链接:{url}")

    if 'request' in other_info and other_info['request'] == "api":
        # API endpoint: plain HTTP GET, no browser rendering.
        print("接口")
        result = api_books_get_level(url)
        total_result.append(result)
    else:
        # Regular web page: render with the headless crawler, applying any
        # per-level JS, wait condition and capture delay from the options.
        print("网页")
        if 'js_code' in other_info:
            js_code = other_info['js_code']
        if 'wait_for' in other_info:
            wait_for = other_info['wait_for']
        if 'delay_before_return_html' in other_info:
            delay_before_return_html = other_info['delay_before_return_html']
        result = await extract_books_level(url, rule, js_code, wait_for, delay_before_return_html)
        total_result.append(result)

    # Pagination: read the total page count from the first result, then fetch
    # pages 2..total by rewriting the current URL's query string.
    # NOTE(review): extract_book_content may return a string here, in which
    # case range(2, total_page + 1) would raise TypeError — confirm the
    # ``next_page`` rule yields an int.
    if 'next_page' in other_info:
        total_page = extract_book_content(result, 'next_page')
        for page in range(2, total_page + 1):
            add_params = {}
            # NOTE(review): initialized as a dict but the rules supply a set;
            # both iterate their members the same way in re_load_url.
            exclude = {}
            if 'add_params' in other_info:
                add_params = other_info['add_params']
                # When a page-parameter name is configured, set it to the
                # current page number.
                if 'page' in other_info['add_params'] and other_info['add_params']['page']:
                    add_params[other_info['add_params']['page']] = page
            if 'exclude' in other_info:
                exclude = other_info['exclude']
            new_url = re_load_url(domain, url, add_params=add_params, exclude=exclude)
            result = await extract_books_level(new_url, rule, js_code, wait_for, delay_before_return_html)
            total_result.append(result)

    return total_result


# except Exception as e:
#     print(f"Error occurred: {e}")


async def extract_books():
    """Main crawl pipeline: walk all levels of the task, then persist articles.

    Levels 1..level-1 collect the next level's URLs from each page's
    ``book_list_url`` entries; the final level extracts ``title``/``content``,
    cleans the HTML and appends the article to a rolling output file.
    """
    domain = getDomain()
    global rules
    urls = domain['urls']
    # Walk level by level: crawl every URL of the current level, collect the
    # next level's URLs, then repeat with those.
    for i in range(1, domain['level'] + 1):
        news_urls = []
        loop_count = 0
        # BUG FIX: was the string "1"; "1" + 1 raised TypeError on every write,
        # which the except below swallowed — so no article was ever saved.
        file_suffix = 1
        if not urls:
            print("没有需要爬取的链接")

        for url in urls:
            result = await extract_content(domain, url, i)
            print(f"当前level:{i}")
            if i < domain['level']:
                # Intermediate level: harvest links for the next level.
                book_list_urls = extract_book_list_urls(result, 'book_list_url')
                print(f"获取的链接:{book_list_urls}")
                news_urls = news_urls + book_list_urls
            else:
                # Final level: record the crawled URL, then persist the article.
                success_file_path = os.path.join(folder_path, "SuccessUrl.txt")
                with open(success_file_path, 'a', encoding='utf-8') as file:
                    file.write(f"{url}\n")

                title = extract_book_content(result, 'title')
                # Strip characters illegal in file names / markers.
                title = re.sub(r'[\\/*?:"<>|]', '', title)
                # Optional decoration configured per task.
                if 'prefix' in domain['title_attach']:
                    title = domain['title_attach']['prefix'] + title
                if 'suffix' in domain['title_attach']:
                    title = title + domain['title_attach']['suffix']

                content = extract_book_content(result, 'content')
                soup = BeautifulSoup(content, 'html.parser')
                # Drop HTML comments.
                for comment in soup.findAll(string=lambda text: isinstance(text, Comment)):
                    comment.extract()
                # Remove configured junk elements whose text matches any of the
                # configured snippets (e.g. "advertisement" spans).
                if 'exclude' in domain['content_attach']:
                    for span in soup.find_all(f'{domain["content_attach"]["exclude"]["label"]}'):
                        for label_txt in domain['content_attach']['label_txt']:
                            if f'{label_txt}' in span.get_text():
                                span.extract()
                # Normalize every <div> to a bare <p>.
                for div in soup.find_all('div'):
                    div.name = 'p'
                    div.attrs.clear()
                # Skip pages with too little text.
                # NOTE(review): counts ALL characters, not only CJK, despite the
                # original "汉字" comment — threshold semantics kept as-is.
                text_content = soup.get_text(strip=True)
                if len(text_content) < 100:
                    continue
                content = str(soup)
                # Turn <br> variants into paragraph breaks and wrap the whole
                # body in <p>...</p>.
                content = re.sub(r'<br\s*/?>', '</p><p>', content)
                content = '<p>' + content + '</p>'
                try:
                    # BUG FIX: loop_count was never incremented, so
                    # loop_count % 1000 == 0 held on every iteration.
                    loop_count += 1
                    if loop_count % 1000 == 0:
                        # Roll over to a new output file every 1000 articles.
                        file_suffix += 1
                    file_path = os.path.join(folder_path, f"baidu_{file_suffix}.txt")
                    with open(file_path, 'a', encoding='utf-8') as file:
                        file.write(f"{title}{content}\n")
                except Exception as e:
                    print(f"写入失败: {e}")
        # Feed the collected links into the next level (empty list ends the loop).
        urls = news_urls if news_urls and i < domain['level'] else []


def api_books_get_level(url, params=None, reload=0):
    """GET a JSON API endpoint and return the parsed response body.

    A warm-up request on a fresh session is made first so the site can set
    session cookies before the real call.

    :param url: API endpoint URL.
    :param params: optional query parameters for the real request.
    :param reload: unused retry counter, kept for signature compatibility.
    :return: parsed JSON body, or the raw response text when the body is not
        valid JSON.  (BUG FIX: this function previously returned None, so the
        caller in extract_content appended None and all API results were lost.)
    """
    headers = {
        'Accept': '*/*',
        'Content-Type': 'application/json',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
    }
    session = requests.Session()
    # Warm-up: lets the site set cookies on the session.
    session.get(url)
    response = session.get(url, params=params, headers=headers)
    print(response.cookies)
    print(response.text)
    try:
        return response.json()
    except ValueError:
        return response.text


def api_books_post_level(url, params=None, reload=0):
    """POST *params* as a JSON body and return the parsed JSON response.

    :param url: API endpoint URL.
    :param params: request body; a str is UTF-8 encoded, bytes or None are
        passed through unchanged.  (BUG FIX: the default ``params=None``
        previously crashed on ``None.encode('utf-8')``.)
    :param reload: unused retry counter, kept for signature compatibility.
    :return: parsed JSON body; network and JSON-decoding errors propagate.
    """
    headers = {
        'Accept': '*/*',
        "Content-Type": "application/json"
    }
    timeout_seconds = 30
    body = params.encode('utf-8') if isinstance(params, str) else params
    response = requests.post(url, headers=headers, data=body, timeout=timeout_seconds)
    return response.json()


async def extract_books_level(url, rule=None, js_code=None, wait_for=None, delay_before_return_html=None, reload=0):
    """Render *url* with the headless crawler and return the extracted JSON.

    :param url: page to crawl.
    :param rule: JSON-CSS extraction schema; when falsy, no strategy is applied.
    :param js_code: JS snippets to run in the page before capture.
    :param wait_for: crawler wait condition.
    :param delay_before_return_html: extra delay (seconds) before capturing HTML.
    :param reload: internal retry counter; the crawl is retried up to 2 times.
    :return: parsed ``extracted_content`` on success; None after the final
        failure (the URL is then appended to ``failedUrl.txt``).
    """
    extraction_strategy = None
    if rule:
        extraction_strategy = JsonCssExtractionStrategy(rule, verbose=True, cache_mode=CacheMode.BYPASS, )
    try:
        async with AsyncWebCrawler(cache_mode=CacheMode.BYPASS, verbose=True, browser_type="chromium") as crawler:
            result = await crawler.arun(
                url=url,
                extraction_strategy=extraction_strategy,
                cache_mode=CacheMode.BYPASS,
                js_code=js_code,
                wait_for=wait_for,
                page_timeout=60000,
                delay_before_return_html=delay_before_return_html
            )
            # raise (not assert) so the retry still fires under ``python -O``
            if not result.success:
                raise RuntimeError("Failed to crawl the page")
            return json.loads(result.extracted_content)
    except Exception as e:
        print(e)
        if reload < 2:
            # BUG FIX: the retry previously called
            # extract_books_level(url, rule, reload + 1), passing the counter
            # as *js_code* — so ``reload`` never advanced (unbounded retry
            # recursion) and the browser options were dropped on retry.
            return await extract_books_level(url, rule, js_code, wait_for,
                                             delay_before_return_html, reload + 1)
        # Append mode creates the file when missing; no exists() check needed.
        file_path = os.path.join(folder_path, "failedUrl.txt")
        with open(file_path, 'a', encoding='utf-8') as file:
            file.write(f"【失败链接】 {url}\n")
        print(f"【失败链接】: {url}")
        return None


def setRules(data=None):
    """Install the extraction rules for the current task into the module-global
    ``rules``.

    NOTE(review): the *data* argument (rules stored on the task row) is
    currently ignored; a hard-coded rule set for the active site is used
    instead — confirm this is intentional before re-enabling ``rules = data``.
    """
    global rules
    # Level-1 schema: pull each feed item's target URL out of the feed list.
    level_1_schema = {
        "name": "Commit Extractor",
        "baseSelector": "div#article",
        "fields": [
            {
                "name": "book_list",
                "selector": "div.feed-list > .s-list > .feed-item",
                "type": "list",
                "fields": [
                    {
                        "name": "book_list_url",
                        "selector": ".feed-item > div",
                        "type": "attribute",
                        "attribute": "url",
                    }
                ]
            },
        ],
    }
    # Level-1 crawl options: rewrite the query string, wait for lazy content,
    # and scroll the feed repeatedly so lazy-loaded items render before capture.
    level_1_options = {
        'exclude': {'params'},
        'add_params': {
            'otherext': 'h5_20241120131359',
            'format': 'json',
            'Tenger-Mhor': '975261235',
        },
        'delay_before_return_html': 60,
        'js_code': [
            "setTimeout(() => {}, 1000);",
            "window.scrollTo(0, document.body.scrollHeight);",
            "document.querySelectorAll('.s-tabs-nav-line .s-tab').forEach(tab => tab.textContent.trim() === '文章' && tab.click());",
            "let count = 0, interval = setInterval(() => { window.scrollTo(0, document.body.scrollHeight); if (++count >= 50) { clearInterval(interval); return; } }, 1000);"
        ],
    }
    rules = {
        'level_1': level_1_schema,
        'level_1_other': level_1_options,
        # Level 2 is fetched through the plain HTTP API path, not the browser.
        'level_2_other': {
            'request': 'api',
        },
    }


if __name__ == '__main__':
    # Script entry point: run the full multi-level crawl pipeline.
    asyncio.run(extract_books())
