# -*- coding: utf-8 -*-

from bs4 import BeautifulSoup
from datetime import datetime
from playwright.async_api import async_playwright
from urllib.parse import urlparse

# Desktop Chrome/Edge User-Agent request header.
# NOTE(review): not referenced by any function in this chunk — presumably used
# by callers elsewhere in the project; confirm before removing.
header = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/604.1 Edg/112.0.100.0'}


async def bike_pcauto_crawler(url: str, logger) -> (int, dict):
    """Entry point: crawl one baike.pcauto.com.cn URL.

    Args:
        url: Page URL; must be under baike.pcauto.com.cn (http or https).
        logger: Logger-like object used for warnings.

    Returns:
        (status, payload) tuple from get_bike_pcauto, or (-5, {}) when the
        URL does not belong to baike.pcauto.com.cn.
    """
    if not url.startswith(('https://baike.pcauto.com.cn', 'http://baike.pcauto.com.cn')):
        logger.warning(f'{url} is not a baike.pcauto.com.cn url, you should not use this function')
        return -5, {}

    # Normalize to https before opening the page.
    url = url.replace("http://", "https://", 1)

    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        try:
            page = await browser.new_page()
            await page.goto(url)

            path = urlparse(url).path
            return await get_bike_pcauto(page, url, path, logger)
        finally:
            # Always release the browser process, even if crawling raises —
            # the original leaked one Chromium instance per call.
            await browser.close()


# Crawl pages under https://baike.pcauto.com.cn (comment previously referred
# to movie.douban.com — copy-paste from another crawler).
async def get_bike_pcauto(page, url, path, logger):
    """Dispatch on the URL path: list page vs. detail page.

    Args:
        page: An open Playwright page.
        url: The full (https) URL being crawled.
        path: The URL path component, already parsed by the caller.
        logger: Logger-like object (currently unused here).

    Returns:
        (1, [href, ...]) for an index/list page, or
        (22, {...}) with title/detail/url/page_type for a detail page.
    """
    # Index page: collect the link of every knowledge-point entry.
    # The original checked `startswith("/cs") or startswith("/cs")` — the same
    # prefix twice, so a single test is equivalent.
    if path.startswith("/cs"):
        await page.goto(url + '/')
        await page.wait_for_load_state('networkidle')

        all_links = []

        page1_urls = await baike_page_list_url(page)
        all_links.extend(page1_urls)

        # Follow the "next" pagination button until it disappears.
        # NOTE(review): assumes `a.next` is absent on the last page — if the
        # site keeps a disabled next button, this loops forever; confirm.
        next_btn = await page.query_selector('a.next')
        while next_btn:
            await next_btn.click()
            await page.wait_for_load_state('networkidle')
            page_urls = await baike_page_list_url(page)
            all_links.extend(page_urls)
            next_btn = await page.query_selector('a.next')

        return 1, all_links

    # Detail page: open and parse a single knowledge-point article.
    await page.goto(url)
    await page.wait_for_load_state('domcontentloaded')

    details = await bike_page_parse(page)

    return 22, {
        'title': details["title"],
        'detail': details["detail"],
        'url': url,
        "page_type": "baike_pcauto_baoyang"
    }


# 获取页面url
async def baike_page_list_url(page):
    all_href_set = []

    uls = await page.locator('//ul[contains(@class,"data-list")]').locator('li').all()

    for li in uls:
        href = await li.locator('div.data-info').locator('div.data-tit').locator('a').first.get_attribute('href')
        all_href_set.append(href)

    return all_href_set


# BeautifulSoup 解析页面
async def bike_page_parse(page):
    info_html = await page.locator('div.con-bd ').inner_html()

    soup = BeautifulSoup(info_html, 'html.parser')

    # 获取标题
    div_tag = soup.find('div', class_='top-part')
    h1_tag = div_tag.find('h1').get_text()

    abstract = ''
    # 获取摘要
    if soup.find('div', class_='bk-des'):
        abstract = soup.find('div', class_='bk-des').find('div', class_='para').get_text()

    # 获取文章内容
    div = soup.find('div', class_='bk-area')
    result = abstract + '\n' + process_element(div)

    return {
        "title": h1_tag,
        "detail": result
    }


# 定义一个函数来处理div中的所有子孙元素
def process_element(element):

    if element is None:
        return ''

    result = ''

    if element.name == 'p':
        # 判断<p>标签内是否包含<img>标签，并获取src属性
        img_tag = element.find('img')
        if img_tag:
            result += f"'\n'{img_tag['src']}"
        result += f"'\n'{element.get_text()}"

    elif element.name == 'img':
        print(f"Image source: {element['src']}")
        result += f"'\n'{element['src']}"

    elif element.name == 'a':
        print(f"Link URL: {element['href']}")
        result += f"'\n'{element['href']}"

    elif element.name == 'div' and element.get('name') == 'bk-head-tit':
        result += f"\n{element.find('h2').get_text()}"

    elif element.name == 'table':
        # 提取表头
        headers = [th.get_text() for th in element.find_all('th')]
        # 提取表格内容
        rows = []
        rows.append(headers)
        for row in element.find_all('tr')[1:]:  # 跳过表头行
            cells = [cell.get_text() for cell in row.find_all('td')]
            rows.append(cells)
        result += str(rows)

    elif element.name == 'ul':
        # 获取所有li标签并打印其内容
        for li in element.find_all('li'):
            # result += li.get_text(strip=True)
            # 获取li标签内的所有文本
            text = li.get_text(separator=" ", strip=True) if li else ''
            result += text
            
            # 获取li标签内的所有图片src，如果没有img标签返回空列表
            images = [img['src'] for img in li.find_all('img')] if li and li.find_all('img') else []
            result += str(images)

    # 递归处理当前元素的所有子元素
    for child in element.children:
        if child.name:  # 只处理标签元素，忽略字符串等非标签内容
            result += process_element(child)

    return result
