# -*- coding: utf-8 -*-
import re

from bs4 import BeautifulSoup
from playwright.async_api import async_playwright
from urllib.parse import urlparse
import os
import json

# Default HTTP request headers; the User-Agent makes requests look like a
# desktop browser. NOTE(review): not referenced anywhere in this file —
# presumably used by other modules or kept for future requests; confirm.
header = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/604.1 Edg/112.0.100.0'}


# Only crawls pages whose URL starts with https://you.ctrip.com/
async def ctrip_crawler(url: str, logger) -> 'tuple[int, dict]':
    """Crawl a Ctrip (you.ctrip.com) page and collect travel-log links.

    Args:
        url: A page under http(s)://you.ctrip.com.
        logger: Logger used for warnings.

    Returns:
        (status, data) tuple:
            -5 -> not a Ctrip URL / expected tab missing, data is {}
            11 -> a single travel-log detail page (parsing not implemented), data is {}
             1 -> a list page was crawled, data is the collected link list
    """
    if not url.startswith('https://you.ctrip.com') and not url.startswith('http://you.ctrip.com'):
        logger.warning(f'{url} is not a ctrip url, you should not use this function')
        return -5, {}

    # Normalize the scheme to https before navigating.
    url = url.replace("http://", "https://", 1)

    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        page = await browser.new_page()
        await page.goto(url)

        parsed_url = urlparse(url)
        path = parsed_url.path

        # Site home page: drill down into a specific city first.
        # Robustness fix: also accept a bare '/' path (trailing-slash home URL).
        if path in ('', '/'):
            # Option 1: read city links straight from the landing page; returns
            # either every city's link or one specific city's link.
            # flag, city_href = await get_link_by_city(page, '北京')

            # Option 2: simulate a click through the menus; requires both the
            # first-level menu name and the concrete city name.
            city_href = await get_link_for_city_by_click(page, '国内（含港澳台）', '上海')

            # Enter the city page.
            await page.goto(city_href)
            await page.wait_for_load_state('networkidle')

            # Find and click the travel-log tab.
            # Robustness fix: guard against a missing node instead of crashing.
            travel_dom = await page.query_selector("div.entry-item:has-text('游记')")
            if travel_dom is None:
                logger.warning(f'travel-log tab not found on {url}')
                return -5, {}
            await travel_dom.click()
            await page.wait_for_load_state('domcontentloaded')
            await page.wait_for_timeout(1000)

        if path.startswith('place') or path.startswith('/place'):
            # City page: click the travel-log tab (same guard as above).
            travel_dom = await page.query_selector("div.entry-item:has-text('游记')")
            if travel_dom is None:
                logger.warning(f'travel-log tab not found on {url}')
                return -5, {}
            await travel_dom.click()
            await page.wait_for_load_state('domcontentloaded')

        # Clicking the tab may open a new tab; continue on the newest page only.
        new_pages = browser.contexts[0].pages[-1]
        print(f"now pages = {browser.contexts[0].pages}")

        if path.startswith('/travels') or path.startswith('travels'):
            # A URL ending in digits + '.html' is a single travel-log detail page.
            if re.search(r'\d+\.html$', url):
                # TODO: parse the detail page itself (e.g. via page_parse).
                # Bug fix: return a (status, data) tuple like every other exit;
                # the old code returned a bare int 11 here, breaking callers
                # that unpack the result.
                return 11, {}

            # Travel-log list page: collect the link of every entry.
            links = await get_log_link(new_pages)

            return 1, links

        # Reached the travel-log list via the city tab: collect entry links.
        links = await get_log_link(new_pages)

        return 1, links


# Walk the paginated travel-log list and collect every entry's link.
async def get_log_link(new_pages):
    """Collect the href of every 'a.journal-item' across all list pages.

    Starts from page 1 and clicks "next page" until the button is gone or
    disabled. Each page's links are also appended to
    PROJECT_DIR/ctrip_travel_links.json as a crash-safe running log.

    Returns:
        list[str]: all collected hrefs, in page order.
    """
    project_dir = os.environ.get("PROJECT_DIR", "")

    # Always start from the first page so the crawl is deterministic.
    await go_to_page(new_pages, 1)

    result = []

    # Links on the current (first) page.
    links = await new_pages.locator('a.journal-item').evaluate_all(
        'nodes => nodes.map(node => node.getAttribute("href"))')
    result.extend(links)

    # Bug fix: the old code evaluated is_disabled() once before the loop and
    # never refreshed it, so on the last page it would keep clicking a disabled
    # button forever; it also crashed with AttributeError when the button was
    # absent. Re-check both conditions on every iteration.
    next_btn = await new_pages.query_selector('a.nextpage')
    while next_btn is not None and not await next_btn.is_disabled():
        await next_btn.click()
        await new_pages.wait_for_load_state('networkidle')
        await new_pages.wait_for_timeout(1000)

        current_btn = await new_pages.locator('div.pager_v1').locator('a.current').inner_text()
        print(f"current is page {current_btn}")

        now_links = await new_pages.locator('a.journal-item').evaluate_all(
            'nodes => nodes.map(node => node.getAttribute("href"))')
        result.extend(now_links)

        # Append this page's links so progress survives a crash.
        # NOTE(review): this writes quoted, comma-separated fragments, not
        # valid JSON, despite the .json extension — confirm downstream reader.
        formatted_urls = ",\n".join(f'"{url}"' for url in now_links) + ","
        with open(os.path.join(project_dir, 'ctrip_travel_links.json'), 'a', encoding='utf-8') as f:
            f.write("\n" + formatted_urls)

        next_btn = await new_pages.query_selector('a.nextpage')

    return result


async def go_to_page(page, target_page_number):
    """Navigate the list pager to `target_page_number`.

    Reads the pager's current-page marker and, while it differs from the
    target, fills the 'go to page' input and clicks the jump button.
    """
    while True:
        # Read the current page number from the pager.
        current_btn = await page.locator('div.pager_v1').locator('a.current').inner_text()
        current_page_number = int(current_btn)

        if current_page_number == target_page_number:
            print(f"已经到达第 {target_page_number} 页")
            break

        # Two possible approaches:

        # Approach 1: use the jump input + jump button.
        await page.fill("input#gopagetext", str(target_page_number))
        submit_btn = await page.query_selector('a.gopage')
        # Bug fix: the old code called .click() unconditionally, raising
        # AttributeError when the button was missing — and without the raise
        # this `while True` could never terminate. Bail out instead.
        if submit_btn is None:
            print("go-to-page button not found, stop paging")
            break
        await submit_btn.click()
        await page.wait_for_load_state('networkidle')

        # Approach 2 (unused): click "next page" repeatedly until the target.
        # next_button = await page.query_selector("a.nextpage")
        # if next_button:
        #     await next_button.click()
        #     await page.wait_for_load_state('networkidle')
        # else:
        #     print("无法找到下一页按钮，可能已经到达最后一页")
        #     break


# Reach a city's inner page, option 1: scrape every city link from the home
# page, then look the requested city up by name.
async def get_link_by_city(page, city='北京'):
    """Collect city links from the landing page.

    Returns (0, href) for the requested city, or (1, {title: href}) with every
    city's link when `city` is None.
    """
    city_links = {}
    try:
        for item in await page.locator('a.city-selector-tab-main-city-list-item').all():
            href = await item.get_attribute('href')
            name = await item.get_attribute('title')
            city_links[name] = href
    except Exception as e:
        print(f"error is {e}")

    # A concrete city name yields that city's link; None yields the full map.
    if city is None:
        return 1, city_links
    return 0, city_links[city]


# Reach a city's inner page, option 2: hover the first-level menu entry so the
# city list is revealed, then read the target city's link from it.
# Both the menu name and the concrete city name must be supplied.
async def get_link_for_city_by_click(page, level1_name='国内（含港澳台）', city_name='北京'):
    """Return the href of `city_name` under the `level1_name` menu.

    Returns '' when the first-level menu entry cannot be found.
    """
    # Locate the first-level menu entry (e.g. "domestic").
    menu = await page.query_selector(f"div.city-selector-tab-item-title:text('{level1_name}')")
    if menu is None:
        return ''

    # Hovering reveals the city list belonging to this menu entry.
    await menu.hover()
    await page.wait_for_load_state('networkidle')

    return await page.locator(f'a.city-selector-tab-main-city-list-item:text("{city_name}")').get_attribute('href')


# Parse a travel-log detail page with BeautifulSoup.
async def page_parse(info_html):
    """Extract title, abstract and body text from a travel-log page's HTML.

    Args:
        info_html: Raw HTML of the detail page.

    Returns:
        dict with keys "title" and "detail" (abstract + rendered body text).
    """
    soup = BeautifulSoup(info_html, 'html.parser')

    # Title lives in <div class="top-part"><h1>…</h1>.
    # Robustness fix: the old code chained .find(...).find('h1').get_text()
    # and raised AttributeError whenever either node was missing; fall back to
    # an empty title instead.
    title = ''
    top_part = soup.find('div', class_='top-part')
    if top_part is not None:
        h1_tag = top_part.find('h1')
        if h1_tag is not None:
            title = h1_tag.get_text()

    # Optional abstract block (same guard applied to the inner 'para' div).
    abstract = ''
    bk_des = soup.find('div', class_='bk-des')
    if bk_des is not None:
        para = bk_des.find('div', class_='para')
        if para is not None:
            abstract = para.get_text()

    # Main article content (process_element tolerates a missing div).
    div = soup.find('div', class_='bk-area')
    result = abstract + '\n' + process_element(div)

    return {
        "title": title,
        "detail": result
    }


# Recursively render a BeautifulSoup element (and all of its descendants) to text.
def process_element(element):
    """Return a text rendering of `element`: paragraph text, image srcs,
    link hrefs, section headings, table rows and list items, followed by the
    recursive rendering of every child tag.

    NOTE(review): tags handled by the branches below (p/img/a/...) are ALSO
    revisited by the recursion loop at the bottom, so e.g. an <img> inside a
    <p> appears to be emitted twice — confirm whether that is intended.
    """
    # A missing container (e.g. soup.find returned None) renders as empty text.
    if element is None:
        return ''

    result = ''

    if element.name == 'p':
        # If the paragraph carries an inline image, emit its src first.
        # NOTE(review): f"'\n'{...}" inserts literal single quotes around the
        # newline; a plain "\n" separator may have been intended — confirm.
        img_tag = element.find('img')
        if img_tag:
            result += f"'\n'{img_tag['src']}"
        result += f"'\n'{element.get_text()}"

    elif element.name == 'img':
        print(f"Image source: {element['src']}")
        result += f"'\n'{element['src']}"

    elif element.name == 'a':
        print(f"Link URL: {element['href']}")
        result += f"'\n'{element['href']}"

    elif element.name == 'div' and element.get('name') == 'bk-head-tit':
        # Section heading: emit the <h2> text on its own line.
        # NOTE(review): this tests the div's `name` ATTRIBUTE, not its class;
        # if 'bk-head-tit' is really a CSS class this branch never fires — verify.
        result += f"\n{element.find('h2').get_text()}"

    elif element.name == 'table':
        # Header cells first, then one row of <td> texts per remaining <tr>;
        # the whole table is rendered as the str() of a list of rows.
        headers = [th.get_text() for th in element.find_all('th')]
        rows = []
        rows.append(headers)
        for row in element.find_all('tr')[1:]:  # skip the header row
            cells = [cell.get_text() for cell in row.find_all('td')]
            rows.append(cells)
        result += str(rows)

    elif element.name == 'ul':
        # Render each list item as its flattened text plus any image srcs.
        for li in element.find_all('li'):
            # result += li.get_text(strip=True)
            # All text inside the <li>, space-separated.
            text = li.get_text(separator=" ", strip=True) if li else ''
            result += text

            # All image srcs inside the <li>; empty list when there are none.
            images = [img['src'] for img in li.find_all('img')] if li and li.find_all('img') else []
            result += str(images)

    # Recurse into every child tag of the current element.
    for child in element.children:
        if child.name:  # only tag elements; skip bare strings and the like
            result += process_element(child)

    return result
