# app.py
# -*- coding: utf-8 -*-

import re
import html
import json
import urllib.parse
import random
from bs4 import BeautifulSoup
from flask import Flask, request, jsonify, render_template, send_from_directory
from playwright.sync_api import sync_playwright
import os

app = Flask(__name__)

# Ensure the 'static' directory exists; get_structured_data() writes
# serp.html / ppa.html into it.
os.makedirs('static', exist_ok=True)

# Pool of desktop browser User-Agent strings; one is picked at random per
# request so the scraper's traffic looks less uniform.
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.1 Safari/605.1.15',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36',
    # Add more User-Agent strings here as needed
]

################################################################################
# 辅助函数
################################################################################

def get_random_headers():
    """Build a browser-like HTTP header dict with a random User-Agent.

    Returns:
        dict: common request headers suitable for passing to
        Playwright's ``set_extra_http_headers``.
    """
    chosen_agent = random.choice(USER_AGENTS)
    return {
        "User-Agent": chosen_agent,
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
    }

def extract_and_concatenate_div_content(input_str):
    """Extract the text of every ``<div>`` in *input_str*, space-joined.

    Nested <div>s are each visited by ``find_all``, so inner text may
    appear more than once in the result (same as before).

    Args:
        input_str: an HTML fragment/string.

    Returns:
        str: the stripped, space-joined text of all <div> elements.
    """
    soup = BeautifulSoup(input_str, 'html.parser')
    # Join once instead of repeated ``+=`` — avoids quadratic string
    # concatenation on pages with many divs; the trailing .strip()
    # reproduces the original's leading/trailing-space removal.
    return " ".join(
        div.get_text(strip=True) for div in soup.find_all('div')
    ).strip()

def add_spaces_to_text(text):
    """Insert spaces at letter/uppercase and letter/digit boundaries.

    Examples:
        "HelloWorld" -> "Hello World"
        "Item42"     -> "Item 42"
        "42Items"    -> "42 Items"
    """
    boundary_patterns = (
        r'([a-zA-Z])([A-Z])',   # letter followed by an uppercase letter
        r'([a-zA-Z])([0-9])',   # letter followed by a digit
        r'([0-9])([a-zA-Z])',   # digit followed by a letter
    )
    for pattern in boundary_patterns:
        text = re.sub(pattern, r'\1 \2', text)
    return text

def extract_content_between(input_str, start_delimiter, end_delimiter):
    """Return the substring between *start_delimiter* and *end_delimiter*.

    The end delimiter is searched for AFTER the start delimiter, so an
    occurrence of *end_delimiter* before the start marker is ignored.
    (The previous version searched from position 0, which could produce
    an empty/reversed slice when the end marker appeared first.)

    Returns:
        str: the content between the delimiters, or '' when either
        delimiter cannot be found.
    """
    start_pos = input_str.find(start_delimiter)
    if start_pos == -1:
        return ''
    start_pos += len(start_delimiter)
    end_pos = input_str.find(end_delimiter, start_pos)
    if end_pos == -1:
        return ''
    return input_str[start_pos:end_pos]

def parse_questions_and_answers(questions, javascript_code):
    """Extract People-Also-Ask URL/title/answer triples from inline JS.

    Args:
        questions: list of question strings; paired with answers by the
            index of each href match — assumes both appear in the same
            order in the page (TODO confirm against Google's markup).
        javascript_code: concatenated, \\xNN-decoded contents of the
            ``window.jsl.dh`` scripts.

    Returns:
        list[dict]: entries with 'question', 'url' (HTML-escaped),
        'title', and 'answer'; matches lacking a title or answer are
        dropped.
    """
    results = []

    # Decode \uXXXX escapes: the 4 hex digits form one UTF-16-BE code
    # unit, so fromhex + decode yields the corresponding character.
    def decode_unicode_escapes(match):
        return bytes.fromhex(match.group(1)).decode('utf-16-be')
    javascript_code = re.sub(r'\\u([0-9a-fA-F]{4})', decode_unicode_escapes, javascript_code)

    # Every href="/url?..." target embedded in the script text.
    url_matches = re.findall(r'href="/url\?([^"]+)"', javascript_code)
    for index, url_params in enumerate(url_matches):
        # Take everything after the last "url=" as the destination URL.
        parts = url_params.split("url=")
        if not parts:
            continue
        the_url = parts[-1]
        if not the_url or 'google.com' in the_url:
            continue

        # Find the FIRST occurrence of this URL in the script text.
        # NOTE(review): duplicate URLs all resolve to the same context
        # window; later duplicates reuse the first match's surroundings.
        context_pos = javascript_code.find(the_url)
        if context_pos == -1:
            continue

        # Grab a window of text around the URL in which to look for the
        # accompanying title/answer markup.
        context_start = max(0, context_pos - 1000)
        context_end = context_pos + 2000
        context = javascript_code[context_start:context_end]

        # Title: first div carrying Google's result-title CSS classes.
        title_match = re.search(
            r'<div class="BNeawe vvjwJb AP7Wnd[^"]*"[^>]*>(.*?)</div>',
            context, re.DOTALL
        )
        if title_match:
            # Strip any residual inner tags from the captured HTML.
            title = re.sub(r'<.*?>', '', title_match.group(1)).strip()
        else:
            title = ''

        # Answer: first plain "BNeawe" div within the same window.
        answer_match = re.search(
            r'<div class="BNeawe">(.*?)</div>',
            context, re.DOTALL
        )
        if answer_match:
            answer = re.sub(r'<.*?>', '', answer_match.group(1)).strip()
        else:
            answer = ''

        # Keep only complete entries; pair with the question at the same
        # index when one exists, else an empty question string.
        if title and answer:
            results.append({
                "question": questions[index] if index < len(questions) else "",
                "url": html.escape(the_url),
                "title": title,
                "answer": answer
            })

    return results

def extract_url(link):
    """Recover the real destination from a Google '/url?url=...' redirect.

    Returns *link* unchanged when no 'url=' query parameter is present.
    """
    found = re.search(r'url=([^&]+)', link)
    return urllib.parse.unquote(found.group(1)) if found else link

################################################################################
# 拆分后的核心解析函数
################################################################################

def parse_serp_data(html_text):
    """Parse a Google SERP page into structured pieces.

    Returns:
        dict with keys:
          'results':          list of {title, link, url, snippet}
          'questions':        related-question strings
          'related_searches': related-search strings
          'main_text':        all text under <div id="main">, space-joined
    """
    soup = BeautifulSoup(html_text, 'html.parser')

    # 1) Organic results live inside div.Gx5Zad containers.
    results = []
    for container in soup.find_all('div', class_='Gx5Zad'):
        try:
            heading = container.find('h3', class_='zBAuLc')
            anchor = container.find('a')
            snippet_div = container.find('div', class_=['BNeawe', 's3v9rd'])
            if not (heading and anchor and snippet_div):
                continue
            raw_link = anchor.get('href', '')
            results.append({
                'title': heading.get_text(strip=True),
                'link': raw_link,
                'url': extract_url(raw_link),
                'snippet': snippet_div.get_text(strip=True),
            })
        except Exception as e:
            print(f"[parse_serp_data] Error: {e}")
            continue

    # 2) "People also ask" style question texts.
    questions = [q.get_text(strip=True) for q in soup.find_all('div', class_='Lt3Tzc')]

    # 3) Related-search suggestions (nested BNeawe divs inside gGQDvd).
    related_searches = [
        item.get_text(strip=True)
        for wrapper in soup.find_all('div', class_='gGQDvd')
        for item in wrapper.find_all('div', class_='BNeawe')
    ]

    # 4) Full text of the main content node, when present.
    main_node = soup.find('div', id='main')
    main_text = main_node.get_text(separator=" ") if main_node else ""

    return {
        'results': results,
        'questions': questions,
        'related_searches': related_searches,
        'main_text': main_text,
    }

def parse_video_data(html_text):
    """Parse a Google video-search (tbm=vid) results page.

    Returns:
        list[dict]: each entry has 'title', 'google_link' (the raw
        redirect href), 'real_link' (decoded target URL), 'snippet',
        and 'video_id' (YouTube id when the target URL matches a known
        YouTube shape, otherwise None).
    """
    soup = BeautifulSoup(html_text, 'html.parser')
    parsed = []

    # Each result sits in a div.Gx5Zad container.
    for container in soup.select("div.Gx5Zad"):
        # The redirect link lives inside div.egMi0.kCrYT; containers
        # without one are not video results.
        link_tag = container.select_one("div.egMi0.kCrYT a[href]")
        if link_tag is None:
            continue
        redirect_href = link_tag["href"].strip()
        target_url = extract_url(redirect_href)

        # The title div's class set identifies genuine video entries;
        # skip containers that lack it.
        heading = container.select_one("h3 .BNeawe.vvjwJb.AP7Wnd.UwRFLe")
        if heading is None:
            continue

        # Optional short description.
        desc_tag = container.select_one(".BNeawe.s3v9rd.AP7Wnd")

        # Pull a YouTube video id out of the target URL when present.
        id_match = re.search(r'(?:v=|/watch\?v=|/embed/|/vi/)([\w-]+)', target_url)

        parsed.append({
            'title': heading.get_text(strip=True),
            'google_link': redirect_href,
            'real_link': target_url,
            'snippet': desc_tag.get_text(strip=True) if desc_tag else "",
            'video_id': id_match.group(1) if id_match else None,
        })
    return parsed

def parse_paa_data(html_text, questions):
    """Extract People-Also-Ask (PAA) data from the page's inline scripts.

    Args:
        html_text: raw SERP HTML.
        questions: question strings from parse_serp_data, used to label
            the structured answers.

    Returns:
        dict: {'paa_text': concatenated text of all <div>s found in the
               decoded script payloads,
               'paa_data': structured Q&A dicts from
               parse_questions_and_answers}
    """
    soup = BeautifulSoup(html_text, 'html.parser')

    # 1) Collect every <script> whose content mentions window.jsl.dh.
    #    bs4 deprecated the `text=` keyword in favor of `string=`
    #    (same matching behavior, no DeprecationWarning).
    scripts = soup.find_all('script', string=re.compile(r'window\.jsl\.dh'))
    people_also_ask_text = ""
    for sc in scripts:
        content = sc.string
        if not content:
            continue
        # Decode \xNN escapes back to their characters.
        content_decoded = re.sub(
            r'\\x([0-9A-Fa-f]{2})',
            lambda m: chr(int(m.group(1), 16)),
            content
        )
        people_also_ask_text += content_decoded

    # 2) Flatten all <div> text inside the decoded payload -> paa_text.
    paa_text = extract_and_concatenate_div_content(people_also_ask_text)

    # 3) Structured Q&A extraction.
    paa_data = parse_questions_and_answers(questions, people_also_ask_text)

    return {
        'paa_text': paa_text,
        'paa_data': paa_data
    }

################################################################################
# 提供给外部调用的主函数
################################################################################

def get_structured_data(keywords):
    """Fetch a Google SERP for *keywords* and return structured data.

    Args:
        keywords: search query string.

    Returns:
        dict: {
            "search_results": [...],
            "related_questions": [...],
            "related_searches": [...],
            "paa": [...],
            "page_text": "...",
        }

    Side effects: writes the raw SERP HTML to static/serp.html and the
    PAA text to static/ppa.html.

    NOTE: Google's anti-bot measures change frequently; this scraper is
    best-effort and may stop working at any time.
    """
    with sync_playwright() as p:
        # Allow an env var to point at a specific Chromium binary; fall
        # back to Playwright's bundled browser when unset. (The previous
        # hard-coded per-machine path broke on every other machine.)
        # HEADLESS=1 runs without a visible window; default matches the
        # original headless=False behavior.
        browser = p.chromium.launch(
            executable_path=os.environ.get("CHROMIUM_EXECUTABLE_PATH") or None,
            headless=os.environ.get("HEADLESS", "0") == "1",
        )
        try:
            context = browser.new_context(
                user_agent=random.choice(USER_AGENTS),
                viewport={"width": 1920, "height": 1080}
            )
            page = context.new_page()

            # Playwright handles headers differently from requests-style
            # libraries; extra headers go through set_extra_http_headers.
            page.set_extra_http_headers(get_random_headers())

            # Build the search URL.
            encoded_keywords = urllib.parse.quote_plus(keywords)
            url = f"https://www.google.com/search?q={encoded_keywords}&num=20&hl=en"
            print(f"[INFO] Fetching URL: {url}")

            page.goto(url)
            print("[INFO] Waiting for page to load")
            page.wait_for_load_state("networkidle")

            html_content = page.content()
            print("[INFO] Page fetched successfully")
        finally:
            # Always release the browser, even when navigation fails
            # (previously a goto/timeout error leaked the process).
            browser.close()

    # Parsing is pure string work -- no need to hold the browser open.
    serp_result = parse_serp_data(html_content)
    paa_result = parse_paa_data(html_content, serp_result["questions"])

    # Slice the main text between the CSS marker and the pager link,
    # then re-space the run-together words.
    partial_text = extract_content_between(
        serp_result["main_text"],
        '{word-wrap:break-word}',
        'Next >'
    )
    partial_text = add_spaces_to_text(partial_text)

    # Final page_text = sliced SERP text + PAA text.
    page_text = f"{partial_text} {paa_result['paa_text']}"

    # Persist raw artifacts for inspection via /static.
    with open("static/serp.html", "w", encoding="utf-8") as f:
        f.write(html_content)
    with open("static/ppa.html", "w", encoding="utf-8") as f:
        f.write(paa_result['paa_text'])

    return {
        'search_results': serp_result['results'],
        'related_questions': serp_result['questions'],
        'related_searches': serp_result['related_searches'],
        'paa': paa_result['paa_data'],
        'page_text': page_text
    }

################################################################################
# Flask Routes
################################################################################

@app.route('/')
def index():
    # Serve the front-end page (templates/index.html).
    return render_template('index.html')

@app.route('/search', methods=['GET'])
def search():
    """GET /search?keywords=... -> JSON of scraped Google SERP data."""
    query = request.args.get('keywords', '')
    if not query:
        return jsonify({'error': 'No keyword provided'}), 400
    try:
        return jsonify(get_structured_data(query))
    except Exception as e:
        # Surface scraper failures to the client as a 500 with the
        # error text.
        return jsonify({'error': str(e)}), 500

################################################################################
# Run the app
################################################################################

# Run the Flask development server on port 9000 (debug mode: auto-reload
# and in-browser tracebacks -- not suitable for production).
if __name__ == "__main__":
    app.run(debug=True, port=9000)
