import asyncio
import hashlib
import json
import os
import re

import requests
from bs4 import BeautifulSoup, Comment, Doctype
from pyppeteer import launch

from config import TRANSLATIONS_DIR
from utils import normalize_url

def scan_static_page(page_url):
    """Fetch a static HTML page and extract its translatable text nodes.

    Downloads *page_url*, collects every visible text node (skipping the
    contents of <script>/<style> tags and non-content nodes such as HTML
    comments and the doctype), assigns each text a stable MD5-derived id,
    and persists the resulting dictionary as JSON under TRANSLATIONS_DIR.

    Parameters:
        page_url: absolute URL of the page to scan.

    Returns:
        The dictionary {"hash": <md5 of normalized url>, "type": "page",
        "texts": [{"id": ..., "text": ...}, ...]}.

    Raises:
        requests.HTTPError: on a non-2xx response.
        requests.RequestException: on network failure/timeout.
    """
    response = requests.get(page_url, timeout=10)
    response.raise_for_status()

    soup = BeautifulSoup(response.text, 'html.parser')
    text_nodes = []

    for text_node in soup.find_all(string=True):
        # find_all(string=True) also yields HTML comments and the doctype
        # (NavigableString subclasses); they are not user-visible text and
        # must not be collected for translation.
        if isinstance(text_node, (Comment, Doctype)):
            continue
        text = text_node.strip()
        if not text or text_node.parent.name in ['script', 'style']:
            continue
        # Hash the text itself so identical strings share one id.
        node_id = f"text_{hashlib.md5(text.encode()).hexdigest()}"
        text_nodes.append({"id": node_id, "text": text})

    page_hash = hashlib.md5(normalize_url(page_url).encode()).hexdigest()
    dictionary = {"hash": page_hash, "type": "page", "texts": text_nodes}

    os.makedirs(TRANSLATIONS_DIR, exist_ok=True)
    output_file = os.path.join(TRANSLATIONS_DIR, f"pre_trans_{page_hash}.json")
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(dictionary, f, ensure_ascii=False, indent=2)

    return dictionary

async def scan_Ajax_content_async(page_url):
    """Render *page_url* in headless Chromium and harvest translatable text
    from the AJAX endpoints the page calls while loading.

    Captures xhr/fetch requests issued by the page, re-fetches each unique
    endpoint, extracts text from JSON or HTML responses, and writes one
    dictionary file per endpoint into TRANSLATIONS_DIR.

    Parameters:
        page_url: absolute URL of the page to render.

    Returns:
        A list of {"hash": ..., "type": "ajax", "texts": [...]} dictionaries,
        one per endpoint that yielded any text.
    """
    browser = await launch(headless=True, args=['--no-sandbox', '--disable-dev-shm-usage'])
    ajax_requests = []

    def capture_request(request):
        # Must be a plain (sync) callback: pyppeteer's pyee-based event
        # emitter does not await coroutine handlers, so an `async def`
        # here would never actually run.
        # Include 'fetch' too — modern pages issue AJAX via the Fetch API,
        # which Chromium reports as a distinct resource type from 'xhr'.
        if request.resourceType in ('xhr', 'fetch'):
            ajax_requests.append(request.url)

    dictionaries = []
    try:
        page = await browser.newPage()
        page.on('request', capture_request)

        await page.goto(page_url, {'waitUntil': 'networkidle2', 'timeout': 10000})
        # Grace period so late-firing AJAX calls are still captured.
        await asyncio.sleep(5)

        # Ensure the output directory exists before any per-endpoint write.
        os.makedirs(TRANSLATIONS_DIR, exist_ok=True)

        for url in set(ajax_requests):
            try:
                response = requests.get(url, timeout=10)
                if response.status_code != 200:
                    continue

                content_type = response.headers.get('content-type', '')
                text_nodes = []

                if 'json' in content_type.lower():
                    text_nodes = extract_json_texts(response.json())
                elif 'html' in content_type.lower():
                    soup = BeautifulSoup(response.text, 'html.parser')
                    for text_node in soup.find_all(string=True):
                        # Skip HTML comments / doctype — not visible text.
                        if isinstance(text_node, (Comment, Doctype)):
                            continue
                        text = text_node.strip()
                        if not text or text_node.parent.name in ['script', 'style']:
                            continue
                        if not is_dynamic_text(text):
                            node_id = f"text_{hashlib.md5(text.encode()).hexdigest()}"
                            text_nodes.append({"id": node_id, "text": text})

                if text_nodes:
                    ajax_hash = hashlib.md5(normalize_url(url).encode()).hexdigest()
                    dictionary = {"hash": ajax_hash, "type": "ajax", "texts": text_nodes}
                    output_file = os.path.join(TRANSLATIONS_DIR, f"pre_trans_{ajax_hash}.json")
                    with open(output_file, 'w', encoding='utf-8') as f:
                        json.dump(dictionary, f, ensure_ascii=False, indent=2)
                    dictionaries.append(dictionary)
            except Exception as e:
                # Best-effort: one bad endpoint must not abort the whole scan.
                print(f"Error processing AJAX URL {url}: {e}")
    finally:
        # Always release Chromium, even when goto() times out or raises —
        # otherwise headless browser processes leak.
        await browser.close()

    return dictionaries

def scan_Ajax_content(page_url):
    """Blocking wrapper: run the async AJAX scan of *page_url* to completion
    and return its list of per-endpoint dictionaries."""
    coroutine = scan_Ajax_content_async(page_url)
    return asyncio.run(coroutine)

def extract_json_texts(data, text_nodes=None):
    """Recursively collect translatable strings from a JSON-decoded value.

    Walks dicts (values only), lists, and strings; numbers, booleans and
    None are ignored. Strings judged dynamic by is_dynamic_text are skipped.
    Each kept string is paired with a stable MD5-derived id.

    Parameters:
        data: any JSON-decoded value (dict, list, str, number, bool, None).
        text_nodes: accumulator used internally by the recursion; callers
            should leave it as None (a fresh list is then created, avoiding
            the shared-mutable-default pitfall).

    Returns:
        The list of {"id": ..., "text": ...} entries, in traversal order.
    """
    if text_nodes is None:
        text_nodes = []

    if isinstance(data, dict):
        for value in data.values():
            extract_json_texts(value, text_nodes)
    elif isinstance(data, list):
        # Iterate directly; the previous list(data) copied every list at
        # every recursion level for no benefit (the list is not mutated).
        for item in data:
            extract_json_texts(item, text_nodes)
    elif isinstance(data, str):
        text = data.strip()
        if text and not is_dynamic_text(text):
            node_id = f"text_{hashlib.md5(text.encode()).hexdigest()}"
            text_nodes.append({"id": node_id, "text": text})

    return text_nodes

# Precompiled once at import time: strings consisting purely of digits.
_PURE_NUMBER_RE = re.compile(r'^\d+$')

def is_dynamic_text(text):
    """Return True if *text* looks like dynamic/non-translatable content.

    A string is considered dynamic when it is purely numeric (a counter or
    id), contains '{' (a template placeholder), or contains '%' (a
    percentage or printf-style placeholder).

    The regex is compiled at module level and `re` imported at the top of
    the file, instead of the previous per-call `import re` + recompile.
    """
    return bool(_PURE_NUMBER_RE.match(text)) or '{' in text or '%' in text
