#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
调试页面内容
查看网站返回的实际HTML内容
"""

import random
import re
import time
import traceback

import requests
import urllib3
from bs4 import BeautifulSoup

# 禁用SSL警告
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

def _report_response(response):
    """Print status code, headers, body length and detected encoding."""
    print(f"状态码: {response.status_code}")
    print(f"响应头: {dict(response.headers)}")
    print(f"内容长度: {len(response.text)} 字符")
    print(f"编码: {response.encoding}")


def _save_and_preview(html):
    """Save the raw HTML to disk and print the first/last 1000 characters."""
    with open('debug_page_raw.html', 'w', encoding='utf-8') as f:
        f.write(html)
    print("原始HTML已保存到 debug_page_raw.html")

    print("\n=== 页面内容前1000字符 ===")
    print(html[:1000])
    print("\n=== 页面内容后1000字符 ===")
    print(html[-1000:])


def _summarize_dom(soup):
    """Print a structural summary: title, script/div counts, classed
    elements, links and images found in the parsed document."""
    title = soup.find('title')
    if title:
        print(f"\n页面标题: {title.text.strip()}")

    scripts = soup.find_all('script')
    print(f"\n找到 {len(scripts)} 个脚本标签")

    divs = soup.find_all('div')
    print(f"找到 {len(divs)} 个div标签")

    elements_with_class = soup.find_all(attrs={'class': True})
    print(f"找到 {len(elements_with_class)} 个带class的元素")

    print("\n前10个带class的元素:")
    for i, elem in enumerate(elements_with_class[:10], 1):
        classes = ' '.join(elem.get('class', []))
        print(f"  {i}. <{elem.name}> class='{classes}'")

    links = soup.find_all('a', href=True)
    print(f"\n找到 {len(links)} 个链接")

    if links:
        print("所有链接:")
        for i, link in enumerate(links, 1):
            href = link['href']
            text = link.get_text(strip=True)[:50]
            print(f"  {i}. {text} -> {href}")

    images = soup.find_all('img')
    print(f"\n找到 {len(images)} 个图片")

    if images:
        print("所有图片:")
        for i, img in enumerate(images, 1):
            src = img.get('src', 'No src')
            alt = img.get('alt', 'No alt')
            print(f"  {i}. {alt[:30]} -> {src}")


def _detect_dynamic_content(html):
    """Warn about JavaScript redirects / AJAX hints and grep the raw HTML
    for likely API endpoint URLs (substring heuristics only)."""
    if 'window.location' in html or 'document.location' in html:
        print("\n⚠️  检测到JavaScript重定向")

    if 'ajax' in html.lower() or 'fetch' in html.lower():
        print("\n⚠️  检测到AJAX或动态加载")

    # Quoted absolute URLs containing /api/, /ajax/ or /json/ path segments.
    api_patterns = [
        r'["\']https?://[^"\']*/api/[^"\'\ ]*',
        r'["\']https?://[^"\']*/ajax/[^"\'\ ]*',
        r'["\']https?://[^"\']*/json/[^"\'\ ]*'
    ]

    print("\n=== 查找API端点 ===")
    for pattern in api_patterns:
        matches = re.findall(pattern, html)
        if matches:
            print(f"找到API端点: {matches}")


def debug_page_content():
    """Fetch a single tag page and dump debugging information.

    Downloads the hard-coded URL with browser-like headers (TLS
    verification disabled), saves the raw HTML to
    ``debug_page_raw.html``, and prints a response summary, a DOM
    structure overview, and heuristics for JavaScript redirects,
    AJAX usage and API endpoints.  All output goes to stdout;
    exceptions are caught, reported and swallowed (debug tool).
    """
    url = "https://jjcos.com/tag/EbKijoqVm671/"

    session = requests.Session()

    # Browser-like headers to reduce the chance of being blocked.
    session.headers.update({
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'none',
        'Sec-Fetch-User': '?1',
        'Cache-Control': 'max-age=0',
        'DNT': '1'
    })

    try:
        print(f"正在访问: {url}")

        # Small courtesy delay before hitting the server.
        time.sleep(2)

        # verify=False: site uses a certificate we deliberately ignore
        # here (warnings are suppressed at module level).
        response = session.get(url, timeout=30, verify=False)
        response.raise_for_status()

        _report_response(response)
        _save_and_preview(response.text)

        soup = BeautifulSoup(response.text, 'html.parser')
        _summarize_dom(soup)
        _detect_dynamic_content(response.text)

        print("\n调试完成！")

    except Exception as e:
        # Debug tool: report everything, never crash the caller.
        print(f"错误: {str(e)}")
        traceback.print_exc()

# Run the debug routine only when executed as a script (not on import).
if __name__ == "__main__":
    debug_page_content()