#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
简单的网站可访问性测试爬虫
测试52it.cc论坛的基本信息
"""

import requests
from bs4 import BeautifulSoup
import json
import time
from datetime import datetime

def _extract_threads(soup):
    """Extract thread entries from a parsed Discuz forum page.

    A table row counts as a thread when it contains a link whose href
    includes 'thread-'. Forum and author are read from sibling links in
    the same row when present.

    Args:
        soup: BeautifulSoup document of the fetched forum page.

    Returns:
        list[dict]: one dict per thread row, with keys
        'title', 'forum', 'author', and 'url'.
    """
    results = []
    for row in soup.find_all('tr'):
        title_link = row.find('a', href=lambda x: x and 'thread-' in x)
        if not title_link:
            continue
        forum_link = row.find('a', href=lambda x: x and 'forum-' in x)
        author_link = row.find('a', href=lambda x: x and 'space-uid-' in x)
        results.append({
            'title': title_link.get_text(strip=True),
            'forum': forum_link.get_text(strip=True) if forum_link else "未知版块",
            'author': author_link.get_text(strip=True) if author_link else "未知作者",
            'url': title_link.get('href'),
        })
    return results


def test_website_accessibility():
    """Probe the 52it.cc new-thread page and report basic health info.

    Sends one GET request with browser-like headers, parses the HTML,
    counts thread rows, writes a summary to test_result.json, and prints
    a short report including the first five threads.

    Returns:
        bool: True when the request and parsing succeeded, False on
        timeout, connection, HTTP, or unexpected errors.
    """
    url = "https://52it.cc/forum.php?mod=guide&view=newthread"

    print("=== 网站可访问性测试 ===")
    print(f"目标URL: {url}")
    print(f"测试时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print("-" * 50)

    # Browser-like headers so the forum does not reject us as a bot.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }

    try:
        print("正在发送请求...")
        # perf_counter is monotonic, so the duration cannot go negative
        # if the wall clock is adjusted mid-request.
        start_time = time.perf_counter()

        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()

        response_time = time.perf_counter() - start_time

        print(f"✅ 请求成功!")
        print(f"状态码: {response.status_code}")
        print(f"响应时间: {response_time:.2f} 秒")
        print(f"内容长度: {len(response.text)} 字符")

        print("\n正在解析HTML内容...")
        soup = BeautifulSoup(response.text, 'html.parser')

        # Keep the page title in its own variable: the original code
        # reused one name for both the page title and each thread title,
        # so the saved 'page_title' ended up being the last thread's title.
        title_tag = soup.find('title')
        page_title = title_tag.get_text(strip=True) if title_tag else "无标题"
        print(f"页面标题: {page_title}")

        forum_data = _extract_threads(soup)
        thread_count = len(forum_data)

        print(f"✅ 找到 {thread_count} 个帖子")

        test_result = {
            'test_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'url': url,
            'status_code': response.status_code,
            'response_time': response_time,
            'content_length': len(response.text),
            'page_title': page_title,
            'thread_count': thread_count,
            'threads': forum_data[:10]  # keep only the first 10 threads
        }

        with open('test_result.json', 'w', encoding='utf-8') as f:
            json.dump(test_result, f, ensure_ascii=False, indent=2)

        print(f"\n✅ 测试结果已保存到 test_result.json")

        # Show the first five threads on stdout.
        print(f"\n=== 前5个帖子 ===")
        for i, thread in enumerate(forum_data[:5], 1):
            print(f"{i}. {thread['title']}")
            print(f"   版块: {thread['forum']}")
            print(f"   作者: {thread['author']}")
            print()

        return True

    except requests.exceptions.Timeout:
        print("❌ 请求超时")
        return False
    except requests.exceptions.ConnectionError:
        print("❌ 连接错误")
        return False
    except requests.exceptions.HTTPError as e:
        print(f"❌ HTTP错误: {e}")
        return False
    except Exception as e:
        # Catch-all boundary for a standalone diagnostic script; the
        # error is reported to the user rather than swallowed.
        print(f"❌ 未知错误: {e}")
        return False

def test_different_pages():
    """Probe several 52it.cc entry URLs and print each HTTP status code."""
    candidate_urls = (
        "https://52it.cc/forum.php?mod=guide&view=newthread",
        "https://52it.cc/forum.php",
        "https://52it.cc/",
    )

    print("\n=== 多页面测试 ===")

    for index, page_url in enumerate(candidate_urls, 1):
        print(f"\n测试页面 {index}: {page_url}")
        try:
            status_code = requests.get(page_url, timeout=5).status_code
            print(f"✅ 状态码: {status_code}")
        except Exception as error:
            # Best-effort probe: report the failure and move on.
            print(f"❌ 错误: {error}")
if __name__ == "__main__":
    # 运行可访问性测试
    success = test_website_accessibility()
    
    if success:
        print("\n✅ 网站可访问性测试通过!")
        # 运行多页面测试
        test_different_pages()
    else:
        print("\n❌ 网站可访问性测试失败!") 