#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
高级网站访问器 - 专门针对CSDN等有严格反爬虫机制的网站
使用requests库模拟真实浏览器行为
"""

import csv
import os
import random
import time
from datetime import datetime, timezone

import numpy as np
import requests

# ============= Configuration (edit parameters here) =================
CONFIG = {
    "URL": "https://blog.csdn.net/Zero_VPN/article/details/154238580?spm=1001.2014.3001.5501",
    "MAX_VISITS": 20,  # number of visits; a value <= 0 means unlimited (see main())
    "INTERVAL_MEAN": 120,  # mean wait between visits, in seconds
    "CSV_FILE": "visits_log_advanced.csv",  # log file name, created under ./logs
}
# ========================================================

# User-Agent strings for common devices; one is picked at random per visit.
USER_AGENTS = [
    # Desktop - Chrome
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",

    # Desktop - Firefox
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:121.0) Gecko/20100101 Firefox/121.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:121.0) Gecko/20100101 Firefox/121.0",

    # Desktop - Safari
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.1 Safari/605.1.15",

    # Mobile - iPhone
    "Mozilla/5.0 (iPhone; CPU iPhone OS 17_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.1 Mobile/15E148 Safari/604.1",

    # Mobile - Android
    "Mozilla/5.0 (Linux; Android 13; SM-S918B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.6099.144 Mobile Safari/537.36",
]

# Referer URLs; one is picked at random per visit.
REFERERS = [
    "https://www.google.com/search?q=rust+csv+processing",
    "https://www.baidu.com/s?wd=rust%E5%A4%84%E7%90%86csv%E6%95%B0%E6%8D%AE",
    "https://blog.csdn.net/nav/rust",
    "https://www.bing.com/search?q=rust+csv+tutorial",
    "https://stackoverflow.com/questions/tagged/rust",
    "https://github.com/search?q=rust+csv",
    "https://www.zhihu.com/search?q=rust+csv",
    "https://www.google.com/",
    "https://www.baidu.com/",
]

def get_log_file_path():
    """Return the absolute path of the CSV log file.

    The file lives in a ``logs`` directory next to this script; the
    directory is created on first use.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    logs_dir = os.path.join(script_dir, "logs")
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(logs_dir, exist_ok=True)
    return os.path.join(logs_dir, CONFIG["CSV_FILE"])

def now_iso():
    """Return the current UTC time as an ISO-8601 string with a 'Z' suffix."""
    # datetime.utcnow() is deprecated since Python 3.12. Take an aware UTC
    # timestamp, then drop the tzinfo so the output format stays identical
    # to the original ("YYYY-MM-DDTHH:MM:SS.ffffffZ", no "+00:00").
    return datetime.now(timezone.utc).replace(tzinfo=None).isoformat() + "Z"

def get_poisson_interval(mean):
    """Draw a randomized wait time, in seconds, around *mean*.

    Samples an exponential inter-arrival time (the distribution of gaps
    in a Poisson process), jitters it with a uniform factor to avoid a
    regular cadence, and clamps the result to a 60-second floor.
    """
    base_wait = np.random.exponential(mean)
    jitter = random.uniform(0.8, 1.5)
    # Never wait less than one minute.
    return max(60.0, base_wait * jitter)

def get_random_user_agent():
    """Pick one entry of USER_AGENTS uniformly at random."""
    index = random.randrange(len(USER_AGENTS))
    return USER_AGENTS[index]

def get_random_referer():
    """Pick one entry of REFERERS uniformly at random."""
    index = random.randrange(len(REFERERS))
    return REFERERS[index]

def write_csv_header(csvfile):
    """Write the CSV header row to *csvfile*, but only if the file is empty.

    Opening in append mode and checking ``tell()`` makes the call
    idempotent: re-running the script never duplicates the header.
    """
    columns = (
        "timestamp_utc",
        "visit_number",
        "url",
        "user_agent",
        "referer",
        "status_code",
        "response_time",
        "content_length",
        "status",
        "note",
    )
    with open(csvfile, "a", newline="", encoding="utf-8") as handle:
        if handle.tell() == 0:
            csv.writer(handle).writerow(columns)

def log_visit(visit_num, url, user_agent, referer, status_code, response_time, content_length, status, note=""):
    """Append one visit record to the CSV log file.

    Column order must match the header written by write_csv_header().
    """
    row = [
        now_iso(),
        visit_num,
        url,
        user_agent,
        referer,
        status_code,
        response_time,
        content_length,
        status,
        note,
    ]
    with open(get_log_file_path(), "a", newline="", encoding="utf-8") as handle:
        csv.writer(handle).writerow(row)

def create_session():
    """Return a requests.Session preloaded with browser-like default headers."""
    default_headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'cross-site',
        'Cache-Control': 'max-age=0',
    }
    session = requests.Session()
    session.headers.update(default_headers)
    return session

def visit_with_advanced_headers(url, user_agent, referer):
    """Perform one GET request to *url* and classify the outcome.

    Builds a fresh session via create_session(), sends the request with
    the given User-Agent and Referer, and inspects the body to decide
    whether the page loaded, a verification/captcha page was served, or
    an error occurred.

    Returns a dict with keys: ``success`` (bool), ``status_code``,
    ``response_time`` (seconds), ``content_length`` (bytes), ``status``
    and ``note`` (both human-readable strings, also written to the log).
    Errors are reported in the return value; this function never raises.
    """
    try:
        # Use the session as a context manager so its connection pool is
        # always released (the original code leaked the session object).
        with create_session() as session:
            # Per-request headers layered on top of the session defaults.
            headers = {
                'User-Agent': user_agent,
                'Referer': referer,
                'DNT': '1',  # Do Not Track
            }

            start_time = time.time()
            response = session.get(url, headers=headers, timeout=30, allow_redirects=True)
            response_time = round(time.time() - start_time, 2)
            # Accessing .content here also ensures the body is fully read
            # before the session is closed.
            content_length = len(response.content)

        if response.status_code == 200:
            content = response.text

            # Served a captcha/verification page instead of the article?
            if "验证" in content or "captcha" in content.lower() or "验证码" in content:
                return {
                    'success': False,
                    'status_code': response.status_code,
                    'response_time': response_time,
                    'content_length': content_length,
                    'status': '验证页面',
                    'note': '触发了验证机制'
                }

            # Does the body look like the expected CSDN page?
            if "CSDN" in content or "博客" in content:
                return {
                    'success': True,
                    'status_code': response.status_code,
                    'response_time': response_time,
                    'content_length': content_length,
                    'status': '访问成功',
                    'note': f'页面加载成功，内容长度: {content_length}'
                }

            # 200 OK but unrecognized content.
            return {
                'success': False,
                'status_code': response.status_code,
                'response_time': response_time,
                'content_length': content_length,
                'status': '内容异常',
                'note': '页面内容不符合预期'
            }

        # Non-200 response.
        return {
            'success': False,
            'status_code': response.status_code,
            'response_time': response_time,
            'content_length': content_length,
            'status': 'HTTP错误',
            'note': f'HTTP状态码: {response.status_code}'
        }

    except requests.exceptions.Timeout:
        return {
            'success': False,
            'status_code': 0,
            'response_time': 0,
            'content_length': 0,
            'status': '超时错误',
            'note': '请求超时'
        }
    except requests.exceptions.RequestException as e:
        return {
            'success': False,
            'status_code': 0,
            'response_time': 0,
            'content_length': 0,
            'status': '请求错误',
            'note': f'请求异常: {str(e)}'
        }
    except Exception as e:
        # Last-resort catch so a single failed visit never crashes the
        # main loop; the error text is surfaced in the log instead.
        return {
            'success': False,
            'status_code': 0,
            'response_time': 0,
            'content_length': 0,
            'status': '未知错误',
            'note': f'未知错误: {str(e)}'
        }

def main():
    """Entry point: run the visit loop and log every attempt to CSV."""
    print("=== 高级网站访问器启动 ===")
    print("⚠️  注意：CSDN等大型网站有严格的反爬虫机制")
    print("⚠️  频繁访问可能导致IP被限制或需要验证")
    print()

    # Initialize the log file (header is only written if the file is empty)
    log_file = get_log_file_path()
    write_csv_header(log_file)
    print(f"✅ 日志文件: {log_file}")

    visit_count = 0
    # A MAX_VISITS value <= 0 means "run until interrupted"
    max_visits = CONFIG["MAX_VISITS"] if CONFIG["MAX_VISITS"] > 0 else float('inf')

    print(f"🎯 目标URL: {CONFIG['URL']}")
    print(f"📊 计划访问次数: {max_visits if max_visits != float('inf') else '无限'}")
    print(f"⏰ 平均间隔时间: {CONFIG['INTERVAL_MEAN']}秒")
    print("🚀 开始访问...\n")

    try:
        while visit_count < max_visits:
            visit_count += 1
            user_agent = get_random_user_agent()
            referer = get_random_referer()

            print(f"第 {visit_count} 次访问 - {datetime.now().strftime('%H:%M:%S')}")
            print(f"🔧 User-Agent: {user_agent[:40]}...")
            print(f"🔗 Referer: {referer[:40]}...")

            # Perform the visit
            result = visit_with_advanced_headers(CONFIG['URL'], user_agent, referer)

            # Record the outcome in the CSV log
            log_visit(
                visit_count,
                CONFIG['URL'],
                user_agent,
                referer,
                result['status_code'],
                result['response_time'],
                result['content_length'],
                result['status'],
                result['note']
            )

            if result['success']:
                print(f"✅ {result['status']}")
                print(f"📊 状态码: {result['status_code']}, 耗时: {result['response_time']}秒")
                print(f"📄 内容长度: {result['content_length']}字节")
            else:
                print(f"❌ {result['status']}")
                print(f"💡 {result['note']}")

            # Unless this was the last visit, wait a randomized interval
            if visit_count < max_visits:
                interval = get_poisson_interval(CONFIG['INTERVAL_MEAN'])
                print(f"⏳ 等待 {interval:.1f} 秒后继续...\n")
                time.sleep(interval)
            else:
                print("\n🎉 所有访问完成！")

    except KeyboardInterrupt:
        print("\n⏹️ 用户中断程序")
    except Exception as e:
        print(f"\n💥 程序异常: {str(e)}")

    print(f"\n📊 总计访问次数: {visit_count}")
    print(f"📋 日志文件: {log_file}")
    print("\n💡 提示：如果遇到验证页面，请等待更长时间后再试")
# Run the visit loop only when executed as a script, not when imported.
if __name__ == "__main__":
    main()