#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
极速网站访问器
使用 curl 命令访问网站，速度最快

依赖：系统已安装 curl
"""

import csv
import os
import random
import shutil
import subprocess
import time
from datetime import datetime, timezone

import numpy as np

# ============= Configuration (edit parameters here) =====================
CONFIG = {
    "URL": "https://blog.csdn.net/Zero_VPN/article/details/154238580?spm=1001.2014.3001.5501",
    "MAX_VISITS": 0,  # 0 means loop forever
    "INTERVAL_MEAN": 30,  # mean seconds between visits; longer to avoid bot detection
    "CSV_FILE": "visits_log_curl.csv",
}
# ========================================================

# Pool of User-Agent strings covering common desktop and mobile browsers;
# one is picked at random for each visit (see get_random_user_agent).
USER_AGENTS = [
    # Desktop - Chrome
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    
    # Desktop - Firefox
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:121.0) Gecko/20100101 Firefox/121.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:121.0) Gecko/20100101 Firefox/121.0",
    
    # Desktop - Safari
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.1 Safari/605.1.15",
    
    # Mobile - iPhone
    "Mozilla/5.0 (iPhone; CPU iPhone OS 17_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.1 Mobile/15E148 Safari/604.1",
    
    # Mobile - Android
    "Mozilla/5.0 (Linux; Android 13; SM-S918B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.6099.144 Mobile Safari/537.36",
]

def check_curl():
    """Return True if a ``curl`` executable is available on PATH.

    Uses :func:`shutil.which` instead of spawning ``which`` in a
    subprocess: it is faster, cannot raise (so no bare ``except`` is
    needed), and works on platforms such as Windows where there is no
    ``which`` command.
    """
    return shutil.which("curl") is not None

def get_log_file_path():
    """Return the absolute path of the CSV log file.

    The log lives in a ``logs/`` directory next to this script.  The
    directory is created with ``exist_ok=True``, which avoids the
    check-then-create race of an ``os.path.exists`` guard (another
    process creating the directory in between would have raised
    FileExistsError).
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    logs_dir = os.path.join(script_dir, "logs")
    os.makedirs(logs_dir, exist_ok=True)
    return os.path.join(logs_dir, CONFIG["CSV_FILE"])

def now_iso():
    """Return the current UTC time as an ISO-8601 string ending in 'Z'.

    ``datetime.utcnow()`` is deprecated since Python 3.12; build an
    aware UTC datetime instead and strip the offset so the output keeps
    the original ``YYYY-MM-DDTHH:MM:SS.ffffffZ`` shape used in the CSV.
    """
    return datetime.now(timezone.utc).replace(tzinfo=None).isoformat() + "Z"

def get_poisson_interval(mean):
    """Draw the wait time (seconds) before the next visit.

    Samples an exponential inter-arrival time with the given mean (the
    waiting time of a Poisson process), multiplies in a uniform jitter
    factor so intervals never look mechanical, and clamps the result to
    a 10-second floor.
    """
    base_wait = np.random.exponential(mean)
    jitter = random.uniform(0.8, 1.5)
    jittered = base_wait * jitter
    if jittered < 10.0:
        return 10.0  # never hammer the site faster than every 10 s
    return jittered

def get_random_user_agent():
    """Pick one User-Agent string uniformly at random from the pool."""
    return USER_AGENTS[random.randrange(len(USER_AGENTS))]

def write_csv_header(csvfile):
    """Write the CSV column header to *csvfile* if the file is empty.

    The file is opened in append mode so existing logs are never
    truncated; the header row is emitted only when the file position is
    0, i.e. the file was just created or contains nothing yet.
    """
    columns = (
        "timestamp_utc",
        "visit_number",
        "url",
        "user_agent",
        "status_code",
        "response_time",
        "status",
        "note",
    )
    with open(csvfile, "a", newline="", encoding="utf-8") as fh:
        if fh.tell() == 0:
            csv.writer(fh).writerow(columns)

def log_visit(visit_num, url, user_agent, status_code, response_time, status, note=""):
    """Append one visit record as a row to the CSV log file."""
    row = [
        now_iso(),
        visit_num,
        url,
        user_agent,
        status_code,
        response_time,
        status,
        note,
    ]
    # Append mode: the header was written by write_csv_header earlier.
    with open(get_log_file_path(), "a", newline="", encoding="utf-8") as fh:
        csv.writer(fh).writerow(row)

def visit_with_curl(url, user_agent):
    """Fetch *url* once via the system curl binary, mimicking a browser.

    The response body is discarded (``-o /dev/null``); curl's ``-w``
    format prints ``"<http_code> <time_total>"`` to stdout, which is
    parsed below.

    Returns a dict with keys:
        success (bool): True for HTTP status in [200, 400)
        status_code (int): HTTP status, or 0 on curl failure/timeout
        response_time (float): wall-clock seconds for the whole request
        note (str): human-readable outcome message (Chinese)
    """
    try:
        # Random Referer so the hit looks like a click-through from a search
        # page.  NOTE(review): the queries are all rust-themed, which may not
        # match the target article -- confirm this is intentional.
        referers = [
            "https://www.google.com/search?q=rust+csv+processing",
            "https://www.baidu.com/s?wd=rust%E5%A4%84%E7%90%86csv%E6%95%B0%E6%8D%AE",
            "https://blog.csdn.net/nav/rust",
            "https://www.bing.com/search?q=rust+csv+tutorial",
            "https://stackoverflow.com/questions/tagged/rust"
        ]
        referer = random.choice(referers)
        
        # -s: silent; -o /dev/null: drop the body; -w: report code + timing.
        cmd = [
            "curl", "-s", "-o", "/dev/null", "-w", "%{http_code} %{time_total}",
            "-H", f"User-Agent: {user_agent}",
            "-H", "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "-H", "Accept-Language: zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
            "-H", "Accept-Encoding: gzip, deflate, br",
            "-H", f"Referer: {referer}",
            "-H", "DNT: 1",  # Do Not Track
            "-H", "Upgrade-Insecure-Requests: 1",
            "-H", "Sec-Fetch-Dest: document",
            "-H", "Sec-Fetch-Mode: navigate",
            "-H", "Sec-Fetch-Site: cross-site",
            "-H", "Cache-Control: max-age=0",
            "--connect-timeout", "10",
            "--max-time", "30",  # hard 30-second cap on the whole transfer
            "--retry", "0",
            "--compressed",
            url
        ]
        
        start_time = time.time()
        # timeout=35 > --max-time 30, so curl normally ends itself first.
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=35)
        response_time = round(time.time() - start_time, 2)
        
        if result.returncode == 0:
            # Expected stdout: "<status_code> <time_total>" from the -w format.
            output_parts = result.stdout.strip().split()
            if len(output_parts) >= 2:
                status_code = int(output_parts[0])
                curl_time = float(output_parts[1])  # curl's own timing (currently unused)
                
                # Treat 2xx and 3xx responses as success.
                if 200 <= status_code < 400:
                    return {
                        'success': True,
                        'status_code': status_code,
                        'response_time': response_time,
                        'note': f'访问成功，状态码: {status_code}, 耗时: {response_time}秒'
                    }
                else:
                    return {
                        'success': False,
                        'status_code': status_code,
                        'response_time': response_time,
                        'note': f'HTTP错误，状态码: {status_code}'
                    }
        
        # curl exited non-zero, or its -w output could not be parsed.
        return {
            'success': False,
            'status_code': 0,
            'response_time': response_time,
            'note': f'curl执行失败，返回码: {result.returncode}'
        }
            
    except subprocess.TimeoutExpired:
        # subprocess-level timeout (curl itself hung past 35 s).
        return {
            'success': False,
            'status_code': 0,
            'response_time': 30,
            'note': '访问超时（30秒）'
        }
    except Exception as e:
        # Broad catch keeps the visit loop alive; the error text lands in the log.
        return {
            'success': False,
            'status_code': 0,
            'response_time': 0,
            'note': f'访问异常: {str(e)}'
        }

def main():
    """Entry point: visit CONFIG['URL'] repeatedly with randomized
    intervals and User-Agents, logging every attempt to the CSV file.

    Runs MAX_VISITS times, or forever when MAX_VISITS is 0."""
    print("🚀 开始极速访问任务")
    print(f"📍 目标 URL: {CONFIG['URL']}")
    print(f"🔢 最大访问次数: {CONFIG['MAX_VISITS']}")
    print(f"⏱️  平均间隔: {CONFIG['INTERVAL_MEAN']} 秒")
    
    # Abort early if curl is not installed.
    if not check_curl():
        print("❌ 错误：系统中未找到 curl")
        print("💡 请安装 curl: sudo apt install curl")
        return
    
    print("✅ curl 已就绪")
    print(f"💾 日志文件: {get_log_file_path()}")
    print("-" * 60)
    
    # Make sure the log file exists and has its header row.
    write_csv_header(get_log_file_path())
    
    visit_count = 0
    # MAX_VISITS == 0 means "no limit": compare against +inf below.
    max_visits = CONFIG["MAX_VISITS"] if CONFIG["MAX_VISITS"] > 0 else float('inf')
    
    while visit_count < max_visits:
        visit_count += 1
        
        print(f"[访问 #{visit_count}]")
        
        # Rotate the User-Agent on every request.
        user_agent = get_random_user_agent()
        print(f"  🌐 UA: {user_agent[:60]}...")
        
        # Perform the actual page fetch.
        result = visit_with_curl(CONFIG["URL"], user_agent)
        
        if result['success']:
            status = "✅ 成功"
            print(f"  {status}")
            print(f"  📊 {result['note']}")
        else:
            status = "❌ 失败"
            print(f"  {status}")
            print(f"  💥 {result['note']}")
        
        # Append this visit to the CSV log.
        log_visit(
            visit_count, 
            CONFIG["URL"], 
            user_agent, 
            result['status_code'], 
            result['response_time'], 
            "成功" if result['success'] else "失败", 
            result['note']
        )
        
        # Unless this was the final visit, sleep for a randomized interval.
        if visit_count < max_visits:
            wait_time = get_poisson_interval(CONFIG["INTERVAL_MEAN"])
            print(f"  ⏳ 等待 {wait_time:.1f} 秒后进行下一次访问...")
            time.sleep(wait_time)
        
        print()
    
    print("✅ 已完成所有访问")
    print(f"📊 总访问次数: {visit_count}")
    print(f"💾 日志已保存到: {get_log_file_path()}")

if __name__ == "__main__":
    main()