import asyncio
import aiofiles
import aiohttp
import time
import os
import random
from datetime import datetime
from playwright.async_api import async_playwright

# ==================== Configuration parameters ====================
DEFAULT_PROXY_LIST = ["127.0.0.1:7897", "127.0.0.1:10808"]  # "host:port" HTTP proxies; one browser context is created per entry
DEFAULT_MAX_CONCURRENT = 40  # max simultaneous crawl tasks (asyncio.Semaphore size)
DEFAULT_POOL_SIZE = 42  # number of pre-created browser pages shared by the tasks

# ==================== Page pool management ====================
class PagePool:
    """Pool of pre-created Playwright pages, each bound to one proxy context.

    Pages are handed out and returned through an asyncio.Queue so that at
    most `size` pages exist and concurrent crawl tasks never create pages
    on the fly.
    """

    def __init__(self, contexts_list, proxy_list, size=5):
        self.contexts_list = contexts_list
        self.proxy_list = proxy_list
        self.size = size
        self.pages = asyncio.Queue()  # idle pages ready to be borrowed
        self.all_pages = []           # every created page, for close_all()
        self.initialized = False

    async def initialize(self):
        """Create the pages, spreading them evenly over the proxy contexts.

        Fix vs. original: integer division dropped the remainder — and
        produced ZERO pages whenever size < number of contexts, which made
        get_page() block forever. The remainder is now distributed over the
        first contexts so exactly `size` pages are created.
        """
        if self.initialized:
            return

        base, extra = divmod(self.size, len(self.contexts_list))

        for i, (context, proxy) in enumerate(zip(self.contexts_list, self.proxy_list)):
            # First `extra` contexts get one additional page.
            count = base + (1 if i < extra else 0)
            for _ in range(count):
                try:
                    page = await context.new_page()
                    # Tag the page so workers can log which proxy it uses.
                    page._proxy_info = proxy
                    self.all_pages.append(page)
                    await self.pages.put(page)
                except Exception as e:
                    print(f"  ❌ 创建页面失败: {e}")

        self.initialized = True

    async def get_page(self):
        """Borrow an idle page, lazily initializing the pool on first use."""
        if not self.initialized:
            await self.initialize()
        return await self.pages.get()

    async def return_page(self, page):
        """Return a page to the pool after resetting it to a blank document.

        Best effort: even if the reset navigation fails, the page is put
        back so the pool never shrinks and borrowers never starve.
        """
        try:
            await page.goto("about:blank", timeout=5000)
        except Exception:
            pass  # page is returned regardless; the next goto() navigates away
        await self.pages.put(page)

    async def close_all(self):
        """Close every page ever created by this pool (errors ignored)."""
        for page in self.all_pages:
            try:
                await page.close()
            except Exception:  # was a bare except; never mask KeyboardInterrupt
                pass

# ==================== Browser creation ====================
async def create_browser_with_proxies(proxy_list, headless=False):
    """Launch Firefox and create one browser context per proxy.

    Each context routes through its own HTTP proxy, carries a fixed
    desktop user agent / header set, and is pre-loaded with the session
    cookies for www.cqut.edu.cn.

    Args:
        proxy_list: list of "host:port" strings; one context per entry.
        headless: run the browser without a window (default False keeps
            the original hard-coded behavior).

    Returns:
        (playwright, browser, contexts_list) — the caller must close all
        three when done.
    """
    print("🚀 启动浏览器...")

    playwright = await async_playwright().start()
    browser = await playwright.firefox.launch(headless=headless)

    contexts_list = []

    for proxy_address in proxy_list:
        print(f"🌐 为代理 {proxy_address} 创建上下文...")

        host, port = proxy_address.split(':')
        proxy_config = {'server': f"http://{host}:{port}"}

        # NOTE(review): the UA string advertises Chrome/Edge while the engine
        # is actually Firefox — kept as-is since the target site may key on it.
        context = await browser.new_context(
            proxy=proxy_config,
            user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36 Edg/136.0.0.0",
            extra_http_headers={
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
                "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
                "Accept-Encoding": "gzip, deflate, br",
                "Cache-Control": "max-age=0",
                "Upgrade-Insecure-Requests": "1",
                "Connection": "keep-alive"
            }
        )

        # Pre-seed the session cookies (anti-bot tokens + JSESSIONID).
        # NOTE(review): these are hard-coded session values and will expire;
        # refresh them when crawls start failing with error pages.
        await context.add_cookies([
            {
                "name": "ZXniXLfxkjSbO",
                "value": "60aM3.ZDMjYI1AIq.BtTYlzfWBDe3cNlxvDNJ57mnpwE1.2zMIDD9104UznvloEHM41BuSGeWJbVMdAB954rLYfq",
                "domain": "www.cqut.edu.cn",
                "path": "/"
            },
            {
                "name": "JSESSIONID", 
                "value": "80A9CA6E463A29132574FC5E838D61B7",
                "domain": "www.cqut.edu.cn",
                "path": "/"
            },
            {
                "name": "ZXniXLfxkjSbP",
                "value": "0Yn8ZHPknKFBRRl2QZqHllOl5RQj2rzNkZb5TyqN3T5lypDaSIWx4LY0FLVkpMaor9hHx1zN6HvphJStQtZhhVuUBmVJIrRKsQo.elUImf6ax2QrVIK1fgaG4jQW7sNyDsMPedPoUZoEHvN11dV1woBMjifuVIcz4JAoqVPcJOlhgxWuEX4485ba6xoS0ErIcwBZUPPQk_P8CFp0KBvFTHIcYQ_CbTYhauhYKcDsgkAOYfGda8gssEkHbP.3YbhpMbKfhvMS_AAi.Ji5ooxCa1gtONLwhzo4P1Jb9yFPCOPR_a3.B7POESdfwgMFgQ0JsdwROz0nBFAK46RFM2zV_YrkcFfiDI2q3ooOvGoGsNXLcfuDZvyjSX.pQkJkvqhYuCqQGx_6Ymni70LlGMba0_xpuvb1fR_wg1AGJsmTK6VA",
                "domain": "www.cqut.edu.cn", 
                "path": "/"
            }
        ])

        contexts_list.append(context)
        print(f"  ✅ 代理 {proxy_address} 上下文创建完成")

    return playwright, browser, contexts_list

# ==================== Content saving ====================
async def save_content(content, url, lock, filename="cqut_content.html"):
    """Append crawled HTML to the shared output file under an asyncio lock.

    Each page is prefixed with an HTML comment recording its URL and save
    timestamp so the combined file can be split apart later.

    Args:
        content: the page HTML to append.
        url: source URL, recorded in the separator comment.
        lock: asyncio.Lock serializing writers to the shared file.
        filename: output path (default keeps the original hard-coded name).
    """
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    separator = f"\n\n<!-- URL: {url} | 时间: {timestamp} -->\n\n"

    async with lock:
        try:
            async with aiofiles.open(filename, "a", encoding="utf-8") as f:
                await f.write(separator + content + "\n\n")
        except OSError as e:
            # Was a silent bare `except: pass` — keep best-effort semantics
            # but at least surface disk/permission failures.
            print(f"⚠️  保存失败 {url}: {e}")

# ==================== Page crawling ====================
async def crawl_url(page_pool, url, semaphore, save_lock, progress_counter, total_urls):
    """Crawl a single URL on a pooled page, with up to 3 attempts.

    Updates the shared `progress_counter` dict ('started', 'completed',
    'success' — guarded by its 'lock') and appends successful page HTML
    via save_content(). Documents shorter than ~100 chars are treated as
    failed (likely error pages) and retried.

    Returns:
        True when the content was saved, False otherwise.
    """
    async with semaphore:
        page = None
        try:
            # Borrow a page; it is already bound to a proxy context.
            page = await page_pool.get_page()
            proxy_info = getattr(page, '_proxy_info', '未知')

            async with progress_counter['lock']:
                progress_counter['started'] += 1
                current_started = progress_counter['started']
                print(f"🚀 [{current_started}/{total_urls}] 开始爬取: {url} (代理: {proxy_info})")

            for retry in range(3):
                try:
                    response = await page.goto(url, wait_until="networkidle", timeout=30000)

                    if response and response.status >= 400:
                        print(f"⚠️  HTTP状态码: {response.status} - {url}")
                        if retry < 2:
                            await asyncio.sleep(2)
                            continue

                    # Give late-running JS a moment before snapshotting.
                    await asyncio.sleep(2)
                    content = await page.content()

                    if len(content) > 100:
                        await save_content(content, url, save_lock)

                        async with progress_counter['lock']:
                            progress_counter['completed'] += 1
                            progress_counter['success'] += 1
                            current_completed = progress_counter['completed']
                            current_success = progress_counter['success']
                            print(f"✅ [{current_completed}/{total_urls}] 成功: {url} (成功率: {current_success}/{current_completed})")
                        return True

                except Exception as e:
                    print(f"⚠️  重试 {retry + 1}/3: {url} - {str(e)[:50]}...")
                    if retry < 2:
                        # FIX: this reset navigation can itself raise; before,
                        # the exception escaped the retry loop and skipped the
                        # failure bookkeeping below, so 'completed' could
                        # never reach total_urls.
                        try:
                            await page.goto("about:blank")
                        except Exception:
                            pass
                        await asyncio.sleep(2)
                        continue
                    break

            # All attempts exhausted — record the failure.
            async with progress_counter['lock']:
                progress_counter['completed'] += 1
                current_completed = progress_counter['completed']
                current_success = progress_counter['success']
                print(f"❌ [{current_completed}/{total_urls}] 失败: {url} (成功率: {current_success}/{current_completed})")
            return False

        finally:
            if page:
                await page_pool.return_page(page)

# ==================== Main crawl driver ====================
async def run_crawler(urls, max_concurrent=3, pool_size=5, proxy_list=None):
    """Crawl all URLs concurrently and print summary statistics.

    Args:
        urls: list of URLs to crawl.
        max_concurrent: number of simultaneously active crawl tasks.
        pool_size: number of pre-created browser pages shared by the tasks.
        proxy_list: "host:port" proxies; falls back to DEFAULT_PROXY_LIST.
    """
    if not proxy_list:
        proxy_list = DEFAULT_PROXY_LIST

    total_urls = len(urls)
    if total_urls == 0:
        # Guard: the summary below divides by total_urls.
        print("❌ 没有可爬取的URL")
        return

    print(f"🎯 开始爬取 {total_urls} 个网页")
    print(f"⚙️  并发数: {max_concurrent}, 页面池: {pool_size}")
    print(f"🌐 使用代理: {', '.join(proxy_list)}")
    print(f"{'='*50}")

    start_time = time.time()

    # Shared progress state, mutated by every crawl task under 'lock'.
    progress_counter = {
        'started': 0,
        'completed': 0,
        'success': 0,
        'lock': asyncio.Lock()
    }

    playwright, browser, contexts_list = await create_browser_with_proxies(proxy_list)

    # Created outside `try` so the finally block can always reference it.
    page_pool = PagePool(contexts_list, proxy_list, pool_size)

    try:
        await page_pool.initialize()

        semaphore = asyncio.Semaphore(max_concurrent)
        save_lock = asyncio.Lock()

        print("📥 开始并发执行...")
        print(f"{'='*50}")
        tasks = [crawl_url(page_pool, url, semaphore, save_lock, progress_counter, total_urls) for url in urls]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        success_count = sum(1 for result in results if result is True)
        elapsed_time = time.time() - start_time

        print(f"\n{'='*50}")
        print(f"🎉 爬取完成！")
        print(f"📈 总耗时: {elapsed_time:.2f}秒")
        print(f"📊 总计: {total_urls}个URL")
        print(f"✅ 成功: {success_count}个 ({success_count/total_urls*100:.1f}%)")
        print(f"❌ 失败: {total_urls - success_count}个 ({(total_urls-success_count)/total_urls*100:.1f}%)")
        print(f"⚡ 平均速度: {total_urls/elapsed_time:.2f} URL/秒")

    finally:
        # FIX: pool pages used to be closed only on the success path; all
        # teardown now runs even when the crawl raises or is cancelled.
        await page_pool.close_all()
        for context in contexts_list:
            await context.close()
        await browser.close()
        await playwright.stop()

# ==================== URL loading ====================
def load_urls():
    """Read URLs from href.txt (one per line), keeping only http(s) lines.

    Blank lines and lines not starting with 'http' are skipped.

    Returns:
        list[str]: the URLs, or an empty list when the file is missing
        or unreadable.
    """
    if not os.path.exists("href.txt"):
        print("❌ href.txt 文件不存在")
        return []

    try:
        with open("href.txt", 'r', encoding='utf-8') as f:
            urls = [line.strip() for line in f if line.strip() and line.strip().startswith('http')]
    except (OSError, UnicodeDecodeError):
        # Narrowed from a bare `except`, which also hid real bugs
        # (and would even swallow KeyboardInterrupt).
        print("❌ 读取文件失败")
        return []

    print(f"📖 从文件读取 {len(urls)} 个URL")
    return urls

def get_default_urls():
    """Fallback URL list used when no href.txt is available."""
    xxgk_sections = ("xxjj", "xxzc", "xxyg", "lrld", "xrld", "bswh")
    urls = [f"https://www.cqut.edu.cn/xxgk/{section}.htm" for section in xxgk_sections]
    urls.append("https://www.cqut.edu.cn/jgsz.htm")
    urls.append("https://zs.cqut.edu.cn/")
    urls.append("https://zs.cqut.edu.cn/xxzy/clgcxy.htm")
    return urls

# ==================== Program entry point ====================
def main():
    """Entry point: load the URL list, then run the async crawler."""
    print("🚀 启动多代理并发网页爬虫")

    url_source = 'file'  # 'file' or 'default'

    # Pick the URL source.
    if url_source == 'file':
        urls = load_urls()
    else:
        urls = get_default_urls()
        print(f"📋 使用默认URL列表: {len(urls)}个")

    if not urls:
        print("❌ 没有可爬取的URL")
        return

    # Run the crawl with the module-level defaults.
    try:
        asyncio.run(run_crawler(urls, DEFAULT_MAX_CONCURRENT, DEFAULT_POOL_SIZE, DEFAULT_PROXY_LIST))
    except KeyboardInterrupt:
        print("\n⚠️  用户中断程序")
    except Exception as e:
        print(f"❌ 程序执行出错: {e}")

if __name__ == "__main__":
    main()
