import asyncio
import json
import logging
import os
import random
import time

import requests
from bs4 import BeautifulSoup
from dotenv import load_dotenv

# NOTE(review): the selenium / webdriver_manager imports appear unused in this
# module — candidates for removal once confirmed nothing else relies on them.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager

load_dotenv()

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("crawler.log"),
        logging.StreamHandler()
    ]
)

class Spider:
    """Asynchronous scraper for trending ("hot list") data from Weibo, Zhihu,
    Douyin and Toutiao.

    Each ``crawl_*`` coroutine returns ``{'platform': <name>, 'data': [...]}``
    on success or ``None`` on failure; ``run_all()`` collects the results and
    persists each platform's payload as JSON via ``save_data()``.
    """

    def __init__(self):
        # Pool of desktop user agents; one is picked at random per request so
        # the traffic looks less uniform.
        self.user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.5 Safari/605.1.15',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58'
        ]
        # Apify-style proxy config kept for interface compatibility; requests
        # itself only consumes HTTP_PROXY via _build_proxies().
        self.proxy_configuration = {
            'useApifyProxy': False,
            'proxyUrls': [os.getenv('HTTP_PROXY')] if os.getenv('HTTP_PROXY') else []
        }
        self.session = requests.Session()
        # Base delay in seconds between requests; jittered in _make_request.
        self.request_delay = float(os.getenv('REQUEST_DELAY', 2.0))  # default: 2s

    def _build_proxies(self):
        """Return a requests ``proxies`` mapping built from HTTP_PROXY, or None.

        Fix: the original mapped only the 'http' scheme, so every https://
        target URL (i.e. all of them) silently bypassed the proxy.
        """
        proxy = os.getenv('HTTP_PROXY')
        return {'http': proxy, 'https': proxy} if proxy else None

    async def crawl_weibo(self):
        """Scrape the Weibo hot-search board (s.weibo.com/top/summary).

        Returns:
            dict with 'platform' and 'data' keys, or None on failure.
        """
        url = 'https://s.weibo.com/top/summary'
        hot_items = []

        try:
            headers = {
                'User-Agent': random.choice(self.user_agents),
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
                'Referer': 'https://weibo.com/',
                # SECURITY fix: a live session cookie was hard-coded in source
                # (leaked credential). Supply it via the environment instead.
                'Cookie': os.getenv('WEIBO_COOKIE', '')
            }

            response = await self._make_request(url, headers, self._build_proxies())
            if not response:
                return None
            # _make_request already calls raise_for_status(); no need to repeat.

            soup = BeautifulSoup(response.text, 'html.parser')
            # Fix: the original wrapped this select() in a redundant doubled
            # ternary; select() already returns [] when nothing matches.
            for item in soup.select('tbody tr'):
                rank_elem = item.select_one('.td-01')
                title_elem = item.select_one('.td-02 a')
                hot_elem = item.select_one('.td-02 span')

                if not title_elem:
                    continue
                link_path = title_elem.get('href')
                hot_items.append({
                    'rank': rank_elem.get_text(strip=True) if rank_elem else '',
                    'title': title_elem.get_text(strip=True),
                    'hot': hot_elem.get_text(strip=True) if hot_elem else '',
                    'link': 'https://s.weibo.com' + link_path if link_path else ''
                })

            logging.info(f'微博热搜采集成功，获取{len(hot_items)}条数据')
            return {'platform': '微博', 'data': hot_items}
        except Exception as e:
            # Fix: dropped the duplicate print(); the StreamHandler configured
            # at module level already echoes this to the console.
            logging.error(f'微博热搜采集失败 - URL: {url}, 错误详情: {str(e)}')
            return None

    async def crawl_zhihu(self):
        """Scrape the Zhihu billboard with exponential-backoff retries.

        Returns:
            dict with 'platform' and 'data' keys, or None after all retries fail.
        """
        url = 'https://www.zhihu.com/billboard'
        headers = {
            'User-Agent': random.choice(self.user_agents),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Referer': 'https://www.zhihu.com/'
        }

        max_retries = 3
        for attempt in range(max_retries):
            try:
                response = await self._make_request(url, headers)
                if response is None:
                    # Fix: back off before retrying; the original `continue`
                    # skipped the sleep and retried immediately.
                    if attempt < max_retries - 1:
                        await asyncio.sleep(2 ** attempt)
                    continue

                soup = BeautifulSoup(response.text, 'html.parser')
                hot_items = []
                for i, item in enumerate(soup.select('.HotList-item')[:50]):
                    title_elem = item.select_one('.HotList-itemTitle')
                    hot_elem = item.select_one('.HotList-itemMetrics')
                    link_elem = item.select_one('a')
                    if title_elem:
                        hot_items.append({
                            'rank': str(i + 1),
                            'title': title_elem.get_text(strip=True),
                            'hot': hot_elem.get_text(strip=True) if hot_elem else '',
                            'link': f"https://www.zhihu.com{link_elem['href']}" if link_elem else ''
                        })
                return {'platform': '知乎', 'data': hot_items}

            except Exception as e:
                if attempt == max_retries - 1:
                    logging.error(f'知乎采集失败(尝试{attempt+1}次): {e}')
                    return None
                # Exponential backoff: 1s, 2s, ... (fix: asyncio was not
                # imported at module level in the original, so this line
                # raised NameError instead of sleeping).
                await asyncio.sleep(2 ** attempt)

        # Fix: the original fell off the loop silently when every attempt
        # yielded a None response; log it so the failure is visible.
        logging.error(f'知乎采集失败: {max_retries}次请求均未获得响应 - URL: {url}')
        return None

    async def crawl_douyin(self):
        """Scrape the Douyin hot-search list from its JSON web API.

        Returns:
            dict with 'platform' and 'data' keys, or None on failure.
        """
        url = 'https://www.douyin.com/aweme/v1/web/hot/search/list/'
        hot_items = []

        try:
            headers = {
                'User-Agent': random.choice(self.user_agents),
                'Accept': 'application/json, text/plain, */*',
                'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
                'Referer': 'https://www.douyin.com/discover',
                # Fix: os.getenv() may return None, which is not a valid
                # header value for requests; default to an empty string.
                'Cookie': os.getenv('DOUYIN_COOKIE', '')
            }

            # Consistency fix: route through _make_request so this crawler also
            # gets the rate-limit delay, timeout, and unified error handling.
            response = await self._make_request(url, headers, self._build_proxies())
            if not response:
                return None

            data = response.json()
            if data.get('status_code') == 0:
                # Fix: use .get() chains so a missing 'data'/'word_list' key
                # degrades to an empty list instead of raising KeyError.
                for i, item in enumerate(data.get('data', {}).get('word_list', [])):
                    word = item.get('word', '')
                    hot_items.append({
                        'rank': str(i + 1),
                        'title': word,
                        'hot': str(item.get('hot_value', '')) + ' 热度' if item.get('hot_value') else '',
                        'link': f"https://www.douyin.com/search/{word}" if word else ''
                    })

            logging.info(f'抖音热榜采集成功，获取{len(hot_items)}条数据')
            return {'platform': '抖音', 'data': hot_items}
        except Exception as e:
            logging.error(f'抖音热榜采集失败 - URL: {url}, 错误详情: {str(e)}')
            return None

    async def crawl_toutiao(self):
        """Scrape the Toutiao hot-board JSON endpoint (top 50 entries).

        Returns:
            dict with 'platform' and 'data' keys, or None on failure.
        """
        url = 'https://www.toutiao.com/hot-event/hot-board/?origin=toutiao_pc'
        hot_items = []

        try:
            headers = {
                'User-Agent': random.choice(self.user_agents),
                'Referer': 'https://www.toutiao.com/',
                'X-Requested-With': 'XMLHttpRequest'
            }

            response = await self._make_request(url, headers)
            # Fix: _make_request returns None on failure; the original called
            # .json() on it unconditionally, raising AttributeError.
            if not response:
                return None
            data = response.json()

            for i, item in enumerate(data.get('data', [])[:50]):
                hot_items.append({
                    'rank': str(i + 1),
                    'title': item.get('Title'),
                    'hot': f"{item.get('HotValue', '')} 热度",
                    'link': f"https://www.toutiao.com/trending/{item.get('ClusterIdStr', '')}"
                })

            return {'platform': '头条', 'data': hot_items}
        except Exception as e:
            logging.error(f'头条热榜采集失败: {e}')
            return None

    def save_data(self, data, platform):
        """Persist *data* as pretty-printed UTF-8 JSON.

        Args:
            data: JSON-serializable payload (platform result dict).
            platform: slug used in the output filename.
        Returns:
            The path of the written file: <STORAGE_PATH>/<platform>_data.json.
        """
        storage_path = os.getenv('STORAGE_PATH', './reports')
        os.makedirs(storage_path, exist_ok=True)

        filename = f"{storage_path}/{platform}_data.json"
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

        return filename

    async def run_all(self):
        """Run the Weibo, Zhihu and Douyin crawlers sequentially.

        Saves each successful result to disk and returns a dict keyed by
        platform slug. Failed crawls are simply omitted from the result.
        """
        weibo_data = await self.crawl_weibo()
        zhihu_data = await self.crawl_zhihu()
        douyin_data = await self.crawl_douyin()

        results = {}
        if weibo_data:
            self.save_data(weibo_data, 'weibo')
            results['weibo'] = weibo_data
        if zhihu_data:
            self.save_data(zhihu_data, 'zhihu')
            results['zhihu'] = zhihu_data
        if douyin_data:
            self.save_data(douyin_data, 'douyin')
            results['douyin'] = douyin_data

        return results

    async def _make_request(self, url, headers, proxies=None):
        """GET *url* through the shared session after a jittered delay.

        Returns:
            The Response on HTTP success, or None on any request error.
        """
        try:
            logging.info(f"正在请求: {url}")
            # Jitter the configured delay by ±20% so requests aren't evenly spaced.
            delay = self.request_delay * (0.8 + random.random() * 0.4)
            # Fix: time.sleep() inside a coroutine blocks the whole event
            # loop; asyncio.sleep yields control while waiting.
            await asyncio.sleep(delay)

            try:
                # Fix: add a timeout — requests waits forever by default.
                response = self.session.get(url, headers=headers, proxies=proxies, timeout=15)
            except requests.exceptions.SSLError:
                # SECURITY: last-resort retry without certificate verification.
                # Tolerable for public hot-list pages; never send credentials here.
                logging.warning(f"SSL验证失败，禁用验证后重试: {url}")
                response = self.session.get(url, headers=headers, proxies=proxies,
                                            timeout=15, verify=False)

            logging.info(f"请求完成: {url} - 状态码: {response.status_code}")
            response.raise_for_status()
            return response

        except requests.exceptions.RequestException as e:
            logging.error(f"请求失败: {url} - 错误: {str(e)}")
            return None

if __name__ == '__main__':
    async def main():
        """Run every crawler once and report which platforms returned data."""
        spider = Spider()
        print("开始执行爬虫任务...")
        results = await spider.run_all()
        print(f"爬取完成，获取数据: {', '.join(results.keys())}")

    try:
        # Fix: asyncio.get_event_loop() + manual run_until_complete/close is
        # the deprecated pre-3.7 pattern (DeprecationWarning since 3.10 when
        # no loop is running); asyncio.run() creates, runs and closes the
        # loop safely in one call.
        asyncio.run(main())
    except KeyboardInterrupt:
        print('\n程序已手动终止')