import requests
from bs4 import BeautifulSoup
import concurrent.futures
import time
import logging
from urllib.parse import urlparse
import json
import os

# Logging configuration: timestamped INFO-level messages
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)

# Weibo (mobile site) user profile URLs to scrape
weibo_urls = [
    'https://m.weibo.cn/u/5069029750',
    'https://m.weibo.cn/u/6875784070',
    'https://m.weibo.cn/u/1987241375',
    'https://m.weibo.cn/u/2048344461',
    'https://m.weibo.cn/u/5748988380',
    'https://m.weibo.cn/u/2794284831',
    'https://m.weibo.cn/u/3194506490',
    'https://m.weibo.cn/u/7494417148',
    'https://m.weibo.cn/u/5744916923'
]

# Request headers mimicking a desktop Chrome browser to reduce the chance
# of trivial bot blocking by the server
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
    'Accept-Encoding': 'gzip, deflate',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1',
}

def fetch_weibo_user_info(url):
    """Fetch a Weibo mobile profile page and scrape basic account details.

    Args:
        url: Profile URL of the form https://m.weibo.cn/u/<id>.

    Returns:
        On success, a dict that may contain 'name', 'description', 'id',
        'url', 'weibo_count', 'following' and 'followers' (all strings).
        On failure, {'url': url, 'error': <message>}.
    """
    try:
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')
        info = {}

        # Nickname comes from the page <title>, minus the "的微博" suffix.
        title = soup.find('title')
        if title:
            info['name'] = title.text.replace('的微博', '').strip()

        # Bio text, when present, lives in <meta name="description">.
        meta = soup.find('meta', attrs={'name': 'description'})
        if meta and 'content' in meta.attrs:
            info['description'] = meta['content']

        # Numeric user id is the last path segment of the URL.
        info['id'] = urlparse(url).path.split('/')[-1]
        info['url'] = url

        # Post / following / follower counts appear as three divs, in that
        # order; only recorded when all three are found.
        count_boxes = soup.find_all('div', class_='m-font-box')
        if len(count_boxes) >= 3:
            info['weibo_count'], info['following'], info['followers'] = (
                box.text.strip() for box in count_boxes[:3]
            )

        logger.info(f"成功获取用户信息: {info.get('name', '未知')}")
        return info

    except requests.RequestException as e:
        logger.error(f"请求失败 {url}: {str(e)}")
        return {'url': url, 'error': str(e)}
    except Exception as e:
        logger.error(f"解析失败 {url}: {str(e)}")
        return {'url': url, 'error': str(e)}

def process_weibo_url(url):
    """Scrape one profile URL, logging the start and end of the task.

    Thin wrapper around fetch_weibo_user_info used as the thread-pool task.
    """
    logger.info(f"开始处理: {url}")
    info = fetch_weibo_user_info(url)
    logger.info(f"完成处理: {url}")
    return info

def main():
    """Scrape every configured profile concurrently, persist and summarize.

    Fans the URLs out over a small thread pool (I/O-bound work), collects
    per-URL result dicts, writes them to weibo_users_results.json and prints
    a human-readable summary of the successful fetches.
    """
    MAX_WORKERS = 5  # cap on simultaneous HTTP requests

    logger.info(f"开始抓取 {len(weibo_urls)} 个微博用户页面")
    start_time = time.time()

    results = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        # Map each in-flight future back to its URL so failures are attributable.
        future_to_url = {executor.submit(process_weibo_url, u): u for u in weibo_urls}

        for future in concurrent.futures.as_completed(future_to_url):
            url = future_to_url[future]
            try:
                results[url] = future.result()
            except Exception as e:
                # Defensive: worker already converts errors to dicts, but a
                # failure raised from the future itself is still recorded.
                logger.error(f"处理 {url} 时发生异常: {str(e)}")
                results[url] = {'url': url, 'error': str(e)}

    elapsed = time.time() - start_time
    logger.info(f"所有任务完成，耗时 {elapsed:.2f} 秒")

    # Tally outcomes: any result carrying an 'error' key counts as a failure.
    failures = [r for r in results.values() if 'error' in r]
    success_count = len(results) - len(failures)
    logger.info(f"成功获取 {success_count} 个用户信息，失败 {len(results) - success_count} 个")

    # Persist raw results (UTF-8, human-readable JSON).
    with open('weibo_users_results.json', 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=2)

    logger.info("结果已保存到 weibo_users_results.json")

    # Console summary of the successful fetches only.
    print("\n=== 微博用户信息摘要 ===")
    for url, info in results.items():
        if 'error' in info:
            continue
        print(f"用户: {info.get('name', '未知')} (ID: {info.get('id')})")
        print(f"  微博: {info.get('weibo_count', '未知')} | 关注: {info.get('following', '未知')} | 粉丝: {info.get('followers', '未知')}")
        print(f"  描述: {info.get('description', '无')[:50]}...")
        print()

# Script entry point: run the scraper only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
