#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
吾爱我家IT论坛爬虫
爬取论坛最新发表的数据
"""

import requests
from bs4 import BeautifulSoup
import json
import time
import csv
from datetime import datetime
import re
from urllib.parse import urljoin, urlparse
import logging

# Configure logging: INFO level, timestamped format, mirrored to both a
# UTF-8 log file ("scraper.log") and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('scraper.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)

class ForumScraper:
    """Scraper for the 52it.cc (Discuz-style) IT forum.

    Downloads thread-listing pages, extracts per-thread metadata with
    BeautifulSoup, and provides helpers to persist the results to
    JSON/CSV and to summarize them into a report dict.
    """

    def __init__(self):
        # Base site URL used to resolve relative links found in pages.
        self.base_url = "https://52it.cc"
        # One shared session keeps cookies and connection pooling across requests.
        self.session = requests.Session()
        # Browser-like headers reduce the chance of being served an anti-bot page.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        })

    def get_page(self, url):
        """Fetch *url* and return its decoded HTML text, or None on any request error."""
        try:
            response = self.session.get(url, timeout=10)
            response.raise_for_status()
            # Force UTF-8 so Chinese text decodes correctly regardless of the
            # charset the server advertises.
            response.encoding = 'utf-8'
            return response.text
        except requests.RequestException as e:
            logging.error(f"请求失败: {url}, 错误: {e}")
            return None

    def parse_forum_data(self, html_content):
        """Parse a thread-listing page into a list of thread-metadata dicts.

        Each dict contains thread id/title/url, forum name, author,
        reply/view counts, last-reply info and the crawl timestamp.
        Malformed rows are logged and skipped; returns [] if the whole
        page fails to parse.
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        forum_data = []

        try:
            # Discuz renders each thread as a <tr> whose class contains "thread".
            thread_list = soup.find_all('tr', class_=lambda x: x and 'thread' in x)

            for thread in thread_list:
                try:
                    # The title link carries the canonical thread URL ("thread-<id>").
                    title_element = thread.find('a', href=re.compile(r'thread-\d+'))
                    if not title_element:
                        continue

                    title = title_element.get_text(strip=True)
                    thread_url = urljoin(self.base_url, title_element.get('href'))
                    id_match = re.search(r'thread-(\d+)', thread_url)
                    thread_id = id_match.group(1) if id_match else ''

                    # Board (sub-forum) the thread belongs to.
                    forum_element = thread.find('a', href=re.compile(r'forum-\d+'))
                    forum_name = forum_element.get_text(strip=True) if forum_element else ''

                    # Thread author (profile links look like "space-uid-<id>").
                    author_element = thread.find('a', href=re.compile(r'space-uid-\d+'))
                    author = author_element.get_text(strip=True) if author_element else ''

                    # Reply/view counters.
                    # NOTE(review): this assumes the two numbers appear joined by
                    # '_' somewhere in the row text — verify against live markup;
                    # if the separator differs, both fall back to '0'.
                    stats_text = thread.get_text()
                    reply_match = re.search(r'(\d+)_(\d+)', stats_text)
                    replies = reply_match.group(1) if reply_match else '0'
                    views = reply_match.group(2) if reply_match else '0'

                    # Author of the most recent reply.
                    last_reply_element = thread.find('a', href=re.compile(r'space-username'))
                    last_reply_author = last_reply_element.get_text(strip=True) if last_reply_element else ''

                    # Timestamp shown on the "jump to last post" link.
                    time_element = thread.find('a', href=re.compile(r'goto=lastpost'))
                    last_reply_time = time_element.get_text(strip=True) if time_element else ''

                    forum_data.append({
                        'thread_id': thread_id,
                        'title': title,
                        'url': thread_url,
                        'forum': forum_name,
                        'author': author,
                        'replies': replies,
                        'views': views,
                        'last_reply_author': last_reply_author,
                        'last_reply_time': last_reply_time,
                        'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    })

                except Exception as e:
                    # Skip the broken row but keep the rest of the page.
                    logging.error(f"解析帖子数据失败: {e}")
                    continue

            return forum_data

        except Exception as e:
            logging.error(f"解析页面失败: {e}")
            return []

    def get_forum_categories(self, html_content):
        """Extract forum categories (id, name, url) from a page's navigation links.

        Returns [] on parse failure. Note: the same category may appear more
        than once if the page links it multiple times.
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        categories = []

        try:
            # Category links look like "forum-<id>" anywhere on the page.
            nav_links = soup.find_all('a', href=re.compile(r'forum-\d+'))

            for link in nav_links:
                category_name = link.get_text(strip=True)
                category_url = urljoin(self.base_url, link.get('href'))
                cat_match = re.search(r'forum-(\d+)', category_url)
                category_id = cat_match.group(1) if cat_match else ''

                # Skip icon-only links (no text) and malformed hrefs.
                if category_name and category_id:
                    categories.append({
                        'id': category_id,
                        'name': category_name,
                        'url': category_url
                    })

            return categories

        except Exception as e:
            logging.error(f"解析分类信息失败: {e}")
            return []

    def save_to_json(self, data, filename):
        """Serialize *data* to *filename* as pretty-printed UTF-8 JSON."""
        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
            # Bug fix: the message previously printed a literal "(unknown)"
            # instead of the target filename.
            logging.info(f"数据已保存到 {filename}")
        except Exception as e:
            logging.error(f"保存JSON文件失败: {e}")

    def save_to_csv(self, data, filename):
        """Write a list of uniform dicts to *filename* as UTF-8 CSV.

        Column order follows the keys of the first record; does nothing
        (beyond a warning) when *data* is empty.
        """
        try:
            if not data:
                logging.warning("没有数据可保存")
                return

            with open(filename, 'w', newline='', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=data[0].keys())
                writer.writeheader()
                writer.writerows(data)
            # Bug fix: the message previously printed a literal "(unknown)"
            # instead of the target filename.
            logging.info(f"数据已保存到 {filename}")
        except Exception as e:
            logging.error(f"保存CSV文件失败: {e}")

    def crawl_forum(self, url, max_pages=3):
        """Crawl up to *max_pages* listing pages starting at *url*.

        Failed pages are logged and skipped. Returns the concatenated
        thread data from all successfully-parsed pages.
        """
        all_data = []

        for page in range(1, max_pages + 1):
            logging.info(f"正在爬取第 {page} 页...")

            # Build the paginated URL; pick '?' or '&' depending on whether
            # the base URL already carries a query string (previously '&'
            # was always appended, malformed for query-less URLs).
            if page == 1:
                page_url = url
            else:
                sep = '&' if '?' in url else '?'
                page_url = f"{url}{sep}page={page}"

            html_content = self.get_page(page_url)
            if not html_content:
                logging.error(f"无法获取第 {page} 页内容")
                continue

            page_data = self.parse_forum_data(html_content)
            all_data.extend(page_data)

            logging.info(f"第 {page} 页爬取完成，获取 {len(page_data)} 条数据")

            # Be polite: pause between pages (but not after the last one).
            if page < max_pages:
                time.sleep(2)

        return all_data

    def generate_report(self, data):
        """Build a summary report dict from crawled thread data.

        Returns a Chinese "no data" message string (not a dict) when
        *data* is empty — callers must handle both shapes.
        """
        if not data:
            return "没有数据可生成报告"

        report = {
            'summary': {
                'total_threads': len(data),
                'unique_authors': len(set(item['author'] for item in data if item['author'])),
                'unique_forums': len(set(item['forum'] for item in data if item['forum'])),
                'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            },
            'top_forums': {},
            'top_authors': {},
            # Lexicographic sort works because timestamps are zero-padded
            # "YYYY-MM-DD HH:MM"-style strings; missing times sort last.
            'recent_threads': sorted(data, key=lambda x: x.get('last_reply_time', ''), reverse=True)[:10]
        }

        # Tally thread counts per forum and per (non-empty) author.
        for item in data:
            forum = item.get('forum', '未知版块')
            report['top_forums'][forum] = report['top_forums'].get(forum, 0) + 1

            author = item.get('author', '未知作者')
            if author:
                report['top_authors'][author] = report['top_authors'].get(author, 0) + 1

        return report

def _print_summary(report):
    """Print a human-readable digest of the crawl report to stdout."""
    print("\n=== 爬取完成 ===")
    summary = report['summary']
    print(f"总帖子数: {summary['total_threads']}")
    print(f"独立作者数: {summary['unique_authors']}")
    print(f"版块数量: {summary['unique_forums']}")
    print(f"爬取时间: {summary['crawl_time']}")

    print("\n=== 热门版块 ===")
    ranked = sorted(report['top_forums'].items(), key=lambda kv: kv[1], reverse=True)
    for forum, count in ranked[:5]:
        print(f"{forum}: {count} 帖")

    print("\n=== 最新帖子 ===")
    for idx, thread in enumerate(report['recent_threads'][:5], 1):
        print(f"{idx}. {thread['title']} (作者: {thread['author']})")

def main():
    """Entry point: crawl the target forum listing, persist the results,
    build a report and print its digest."""
    scraper = ForumScraper()
    target_url = "https://52it.cc/forum.php?mod=guide&view=newthread"

    logging.info("开始爬取吾爱我家IT论坛数据...")

    try:
        forum_data = scraper.crawl_forum(target_url, max_pages=2)

        if not forum_data:
            logging.warning("没有获取到任何数据")
            return

        # Persist raw data in both formats, then the aggregated report.
        scraper.save_to_json(forum_data, 'forum_data.json')
        scraper.save_to_csv(forum_data, 'forum_data.csv')

        report = scraper.generate_report(forum_data)
        scraper.save_to_json(report, 'forum_report.json')

        _print_summary(report)

    except Exception as e:
        logging.error(f"爬取过程中发生错误: {e}")

if __name__ == "__main__":
    main()