import requests
import json
import time
import logging
from bs4 import BeautifulSoup
from config import WEIBO_CONFIG, CRAWLER_CONFIG, TOPICS
from db_handler import DBHandler
from utils import (
    setup_logging,
    extract_media_urls,
    parse_time,
    extract_location,
    extract_topics,
    retry_on_failure,
    clean_text,
    extract_user_info,
    calculate_hot_score
)

class WeiboCrawler:
    """Crawler for Weibo topic-search result pages.

    Scrapes the HTML search results for each configured topic and persists
    users, posts, topics, interaction counts and user tags through
    ``DBHandler``.
    """

    def __init__(self):
        # One shared session so the auth cookie and User-Agent apply to
        # every request.
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': WEIBO_CONFIG['user_agent'],
            'Cookie': WEIBO_CONFIG['cookie']
        })
        self.db = DBHandler()
        setup_logging()

    @retry_on_failure(max_retries=WEIBO_CONFIG['retry_times'], delay=WEIBO_CONFIG['retry_interval'])
    def fetch_page(self, url, params=None):
        """Fetch ``url`` with the shared session and return the body text.

        Raises ``requests.HTTPError`` on a non-2xx status, which triggers
        the ``retry_on_failure`` decorator.
        """
        response = self.session.get(url, params=params, timeout=WEIBO_CONFIG['timeout'])
        response.raise_for_status()
        return response.text

    @staticmethod
    def _extract_count(item, title):
        """Return the integer interaction count from the ``<span>`` whose
        ``title`` attribute equals ``title``.

        Returns 0 when the span is missing or its text is not a plain
        integer (e.g. the span only contains the label character), instead
        of raising ``AttributeError``/``ValueError`` as the previous inline
        version did.
        """
        span = item.find('span', title=title)
        if span is None:
            return 0
        try:
            return int(span.get_text(strip=True))
        except (TypeError, ValueError):
            return 0

    def parse_weibo_item(self, item):
        """Parse a single Weibo card (a BeautifulSoup element) and persist
        all extracted data.

        Returns ``True`` on success, ``None`` when the item is skipped or
        parsing fails (callers only test truthiness).
        """
        try:
            # Extract and persist the author; skip cards with no username.
            user_info = extract_user_info(str(item))
            if not user_info['username']:
                return None

            user_id = self.db.save_user(
                username=user_info['username'],
                nickname=user_info['username'],
                avatar=user_info['avatar'],
                location=None
            )

            # Extract the post body; skip cards without a content element.
            content_elem = item.find('div', class_='content')
            if not content_elem:
                return None

            content = clean_text(content_elem.get_text())
            media_urls = extract_media_urls(str(item))
            location = extract_location(content)

            post_id = self.db.save_post(
                user_id=user_id,
                content=content,
                media_urls=media_urls,
                location=location
            )

            # Extract hashtags and link them to the post.
            topics = extract_topics(content)
            for topic in topics:
                topic_id = self.db.save_topic(topic)
                if topic_id:
                    self.db.save_post_topic(post_id, topic_id)

            # Interaction counts; missing/non-numeric spans count as 0
            # instead of aborting the already-persisted item.
            like_count = self._extract_count(item, '赞')
            comment_count = self._extract_count(item, '评论')
            repost_count = self._extract_count(item, '转发')

            # NOTE(review): interactions are recorded against the post's
            # author (the only user id available here), with type codes
            # 1=like, 2=comment, 3=repost — confirm this matches the
            # DBHandler schema's intent.
            if like_count > 0:
                self.db.save_interaction(user_id, post_id, 1)
            if comment_count > 0:
                self.db.save_interaction(user_id, post_id, 2)
            if repost_count > 0:
                self.db.save_interaction(user_id, post_id, 3)

            # Tag the author with each topic at a fixed default weight.
            for topic in topics:
                self.db.save_user_tag(user_id, topic, weight=0.5)

            return True

        except Exception as e:
            logging.error(f"解析微博数据失败: {str(e)}")
            return None

    def crawl_topic(self, topic):
        """Crawl search-result pages for ``topic`` until an empty page,
        the configured page cap, or an error is hit."""
        base_url = 'https://s.weibo.com/weibo'
        page = 1

        while page <= CRAWLER_CONFIG['max_pages']:
            try:
                params = {
                    'q': f'#{topic}#',
                    'page': page
                }

                html = self.fetch_page(base_url, params)
                soup = BeautifulSoup(html, 'html.parser')
                items = soup.find_all('div', class_='card-wrap')

                # An empty result page marks the end of the topic.
                if not items:
                    logging.info(f"话题 {topic} 已爬取完毕，共 {page-1} 页")
                    break

                for item in items:
                    if self.parse_weibo_item(item):
                        logging.info(f"成功解析话题 {topic} 的一条微博")

                page += 1
                # Throttle between pages to avoid rate limiting.
                time.sleep(CRAWLER_CONFIG['interval'])

            except Exception as e:
                logging.error(f"爬取话题 {topic} 第 {page} 页失败: {str(e)}")
                break

    def run(self):
        """Crawl every configured topic, always closing the DB handle."""
        try:
            logging.info("开始爬取微博数据...")
            for topic in TOPICS:
                logging.info(f"开始爬取话题: {topic}")
                self.crawl_topic(topic)
                time.sleep(CRAWLER_CONFIG['interval'])
            logging.info("微博数据爬取完成")
        except Exception as e:
            logging.error(f"爬虫运行失败: {str(e)}")
        finally:
            self.db.close()

def main():
    """Script entry point: build the crawler and run it to completion."""
    WeiboCrawler().run()


if __name__ == '__main__':
    main()