import requests
import json
import pandas as pd
from datetime import datetime
import time
import random
from bs4 import BeautifulSoup
import logging
import re
import os
from dotenv import load_dotenv

# Load environment variables from a .env file (e.g. WEIBO_COOKIE, ZHIHU_COOKIE)
load_dotenv()

# Configure logging: INFO level, mirrored to a log file and the console
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler("social_media_crawler.log"), logging.StreamHandler()]
)
logger = logging.getLogger("social_media_crawler")

class SocialMediaCrawler:
    """Crawler for brand-related discussions on Chinese social media.

    Supports Weibo search (HTML scraping), the Zhihu search API (JSON),
    and a simulated Douyin source.  Cookies for authenticated requests
    are read from the WEIBO_COOKIE / ZHIHU_COOKIE environment variables
    (loaded from .env at module import time).
    """

    # Seconds to wait for any HTTP response; without a timeout,
    # requests.get() can block forever on a stalled connection.
    REQUEST_TIMEOUT = 10

    def __init__(self, headers=None):
        """Initialize default HTTP headers and read platform cookies.

        Args:
            headers: Optional dict of default headers; a desktop Chrome
                User-Agent is used when omitted.
        """
        self.headers = headers or {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # Cookies may be supplied via environment variables (.env supported).
        self.weibo_cookie = os.getenv('WEIBO_COOKIE', '')
        self.zhihu_cookie = os.getenv('ZHIHU_COOKIE', '')

    @staticmethod
    def _first_int(text, default=0):
        """Return the first run of digits in *text* as an int.

        Used for interaction counters like '转发 12'; returns *default*
        when the text contains no digits (e.g. just the action label).
        """
        match = re.search(r'\d+', text)
        return int(match.group()) if match else default

    def search_weibo(self, keyword, pages=3):
        """Search Weibo for posts matching *keyword*.

        Args:
            keyword: Search term, e.g. a brand name.
            pages: Number of result pages to fetch.

        Returns:
            List of post dicts (see _parse_weibo_card for the keys).
            Partial results are returned if any page or card fails.
        """
        weibo_list = []

        try:
            if not self.weibo_cookie:
                logger.warning("未设置微博Cookie，可能无法获取完整数据")

            headers = self.headers.copy()
            if self.weibo_cookie:
                headers['Cookie'] = self.weibo_cookie

            for page in range(1, pages + 1):
                url = f"https://s.weibo.com/weibo?q={keyword}&page={page}"
                response = requests.get(url, headers=headers,
                                        timeout=self.REQUEST_TIMEOUT)

                if response.status_code != 200:
                    logger.error(f"微博搜索请求失败: {response.status_code}")
                    continue

                soup = BeautifulSoup(response.text, 'html.parser')
                for card in soup.select('.card-wrap'):
                    try:
                        record = self._parse_weibo_card(card, keyword)
                        if record is not None:
                            weibo_list.append(record)
                    except Exception as e:
                        logger.error(f"处理微博内容时出错: {str(e)}")
                        continue

                # Randomized delay between pages to reduce the ban risk.
                time.sleep(random.uniform(3, 5))

        except Exception as e:
            logger.error(f"微博搜索出错: {str(e)}")

        return weibo_list

    def _parse_weibo_card(self, card, keyword):
        """Parse one Weibo search-result card into a record dict.

        Returns None for member/advertisement cards that should be skipped.
        """
        # Skip promoted / member-only cards.
        if card.select_one('.icon-member') or card.select_one('.icon-advertisefill'):
            return None

        # The weibo id is the second path segment of the permalink (/uid/mid).
        mid = ""
        action_link = card.select_one('.from a')
        if action_link and 'href' in action_link.attrs:
            mid_match = re.search(r'/(\d+)/(\w+)', action_link['href'])
            if mid_match:
                mid = mid_match.group(2)

        content_elem = card.select_one('.content p.txt')
        content = content_elem.get_text().strip() if content_elem else ""

        user_elem = card.select_one('.info .name')
        username = user_elem.get_text().strip() if user_elem else "未知用户"

        time_elem = card.select_one('.from')
        pub_time = time_elem.get_text().strip() if time_elem else "未知时间"

        # Interaction bar order on the search page: repost, comment, like.
        repost_count = comment_count = like_count = 0
        action_data = card.select('.card-act .txt')
        if len(action_data) >= 3:
            repost_count = self._first_int(action_data[0].get_text().strip())
            comment_count = self._first_int(action_data[1].get_text().strip())
            like_count = self._first_int(action_data[2].get_text().strip())

        return {
            'platform': '微博',
            'content': content,
            'user': username,
            'published_time': pub_time,
            'likes': like_count,
            'reposts': repost_count,
            'comments': comment_count,
            'weibo_id': mid,
            'keyword': keyword,
            'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

    def search_zhihu(self, keyword, pages=2):
        """Search Zhihu (via its JSON search API) for *keyword*.

        Args:
            keyword: Search term.
            pages: Number of 20-item pages to fetch.

        Returns:
            List of record dicts (see _parse_zhihu_item for the keys).
            Partial results are returned if any page or item fails.
        """
        zhihu_list = []

        try:
            headers = self.headers.copy()
            if self.zhihu_cookie:
                headers['Cookie'] = self.zhihu_cookie

            for page in range(1, pages + 1):
                url = f"https://www.zhihu.com/api/v4/search_v3?t=general&q={keyword}&correction=1&offset={20*(page-1)}&limit=20&filter_fields=&lc_idx=0&show_all_topics=0"
                response = requests.get(url, headers=headers,
                                        timeout=self.REQUEST_TIMEOUT)

                if response.status_code != 200:
                    logger.error(f"知乎搜索请求失败: {response.status_code}")
                    continue

                data = response.json()
                if 'data' not in data:
                    logger.error("知乎API返回格式异常")
                    continue

                for item in data['data']:
                    try:
                        record = self._parse_zhihu_item(item, keyword)
                        if record is not None:
                            zhihu_list.append(record)
                    except Exception as e:
                        logger.error(f"处理知乎内容时出错: {str(e)}")
                        continue

                # Randomized delay between pages to reduce the ban risk.
                time.sleep(random.uniform(3, 5))

        except Exception as e:
            logger.error(f"知乎搜索出错: {str(e)}")

        return zhihu_list

    def _parse_zhihu_item(self, item, keyword):
        """Convert one Zhihu search API item into a record dict.

        Returns None for items without the expected 'object'/'type' keys.
        """
        if 'object' not in item or 'type' not in item:
            return None

        obj = item['object']

        # Content type within Zhihu: question, answer, article, ...
        content_type = obj.get('type', '')

        # Answers carry their title on the parent question object.
        title = ""
        if content_type == 'answer':
            title = obj.get('question', {}).get('title', '')
        elif 'title' in obj:
            title = obj.get('title', '')

        # Strip HTML tags from the rich-text content.
        content = re.sub(r'<[^>]+>', '', obj.get('content', ''))

        author = obj.get('author', {}).get('name', '未知用户')

        # created_time is a unix timestamp; 0 / missing means unknown.
        created_time = obj.get('created_time', 0)
        if created_time:
            created_time = datetime.fromtimestamp(created_time).strftime('%Y-%m-%d %H:%M:%S')
        else:
            created_time = "未知时间"

        return {
            'platform': '知乎',
            'content_type': content_type,
            'title': title,
            'content': content,
            'author': author,
            'upvotes': obj.get('voteup_count', 0),
            'comments': obj.get('comment_count', 0),
            'url': obj.get('url', ''),
            'published_time': created_time,
            'keyword': keyword,
            'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

    def search_douyin_comments(self, keyword, pages=2):
        """Return simulated Douyin comments for *keyword*.

        Real Douyin scraping requires an official API or browser
        automation (e.g. Selenium); this placeholder returns 5 mock
        records so downstream code can be exercised.

        Args:
            keyword: Search term (embedded in the mock content).
            pages: Unused; kept for interface symmetry with the other
                search_* methods.

        Returns:
            List of 5 mock comment dicts.
        """
        logger.warning("抖音评论抓取需要额外的技术手段，此方法为模拟实现")

        return [{
            'platform': '抖音',
            'content': f"这是抖音上关于{keyword}的一条模拟评论",
            'user': '模拟用户',
            'published_time': datetime.now().strftime('%Y-%m-%d'),
            'likes': random.randint(0, 1000),
            'keyword': keyword,
            'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        } for _ in range(5)]  # 5 mock records

    def save_to_excel(self, social_data, filename='social_media_data.xlsx'):
        """Persist crawled records to an Excel workbook.

        Args:
            social_data: List of record dicts from the search_* methods.
            filename: Output path for the .xlsx file.

        Returns:
            True on success, False if writing failed (error is logged).
        """
        try:
            df = pd.DataFrame(social_data)
            df.to_excel(filename, index=False, engine='openpyxl')
            # Bug fix: previously logged a literal placeholder instead of
            # the actual output path.
            logger.info(f"数据已保存到 {filename}")
            return True
        except Exception as e:
            logger.error(f"保存Excel出错: {str(e)}")
            return False

if __name__ == "__main__":
    # Quick manual smoke test: fetch one page per platform for a sample
    # brand keyword, report counts, and export everything to Excel.
    crawler = SocialMediaCrawler()
    keyword = "小米手机"  # example brand keyword

    batches = []
    for platform_name, fetch in (
        ('微博', lambda: crawler.search_weibo(keyword, pages=1)),
        ('知乎', lambda: crawler.search_zhihu(keyword, pages=1)),
        ('抖音', lambda: crawler.search_douyin_comments(keyword)),
    ):
        batch = fetch()
        print(f"从{platform_name}获取了 {len(batch)} 条内容")
        batches.append(batch)

    # Flatten the per-platform batches into a single record list.
    all_social_data = [record for batch in batches for record in batch]

    # Export the combined dataset.
    crawler.save_to_excel(all_social_data, f"{keyword}_social_data.xlsx")