import requests
import pandas as pd
from datetime import datetime
import time
import random
from bs4 import BeautifulSoup
import logging
import re
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager
import json
import os
from dotenv import load_dotenv

# Load environment variables from a .env file (supplies WEIBO_COOKIE).
load_dotenv()

# Configure logging: INFO level, mirrored to a log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler("official_statement_crawler.log"), logging.StreamHandler()]
)
# Module-level logger used throughout this file.
logger = logging.getLogger("official_statement_crawler")

class OfficialStatementCrawler:
    """Crawler for statements and responses officially published by brands.

    Two sources are supported:
      * official websites (optionally rendered with Selenium for pages
        that require JavaScript), via ``scrape_official_website``;
      * official Weibo accounts, via ``get_official_weibo_statements``,
        authenticated with the cookie in the WEIBO_COOKIE env variable.

    Collected results can be exported with ``save_to_excel``.
    """

    # Seconds before an HTTP request is abandoned. Without a timeout,
    # requests.get() can block forever on a stalled server.
    REQUEST_TIMEOUT = 30

    def __init__(self, use_selenium=False, headers=None):
        """
        Initialize the crawler.

        Args:
            use_selenium: Use Selenium for sites that need JavaScript
                rendering; falls back to plain requests if the
                WebDriver cannot be started.
            headers: Optional HTTP request headers; defaults to a
                desktop Chrome User-Agent.
        """
        self.headers = headers or {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        self.use_selenium = use_selenium
        self.driver = None

        # Cookie of the official Weibo account ('' when unset).
        self.weibo_cookie = os.getenv('WEIBO_COOKIE', '')

        if use_selenium:
            self._init_selenium()

    def _init_selenium(self):
        """Start a headless Chrome WebDriver.

        On failure, logs the error and clears ``use_selenium`` so all
        subsequent fetches fall back to plain requests.
        """
        try:
            chrome_options = Options()
            chrome_options.add_argument("--headless")  # headless mode
            chrome_options.add_argument("--disable-gpu")
            chrome_options.add_argument("--window-size=1920,1080")
            chrome_options.add_argument("--disable-extensions")
            chrome_options.add_argument("--disable-infobars")
            # Needed when running inside containers / CI sandboxes.
            chrome_options.add_argument("--no-sandbox")
            chrome_options.add_argument("--disable-dev-shm-usage")

            service = Service(ChromeDriverManager().install())
            self.driver = webdriver.Chrome(service=service, options=chrome_options)
            logger.info("Selenium WebDriver初始化成功")

        except Exception as e:
            logger.error(f"Selenium WebDriver初始化失败: {str(e)}")
            self.use_selenium = False

    def close(self):
        """Quit the Selenium WebDriver if one is running."""
        if self.driver:
            self.driver.quit()
            self.driver = None

    def scrape_official_website(self, website_url, css_selector, keyword=None, max_pages=50000):
        """
        Scrape statements from an official website.

        Args:
            website_url: Website URL; may contain a ``{page}`` placeholder
                for pagination. Without the placeholder only the first
                page is fetched (re-requesting the same URL would merely
                duplicate results — see the break below).
            css_selector: CSS selector locating news/announcement items.
            keyword: Optional keyword filter applied to item titles,
                e.g. "声明" or "回应".
            max_pages: Upper bound on the number of pages crawled.

        Returns:
            A list of dicts describing the statements found.
        """
        statements = []
        current_page = 1
        # Only URLs with a '{page}' placeholder can actually paginate.
        paginated = '{page}' in website_url

        try:
            while current_page <= max_pages:
                page_url = website_url.format(page=current_page) if paginated else website_url

                if self.use_selenium and self.driver:
                    try:
                        self.driver.get(page_url)
                        # Wait until at least one target element is present.
                        WebDriverWait(self.driver, 10).until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, css_selector))
                        )
                        html = self.driver.page_source
                    except Exception as e:
                        logger.error(f"Selenium获取页面失败: {str(e)}")
                        break
                else:
                    response = requests.get(page_url, headers=self.headers,
                                            timeout=self.REQUEST_TIMEOUT)
                    if response.status_code != 200:
                        logger.error(f"获取官方网站页面失败: {response.status_code}")
                        break
                    html = response.text

                soup = BeautifulSoup(html, 'html.parser')
                items = soup.select(css_selector)

                if not items:
                    logger.warning(f"在页面 {page_url} 未找到任何匹配的元素")
                    break

                for item in items:
                    try:
                        # Title: prefer the first anchor, else the item itself.
                        title_elem = item.select_one('a') or item
                        title = title_elem.get_text().strip()

                        # Keyword filter on the title.
                        if keyword and keyword not in title:
                            continue

                        # Detail-page link; resolve site-relative URLs.
                        link = ''
                        if title_elem.name == 'a' and 'href' in title_elem.attrs:
                            link = title_elem['href']
                            if link.startswith('/'):
                                base_url = '/'.join(page_url.split('/')[:3])  # e.g. https://example.com
                                link = base_url + link

                        # Publication date, if the listing exposes one.
                        date_elem = item.select_one('.date') or item.select_one('.time')
                        publish_date = date_elem.get_text().strip() if date_elem else ""

                        # Full statement body from the detail page.
                        content = self._get_statement_content(link) if link else ""

                        statements.append({
                            'title': title,
                            'publish_date': publish_date,
                            'url': link,
                            'content': content,
                            'source': '官方网站',
                            'platform': '官方网站',
                            'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                        })

                        # Throttle per-item requests.
                        time.sleep(random.uniform(1, 3))

                    except Exception as e:
                        logger.error(f"处理官方网站声明时出错: {str(e)}")
                        continue

                # Bug fix: without a '{page}' placeholder every further
                # iteration would re-fetch the identical URL (the "next"
                # link below is never actually followed), so stop here.
                if not paginated:
                    break

                # Stop when there is no (enabled) "next" control.
                next_button = soup.select_one('.next') or soup.select_one('.pagination .next')
                if not next_button or 'disabled' in next_button.get('class', []):
                    break

                current_page += 1
                # Delay between listing pages.
                time.sleep(random.uniform(3, 5))

        except Exception as e:
            logger.error(f"抓取官方网站出错: {str(e)}")

        return statements

    def _get_statement_content(self, url):
        """
        Fetch the full text of a statement detail page.

        Args:
            url: Detail-page URL.

        Returns:
            The statement body text, or "" on any failure.
        """
        try:
            if self.use_selenium and self.driver:
                try:
                    self.driver.get(url)
                    # Wait for the document body to be present.
                    WebDriverWait(self.driver, 10).until(
                        EC.presence_of_element_located((By.CSS_SELECTOR, 'body'))
                    )
                    html = self.driver.page_source
                except Exception as e:
                    logger.error(f"Selenium获取详情页失败: {str(e)}")
                    return ""
            else:
                response = requests.get(url, headers=self.headers,
                                        timeout=self.REQUEST_TIMEOUT)
                if response.status_code != 200:
                    logger.error(f"获取详情页失败: {response.status_code}")
                    return ""
                html = response.text

            soup = BeautifulSoup(html, 'html.parser')

            # Try selectors commonly used for article bodies.
            content_selectors = [
                '.article-content', '.content', '#content',
                '.news-detail', '.detail-content', '.article'
            ]

            for selector in content_selectors:
                content_elem = soup.select_one(selector)
                if content_elem:
                    # Strip script/style tags so only prose remains.
                    for script in content_elem.select('script'):
                        script.decompose()
                    for style in content_elem.select('style'):
                        style.decompose()

                    return content_elem.get_text().strip()

            # Fall back to the whole document body's text.
            body = soup.select_one('body')
            if body:
                return body.get_text().strip()

            return ""

        except Exception as e:
            logger.error(f"获取声明内容出错 {url}: {str(e)}")
            return ""

    def get_official_weibo_statements(self, account_id, keyword=None, pages=3):
        """
        Fetch statements from an official Weibo account via the AJAX API.

        Args:
            account_id: Weibo user id (uid) of the account.
            keyword: Optional keyword filter applied to post text.
            pages: Number of timeline pages to fetch.

        Returns:
            A list of dicts describing the matching posts.
        """
        statements = []

        try:
            headers = self.headers.copy()
            if self.weibo_cookie:
                # The mymblog endpoint requires a logged-in cookie.
                headers['Cookie'] = self.weibo_cookie

            for page in range(1, pages+1):
                # Weibo timeline API for the account.
                url = f"https://weibo.com/ajax/statuses/mymblog?uid={account_id}&page={page}&feature=0"
                response = requests.get(url, headers=headers,
                                        timeout=self.REQUEST_TIMEOUT)

                if response.status_code != 200:
                    logger.error(f"获取官方微博失败: {response.status_code}")
                    break

                try:
                    data = response.json()
                    if 'data' not in data or 'list' not in data['data']:
                        logger.error("微博API返回格式异常")
                        break

                    for post in data['data']['list']:
                        try:
                            # Post text with HTML tags removed.
                            content = post.get('text', '')
                            content = re.sub(r'<[^>]+>', '', content)

                            # Keyword filter on the post text.
                            if keyword and keyword not in content:
                                continue

                            # Normalize Weibo's "Mon Jan 02 15:04:05 +0800 2006"
                            # timestamps; keep the raw string if parsing fails.
                            created_at = post.get('created_at', '')
                            if created_at:
                                try:
                                    created_timestamp = datetime.strptime(created_at, '%a %b %d %H:%M:%S %z %Y')
                                    created_at = created_timestamp.strftime('%Y-%m-%d %H:%M:%S')
                                except Exception:
                                    pass

                            # Post id, used to build a permalink.
                            mid = post.get('mid', '')

                            # Engagement counters.
                            reposts_count = post.get('reposts_count', 0)
                            comments_count = post.get('comments_count', 0)
                            attitudes_count = post.get('attitudes_count', 0)

                            link = f"https://weibo.com/{account_id}/{mid}"

                            statements.append({
                                'title': content[:50] + ('...' if len(content) > 50 else ''),
                                'content': content,
                                'publish_date': created_at,
                                'url': link,
                                'source': '官方微博',
                                'platform': '微博',
                                'reposts': reposts_count,
                                'comments': comments_count,
                                'likes': attitudes_count,
                                'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                            })

                        except Exception as e:
                            logger.error(f"处理微博声明时出错: {str(e)}")
                            continue

                except json.JSONDecodeError:
                    logger.error("微博返回数据格式错误")
                    break

                # Delay between pages.
                time.sleep(random.uniform(3, 5))

        except Exception as e:
            logger.error(f"获取官方微博声明出错: {str(e)}")

        return statements

    def save_to_excel(self, statements, filename='official_statements.xlsx'):
        """Save the collected statements to an Excel file.

        Args:
            statements: List of statement dicts.
            filename: Output .xlsx path.

        Returns:
            True on success, False otherwise.
        """
        try:
            df = pd.DataFrame(statements)
            df.to_excel(filename, index=False, engine='openpyxl')
            # Bug fix: the message previously logged a literal "(unknown)"
            # instead of the actual output path.
            logger.info(f"数据已保存到 {filename}")
            return True
        except Exception as e:
            logger.error(f"保存Excel出错: {str(e)}")
            return False

if __name__ == "__main__":
    # Demo run: crawl Xiaomi's official statements.
    crawler = OfficialStatementCrawler(use_selenium=True)

    try:
        # Scrape the official website's news/announcement listing.
        # (Removed a dead `website_url` assignment that was immediately
        # shadowed by the keyword argument below and never used.)
        statements = crawler.scrape_official_website(
            website_url="https://www.mi.com/about/news/index?page={page}",
            css_selector=".news-list .news-item",
            keyword="声明",
            max_pages=2
        )
        print(f"从官方网站获取了 {len(statements)} 条声明")

        # Fetch statements from the official Weibo account.
        xiaomi_weibo_id = "1771925961"  # Xiaomi's official Weibo uid
        weibo_statements = crawler.get_official_weibo_statements(
            account_id=xiaomi_weibo_id,
            keyword="声明",
            pages=2
        )
        print(f"从官方微博获取了 {len(weibo_statements)} 条声明")

        # Merge both sources.
        all_statements = statements + weibo_statements

        # Export to Excel.
        crawler.save_to_excel(all_statements, "小米_official_statements.xlsx")

    finally:
        # Always release the WebDriver, even after an exception.
        crawler.close()