#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
舆情监控系统 - 青岛民营企业联合投资集团
功能：爬取各大新闻网站，监控公司相关舆情信息
"""

import requests
from bs4 import BeautifulSoup
import time
import json
import re
import sqlite3
from datetime import datetime
import logging
import random
import jieba
from urllib.parse import urljoin, urlparse
import schedule
import threading

class SentimentMonitor:
    """Polls several Chinese news sites / search engines for articles that
    mention the target company and stores matches in a local SQLite database.

    Side effects on construction: creates/opens ``sentiment_data.db`` and
    ``sentiment_monitor.log`` in the working directory.
    """

    def __init__(self):
        self.company_name = "西海岸新区民营企业联合投资集团"
        # Exact-match keywords; looser keyword *combinations* are handled
        # inside check_keywords().
        self.keywords = [
            "西海岸新区民营企业联合投资集团",
        ]
        # A User-Agent is picked at random per request (see get_headers())
        # to reduce the chance of being rate-limited or blocked.
        self.user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/91.0.864.59'
        ]
        self.session = requests.Session()
        self.setup_logging()
        self.setup_database()

    def setup_logging(self):
        """Configure logging to both a UTF-8 file and stdout."""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler('sentiment_monitor.log', encoding='utf-8'),
                logging.StreamHandler()
            ]
        )
        self.logger = logging.getLogger(__name__)

    def setup_database(self):
        """Create the SQLite database and the news_articles table if absent.

        ``check_same_thread=False`` allows the connection to be used from the
        scheduler thread as well as the main thread.
        """
        self.conn = sqlite3.connect('sentiment_data.db', check_same_thread=False)
        cursor = self.conn.cursor()
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS news_articles (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                title TEXT NOT NULL,
                content TEXT,
                url TEXT UNIQUE,
                source TEXT,
                publish_time TEXT,
                keywords_found TEXT,
                sentiment_score REAL,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')
        self.conn.commit()

    def get_headers(self):
        """Return request headers with a randomly chosen User-Agent."""
        return {
            'User-Agent': random.choice(self.user_agents),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
        }

    def check_keywords(self, text):
        """Return the list of keywords/keyword-combinations found in *text*.

        Two matching strategies:
        1. exact substring match against self.keywords (case-sensitive);
        2. "flexible" matching — a group matches when *all* of its terms
           appear in the lower-cased text; the group is reported joined
           with " + ".
        The result is de-duplicated while preserving first-seen order so the
        value persisted to the database is deterministic.
        """
        found_keywords = []
        text_lower = text.lower()

        # Keyword combinations that together indicate relevance.
        flexible_keywords = [
            ["西海岸", "民营企业", "投资"],
            ["青岛", "西海岸", "投资集团"],
            ["西海岸新区民营企业联合投资集团"],
            ["民营企业联合投资"],
            ["西海岸新区", "投资"]
        ]

        # Exact matches first.
        for keyword in self.keywords:
            if keyword in text:
                found_keywords.append(keyword)

        # Flexible matches: every term of the group must be present.
        for keyword_group in flexible_keywords:
            if all(kw in text_lower for kw in keyword_group):
                found_keywords.append(" + ".join(keyword_group))

        # dict.fromkeys dedupes while keeping insertion order
        # (list(set(...)) would make the order nondeterministic).
        return list(dict.fromkeys(found_keywords))

    def crawl_sina_news(self):
        """Crawl Sina news search results for the company name."""
        try:
            search_url = f"https://search.sina.com.cn/?q={self.company_name}&c=news&from=index"
            response = self.session.get(search_url, headers=self.get_headers(), timeout=10)
            response.encoding = 'utf-8'

            soup = BeautifulSoup(response.text, 'html.parser')
            articles = soup.find_all('div', class_='box-result')

            for article in articles[:5]:  # cap per-source volume
                try:
                    title_elem = article.find('h2')
                    if title_elem and title_elem.find('a'):
                        link = title_elem.find('a')
                        title = link.get_text().strip()
                        url = link.get('href')

                        keywords_found = self.check_keywords(title)
                        if keywords_found:
                            self.save_article(title, '', url, '新浪新闻', '', keywords_found)
                            self.logger.info(f"发现相关新闻: {title}")

                except Exception as e:
                    self.logger.error(f"处理新浪新闻文章时出错: {e}")

        except Exception as e:
            self.logger.error(f"爬取新浪新闻失败: {e}")

    def crawl_baidu_news(self):
        """Crawl Baidu news search results for several query variants."""
        search_keywords = [
            self.company_name,
            "青岛西海岸新区民营企业联合投资集团",
            "西海岸新区联合投资集团",
        ]

        for keyword in search_keywords:
            try:
                search_url = f"https://www.baidu.com/s?tn=news&word={keyword}"
                response = self.session.get(search_url, headers=self.get_headers(), timeout=10)
                response.encoding = 'utf-8'

                soup = BeautifulSoup(response.text, 'html.parser')

                # Baidu's markup changes often; try selectors from most to
                # least specific and use the first that yields >1 hit.
                selectors = [
                    'div.result',
                    'div[class*="result"]',
                    'div.c-container',
                    'h3'
                ]

                articles = []
                for selector in selectors:
                    found_articles = soup.select(selector)
                    if found_articles and len(found_articles) > 1:  # skip single-hit (likely noise)
                        articles = found_articles
                        self.logger.info(f"使用选择器 '{selector}' 找到 {len(articles)} 个结果")
                        break

                for article in articles[:10]:
                    try:
                        title_elem = article.find('h3') or article.find('a')
                        if title_elem:
                            if title_elem.find('a'):
                                link = title_elem.find('a')
                                title = link.get_text().strip()
                                url = link.get('href')
                            else:
                                title = title_elem.get_text().strip()
                                anchor = article.find('a')
                                # .get() avoids a KeyError when <a> has no href.
                                url = anchor.get('href', '') if anchor else ''

                            # Filter out results that are clearly not news.
                            if title and len(title) > 5 and '网页' not in title:
                                keywords_found = self.check_keywords(title)
                                if keywords_found:
                                    self.save_article(title, '', url, f'百度新闻-{keyword}', '', keywords_found)
                                    self.logger.info(f"发现相关新闻: {title}")

                    except Exception as e:
                        self.logger.error(f"处理百度新闻文章时出错: {e}")

                time.sleep(1)  # throttle between queries

            except Exception as e:
                self.logger.error(f"爬取百度新闻失败 (关键词: {keyword}): {e}")

    def crawl_tencent_news(self):
        """Query the Tencent news recommendation API for the company name."""
        try:
            # Undocumented Tencent search endpoint (token embedded in URL).
            search_url = f"https://pacaio.match.qq.com/irs/rcd?cid=137&token=d0f13d594edfc180f5bf6b845456f3ea&id=&gr=1&num=10&key={self.company_name}"
            response = self.session.get(search_url, headers=self.get_headers(), timeout=10)

            if response.status_code == 200:
                data = response.json()
                if 'data' in data and 'list' in data['data']:
                    for item in data['data']['list'][:5]:
                        try:
                            title = item.get('title', '')
                            url = item.get('url', '')

                            keywords_found = self.check_keywords(title)
                            if keywords_found:
                                self.save_article(title, '', url, '腾讯新闻', '', keywords_found)
                                self.logger.info(f"发现相关新闻: {title}")

                        except Exception as e:
                            self.logger.error(f"处理腾讯新闻文章时出错: {e}")

        except Exception as e:
            self.logger.error(f"爬取腾讯新闻失败: {e}")

    def crawl_sogou_news(self):
        """Crawl Sogou news search results for several query variants."""
        search_keywords = [
            self.company_name,
            "青岛西海岸新区民营企业联合投资集团",
        ]

        for keyword in search_keywords:
            try:
                search_url = f"https://news.sogou.com/news?query={keyword}"
                response = self.session.get(search_url, headers=self.get_headers(), timeout=10)
                response.encoding = 'utf-8'

                soup = BeautifulSoup(response.text, 'html.parser')
                articles = soup.find_all('div', class_='news-item') or soup.find_all('h3')

                for article in articles[:8]:
                    try:
                        title_elem = article.find('a') or article
                        if title_elem:
                            title = title_elem.get_text().strip()
                            url = title_elem.get('href', '') if hasattr(title_elem, 'get') else ''

                            if title and len(title) > 5:
                                keywords_found = self.check_keywords(title)
                                if keywords_found:
                                    self.save_article(title, '', url, f'搜狗新闻-{keyword}', '', keywords_found)
                                    self.logger.info(f"发现相关新闻: {title}")

                    except Exception as e:
                        self.logger.error(f"处理搜狗新闻文章时出错: {e}")

                time.sleep(1)  # throttle between queries

            except Exception as e:
                self.logger.error(f"爬取搜狗新闻失败 (关键词: {keyword}): {e}")

    def crawl_360_news(self):
        """Crawl 360 (so.com) news search results for several query variants."""
        search_keywords = [
            self.company_name,
            "青岛西海岸新区民营企业",
            "瑞源集团",
            "西海岸新区民营企业联合投资集团"
        ]

        for keyword in search_keywords:
            try:
                search_url = f"https://www.so.com/s?q={keyword}&src=news"
                response = self.session.get(search_url, headers=self.get_headers(), timeout=10)
                response.encoding = 'utf-8'

                soup = BeautifulSoup(response.text, 'html.parser')
                articles = soup.find_all('h3') or soup.find_all('a')

                for article in articles[:8]:
                    try:
                        # An <h3> wraps its link; a bare <a> *is* the link.
                        if article.name == 'h3':
                            title_elem = article.find('a')
                        else:
                            title_elem = article

                        if title_elem:
                            title = title_elem.get_text().strip()
                            url = title_elem.get('href', '')

                            if title and len(title) > 5 and '搜索' not in title:
                                keywords_found = self.check_keywords(title)
                                if keywords_found:
                                    self.save_article(title, '', url, f'360搜索-{keyword}', '', keywords_found)
                                    self.logger.info(f"发现相关新闻: {title}")

                    except Exception as e:
                        self.logger.error(f"处理360搜索文章时出错: {e}")

                time.sleep(1)  # throttle between queries

            except Exception as e:
                self.logger.error(f"爬取360搜索失败 (关键词: {keyword}): {e}")

    def crawl_wechat_search(self):
        """Crawl WeChat article and official-account search (via Sogou)."""
        search_keywords = [
            self.company_name,
            "青岛西海岸新区民营企业联合投资",
            "西海岸新区民营企业联合投资集团",
            "瑞源集团",
            "瑞源控股"
        ]

        # Phase 1: article search (type=2).
        for keyword in search_keywords:
            try:
                search_url = f"https://weixin.sogou.com/weixin?type=2&query={keyword}"
                response = self.session.get(search_url, headers=self.get_headers(), timeout=10)
                response.encoding = 'utf-8'

                soup = BeautifulSoup(response.text, 'html.parser')
                articles = soup.find_all('div', class_='news-box') or soup.find_all('h3')

                for article in articles[:8]:
                    try:
                        title_elem = None
                        url = ''

                        # Prefer the link nested under <h3>, else any anchor.
                        if article.find('h3'):
                            title_elem = article.find('h3').find('a')
                        elif article.find('a'):
                            title_elem = article.find('a')

                        if title_elem:
                            title = title_elem.get_text().strip()
                            url = title_elem.get('href', '')

                            # Sogou often returns relative links.
                            if url and not url.startswith('http'):
                                url = urljoin('https://weixin.sogou.com', url)

                            if title and len(title) > 5:
                                keywords_found = self.check_keywords(title)
                                if keywords_found:
                                    self.save_article(title, '', url, f'微信搜索-{keyword}', '', keywords_found)
                                    self.logger.info(f"发现相关微信文章: {title}")

                    except Exception as e:
                        self.logger.error(f"处理微信搜索文章时出错: {e}")

                time.sleep(2)  # WeChat search needs a longer delay

            except Exception as e:
                self.logger.error(f"爬取微信搜索失败 (关键词: {keyword}): {e}")

        # Phase 2: official-account search (type=1), limited to fewer keywords.
        try:
            for keyword in search_keywords[:3]:
                search_url = f"https://weixin.sogou.com/weixin?type=1&query={keyword}"
                response = self.session.get(search_url, headers=self.get_headers(), timeout=10)
                response.encoding = 'utf-8'

                soup = BeautifulSoup(response.text, 'html.parser')
                accounts = soup.find_all('div', class_='news-box2') or soup.find_all('h3')

                for account in accounts[:5]:
                    try:
                        title_elem = account.find('a')
                        if title_elem:
                            title = f"微信公众号: {title_elem.get_text().strip()}"
                            url = title_elem.get('href', '')

                            if url and not url.startswith('http'):
                                url = urljoin('https://weixin.sogou.com', url)

                            keywords_found = self.check_keywords(title)
                            if keywords_found:
                                self.save_article(title, '', url, f'微信公众号-{keyword}', '', keywords_found)
                                self.logger.info(f"发现相关微信公众号: {title}")

                    except Exception as e:
                        self.logger.error(f"处理微信公众号时出错: {e}")

                time.sleep(2)

        except Exception as e:
            self.logger.error(f"爬取微信公众号失败: {e}")

    def save_article(self, title, content, url, source, publish_time, keywords_found):
        """Insert one article row; duplicates (same url) are silently ignored
        via INSERT OR IGNORE against the UNIQUE url column."""
        try:
            cursor = self.conn.cursor()
            cursor.execute('''
                INSERT OR IGNORE INTO news_articles 
                (title, content, url, source, publish_time, keywords_found)
                VALUES (?, ?, ?, ?, ?, ?)
            ''', (title, content, url, source, publish_time, ','.join(keywords_found)))
            self.conn.commit()
        except Exception as e:
            self.logger.error(f"保存文章失败: {e}")

    def run_monitoring(self):
        """Run one full crawl cycle across every configured source, then log
        the total article count in the database."""
        self.logger.info("开始舆情监控...")

        # (progress message, crawler) pairs — run in order with a polite pause.
        crawl_steps = [
            ("正在爬取新浪新闻...", self.crawl_sina_news),
            ("正在爬取百度新闻...", self.crawl_baidu_news),
            ("正在爬取腾讯新闻...", self.crawl_tencent_news),
            ("正在爬取搜狗新闻...", self.crawl_sogou_news),
            ("正在爬取360搜索...", self.crawl_360_news),
            ("正在爬取微信搜索...", self.crawl_wechat_search),
        ]
        for message, crawler in crawl_steps:
            self.logger.info(message)
            crawler()
            time.sleep(2)

        # Summarize this round.
        cursor = self.conn.cursor()
        cursor.execute('SELECT COUNT(*) FROM news_articles')
        total_count = cursor.fetchone()[0]
        self.logger.info(f"本轮监控完成，数据库中共有 {total_count} 条新闻")

    def get_recent_articles(self, limit=10):
        """Return up to *limit* most recently stored articles as tuples of
        (title, url, source, keywords_found, created_at)."""
        cursor = self.conn.cursor()
        cursor.execute('''
            SELECT title, url, source, keywords_found, created_at 
            FROM news_articles 
            ORDER BY created_at DESC 
            LIMIT ?
        ''', (limit,))
        return cursor.fetchall()

    def start_scheduled_monitoring(self):
        """Block forever, running run_monitoring() every 30 minutes."""
        schedule.every(30).minutes.do(self.run_monitoring)

        self.logger.info("定时监控已启动，每30分钟执行一次")

        while True:
            schedule.run_pending()
            time.sleep(60)

    def close(self):
        """Close the database connection."""
        if self.conn:
            self.conn.close()

def main():
    """Entry point: run one crawl cycle, print the latest matches, then
    optionally hand control to the 30-minute scheduler."""
    monitor = SentimentMonitor()

    try:
        print(f"舆情监控系统启动 - 监控目标: {monitor.company_name}")
        print("正在执行首次监控...")

        # One immediate crawl before offering scheduled mode.
        monitor.run_monitoring()

        recent = monitor.get_recent_articles(5)
        if not recent:
            print("暂未发现相关新闻")
        else:
            print("\n=== 最近发现的相关新闻 ===")
            for idx, (title, url, source, keywords, created) in enumerate(recent, 1):
                print(f"{idx}. {title}")
                print(f"   来源: {source} | 关键词: {keywords}")
                print(f"   链接: {url}")
                print(f"   时间: {created}")
                print("-" * 50)

        answer = input("\n是否启动定时监控？(y/n): ")
        if answer.lower() == 'y':
            monitor.start_scheduled_monitoring()
        else:
            print("监控结束")

    except KeyboardInterrupt:
        print("\n监控已停止")
    finally:
        monitor.close()

if __name__ == "__main__":
    # Run the interactive monitor only when executed as a script,
    # not when imported as a module.
    main()