import sys
import os

# Resolve the project root (the parent of this file's directory) ...
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# ... and put it on sys.path so project-local packages (utils) are importable.
sys.path.append(project_root)

# These imports must come AFTER the sys.path tweak: utils.data_cleaner lives
# under the project root.  (The duplicate `import os` that used to sit here
# was removed — os is already imported above.)
import requests
from bs4 import BeautifulSoup
from datetime import datetime
from utils.data_cleaner import clean_data, save_to_csv

class InternationalNewsCrawler:
    """Crawler for the Sina international news listing page.

    Fetches https://news.sina.com.cn/world/, extracts the news items,
    cleans them via utils.data_cleaner.clean_data and persists the result
    as a dated CSV file.
    """

    def __init__(self):
        # Mapping of sensitive word -> replacement, consumed by clean_data().
        # NOTE(review): this is a dict of replacements, not a set, despite
        # the original comment calling it a "set".
        self.sensitive_words = {'小八': '乌萨奇'}

    def extract_news_items(self, soup):
        """
        Extract news items from the parsed listing page.

        Args:
            soup: BeautifulSoup document for the news listing page.

        Returns:
            list[dict]: one dict per item with 'title', 'url' and 'time' keys.
        """
        news_items = []
        # Harvest both image-style and text-style news containers.
        news_containers = soup.select('.news-item.img-news-item, .news-item.txt-news-item')
        for container in news_containers:
            try:
                # Title anchor; skip containers without one (ads, separators, ...).
                title_elem = container.select_one('h2 a[target="_blank"]')
                if not title_elem:
                    continue
                title = title_elem.get_text(strip=True)
                url = title_elem.get('href', '')
                # Publication time is optional on the page; fall back to a
                # placeholder string when absent.
                time_elem = container.select_one('.time')
                pub_time = time_elem.get_text(strip=True) if time_elem else "时间未知"
                news_items.append({
                    'title': title,
                    'url': url,
                    'time': pub_time
                })
            except Exception as e:
                # One malformed item must not abort the whole page.
                print(f"提取新闻项时出错: {e}")
                continue
        return news_items

    def crawl_news_with_bs4(self):
        """
        Fetch, parse, clean and persist the Sina international news page.

        Returns:
            list[dict] | None: the cleaned news items, or None when the
            request or processing failed (errors are printed, not raised).
        """
        url = "https://news.sina.com.cn/world/"
        try:
            # Fix: a timeout is mandatory — without one a stalled connection
            # would hang this script forever.
            response = requests.get(url, timeout=10)
            response.raise_for_status()  # raise on HTTP 4xx/5xx
            # Server charset headers are unreliable here; trust the sniffer.
            response.encoding = response.apparent_encoding
            soup = BeautifulSoup(response.text, 'html.parser')
            news_items = self.extract_news_items(soup)
            # Replace sensitive words before persisting.
            cleaned_news_items = clean_data(news_items, self.sensitive_words)
            # e.g. data/csv/2024-05-01international_news.csv
            file_path = f"data/csv/{datetime.now().date()}international_news.csv"
            save_to_csv(cleaned_news_items, file_path)
            print(f"数据已保存到 {file_path}")
            return cleaned_news_items
        except requests.exceptions.RequestException as e:
            print(f"请求出错: {e}")
        except Exception as e:
            print(f"处理页面时出错: {e}")

if __name__ == "__main__":
    # Entry point: crawl today's Sina international news and write the CSV.
    today = datetime.now().strftime('%Y-%m-%d')
    print(f"\n开始爬取 {today} 的新浪国际新闻...")
    InternationalNewsCrawler().crawl_news_with_bs4()