import re
import random
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException, TimeoutException
import time
from datetime import datetime
import sys
import os

# Resolve the project root directory
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Add the project root to sys.path so the utils package can be imported
sys.path.append(project_root)
from utils.data_cleaner import clean_data, save_to_csv
import pandas as pd



class NationalNewsCrawler:
    """Crawl same-day domestic news from the Sina national-news channel.

    Drives a headless Chrome instance: ``crawl_today_news`` collects the
    items and ``save_news_data`` persists them to CSV files.
    """

    def __init__(self):
        # Headless Chrome with a desktop user-agent string so the site
        # serves its regular (non-mobile) page layout.
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")
        # NOTE(review): assumes chromedriver.exe sits in the working
        # directory — confirm against the deployment layout.
        service = Service(executable_path=r"chromedriver.exe")
        self.driver = webdriver.Chrome(service=service, options=chrome_options)

    def extract_news_info(self):
        """Extract the news entries from the currently loaded page.

        Returns:
            list[dict]: one dict per article with the keys ``title``,
            ``url``, ``summary``, ``time`` and ``crawl_time``.
        """
        news_items = self.driver.find_elements(By.CSS_SELECTOR, 'div.feed-card-item')
        page_news = []

        for item in news_items:
            try:
                title_elem = item.find_element(By.CSS_SELECTOR, 'h2 > a[target="_blank"]')
                page_news.append({
                    'title': title_elem.text,
                    'url': title_elem.get_attribute('href'),
                    'summary': item.find_element(By.CSS_SELECTOR, 'a.feed-card-txt-summary').text,
                    'time': item.find_element(By.CSS_SELECTOR, 'div.feed-card-time').text,
                    'crawl_time': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                })
            except Exception as e:
                # Deliberately broad best-effort: a card may be missing a
                # sub-element or go stale mid-scrape; log and keep going.
                print(f"提取新闻时出错: {str(e)}")
                continue
        return page_news

    def is_today_news(self, pub_time_str):
        """Return True if the scraped timestamp string refers to today.

        Handles relative forms ("N分钟前", "N小时前", "今天 ...") and the
        absolute form "M月D日 HH:MM", which is assumed to be in the
        current year.

        Args:
            pub_time_str: publish-time text as scraped from the page.
        """
        now = datetime.now()
        try:
            # Relative timestamps are treated as today.
            # NOTE(review): "N小时前" can cross midnight and actually refer
            # to yesterday — accepted approximation for this crawler.
            if '分钟前' in pub_time_str or '小时前' in pub_time_str or '今天' in pub_time_str:
                return True
            match = re.search(r'(\d+)月(\d+)日\s+(\d+):(\d+)', pub_time_str)
            if match:
                month, day, hour, minute = map(int, match.groups())
                # FIX: use the captured hour/minute instead of silently
                # discarding them; only the date part is compared below.
                pub_date = datetime(now.year, month, day, hour, minute)
                return pub_date.date() == now.date()
        except (ValueError, TypeError):
            # FIX: narrowed from a blanket Exception — only an invalid
            # date/time component (e.g. "2月30日") or a non-string input
            # can raise here; anything else should surface.
            pass
        return False

    def crawl_today_news(self, max_pages=5):
        """Crawl today's news, paging through at most ``max_pages`` pages.

        Stops early when a page yields no same-day items (the feed is
        ordered newest-first) or when there is no next page. The driver is
        always quit before returning.

        Args:
            max_pages: maximum number of feed pages to visit.

        Returns:
            list[dict]: all same-day news dicts collected.
        """
        today_news = []
        try:
            self.driver.get("https://news.sina.com.cn/china/")
            time.sleep(3)  # let the initial feed render
            current_page = 1

            while current_page <= max_pages:
                print(f"正在爬取第 {current_page} 页...")
                page_news = self.extract_news_info()

                # Keep only items published today.
                current_page_today_news = [news for news in page_news if self.is_today_news(news['time'])]
                today_news.extend(current_page_today_news)

                print(f"本页获取到 {len(current_page_today_news)} 条当天新闻")

                # Newest-first feed: a page with no same-day items means the
                # remaining pages are all older — stop.
                if not current_page_today_news:
                    print("本页没有当天新闻，停止爬取")
                    break

                # Advance to the next page.
                try:
                    next_button = WebDriverWait(self.driver, 10).until(
                        EC.element_to_be_clickable((By.CSS_SELECTOR, 'span.pagebox_next > a'))
                    )
                    # JS click avoids "element click intercepted" failures.
                    self.driver.execute_script("arguments[0].click();", next_button)
                    WebDriverWait(self.driver, 10).until(
                        lambda d: len(d.find_elements(By.CSS_SELECTOR, 'div.feed-card-item')) > 0)
                    current_page += 1
                    time.sleep(random.uniform(1, 3))  # polite randomized delay
                except (NoSuchElementException, TimeoutException):
                    print("已到最后一页")
                    break
                except Exception as e:
                    print(f"翻页时发生异常: {str(e)}")
                    break
        finally:
            # FIX: quit the driver even when navigation or extraction raised,
            # so no orphaned headless-Chrome process is leaked.
            self.driver.quit()
        return today_news

    def save_news_data(self, today_news):
        """Clean the crawled items and write them to the CSV files.

        Writes a dated per-day file and merges new items into the
        cumulative all-news file, deduplicated by URL.

        Args:
            today_news: list of raw news dicts from ``crawl_today_news``.
        """
        os.makedirs("data/csv", exist_ok=True)

        # Run the shared cleaner; presumably it filters/cleans entries
        # against the sensitive-word set — see utils/data_cleaner.
        sensitive_words = {'xs', '乌萨奇'}
        cleaned_today = clean_data(today_news, sensitive_words)

        # Per-day file vs. cumulative file.
        today_file = f"data/csv/{datetime.now().date()}national_news.csv"
        all_file = "data/csv/sina_national_news.csv"

        save_to_csv(cleaned_today, today_file)
        print(f"当天新闻已保存到: {today_file}")

        if os.path.exists(all_file):
            # Append only items whose URL is not already on file
            # (keep='first' keeps the previously stored row).
            existing_df = pd.read_csv(all_file)
            new_df = pd.DataFrame(cleaned_today)
            combined_df = pd.concat([existing_df, new_df]).drop_duplicates(subset=['url'], keep='first')
            combined_df.to_csv(all_file, index=False, encoding='utf-8-sig')
            new_count = len(combined_df) - len(existing_df)
            print(f"新增 {new_count} 条新闻到全部新闻文件")
        else:
            save_to_csv(cleaned_today, all_file)
            print(f"创建新的全部新闻文件: {all_file}")

if __name__ == "__main__":
    print("开始爬取新浪国内当天新闻...")
    crawler = NationalNewsCrawler()

    try:
        # Crawl only today's news (at most 5 pages).
        today_news = crawler.crawl_today_news(max_pages=5)
        print(f"共获取到 {len(today_news)} 条当天新闻")

        # Persist the results to the CSV files.
        crawler.save_news_data(today_news)

    except Exception as e:
        # Top-level boundary: report and fall through to cleanup.
        print(f"爬取过程中发生错误: {str(e)}")
    finally:
        # FIX: ensure the browser is shut down even when crawling failed
        # before crawl_today_news could quit it; guard the second quit()
        # in case the driver session is already gone.
        try:
            crawler.driver.quit()
        except Exception:
            pass