from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import csv
import pandas as pd
from tqdm import tqdm
import logging

# Configure logging: timestamped INFO-level messages for the whole run.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class EastMoneyBatchCrawler:
    """Batch crawler for EastMoney (东方财富) paginated finance-news listings.

    Drives a Chrome browser through Selenium, visits each listing page,
    extracts per-article title/link/time/summary fields, accumulates the
    records in ``all_news_data``, and can persist them to CSV.
    """

    def __init__(self, headless=True):
        """Launch the browser and initialise the result buffer.

        Args:
            headless: run Chrome without a visible window when True.
        """
        self.driver = self.setup_driver(headless)
        # One dict per news item, appended to by crawl_all_pages().
        self.all_news_data = []

    def setup_driver(self, headless=True):
        """Create a Chrome WebDriver configured to evade basic bot detection.

        Args:
            headless: pass --headless to Chrome when True.

        Returns:
            A ready-to-use ``webdriver.Chrome`` instance.
        """
        chrome_options = Options()
        if headless:
            chrome_options.add_argument('--headless')
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36')
        chrome_options.add_argument('--window-size=1920,1080')
        chrome_options.add_argument('--disable-gpu')
        # Hide the common Selenium automation fingerprints from the site.
        chrome_options.add_argument('--disable-blink-features=AutomationControlled')
        chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
        chrome_options.add_experimental_option('useAutomationExtension', False)

        driver = webdriver.Chrome(options=chrome_options)
        # Make navigator.webdriver report undefined, defeating a common bot check.
        driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")

        return driver

    def generate_urls(self, base_url, total_pages=50):
        """Return the URLs for pages 1..total_pages of a listing.

        Page 1 is ``base_url`` itself; page i (i > 1) inserts ``_{i}`` before
        the ``.html`` suffix, matching EastMoney's pagination scheme.

        Fix: the original implementation ignored ``base_url`` entirely and
        hard-coded the cywjh listing; it now honours the parameter while
        producing identical URLs for the default listing.

        Args:
            base_url: first-page URL, normally ending in ``.html``.
            total_pages: number of consecutive pages to generate.

        Returns:
            List of ``total_pages`` URL strings, page 1 first.
        """
        stem = base_url[:-len('.html')] if base_url.endswith('.html') else base_url
        return [base_url if i == 1 else f"{stem}_{i}.html"
                for i in range(1, total_pages + 1)]

    def crawl_single_page(self, url, page_num):
        """Fetch one listing page and return the news items found on it.

        Returns an empty list (and dumps the page HTML for debugging) when
        navigation or extraction fails.
        """
        logger.info(f"正在爬取第 {page_num} 页: {url}")

        try:
            self.driver.get(url)

            # Give the page's scripts time to render the list.
            time.sleep(5)

            # Scroll to trigger any lazy-loaded content.
            self.scroll_page()

            # Best-effort wait for at least one news row to appear.
            try:
                wait = WebDriverWait(self.driver, 10)
                wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '[id^="newsTr"]')))
            except Exception:  # most likely TimeoutException; extraction may still succeed
                logger.warning(f"第 {page_num} 页未找到新闻元素，继续尝试...")

            page_news = self.extract_news_from_page(page_num)
            logger.info(f"第 {page_num} 页提取到 {len(page_news)} 条新闻")

            return page_news

        except Exception as e:
            logger.error(f"爬取第 {page_num} 页时出错: {e}")
            # Keep the raw HTML around so the failure can be diagnosed later.
            self.save_error_page(page_num)
            return []

    def scroll_page(self):
        """Scroll to the bottom, top, then middle to trigger lazy loading."""
        try:
            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)

            self.driver.execute_script("window.scrollTo(0, 0);")
            time.sleep(1)

            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight/2);")
            time.sleep(1)
        except Exception:
            # Scrolling is best-effort; extraction can proceed without it.
            pass

    def extract_news_from_page(self, page_num):
        """Extract every news row (id starting with 'newsTr') on the page.

        Args:
            page_num: 1-based page index, recorded in each item.

        Returns:
            List of dicts with keys page/id/title/link/time/info/full_text/page_url.
        """
        news_data = []

        try:
            news_elements = self.driver.find_elements(By.CSS_SELECTOR, '[id^="newsTr"]')
            logger.info(f"第 {page_num} 页找到 {len(news_elements)} 个新闻元素")

            for element in news_elements:
                # Defined before the try so the except handler can log it even
                # when get_attribute itself raises (was an UnboundLocalError).
                element_id = None
                try:
                    element_id = element.get_attribute('id')
                    element_text = element.text.strip()

                    # Skip empty rows and the clock-icon placeholder row.
                    if not element_text or element_text == '⏰':
                        continue

                    title, link = self.extract_title_and_link(element)
                    time_text = self.extract_time(element)
                    info_text = self.extract_info(element)

                    news_item = {
                        'page': page_num,
                        'id': element_id,
                        'title': title if title else element_text,
                        'link': link,
                        'time': time_text,
                        'info': info_text,
                        'full_text': element_text,
                        'page_url': self.driver.current_url,
                    }

                    news_data.append(news_item)

                    # Periodic progress logging, every 5 items.
                    if len(news_data) % 5 == 0:
                        logger.info(f"第 {page_num} 页已提取 {len(news_data)} 条新闻")

                except Exception as e:
                    logger.warning(f"处理元素 {element_id} 时出错: {e}")
                    continue

        except Exception as e:
            logger.error(f"提取第 {page_num} 页新闻时出错: {e}")

        return news_data

    def extract_title_and_link(self, element):
        """Return (title, href) from the row's first anchor.

        Falls back to (row text, "") when the row has no usable anchor.
        """
        try:
            link_elements = element.find_elements(By.TAG_NAME, 'a')
            if link_elements:
                link_element = link_elements[0]
                title = link_element.text.strip()
                link = link_element.get_attribute('href')
                return title, link
        except Exception:
            pass

        return element.text.strip(), ""

    def extract_time(self, element):
        """Return the row's timestamp text, or '' when none is found."""
        try:
            # '.time' subsumes the original 'n.time'/'span.time' entries
            # (those could only match when '.time' already matched);
            # '[class*="time"]' is the broader fallback.
            for selector in ('.time', '[class*="time"]'):
                time_elements = element.find_elements(By.CSS_SELECTOR, selector)
                if time_elements:
                    return time_elements[0].text.strip()
        except Exception:
            pass
        return ""

    def extract_info(self, element):
        """Return the row's summary text, or '' when none is found.

        Prefers the element's ``title`` attribute over its visible text.
        """
        try:
            selectors = ['.info', '[class*="info"]', '[title]']
            for selector in selectors:
                info_elements = element.find_elements(By.CSS_SELECTOR, selector)
                if info_elements:
                    info_element = info_elements[0]
                    info = info_element.get_attribute('title')
                    if not info:
                        info = info_element.text.strip()
                    return info
        except Exception:
            pass
        return ""

    def save_error_page(self, page_num):
        """Dump the current page source to an HTML file for debugging."""
        try:
            filename = f"error_page_{page_num}.html"
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(self.driver.page_source)
            # Fix: the original message printed a literal "(unknown)" here.
            logger.info(f"错误页面已保存到 {filename}")
        except Exception:
            # The dump is best-effort; never mask the original error.
            pass

    def crawl_all_pages(self, total_pages=50, delay=3):
        """Crawl pages 1..total_pages, accumulating into all_news_data.

        Args:
            total_pages: number of listing pages to visit.
            delay: seconds to sleep between pages to avoid hammering the site.
        """
        urls = self.generate_urls("https://finance.eastmoney.com/a/cywjh.html", total_pages)

        logger.info(f"开始爬取 {total_pages} 个页面...")

        for i, url in enumerate(tqdm(urls, desc="爬取进度"), 1):
            page_news = self.crawl_single_page(url, i)
            self.all_news_data.extend(page_news)

            if page_news:
                logger.info(f"第 {i} 页成功提取 {len(page_news)} 条新闻")
                # Show the first two titles as a sanity check.
                for j, news in enumerate(page_news[:2], 1):
                    logger.info(f"  示例 {j}: {news['title'][:50]}...")

            # Throttle between pages; no delay needed after the last one.
            if i < len(urls):
                time.sleep(delay)

        logger.info(f"爬取完成！总共提取 {len(self.all_news_data)} 条新闻")

    def save_to_csv(self, filename='eastmoney_all_news.csv'):
        """Save all collected items to a CSV file (UTF-8 with BOM for Excel)."""
        if not self.all_news_data:
            logger.warning("没有数据可保存")
            return

        try:
            df = pd.DataFrame(self.all_news_data)
            df.to_csv(filename, index=False, encoding='utf-8-sig')
            # Fix: the original message printed a literal "(unknown)" here.
            logger.info(f"数据已保存到 {filename}，共 {len(df)} 条记录")

            self.show_statistics(df)

        except Exception as e:
            logger.error(f"保存CSV文件时出错: {e}")

    def show_statistics(self, df):
        """Print aggregate statistics for the collected DataFrame."""
        print("\n" + "=" * 50)
        print("数据统计信息:")
        print(f"总新闻条数: {len(df)}")
        if df.empty:
            # Nothing to aggregate; avoids dividing by nunique() == 0 below.
            return
        print(f"页面数量: {df['page'].nunique()}")
        print(f"每页平均新闻数: {len(df) / df['page'].nunique():.1f}")

        # Per-page item counts, in page order.
        page_stats = df['page'].value_counts().sort_index()
        print("\n各页新闻数量:")
        for page, count in page_stats.items():
            print(f"  第 {page} 页: {count} 条")

    def close(self):
        """Quit the browser and release its resources."""
        if self.driver:
            self.driver.quit()
            logger.info("浏览器已关闭")


def main():
    """Entry point: crawl the listing pages, save to CSV, preview results."""
    crawler = None
    try:
        # Set headless=False to watch the browser while debugging.
        crawler = EastMoneyBatchCrawler(headless=True)

        # Number of listing pages to fetch; adjust as needed.
        pages_to_fetch = 50
        crawler.crawl_all_pages(total_pages=pages_to_fetch, delay=2)

        # Persist everything that was collected.
        crawler.save_to_csv('eastmoney_50_pages_news.csv')

        # Preview the first five collected items on stdout.
        sample = crawler.all_news_data[:5]
        if sample:
            print("\n前5条新闻示例:")
            for idx, item in enumerate(sample, 1):
                print(f"{idx}. [第{item['page']}页] {item['title']}")
                if item['time']:
                    print(f"   时间: {item['time']}")
                if item['link']:
                    print(f"   链接: {item['link']}")
                print()

    except Exception as e:
        logger.error(f"程序执行出错: {e}")

    finally:
        if crawler:
            crawler.close()


# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()