#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
新浪财经历史分红数据爬取脚本
功能：爬取新浪财经网站历史分红数据第1-100页的内容，并将其存储为本地Excel文件
"""

import time
import random
import logging
import requests
from bs4 import BeautifulSoup
import pandas as pd
from typing import List, Dict, Any

# Configure logging: INFO-level messages are written both to a persistent
# file (crawler.log) and to the console, with timestamp and severity.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("crawler.log"),  # persistent log for post-run review
        logging.StreamHandler()              # live progress on stderr
    ]
)
# Module-level logger, shared by the crawler class and the __main__ driver.
logger = logging.getLogger(__name__)

class SinaFinanceCrawler:
    """Crawler for Sina Finance historical dividend data.

    Fetches pages of the paginated dividend table (one HTTP GET per page),
    parses each table row with BeautifulSoup, accumulates the records in
    ``all_data`` and can export them to an Excel file.
    """

    def __init__(self, start_page: int = 1, end_page: int = 100):
        """
        Initialize the crawler.

        Args:
            start_page: first page number to fetch (inclusive)
            end_page: last page number to fetch (inclusive)
        """
        self.start_page = start_page
        self.end_page = end_page
        # HTML listing of dividend data on Sina Finance; page selected via ?p=N.
        self.base_url = "https://vip.stock.finance.sina.com.cn/q/go.php/vFinanceAnalyze/kind/dividend/index.phtml"
        # Browser-like headers to reduce the chance of being rejected as a bot.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Referer': 'https://vip.stock.finance.sina.com.cn/'
        }
        # Records accumulated across all fetched pages.
        self.all_data: List[Dict[str, Any]] = []

    @staticmethod
    def _clean_number(value: str) -> str:
        """Map the site's '--' placeholder for missing numbers to '0'."""
        return '0' if value == '--' else value

    def _parse_row(self, cells) -> Dict[str, Any]:
        """
        Build one stock record from the <td> cells of a table row.

        Args:
            cells: list of <td> elements; at least 8 entries expected
                   (guaranteed by the caller's length check).

        Returns:
            Dict keyed by the Chinese column names used in the Excel export.
        """
        name = cells[2].text.strip()
        stock_link = cells[1].find('a')
        if stock_link:
            code = stock_link.text.strip()
            # An <a> without an href would previously raise KeyError and drop
            # the whole page via the broad except; fall back to the
            # constructed company URL instead.
            detail_url = stock_link.get(
                'href',
                f"https://finance.sina.com.cn/realstock/company/{code}/nc.shtml",
            )
        else:
            code = cells[1].text.strip()
            detail_url = f"https://finance.sina.com.cn/realstock/company/{code}/nc.shtml"
        return {
            '代码': code,
            '名称': name,
            '详细': detail_url,
            '上市日期': cells[3].text.strip(),
            '累计股息(%)': self._clean_number(cells[4].text.strip()),
            '年均股息(%)': self._clean_number(cells[5].text.strip()),
            '分红次数': self._clean_number(cells[6].text.strip()),
            '融资总额(亿元)': self._clean_number(cells[7].text.strip()),
        }

    def get_page_data(self, page_num: int) -> List[Dict[str, Any]]:
        """
        Fetch and parse a single result page.

        Args:
            page_num: 1-based page number.

        Returns:
            List of record dicts for that page; an empty list on any error
            (errors are logged, never raised, so the crawl can continue).
        """
        try:
            response = requests.get(
                self.base_url,
                headers=self.headers,
                params={'p': page_num},
                timeout=10,
            )
            response.raise_for_status()
            # The page is served in a GB-family encoding. 'gbk' is a strict
            # superset of 'gb2312': it decodes everything gb2312 can, plus
            # rarer characters that would otherwise become mojibake.
            response.encoding = 'gbk'

            logger.info("第%s页响应状态码: %s", page_num, response.status_code)

            soup = BeautifulSoup(response.text, 'html.parser')
            table = soup.find('table', {'id': 'dataTable'})
            if not table:
                logger.warning("第%s页未找到数据表格", page_num)
                return []

            page_data = []
            for row in table.find_all('tr')[1:]:  # [1:] skips the header row
                cells = row.find_all('td')
                if len(cells) < 8:  # malformed / short row: skip it
                    continue
                page_data.append(self._parse_row(cells))

            logger.info("成功获取第%s页数据，共%s条记录", page_num, len(page_data))
            return page_data

        except requests.exceptions.RequestException as e:
            logger.error("获取第%s页数据时发生错误: %s", page_num, e)
            return []
        except Exception as e:
            logger.error("处理第%s页数据时发生未知错误: %s", page_num, e)
            return []

    def crawl_all_data(self) -> None:
        """Fetch every page in [start_page, end_page] and extend all_data."""
        logger.info("开始爬取新浪财经历史分红数据，页码范围：%s-%s", self.start_page, self.end_page)
        logger.info("使用URL: %s", self.base_url)

        for page_num in range(self.start_page, self.end_page + 1):
            page_data = self.get_page_data(page_num)
            if page_data:
                self.all_data.extend(page_data)

            # Random delay between requests to reduce the risk of being
            # rate-limited or banned; no point sleeping after the last page.
            if page_num < self.end_page:
                delay = random.uniform(1, 3)
                logger.info("爬取第%s页后，休眠%.2f秒", page_num, delay)
                time.sleep(delay)

        logger.info("爬取完成，共获取%s条记录", len(self.all_data))

    def save_to_excel(self, file_path: str = "新浪财经历史分红数据.xlsx") -> None:
        """
        Save the accumulated records to an Excel file.

        Args:
            file_path: destination path of the .xlsx file.
        """
        if not self.all_data:
            logger.warning("没有数据可保存")
            return
        try:
            df = pd.DataFrame(self.all_data)
            df.to_excel(file_path, index=False)
            logger.info("数据已成功保存到：%s", file_path)
        except Exception as e:
            # Log-and-continue: a failed save must not crash the driver.
            logger.error("保存数据时发生错误: %s", e)


if __name__ == "__main__":
    # 创建爬虫实例
    crawler = SinaFinanceCrawler(start_page=1, end_page=100)
    
    try:
        # 爬取所有数据
        crawler.crawl_all_data()
        
        # 保存数据到Excel文件
        output_file = "新浪财经历史分红数据.xlsx"
        crawler.save_to_excel(output_file)
        
    except KeyboardInterrupt:
        logger.info("用户中断了爬取过程")
        
    except Exception as e:
        logger.error(f"程序运行过程中发生错误: {str(e)}")