#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SSE报告发布报告数据爬虫
用于爬取上海证券交易所报告发布信息并存储到数据库
"""

import requests
import time
import json
import logging
import sqlite3
import re
from typing import Dict, List, Optional, Any
from urllib.parse import urlencode
from bs4 import BeautifulSoup
from dataclasses import dataclass
from datetime import datetime
import sys

# ===== Global configuration constants =====
DB_PATH: str = "database.sqlite"  # SQLite file used by DatabaseManager (created on first run)
DEFAULT_MAX_PAGES: int = 33  # Page limit used when no CLI argument is supplied


@dataclass
class SSEReportItem:
    """One report entry scraped from the SSE info feed.

    Instances are produced by ReportHTMLParser from a single
    ``div.m_feed_item`` container and persisted by DatabaseManager.
    """
    id: str                  # container element id (e.g. "item-12345"); "item-" prefix stripped before DB insert
    id_ref: str              # href of the company link in the entry header
    stock_name: str          # company display name
    stock_code: str          # derived from the avatar image URL, not returned directly by the endpoint
    stock_uid: str           # "uid" attribute of the company link
    stock_image: str         # company avatar image URL
    stock_report_name: str   # report title text
    stock_report_url: str    # link to the report document
    stock_report_time: str   # publication time as Chinese text, e.g. "2025年10月17日 18:24"
    page: int                # feed page number this entry was scraped from


class SSEConfig:
    """Static configuration for the SSE report crawler (endpoint, headers,
    cookies, and base query parameters)."""
    
    # AJAX feed endpoint that serves the report entries as HTML fragments.
    BASE_URL = "https://sns.sseinfo.com/ajax/feeds.do"
    # Browser-like headers so the AJAX endpoint treats us as a regular
    # XMLHttpRequest from the site's own index page.
    DEFAULT_HEADERS = {
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ga;q=0.6,ja;q=0.5',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'DNT': '1',
        'Pragma': 'no-cache',
        'Referer': 'https://sns.sseinfo.com/index.do',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
        'sec-ch-ua': '"Google Chrome";v="141", "Not?A_Brand";v="8", "Chromium";v="141"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"'
    }
    
    # Cookies for the report-publishing endpoint.
    # NOTE(review): these look like captured session values (JSESSIONID etc.)
    # and will presumably expire — confirm whether the endpoint works without
    # them or whether they need periodic refreshing.
    REPORT_COOKIES = {
        'SSESNSXMLSID': '0d0e4457-8370-4b52-960c-9a2a7901cf04',
        'ba17301551dcbaf9_gdp_user_key': '',
        'gdp_user_id': 'gioenc-d83b4295%2Ca36a%2C5gd3%2Cage3%2C800951b4ccd5',
        'JSESSIONID': '4A3B6D185254FAB3D78C692F6A3637E9',
        'ba17301551dcbaf9_gdp_session_id': '2101501c-7367-47fb-b5c9-89c9bb9e72ad',
        'ba17301551dcbaf9_gdp_session_id_2101501c-7367-47fb-b5c9-89c9bb9e72ad': 'true',
        'ba17301551dcbaf9_gdp_sequence_ids': '{%22globalKey%22:90%2C%22VISIT%22:5%2C%22PAGE%22:16%2C%22VIEW_CLICK%22:71}'
    }
    
    # Base query parameters for the report feed request; "page" and the
    # cache-busting "_" timestamp are added per request.
    # NOTE(review): type=30 presumably selects the report-publication feed
    # category — confirm against the site's own requests.
    REPORT_PARAMS = {
        'type': '30',
        'pageSize': '10',
        'lastid': '-1',
        'show': '1'
    }


class TimestampGenerator:
    """Produces request timestamps and converts Chinese date/time text."""

    @staticmethod
    def generate_timestamp() -> str:
        """Return the current wall-clock time in milliseconds, as a string."""
        millis = int(time.time() * 1000)
        return str(millis)

    @staticmethod
    def parse_chinese_time(time_str: str) -> int:
        """Convert text like "2025年10月17日 18:24" into a Unix timestamp.

        Args:
            time_str: Chinese-formatted date/time string.

        Returns:
            Seconds since the epoch (naive datetime, so interpreted in the
            local timezone), or 0 when the input is empty or unparsable.
        """
        try:
            # Empty / None input: nothing to parse.
            if not time_str:
                return 0

            # Normalize surrounding whitespace before matching.
            time_str = time_str.strip()

            # Expected layout: YYYY年M月D日 H:MM (month/day/hour/minute
            # may be one or two digits).
            found = re.match(
                r'(\d{4})年(\d{1,2})月(\d{1,2})日\s*(\d{1,2}):(\d{1,2})',
                time_str,
            )
            if not found:
                print(f"⚠️  无法解析时间格式: {time_str}")
                return 0

            fields = tuple(int(piece) for piece in found.groups())
            moment = datetime(*fields)
            return int(moment.timestamp())

        except Exception as e:
            # Out-of-range components (e.g. month 13) land here.
            print(f"⚠️  时间解析错误: {time_str} - {e}")
            return 0


class ReportHTMLParser:
    """Extracts SSEReportItem records from the feed endpoint's HTML fragments."""
    
    @staticmethod
    def parse_report_html(html_content: str, page: int) -> List[SSEReportItem]:
        """
        Parse a report HTML fragment and extract all SSE report entries.
        
        Args:
            html_content: raw HTML returned by the feeds endpoint
            page: current page number (recorded on each extracted item)
            
        Returns:
            List of extracted entries; empty list when parsing fails entirely.
        """
        items = []
        
        try:
            soup = BeautifulSoup(html_content, 'html.parser')
            
            # Each report entry lives in a div with class "m_feed_item"
            # (per the sampled HTML this parser was written against).
            report_containers = soup.find_all('div', class_='m_feed_item')
            
            for container in report_containers:
                item = ReportHTMLParser._extract_report_data(container, page)
                if item:
                    items.append(item)
                    
        except Exception as e:
            logging.error(f"解析报告HTML时出错: {e}")
            
        return items
    
    @staticmethod
    def _extract_report_data(container, page: int) -> Optional[SSEReportItem]:
        """Extract one report entry from a single m_feed_item container.

        Returns None when the container lacks an id, a stock name, or a
        report title, or when any extraction step raises.
        """
        try:
            # The container's element id becomes the item's primary id.
            item_id = container.get('id', '')
            
            # Company name / code / uid / avatar image.
            stock_info = ReportHTMLParser._extract_stock_info(container)
            
            # Report title, URL and publication time.
            report_info = ReportHTMLParser._extract_report_info(container)
            
            # href of the company profile link.
            id_ref = ReportHTMLParser._extract_id_ref(container)
            
            # Only build an item when the essential fields are present.
            if item_id and stock_info['name'] and report_info['name']:
                return SSEReportItem(
                    id=item_id,
                    id_ref=id_ref,
                    stock_name=stock_info['name'],
                    stock_code=stock_info['code'],  # note: endpoint does not return the code directly; derived from the avatar URL
                    stock_uid=stock_info['uid'],
                    stock_image=stock_info['image'],
                    stock_report_name=report_info['name'],
                    stock_report_url=report_info['url'],
                    stock_report_time=report_info['time'],
                    page=page
                )
                
        except Exception as e:
            logging.error(f"提取报告条目数据时出错: {e}")
            
        return None
    
    @staticmethod
    def _extract_stock_info(container) -> Dict[str, str]:
        """Extract company info from the entry's m_feed_face section.

        Returns a dict with keys 'name', 'code', 'uid', 'image'; any field
        that cannot be found is left as an empty string.
        """
        stock_info = {'name': '', 'code': '', 'uid': '', 'image': ''}
        
        try:
            # The company header block.
            face_elem = container.find('div', class_='m_feed_face')
            if face_elem:
                # Company display name.
                p_elem = face_elem.find('p')
                if p_elem:
                    stock_info['name'] = p_elem.get_text(strip=True)
                
                # "uid" attribute on the company profile link.
                link_elem = face_elem.find('a', href=True)
                if link_elem:
                    uid = link_elem.get('uid', '')
                    stock_info['uid'] = uid
                
                # Avatar image; the stock code is embedded in its URL.
                img_elem = face_elem.find('img')
                if img_elem:
                    stock_info['image'] = img_elem.get('src', '')
                    # Derive the stock code from URLs shaped like
                    # .../avatar/company/<digits>.png
                    src = img_elem.get('src', '')
                    if 'avatar/company/' in src:
                        code_match = re.search(r'/(\d+)\.png', src)
                        if code_match:
                            stock_info['code'] = code_match.group(1)
                        
        except Exception as e:
            logging.error(f"提取股票信息时出错: {e}")
            
        return stock_info
    
    @staticmethod
    def _extract_report_info(container) -> Dict[str, str]:
        """Extract the report title, URL and time from the m_feed_cnt section.

        Title extraction is a three-step fallback (order matters):
        1. direct text nodes of the txt div (excludes <a> text, avoiding
           "title + link-title" duplication);
        2. full text minus the link's own text;
        3. the full text as-is.
        """
        report_info = {'name': '', 'url': '', 'time': ''}
        
        try:
            # The entry's content block.
            cnt_elem = container.find('div', class_='m_feed_cnt')
            if cnt_elem:
                # Report title and URL live in the m_feed_txt div.
                txt_elem = cnt_elem.find('div', class_='m_feed_txt')
                if txt_elem:
                    # Step 1: direct (non-recursive) text nodes only, so the
                    # anchor's text is not duplicated into the title.
                    direct_text_nodes = txt_elem.find_all(string=True, recursive=False)
                    name_text = ''.join(s.strip() for s in direct_text_nodes).strip()
                    
                    # The report link itself.
                    link_elem = txt_elem.find('a', href=True)
                    if link_elem:
                        report_info['url'] = link_elem.get('href', '')
                        if not name_text:
                            # Step 2: subtract the link text from the full text
                            # to recover a plain title.
                            full_text = txt_elem.get_text(" ", strip=True)
                            link_text = link_elem.get_text(" ", strip=True)
                            name_text = full_text.replace(link_text, '').strip()
                    
                    # Step 3: last resort — use the full text unchanged.
                    if not name_text:
                        name_text = txt_elem.get_text(" ", strip=True)
                    
                    report_info['name'] = name_text
                
                # Publication time sits in a span inside m_feed_from.
                from_elem = cnt_elem.find('div', class_='m_feed_from')
                if from_elem:
                    span_elem = from_elem.find('span')
                    if span_elem:
                        report_info['time'] = span_elem.get_text(strip=True)
                        
        except Exception as e:
            logging.error(f"提取报告信息时出错: {e}")
            
        return report_info
    
    @staticmethod
    def _extract_id_ref(container) -> str:
        """Return the href of the company link in m_feed_face, or ''."""
        try:
            # Same link used for the uid in _extract_stock_info.
            face_elem = container.find('div', class_='m_feed_face')
            if face_elem:
                link_elem = face_elem.find('a', href=True)
                if link_elem:
                    return link_elem.get('href', '')
        except Exception as e:
            logging.error(f"提取用户引用链接时出错: {e}")
            
        return ''


class DatabaseManager:
    """SQLite persistence layer for scraped SSE report entries."""
    
    def __init__(self, db_path: str = DB_PATH):
        """
        Initialize the manager and ensure the schema exists.
        
        Args:
            db_path: path to the SQLite database file
        """
        self.db_path = db_path
        self._create_tables()
    
    def _create_tables(self):
        """Create the sseinfo_report table, or migrate an existing one that
        predates the stock_report_timestamp column. Errors are logged, not
        raised, so a failed schema check does not kill the crawler."""
        try:
            with sqlite3.connect(self.db_path) as conn:
                cursor = conn.cursor()
                
                # Does the table already exist?
                cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='sseinfo_report'")
                table_exists = cursor.fetchone() is not None
                
                if table_exists:
                    # Migration path: add the timestamp column if missing.
                    cursor.execute("PRAGMA table_info(sseinfo_report)")
                    columns = [col[1] for col in cursor.fetchall()]
                    
                    if 'stock_report_timestamp' not in columns:
                        print("🔄 更新报告数据库表结构...")
                        cursor.execute('ALTER TABLE sseinfo_report ADD COLUMN stock_report_timestamp INTEGER')
                        print("✅ 报告数据库表结构更新完成")
                else:
                    # Fresh database: create the full table.
                    cursor.execute('''
                        CREATE TABLE sseinfo_report (
                            id TEXT PRIMARY KEY,
                            id_ref TEXT,
                            stock_name TEXT,
                            stock_code TEXT,
                            stock_uid TEXT,
                            stock_image TEXT,
                            stock_report_name TEXT,
                            stock_report_url TEXT,
                            stock_report_time TEXT,
                            stock_report_timestamp INTEGER,
                            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                        )
                    ''')
                    print("✅ 报告数据库表创建成功")
                
                conn.commit()
                logging.info("报告数据表创建成功")
                
        except Exception as e:
            logging.error(f"创建报告数据表时出错: {e}")
    
    def insert_report_data(self, items: List[SSEReportItem]) -> int:
        """
        Insert report entries into the database.
        
        Uses INSERT OR REPLACE, so re-crawled entries overwrite their
        previous row instead of failing on the primary key.
        
        Args:
            items: SSE report entries to persist
            
        Returns:
            Number of rows successfully written
        """
        success_count = 0
        timestamp_generator = TimestampGenerator()
        
        print(f"📝 开始写入数据库，共 {len(items)} 条数据...")
        
        try:
            with sqlite3.connect(self.db_path) as conn:
                cursor = conn.cursor()
                
                for i, item in enumerate(items, 1):
                    # Use the numeric part of ids like "item-12345" as the key.
                    # Bug fix: computed BEFORE the try block (str.startswith/
                    # replace cannot raise here), so the except handler below
                    # can always reference item_id — previously a failure on
                    # this line raised NameError inside the handler itself.
                    item_id = item.id.replace('item-', '') if item.id.startswith('item-') else item.id
                    
                    try:
                        # Derive the numeric timestamp from the Chinese time text.
                        report_timestamp = timestamp_generator.parse_chinese_time(item.stock_report_time)
                        
                        cursor.execute('''
                            INSERT OR REPLACE INTO sseinfo_report 
                            (id, id_ref, stock_name, stock_code, stock_uid, stock_image,
                             stock_report_name, stock_report_url, stock_report_time, stock_report_timestamp)
                            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                        ''', (
                            item_id,
                            item.id_ref,
                            item.stock_name,
                            item.stock_code,
                            item.stock_uid,
                            item.stock_image,
                            item.stock_report_name,
                            item.stock_report_url,
                            item.stock_report_time,
                            report_timestamp
                        ))
                        success_count += 1
                        print(f"✅ 第 {i}/{len(items)} 条数据写入成功 - ID: {item_id}, 公司: {item.stock_name}")
                        
                    except Exception as e:
                        # Per-row failures are logged and skipped so one bad
                        # entry does not abort the whole batch.
                        print(f"❌ 第 {i}/{len(items)} 条数据写入失败 - ID: {item_id} - 错误: {e}")
                        logging.error(f"插入单条报告数据时出错: {e}")
                        continue
                
                conn.commit()
                print(f"💾 数据库写入完成，成功 {success_count}/{len(items)} 条")
                logging.info(f"成功插入 {success_count} 条报告数据")
                
        except Exception as e:
            print(f"❌ 数据库连接失败: {e}")
            logging.error(f"插入报告数据时出错: {e}")
            
        return success_count


class SSEReportCrawler:
    """Top-level crawler: fetches feed pages, parses them, and persists results."""
    
    def __init__(self, cookies: Optional[Dict[str, str]] = None):
        """
        Initialize the report crawler.
        
        Args:
            cookies: custom cookies; when None, SSEConfig.REPORT_COOKIES is used
        """
        self.session = requests.Session()
        self.cookies = cookies or SSEConfig.REPORT_COOKIES
        self.headers = SSEConfig.DEFAULT_HEADERS.copy()
        self.timestamp_generator = TimestampGenerator()
        self.html_parser = ReportHTMLParser()
        self.db_manager = DatabaseManager()
        
        # Install cookies and headers on the shared session so every
        # request carries them.
        self.session.cookies.update(self.cookies)
        self.session.headers.update(self.headers)
        
        # Configure logging (file + console).
        self._setup_logging()
        
    def _setup_logging(self):
        """Configure root logging to write to sse_report_crawler.log and stderr."""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler('sse_report_crawler.log', encoding='utf-8'),
                logging.StreamHandler()
            ]
        )
        self.logger = logging.getLogger(__name__)
    
    def _build_request_params(self, page: int) -> Dict[str, str]:
        """
        Build the query parameters for one feed request.
        
        Args:
            page: page number to request
            
        Returns:
            Copy of the base params plus "page" and a cache-busting "_"
            millisecond timestamp.
        """
        params = SSEConfig.REPORT_PARAMS.copy()
        params['page'] = str(page)
        params['_'] = self.timestamp_generator.generate_timestamp()
        return params
    
    def fetch_page(self, page: int) -> Optional[str]:
        """
        Fetch one page of feed data.
        
        Args:
            page: page number
            
        Returns:
            Response body (HTML fragment), or None on any request/HTTP error.
        """
        try:
            params = self._build_request_params(page)
            
            print(f"🌐 正在请求第 {page} 页报告数据...")
            self.logger.info(f"正在请求第 {page} 页报告数据...")
            
            response = self.session.get(
                SSEConfig.BASE_URL,
                params=params,
                timeout=30
            )
            
            # Turn 4xx/5xx into exceptions handled below.
            response.raise_for_status()
            
            print(f"✅ 第 {page} 页请求成功，状态码: {response.status_code}")
            self.logger.info(f"成功获取第 {page} 页报告数据，状态码: {response.status_code}")
            return response.text
            
        except requests.exceptions.RequestException as e:
            print(f"❌ 第 {page} 页请求失败: {e}")
            self.logger.error(f"请求第 {page} 页报告数据时出错: {e}")
            return None
        except Exception as e:
            print(f"❌ 第 {page} 页处理失败: {e}")
            self.logger.error(f"处理第 {page} 页报告数据时出错: {e}")
            return None
    
    def crawl_all_pages(self, max_pages: Optional[int] = None) -> List[SSEReportItem]:
        """
        Crawl consecutive feed pages until a stop condition is hit.
        
        Stops when max_pages is exceeded, or after 3 consecutive pages
        that either failed to fetch or yielded no items.
        
        Args:
            max_pages: page limit; None (or 0) means unlimited
            
        Returns:
            All extracted report entries across the crawled pages.
        """
        all_items = []
        page = 1
        consecutive_empty_pages = 0
        max_empty_pages = 3  # stop after 3 consecutive empty/failed pages
        
        print(f"🚀 开始爬取SSE报告信息，最大页数: {max_pages}")
        self.logger.info("开始爬取SSE报告信息...")
        
        while True:
            # Stop when the page limit is reached.
            if max_pages and page > max_pages:
                print(f"📄 达到最大页数限制 {max_pages}，停止爬取")
                self.logger.info(f"达到最大页数限制 {max_pages}，停止爬取")
                break
            
            # Fetch the page; None signals a request failure.
            html_content = self.fetch_page(page)
            
            if html_content is None:
                consecutive_empty_pages += 1
                print(f"⚠️  第 {page} 页请求失败，连续失败页数: {consecutive_empty_pages}")
                self.logger.warning(f"第 {page} 页请求失败，连续失败页数: {consecutive_empty_pages}")
                
                if consecutive_empty_pages >= max_empty_pages:
                    print(f"🛑 连续 {max_empty_pages} 页请求失败，停止爬取")
                    self.logger.info("连续多页请求失败，停止爬取")
                    break
                    
                # NOTE(review): this path skips the 1s sleep below, so
                # consecutive failed pages are retried without delay.
                page += 1
                continue
            
            # Parse the fragment into report items.
            page_items = self.html_parser.parse_report_html(html_content, page)
            
            if not page_items:
                consecutive_empty_pages += 1
                print(f"⚠️  第 {page} 页无有效数据，连续空页数: {consecutive_empty_pages}")
                self.logger.warning(f"第 {page} 页无有效数据，连续空页数: {consecutive_empty_pages}")
                
                if consecutive_empty_pages >= max_empty_pages:
                    print(f"🛑 连续 {max_empty_pages} 页无数据，停止爬取")
                    self.logger.info("连续多页无数据，停止爬取")
                    break
            else:
                # Any productive page resets the empty-page counter.
                consecutive_empty_pages = 0
                all_items.extend(page_items)
                print(f"📊 第 {page} 页解析到 {len(page_items)} 条报告数据")
                self.logger.info(f"第 {page} 页解析到 {len(page_items)} 条报告数据")
            
            page += 1
            
            # Throttle requests to avoid hammering the endpoint.
            time.sleep(1)
        
        print(f"🎉 报告爬取完成，共获取 {len(all_items)} 条数据")
        self.logger.info(f"报告爬取完成，共获取 {len(all_items)} 条数据")
        return all_items
    
    def crawl_and_save(self, max_pages: Optional[int] = None) -> int:
        """
        Crawl feed pages and persist the results to the database.
        
        Args:
            max_pages: page limit passed through to crawl_all_pages
            
        Returns:
            Number of records successfully saved (0 when nothing was scraped).
        """
        # Crawl first, persist second.
        items = self.crawl_all_pages(max_pages)
        
        if items:
            # Persist everything in one batch.
            saved_count = self.db_manager.insert_report_data(items)
            self.logger.info(f"报告数据爬取和保存完成，共保存 {saved_count} 条记录")
            return saved_count
        else:
            self.logger.warning("未获取到任何报告数据")
            return 0


def main():
    """主函数"""
    print("=== SSE报告发布数据爬虫 ===")
    
    try:
        # 检查命令行参数；未提供参数时直接使用默认33页（不阻塞输入）
        if len(sys.argv) > 1:
            try:
                max_pages = int(sys.argv[1])
            except ValueError:
                print(f"⚠️  无效的页数参数，使用默认值{DEFAULT_MAX_PAGES}页")
                max_pages = DEFAULT_MAX_PAGES
        else:
            max_pages = DEFAULT_MAX_PAGES
        
        print(f"📋 设置最大页数: {max_pages}")
        
        # 创建爬虫实例
        print("🔧 初始化爬虫...")
        crawler = SSEReportCrawler()
        
        # 爬取并保存数据
        print("🎯 开始爬取和保存数据...")
        saved_count = crawler.crawl_and_save(max_pages)
        
        print(f"\n{'='*50}")
        print(f"🎉 爬取完成！")
        print(f"✅ 成功保存 {saved_count} 条报告数据到数据库")
        print(f"📁 数据库文件: database.sqlite")
        print(f"📊 数据表: sseinfo_report")
        print(f"{'='*50}")
        
    except KeyboardInterrupt:
        print("\n⚠️  用户中断程序")
    except Exception as e:
        print(f"❌ 程序执行出错: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    main()
