#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Cninfo问董秘数据爬虫
用于爬取Cninfo问董秘信息并存储到数据库
"""

import requests
import time
import json
import logging
import sqlite3
from typing import Dict, List, Optional
from dataclasses import dataclass
from datetime import datetime
import sys

# ===== Global configuration constants =====
DB_PATH: str = "database.sqlite"  # default SQLite file used by DatabaseManager
DEFAULT_MAX_PAGES: int = 33  # page limit used by main() when no CLI argument is given


@dataclass
class QuestionData:
    """A single investor question: text plus its human-readable post time."""
    question_text: str  # question body (API field `mainContent`)
    question_time: str  # formatted 'YYYY-MM-DD HH:MM:SS'; '' when the timestamp is 0

@dataclass
class AnswerData:
    """The company's reply to a question: text plus its human-readable post time."""
    answer_text: str  # answer body (API field `attachedContent`)
    answer_time: str  # formatted 'YYYY-MM-DD HH:MM:SS'; '' when the timestamp is 0

@dataclass
class CninfoFeedItem:
    """One crawled Q&A entry from the Cninfo interactive-answer feed."""
    id: str  # API field `esId`; primary key in the cninfo_iwen table
    stock_name: str  # company short name (API field `companyShortName`)
    stock_code: str  # stock ticker (API field `stockCode`)
    stock_image: str  # absolute company-logo URL; '' when the API gave none
    question: QuestionData
    answer: AnswerData
    page: int  # page number this entry was scraped from
    question_timestamp: int  # question publish time, Unix seconds (0 when unknown)
    answer_timestamp: int    # answer publish time, Unix seconds (0 when unknown)


class CninfoConfig:
    """Static request configuration for the Cninfo interactive-answer API."""

    # Search endpoint of the "ask the board secretary" service.
    BASE_URL = "https://irm.cninfo.com.cn/newircs/index/search"
    # Browser-like headers copied from a captured Chrome request.
    DEFAULT_HEADERS = {
        'Accept': 'application/json, text/plain, */*',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ga;q=0.6,ja;q=0.5',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Content-Type': 'application/x-www-form-urlencoded',
        'DNT': '1',
        'Origin': 'https://irm.cninfo.com.cn',
        'Pragma': 'no-cache',
        'Referer': 'https://irm.cninfo.com.cn/views/interactiveAnswer',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
        'sec-ch-ua': '"Google Chrome";v="141", "Not?A_Brand";v="8", "Chromium";v="141"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sendType': 'formdata'
    }

    # Cookies copied from the original curl request.
    # NOTE(review): SID/JSESSIONID are session-bound and will expire —
    # refresh these values if requests start failing.
    COOKIES = {
        'routeId': '.uc2',
        'SID': '3b719ced-0eb8-4dac-9b0f-4f17d99abb0d',
        'IRMCOOKIE2': '1760968191.291.943.192179|ea4d8e74be9fd7004510aad8d4906407',
        'JSESSIONID': 'bf11b4e3-c20b-4c5d-881d-4bf510fc69c1'
    }

    # Default form parameters sent with every search request.
    # NOTE(review): searchTypes semantics are server-defined — TODO confirm.
    PARAMS = {
        'searchTypes': '1,11',
        'highLight': 'true'
    }


class TimestampGenerator:
    """Helpers for producing and formatting millisecond Unix timestamps."""

    @staticmethod
    def generate_timestamp() -> str:
        """Return the current time as a millisecond-precision timestamp string."""
        now_ms = int(time.time() * 1000)
        return str(now_ms)

    @staticmethod
    def parse_timestamp(millis: int) -> str:
        """Format a millisecond timestamp as 'YYYY-MM-DD HH:MM:SS' (local time).

        A value of 0 is treated as "missing" and yields an empty string.
        """
        if millis == 0:
            return ""
        moment = datetime.fromtimestamp(millis / 1000)
        return moment.strftime('%Y-%m-%d %H:%M:%S')


class JsonParser:
    """Parser for the Cninfo search API JSON payload."""

    @staticmethod
    def parse_feed_json(json_data: Dict, page: int) -> List[CninfoFeedItem]:
        """
        Parse the JSON payload and extract Q&A feed items.

        Args:
            json_data: Decoded JSON response dictionary.
            page: Page number the payload came from.

        Returns:
            List of parsed items; entries missing id/question/answer are skipped.
        """
        items: List[CninfoFeedItem] = []

        try:
            for result in json_data.get('results', []):
                item = JsonParser._extract_item_data(result, page)
                if item:
                    items.append(item)

        except Exception as e:
            logging.error(f"解析JSON时出错: {e}")

        return items

    @staticmethod
    def _to_millis(value) -> int:
        """Coerce a millisecond-timestamp field to int.

        The API may deliver the value as str, int, None or ''.  The old
        code did int(result.get(..., '0')) which raised on None/'' and
        silently dropped the whole item via the broad except; this helper
        degrades to 0 ("unknown") instead.
        """
        try:
            return int(value or 0)
        except (TypeError, ValueError):
            return 0

    @staticmethod
    def _extract_item_data(result: Dict, page: int) -> Optional[CninfoFeedItem]:
        """Build one CninfoFeedItem from a single API result; None if incomplete."""
        try:
            item_id = result.get('esId', '')
            stock_name = result.get('companyShortName', '')
            stock_code = result.get('stockCode', '')
            stock_image_rel = result.get('companyLogo', '')
            # The API returns a relative logo path; prefix the static-resource host.
            stock_image = f"https://resstatic.cninfo.com.cn/irm/ssgs/{stock_image_rel}" if stock_image_rel else ''

            # Question side
            question_text = result.get('mainContent', '')
            question_time_millis = JsonParser._to_millis(result.get('pubDate'))
            question_time = TimestampGenerator.parse_timestamp(question_time_millis)
            question_timestamp = question_time_millis // 1000  # ms -> Unix seconds

            # Answer side
            answer_text = result.get('attachedContent', '')
            answer_time_millis = JsonParser._to_millis(result.get('attachedPubDate'))
            answer_time = TimestampGenerator.parse_timestamp(answer_time_millis)
            answer_timestamp = answer_time_millis // 1000  # ms -> Unix seconds

            # Only keep fully-formed Q&A entries (answered questions).
            if item_id and question_text and answer_text:
                return CninfoFeedItem(
                    id=item_id,
                    stock_name=stock_name,
                    stock_code=stock_code,
                    stock_image=stock_image,
                    question=QuestionData(
                        question_text=question_text,
                        question_time=question_time
                    ),
                    answer=AnswerData(
                        answer_text=answer_text,
                        answer_time=answer_time
                    ),
                    page=page,
                    question_timestamp=question_timestamp,
                    answer_timestamp=answer_timestamp
                )

        except Exception as e:
            logging.error(f"提取条目数据时出错: {e}")

        return None


class DatabaseManager:
    """SQLite persistence layer for crawled Q&A records."""

    def __init__(self, db_path: str = DB_PATH):
        """
        Initialize the manager and ensure the target table exists.

        Args:
            db_path: Path to the SQLite database file.
        """
        self.db_path = db_path
        self._create_tables()

    def _create_tables(self):
        """Create the cninfo_iwen table if it does not exist yet."""
        try:
            with sqlite3.connect(self.db_path) as conn:
                cursor = conn.cursor()

                cursor.execute('''
                    CREATE TABLE IF NOT EXISTS cninfo_iwen (
                        id TEXT PRIMARY KEY,
                        stock_name TEXT,
                        stock_code TEXT,
                        stock_image TEXT,  -- 新增
                        question_text TEXT,
                        question_time TEXT,
                        question_timestamp INTEGER,
                        answer_text TEXT,
                        answer_time TEXT,
                        answer_timestamp INTEGER,
                        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                    )
                ''')
                conn.commit()
                logging.info("数据库表创建成功")

        except Exception as e:
            logging.error(f"创建数据库表时出错: {e}")

    def insert_iwen_data(self, items: List[CninfoFeedItem]) -> int:
        """
        Upsert Q&A records into the database.

        Uses a single INSERT ... ON CONFLICT(id) DO UPDATE (SQLite >= 3.24)
        instead of the previous SELECT-then-UPDATE/INSERT pair; existing rows
        keep their original created_at, all other columns are refreshed.

        Args:
            items: Parsed feed items to persist.

        Returns:
            Number of rows written successfully.
        """
        success_count = 0
        total = len(items)

        print(f"📝 开始写入数据库，共 {total} 条数据...")

        try:
            with sqlite3.connect(self.db_path) as conn:
                cursor = conn.cursor()

                for i, item in enumerate(items, 1):
                    try:
                        cursor.execute('''
                            INSERT INTO cninfo_iwen
                            (id, stock_name, stock_code, stock_image,
                             question_text, question_time, question_timestamp,
                             answer_text, answer_time, answer_timestamp)
                            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                            ON CONFLICT(id) DO UPDATE SET
                                stock_name=excluded.stock_name,
                                stock_code=excluded.stock_code,
                                stock_image=excluded.stock_image,
                                question_text=excluded.question_text,
                                question_time=excluded.question_time,
                                question_timestamp=excluded.question_timestamp,
                                answer_text=excluded.answer_text,
                                answer_time=excluded.answer_time,
                                answer_timestamp=excluded.answer_timestamp
                        ''', (
                            item.id,
                            item.stock_name,
                            item.stock_code,
                            item.stock_image,
                            item.question.question_text,
                            item.question.question_time,
                            item.question_timestamp,
                            item.answer.answer_text,
                            item.answer.answer_time,
                            item.answer_timestamp
                        ))
                        success_count += 1
                        print(f"✅ 第 {i}/{total} 条数据写入成功 - ID: {item.id}, 公司: {item.stock_name}")

                    except Exception as e:
                        # One bad row must not abort the whole batch.
                        print(f"❌ 第 {i}/{total} 条数据写入失败 - ID: {item.id} - 错误: {e}")
                        logging.error(f"插入单条数据时出错: {e}")
                        continue

                conn.commit()
                print(f"💾 数据库写入完成，成功 {success_count}/{total} 条")
                logging.info(f"成功插入 {success_count} 条问董秘数据")

        except Exception as e:
            print(f"❌ 数据库连接失败: {e}")
            logging.error(f"插入问董秘数据时出错: {e}")

        return success_count


class CninfoCrawler:
    """Main crawler for the Cninfo "ask the board secretary" (问董秘) feed."""

    def __init__(self, cookies: Optional[Dict[str, str]] = None):
        """
        Initialize the HTTP session, parser, and database manager.

        Args:
            cookies: Custom cookies; defaults to CninfoConfig.COOKIES when None.
        """
        self.session = requests.Session()
        self.cookies = cookies or CninfoConfig.COOKIES
        self.headers = CninfoConfig.DEFAULT_HEADERS.copy()
        self.timestamp_generator = TimestampGenerator()
        self.json_parser = JsonParser()
        self.db_manager = DatabaseManager()

        # Attach cookies/headers once so every request reuses them.
        self.session.cookies.update(self.cookies)
        self.session.headers.update(self.headers)

        self._setup_logging()

    def _setup_logging(self):
        """Configure logging to a file and to the console."""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler('cninfo_iwen_crawler.log', encoding='utf-8'),
                logging.StreamHandler()
            ]
        )
        self.logger = logging.getLogger(__name__)

    def _build_request_data(self, page: int) -> Dict[str, str]:
        """
        Build the form payload for one search request.

        Args:
            page: 1-based page number.

        Returns:
            Form-data dictionary for the POST body.
        """
        data = CninfoConfig.PARAMS.copy()
        data['pageNo'] = str(page)
        data['pageSize'] = '10'
        return data

    def fetch_page(self, page: int) -> Optional[Dict]:
        """
        Fetch one page of the feed.

        Args:
            page: 1-based page number.

        Returns:
            Decoded JSON dict, or None on any request/decoding failure.
        """
        try:
            data = self._build_request_data(page)
            # Cache-busting timestamp query parameter, mirroring the web client.
            url = f"{CninfoConfig.BASE_URL}?_t={self.timestamp_generator.generate_timestamp()}"

            print(f"🌐 正在请求第 {page} 页问董秘数据...")
            self.logger.info(f"正在请求第 {page} 页问董秘数据...")

            response = self.session.post(
                url,
                data=data,
                timeout=30
            )

            response.raise_for_status()
            json_content = response.json()

            print(f"✅ 第 {page} 页请求成功，状态码: {response.status_code}")
            self.logger.info(f"成功获取第 {page} 页问董秘数据，状态码: {response.status_code}")
            return json_content

        except requests.exceptions.RequestException as e:
            print(f"❌ 第 {page} 页请求失败: {e}")
            self.logger.error(f"请求第 {page} 页问董秘数据时出错: {e}")
            return None
        except Exception as e:
            print(f"❌ 第 {page} 页处理失败: {e}")
            self.logger.error(f"处理第 {page} 页问董秘数据时出错: {e}")
            return None

    def crawl_all_pages(self, max_pages: Optional[int] = None) -> List[CninfoFeedItem]:
        """
        Crawl pages sequentially until a stop condition is hit.

        Stops once max_pages is exceeded, or after max_empty_pages
        consecutive pages that either failed or contained no valid data.

        Args:
            max_pages: Upper page limit; None means no limit.

        Returns:
            All feed items collected across the crawled pages.
        """
        all_items: List[CninfoFeedItem] = []
        page = 1
        consecutive_empty_pages = 0
        max_empty_pages = 3  # stop after this many failed/empty pages in a row

        print(f"🚀 开始爬取Cninfo问董秘信息，最大页数: {max_pages}")
        self.logger.info("开始爬取Cninfo问董秘信息...")

        while True:
            # Respect the optional page limit.
            if max_pages and page > max_pages:
                print(f"📄 达到最大页数限制 {max_pages}，停止爬取")
                self.logger.info(f"达到最大页数限制 {max_pages}，停止爬取")
                break

            json_content = self.fetch_page(page)

            if json_content is None:
                consecutive_empty_pages += 1
                print(f"⚠️  第 {page} 页请求失败，连续失败页数: {consecutive_empty_pages}")
                self.logger.warning(f"第 {page} 页请求失败，连续失败页数: {consecutive_empty_pages}")

                if consecutive_empty_pages >= max_empty_pages:
                    print(f"🛑 连续 {max_empty_pages} 页请求失败，停止爬取")
                    self.logger.info("连续多页请求失败，停止爬取")
                    break

                page += 1
                # Bug fix: keep the politeness delay on the failure path too —
                # previously `continue` skipped the sleep and retried instantly.
                time.sleep(1)
                continue

            page_items = self.json_parser.parse_feed_json(json_content, page)

            if not page_items:
                consecutive_empty_pages += 1
                print(f"⚠️  第 {page} 页无有效数据，连续空页数: {consecutive_empty_pages}")
                self.logger.warning(f"第 {page} 页无有效数据，连续空页数: {consecutive_empty_pages}")

                if consecutive_empty_pages >= max_empty_pages:
                    print(f"🛑 连续 {max_empty_pages} 页无数据，停止爬取")
                    self.logger.info("连续多页无数据，停止爬取")
                    break
            else:
                consecutive_empty_pages = 0
                all_items.extend(page_items)
                print(f"📊 第 {page} 页解析到 {len(page_items)} 条问董秘数据")
                self.logger.info(f"第 {page} 页解析到 {len(page_items)} 条问董秘数据")

            page += 1

            # Throttle between requests to avoid hammering the server.
            time.sleep(1)

        print(f"🎉 问董秘爬取完成，共获取 {len(all_items)} 条数据")
        self.logger.info(f"问董秘爬取完成，共获取 {len(all_items)} 条数据")
        return all_items

    def crawl_and_save(self, max_pages: Optional[int] = None) -> int:
        """
        Crawl the feed and persist the results to the database.

        Args:
            max_pages: Upper page limit; None means no limit.

        Returns:
            Number of records saved.
        """
        items = self.crawl_all_pages(max_pages)

        if not items:
            self.logger.warning("未获取到任何问董秘数据")
            return 0

        saved_count = self.db_manager.insert_iwen_data(items)
        self.logger.info(f"问董秘数据爬取和保存完成，共保存 {saved_count} 条记录")
        return saved_count

def main():
    """CLI entry point: parse the optional page-count argument, crawl, save."""
    print("=== Cninfo问董秘数据爬虫 ===")

    try:
        # Optional first CLI argument limits how many pages are crawled.
        # Unparsable or non-positive values fall back to the default
        # (previously a non-positive value silently stopped the crawl at once).
        max_pages = DEFAULT_MAX_PAGES
        if len(sys.argv) > 1:
            try:
                parsed = int(sys.argv[1])
                if parsed > 0:
                    max_pages = parsed
                else:
                    print(f"⚠️  无效的页数参数，使用默认值{DEFAULT_MAX_PAGES}页")
            except ValueError:
                print(f"⚠️  无效的页数参数，使用默认值{DEFAULT_MAX_PAGES}页")

        print(f"📋 设置最大页数: {max_pages}")

        print("🔧 初始化爬虫...")
        crawler = CninfoCrawler()

        print("🎯 开始爬取和保存数据...")
        saved_count = crawler.crawl_and_save(max_pages)

        print(f"\n{'='*50}")
        print(f"🎉 爬取完成！")
        print(f"✅ 成功保存 {saved_count} 条问董秘数据到数据库")
        # Reference the shared constant so this stays correct if DB_PATH changes.
        print(f"📁 数据库文件: {DB_PATH}")
        print(f"📊 数据表: cninfo_iwen")
        print(f"{'='*50}")

    except KeyboardInterrupt:
        print("\n⚠️  用户中断程序")
    except Exception as e:
        print(f"❌ 程序执行出错: {e}")
        import traceback
        traceback.print_exc()


# Run the crawler when executed as a standalone script.
if __name__ == "__main__":
    main()
