#!/usr/bin/python3
# -*- coding: utf-8 -*-

"""
超优化股票数据管理器（精简版）
移除不必要的year_index表和相关功能，简化代码结构
"""

import urllib3
import logging
import sqlite3
import pandas as pd
import numpy as np
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import random
from datetime import datetime, timedelta
from tqdm import tqdm
from collections import defaultdict
import time
import psutil
import threading
from typing import Tuple, List, Dict, Any, Optional, Set

from AKShareDataFetcher import AKShareDataFetcher
from SignalGenerator import SignalGenerator
from StockBasicInfoManager import StockBasicInfoManager
from TradeDateManager import TradeDateManager

# Configure logging and silence urllib3's insecure-request warnings.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


class UltraOptimizedStockDataManager:
    def __init__(self, db_path: str, max_retries: int = 3, retry_delay: int = 5,
                 request_delay: float = 0.2, batch_size: int = 100,
                 page_size: int = 32768, cache_size_mb: int = 256) -> None:
        """Set up connections, caches, helper services and the DB schema.

        Args:
            db_path: Path of the SQLite database file.
            max_retries: Retry attempts forwarded to the AKShare fetcher.
            retry_delay: Seconds between fetch retries.
            request_delay: Base delay between remote requests (seconds).
            batch_size: Stocks buffered per bulk insert / signal batch.
            page_size: SQLite page size in bytes.
            cache_size_mb: SQLite page-cache and mmap budget in megabytes.
        """
        self.db_path = db_path
        self.max_retries = max_retries
        self.retry_delay = retry_delay
        self.request_delay = request_delay
        self.batch_size = batch_size
        self.page_size = page_size
        self.cache_size_mb = cache_size_mb
        self.trade_calendar = TradeDateManager()
        self.signal_generator = SignalGenerator()

        # In-memory cache: symbol -> (ipo_date, last_updated, data_status).
        self.metadata_cache: Dict[str, Tuple[str, str, int]] = {}

        # Timestamps of the last maintenance runs (currently informational).
        self.last_maintenance: Dict[str, Optional[datetime]] = {
            'vacuum': None,
            'export': None
        }

        # Main-thread connection; worker threads obtain their own through
        # get_thread_connection().
        self.conn: sqlite3.Connection = self.create_connection()
        self.initialize_database()
        self.load_metadata_cache()

        # Load the trading calendar.
        self.trade_calendar.load_trade_dates()

        # Remote data fetcher.
        self.data_fetcher = AKShareDataFetcher(
            max_retries=self.max_retries,
            retry_delay=self.retry_delay,
            request_delay=self.request_delay
        )

        # Thread-local storage plus a registry of per-thread connections.
        self.thread_local = threading.local()
        self.thread_connections: List[sqlite3.Connection] = []
        self.thread_connections_lock = threading.Lock()

        # Reconcile the warm cache with the database.
        self.check_cache_consistency()

    def create_connection(self) -> sqlite3.Connection:
        """Open a SQLite connection tuned for high-throughput access.

        Applies WAL journaling, a large page cache, in-memory temp storage,
        mmap I/O and incremental auto-vacuum.
        """
        pragmas = (
            f"PRAGMA page_size = {self.page_size};",
            "PRAGMA journal_mode = WAL;",
            "PRAGMA synchronous = NORMAL;",
            f"PRAGMA cache_size = -{self.cache_size_mb * 1024};",
            "PRAGMA temp_store = MEMORY;",
            f"PRAGMA mmap_size = {self.cache_size_mb * 1024 * 1024};",
            "PRAGMA auto_vacuum = INCREMENTAL;",
        )
        connection = sqlite3.connect(self.db_path)
        for statement in pragmas:
            connection.execute(statement)
        return connection

    def get_thread_connection(self) -> sqlite3.Connection:
        """为当前线程创建或获取数据库连接"""
        thread_id = threading.get_ident()
        if not hasattr(self.thread_local, "conn") or self.thread_local.conn is None:
            self.thread_local.conn = sqlite3.connect(self.db_path)
            # 设置相同的PRAGMA
            self.thread_local.conn.execute(f"PRAGMA page_size = {self.page_size};")
            self.thread_local.conn.execute("PRAGMA journal_mode = WAL;")
            self.thread_local.conn.execute("PRAGMA synchronous = NORMAL;")
            self.thread_local.conn.execute(f"PRAGMA cache_size = -{self.cache_size_mb * 1024};")
            self.thread_local.conn.execute("PRAGMA temp_store = MEMORY;")
            self.thread_local.conn.execute(f"PRAGMA mmap_size = {self.cache_size_mb * 1024 * 1024};")
            self.thread_local.conn.execute("PRAGMA auto_vacuum = INCREMENTAL;")

            # 注册连接用于后续关闭
            with self.thread_connections_lock:
                self.thread_connections.append(self.thread_local.conn)

            logging.debug(f"为线程 {thread_id} 创建新的数据库连接")
        return self.thread_local.conn

    def initialize_database(self) -> None:
        """初始化数据库表结构（精简版）"""
        # 股票元数据表
        self.conn.execute('''
            CREATE TABLE IF NOT EXISTS stock_metadata (
                symbol TEXT PRIMARY KEY,
                ipo_date TEXT NOT NULL,
                last_updated TEXT NOT NULL,
                data_status INTEGER DEFAULT 0
            )
        ''')

        # 分区表（按年分区） - 从2004年开始
        self.create_partitioned_tables()
        self.conn.commit()

    def create_partitioned_tables(self) -> None:
        """创建按年分区的表（2004-当前年份）"""
        current_year = datetime.now().year
        for year in range(2004, current_year + 1):
            self.create_year_table(year)

    def create_year_table(self, year: int) -> None:
        """创建高度优化的年度数据表"""
        table_name = f'stock_data_{year}'
        self.conn.execute(f'''
            CREATE TABLE IF NOT EXISTS {table_name} (
                date TEXT NOT NULL,
                symbol TEXT NOT NULL,
                open REAL,
                close REAL,
                high REAL,
                low REAL,
                volume REAL,
                turnover REAL,
                amplitude REAL,
                change_pct REAL,
                change_amt REAL,
                turnover_rate REAL,
                PRIMARY KEY (symbol, date)
            ) WITHOUT ROWID
        ''')

        # 创建高性能索引
        self.conn.execute(f'''
            CREATE INDEX IF NOT EXISTS idx_{year}_symbol_date 
            ON {table_name}(symbol, date)
        ''')
        self.conn.commit()

    def load_metadata_cache(self) -> None:
        """预加载元数据到内存缓存"""
        try:
            df = pd.read_sql("SELECT symbol, ipo_date, last_updated, data_status FROM stock_metadata", self.conn)
            for _, row in df.iterrows():
                status = int(row['data_status']) if not pd.isna(row['data_status']) else 0
                self.metadata_cache[row['symbol']] = (
                    row['ipo_date'],
                    row['last_updated'],
                    status
                )
            logging.info(f"加载 {len(self.metadata_cache)} 条股票元数据到缓存")
        except Exception as e:
            logging.error(f"元数据缓存加载失败: {e}")

    def check_cache_consistency(self) -> None:
        """检查并修复内存缓存与数据库的一致性"""
        try:
            logging.info("开始缓存一致性检查...")

            # 从数据库加载当前元数据
            db_metadata = {}
            df = pd.read_sql("SELECT symbol, ipo_date, last_updated, data_status FROM stock_metadata", self.conn)
            for _, row in df.iterrows():
                status = int(row['data_status']) if not pd.isna(row['data_status']) else 0
                db_metadata[row['symbol']] = (
                    row['ipo_date'],
                    row['last_updated'],
                    status
                )

            # 比较缓存和数据库
            cache_inconsistencies = 0
            db_inconsistencies = 0

            # 1. 检查缓存中的条目是否在数据库中存在
            for symbol, cache_meta in self.metadata_cache.items():
                db_meta = db_metadata.get(symbol)

                if db_meta is None:
                    # 缓存中有但数据库中不存在
                    logging.warning(f"缓存不一致: 股票 {symbol} 在缓存中但不在数据库中，将从缓存中移除")
                    del self.metadata_cache[symbol]
                    cache_inconsistencies += 1
                elif cache_meta != db_meta:
                    # 缓存与数据库不一致
                    logging.warning(
                        f"缓存不一致: 股票 {symbol} 缓存与数据库不一致。缓存: {cache_meta}, 数据库: {db_meta}")
                    self.metadata_cache[symbol] = db_meta  # 使用数据库值更新缓存
                    cache_inconsistencies += 1

            # 2. 检查数据库中的条目是否在缓存中存在
            for symbol, db_meta in db_metadata.items():
                if symbol not in self.metadata_cache:
                    # 数据库中有但缓存中不存在
                    logging.warning(f"缓存不一致: 股票 {symbol} 在数据库中但不在缓存中，将添加到缓存")
                    self.metadata_cache[symbol] = db_meta
                    db_inconsistencies += 1

            # 统计结果
            total_issues = cache_inconsistencies + db_inconsistencies
            if total_issues == 0:
                logging.info("缓存一致性检查完成: 无不一致项")
            else:
                logging.warning(f"缓存一致性检查完成: 修复 {total_issues} 个不一致项 "
                                f"(缓存不一致: {cache_inconsistencies}, 数据库不一致: {db_inconsistencies})")
        except Exception as e:
            logging.error(f"缓存一致性检查失败: {e}")

    def transform_data(self, df: pd.DataFrame) -> pd.DataFrame:
        """转换数据为优化存储格式"""
        original_count = len(df)
        df = df.copy()

        # 转换日期处理 - 统一转换为YYYYMMDD格式
        if 'date' in df.columns:
            try:
                # 第一步：转换为datetime对象
                df['date'] = pd.to_datetime(df['date'], errors='coerce')
                # 第二步：过滤无效日期
                df = df.dropna(subset=['date'])
                # 第三步：转换为YYYYMMDD格式字符串
                df['date'] = df['date'].dt.strftime('%Y%m%d')
                # 第四步：过滤格式不正确的日期
                df = df[df['date'].str.len() == 8]
            except Exception as e:
                logging.error(f"日期转换错误: {e}")
                df['date'] = "00000000"

        # 数值字段处理
        numeric_cols = ['open', 'close', 'high', 'low', 'volume', 'turnover',
                        'amplitude', 'change_pct', 'change_amt', 'turnover_rate']
        for col in numeric_cols:
            if col in df.columns:
                df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0.0)

        if len(df) < original_count:
            logging.warning(f"数据转换过滤: {original_count} -> {len(df)} 条记录")

        return df

    def validate_date_format(self, date_str: str) -> bool:
        """Return True if *date_str* is a valid calendar date in YYYYMMDD form.

        Accepts years 1900-2100 and enforces real month lengths, including
        leap-year handling for February.  The previous bare ``except:`` has
        been narrowed: only the int() conversion can realistically fail (e.g.
        unicode digits such as superscripts pass isdigit() but not int()).
        """
        if not isinstance(date_str, str) or len(date_str) != 8 or not date_str.isdigit():
            return False

        try:
            year = int(date_str[:4])
            month = int(date_str[4:6])
            day = int(date_str[6:8])
        except ValueError:
            return False

        if not (1900 <= year <= 2100) or not (1 <= month <= 12) or not (1 <= day <= 31):
            return False

        # 30-day months.
        if month in (4, 6, 9, 11) and day > 30:
            return False
        # February: 29 days in leap years, 28 otherwise.
        if month == 2:
            leap_year = (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)
            if day > (29 if leap_year else 28):
                return False

        return True

    def bulk_insert_optimized(self, data_batch: List[Tuple[str, pd.DataFrame, Tuple[str, str, int]]]) -> int:
        """高性能批量插入优化"""
        year_data = defaultdict(list)
        total_rows = 0
        processed_symbols = set()
        meta_updates = []

        for item in data_batch:
            symbol, df, meta_info = item
            processed_symbols.add(symbol)
            meta_updates.append((symbol, meta_info))

            df = self.transform_data(df)
            if df.empty:
                continue

            total_rows += len(df)

            for _, row in df.iterrows():
                date_str = row['date']
                if not self.validate_date_format(date_str):
                    continue

                year = int(date_str[:4])
                if year < 2004:
                    continue

                year_data[year].append((
                    date_str, symbol,
                    float(row.get('open', 0.0)),
                    float(row.get('close', 0.0)),
                    float(row.get('high', 0.0)),
                    float(row.get('low', 0.0)),
                    float(row.get('volume', 0.0)),
                    float(row.get('turnover', 0.0)),
                    float(row.get('amplitude', 0.0)),
                    float(row.get('change_pct', 0.0)),
                    float(row.get('change_amt', 0.0)),
                    float(row.get('turnover_rate', 0.0))
                ))

        inserted_count = 0
        self.conn.execute("BEGIN")

        try:
            for year, data in year_data.items():
                if not data or year < 2004:
                    continue

                self.create_year_table(year)
                table_name = f'stock_data_{year}'
                chunk_size = 5000

                for i in range(0, len(data), chunk_size):
                    chunk = data[i:i + chunk_size]
                    try:
                        self.conn.executemany(
                            f'''INSERT OR IGNORE INTO {table_name}
                            (date, symbol, open, close, high, low, volume, 
                             turnover, amplitude, change_pct, change_amt, turnover_rate)
                            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
                            chunk
                        )
                        changes = self.conn.execute("SELECT changes()").fetchone()[0]
                        inserted_count += changes
                    except sqlite3.IntegrityError:
                        pass

            for symbol, meta_info in meta_updates:
                self.metadata_cache[symbol] = meta_info

            self.conn.commit()
            return inserted_count
        except Exception as e:
            self.conn.rollback()
            logging.error(f"批量插入失败: {e}")
            return 0

    def get_stock_update_range(self, symbol: str, ipo_date: str, max_history_date: str) -> Tuple[str, str]:
        """确定股票需要更新的日期范围"""
        meta = self.metadata_cache.get(symbol)
        if not meta:
            return max(ipo_date, max_history_date), "首次导入"

        last_updated = meta[1]
        data_status = meta[2]

        if data_status == 1:  # 已完成首次导入
            current_date = datetime.now()
            return (current_date - timedelta(days=30)).strftime('%Y%m%d'), "日常更新"

        current_year = datetime.now().year
        return f"{current_year}0101", "继续首次导入"

    def fetch_combined_data(self, days_back: int = 5000) -> int:
        """Fetch and store daily quotes for all stocks (single-thread safe).

        Walks the full stock list, computes each symbol's update window,
        downloads history via the AKShare fetcher (with randomized pacing
        that must not be changed), and bulk-inserts in batches of
        self.batch_size.

        BUG FIX: every early-exit path used to call ``pbar.update(1)`` AND
        fall through to the ``finally`` block, which called it again — so
        skipped symbols advanced the progress bar twice.  The inner updates
        were removed; the ``finally`` block is now the single place the bar
        advances, exactly once per stock.

        Args:
            days_back: Number of trading days of history to cover.

        Returns:
            Total number of rows inserted across all partitions.
        """
        logging.info("开始超优化股票数据更新（单线程模式）...")
        start_time = time.time()
        mem_before = psutil.Process().memory_info().rss / 1024 / 1024  # baseline memory (MB)

        # Fetch the list of stock codes (initialize the DB on first miss).
        stock_manager = StockBasicInfoManager()
        filtered_stock_codes_df = stock_manager.get_stock_codes()

        if filtered_stock_codes_df.empty:
            stock_manager.initialize_stock_database()
            filtered_stock_codes_df = stock_manager.get_stock_codes()
            if filtered_stock_codes_df.empty:
                logging.error("无法获取股票代码列表")
                return 0

        # Prepare stock info (tolerate both column naming schemas).
        symbol_col = 'stock_code' if 'stock_code' in filtered_stock_codes_df.columns else 'code'
        date_col = 'listing_date' if 'listing_date' in filtered_stock_codes_df.columns else 'list_date'

        stock_info = filtered_stock_codes_df[[symbol_col, date_col]].copy()
        stock_info.columns = ['stock_code', 'listing_date']

        # Normalize listing dates to YYYYMMDD; fall back to 2004-01-01.
        def format_date(date_val):
            if pd.isna(date_val):
                return "20040101"
            if isinstance(date_val, str) and date_val.strip().isdigit() and len(date_val.strip()) >= 8:
                return date_val.strip()[:8]
            try:
                parsed = pd.to_datetime(date_val, errors='coerce')
                return parsed.strftime('%Y%m%d') if not pd.isna(parsed) else "20040101"
            except:
                return "20040101"

        stock_info['listing_date'] = stock_info['listing_date'].apply(format_date)

        # Compute the overall date window.
        end_date_str = datetime.now().strftime('%Y%m%d')
        start_date, _ = self.trade_calendar.get_start_end_date(end_date_str, days_back)
        max_history_date_str = start_date  # earliest history boundary
        current_date_str = datetime.now().strftime('%Y%m%d')

        # Single-threaded processing loop.
        inserted_total = 0
        batch_data = []
        total_stocks = len(stock_info)

        with tqdm(total=total_stocks, desc="🚀 更新股票数据", unit="股") as pbar:
            for _, stock in stock_info.iterrows():
                symbol = stock['stock_code']
                listing_date_str = stock['listing_date']

                try:
                    # Cache check: skip symbols that are already up to date.
                    if symbol in self.metadata_cache:
                        meta = self.metadata_cache[symbol]
                        if meta[2] == 1 and meta[1] >= current_date_str:
                            time.sleep(random.uniform(0.5, 1.0))  # gentle pacing
                            continue

                    # Determine the update window for this symbol.
                    start_date_str, _ = self.get_stock_update_range(
                        symbol,
                        listing_date_str,
                        max_history_date_str
                    )

                    if start_date_str > current_date_str:
                        continue

                    # Random request interval (1-5 s) — do not change this.
                    request_delay = random.uniform(1, 5)
                    # Interval verified to avoid API bans; leave untouched.
                    hist_data: pd.DataFrame = self.data_fetcher.fetch_em(
                            symbol=symbol,
                            start_date=start_date_str,
                            end_date=end_date_str,
                            adjust="qfq",
                            request_delay=request_delay
                    )

                    if hist_data is None or hist_data.empty or 'date' not in hist_data.columns:
                        continue

                    # Convert dates to YYYYMMDD strings.
                    hist_data['date'] = pd.to_datetime(hist_data['date'], errors='coerce')
                    hist_data = hist_data.dropna(subset=['date'])
                    if hist_data.empty:
                        continue

                    hist_data['date'] = hist_data['date'].dt.strftime('%Y%m%d')
                    last_date = hist_data['date'].max()

                    # Queue for batched insertion.
                    batch_data.append((symbol, hist_data, (listing_date_str, last_date, 1)))

                    # Flush when the batch is full.
                    if len(batch_data) >= self.batch_size:
                        inserted = self.bulk_insert_optimized(batch_data)
                        inserted_total += inserted
                        batch_data = []
                        pbar.set_postfix_str(f"批次插入: {inserted}条")

                except Exception as e:
                    logging.error(f"处理股票 {symbol} 时出错: {str(e)}")
                finally:
                    # Single place the progress bar advances — once per stock.
                    pbar.update(1)
                    time.sleep(random.uniform(0.5, 1.5))  # inter-stock pacing

        # Flush the final partial batch.
        if batch_data:
            inserted = self.bulk_insert_optimized(batch_data)
            inserted_total += inserted
            logging.info(f"最后批次插入: {inserted}条记录")

        # Summary statistics.
        elapsed = time.time() - start_time
        mem_after = psutil.Process().memory_info().rss / 1024 / 1024
        logging.info(f"股票更新完成! 耗时: {elapsed:.2f}秒, 内存变化: {mem_after - mem_before:.2f}MB")
        self.update_metadata_to_db()
        self.check_cache_consistency()

        return inserted_total

    def update_metadata_to_db(self) -> bool:
        """元数据同步"""
        try:
            current_metadata = []
            for symbol, meta in self.metadata_cache.items():
                ipo, last_updated, status = meta
                if not self.validate_date_format(last_updated):
                    last_updated = "20000101"
                current_metadata.append((symbol, ipo, last_updated, int(status)))

            self.conn.executemany('''
                INSERT OR REPLACE INTO stock_metadata 
                (symbol, ipo_date, last_updated, data_status)
                VALUES (?, ?, ?, ?)
            ''', current_metadata)
            self.conn.commit()
            return True
        except Exception as e:
            self.conn.rollback()
            logging.error(f"元数据同步失败: {str(e)}")
            return False

    def vacuum_database(self) -> None:
        """数据库维护"""
        try:
            self.conn.execute("PRAGMA incremental_vacuum(1000);")
            logging.info("数据库维护完成")
        except Exception as e:
            logging.error(f"维护失败: {e}")

    def get_stock_data(self, symbol: str, start_date: str, end_date: str) -> pd.DataFrame:
        """查询股票数据 - 线程安全版本"""
        try:
            # 获取当前线程的数据库连接
            conn = self.get_thread_connection()

            # 转换日期格式
            start_str = self.convert_to_yyyymmdd(start_date)
            end_str = self.convert_to_yyyymmdd(end_date)
            if not start_str or not end_str:
                return pd.DataFrame()

            # 确定年份范围
            start_year = max(2004, int(start_str[:4]))
            end_year = int(end_str[:4])

            results = []
            for year in range(start_year, end_year + 1):
                table_name = f"stock_data_{year}"
                if not self.table_exists(conn, table_name):
                    continue

                # 直接使用YYYYMMDD格式进行查询
                query = f"""
                    SELECT * FROM {table_name}
                    WHERE symbol = ? AND date BETWEEN ? AND ?
                """
                df = pd.read_sql_query(query, conn, params=(symbol, start_str, end_str))
                if not df.empty:
                    # 保持日期为YYYYMMDD字符串格式
                    df['date'] = df['date'].astype(str)
                    results.append(df)

            if results:
                final_df = pd.concat(results, ignore_index=True).sort_values('date')
                return final_df
            return pd.DataFrame()
        except Exception as e:
            logging.error(f"查询失败: {str(e)}")
            return pd.DataFrame()

    def convert_to_yyyymmdd(self, date_str: str) -> Optional[str]:
        """Parse *date_str* with pandas and return it as YYYYMMDD, or None.

        The previous bare ``except:`` was narrowed to the concrete exceptions
        pd.to_datetime / strftime can raise for bad scalar input.
        """
        try:
            dt = pd.to_datetime(date_str, errors='coerce')
            return dt.strftime('%Y%m%d') if not pd.isnull(dt) else None
        except (ValueError, TypeError, OverflowError):
            return None

    def table_exists(self, conn: sqlite3.Connection, table_name: str) -> bool:
        """检查表是否存在 - 使用特定连接"""
        cursor = conn.cursor()
        cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}'")
        return cursor.fetchone() is not None

    def close(self) -> None:
        """安全关闭所有数据库连接"""
        try:
            # 关闭主连接
            if hasattr(self, 'conn') and self.conn:
                try:
                    self.conn.close()
                    logging.info("主数据库连接已关闭")
                except Exception as e:
                    logging.error(f"关闭主数据库连接失败: {e}")
                finally:
                    self.conn = None

            # 关闭当前线程的连接
            if hasattr(self.thread_local, "conn") and self.thread_local.conn:
                try:
                    self.thread_local.conn.close()
                    logging.info("当前线程的数据库连接已关闭")
                except Exception as e:
                    logging.error(f"关闭当前线程数据库连接失败: {e}")

        except Exception as e:
            logging.error(f"关闭数据库连接时发生错误: {e}")

    def __del__(self) -> None:
        """Best-effort cleanup at garbage collection; must never raise.

        __del__ can run during interpreter shutdown (or on a partially
        constructed instance if __init__ failed), where even logging may be
        unavailable — swallow everything rather than emit
        "Exception ignored in __del__" noise.
        """
        try:
            self.close()
        except Exception:
            pass

    def fetch_and_generate_signals(self, target_date: str, batch_size: int = 100, days_offset: int = 180) -> Dict[
        str, Any]:
        """Generate signals for every SZ and SH stock on *target_date*.

        Each market's data is fetched with a 10-worker thread pool (every
        worker uses its own SQLite connection via get_stock_data), then
        signals are generated in batches and filtered to the target date.
        Side effect: sets ``self.stock_info_map`` (code -> name/industry/rating).

        Args:
            target_date: Target trading day in YYYYMMDD format.
            batch_size: Echoed back in the result dict; the actual batching
                uses ``self.batch_size``.
            days_offset: Number of trading days of history to load.

        Returns:
            Dict with 'sz_signals', 'sh_signals', 'target_date' plus the two
            input parameters.
        """
        # Target date is already expected in YYYYMMDD form.
        target_date_str = target_date
        target_date_dt = datetime.strptime(target_date, '%Y%m%d')
        target_date_obj = target_date_dt.date()

        stock_manager = StockBasicInfoManager()
        try:
            sz_stock_codes_df = stock_manager.get_stock_codes(market_types=['sz'])
            sh_stock_codes_df = stock_manager.get_stock_codes(market_types=['sh'])

            # Map stock code -> name / industry / rating for later annotation.
            all_stock_codes_df = pd.concat([sz_stock_codes_df, sh_stock_codes_df])
            self.stock_info_map = all_stock_codes_df.set_index('stock_code')[['stock_name', 'industry', 'rating']].to_dict(
                'index')
        except Exception as e:
            logging.error(f"获取股票代码失败: {str(e)}")
            return {'sz_signals': [], 'sh_signals': [], 'target_date': target_date_str}

        # Resolve the history window from the trade calendar.
        try:
            start_date, end_date = self.trade_calendar.get_start_end_date(target_date, days_offset)
        except Exception as e:
            logging.warning(f"获取交易日期失败，使用默认起始日期: {str(e)}")
            start_date = "20240101"
            end_date = target_date

        def process_stock_codes(stock_codes_df: pd.DataFrame, market_name: str) -> List[Dict[str, Any]]:
            """Fetch one market's data in parallel and generate its signals."""
            stock_data_list = []
            stock_codes = stock_codes_df['stock_code'].tolist()

            # Step 1: fetch stock data with a thread pool.
            with ThreadPoolExecutor(max_workers=10) as executor:
                future_to_code = {
                    executor.submit(self.get_stock_data, stock_code, start_date, end_date): stock_code
                    for stock_code in stock_codes
                }

                with tqdm(total=len(future_to_code), desc=f"获取{market_name}股票数据") as pbar:
                    for future in as_completed(future_to_code):
                        stock_code = future_to_code[future]
                        try:
                            stock_data = future.result()
                            if not stock_data.empty and 'date' in stock_data.columns:
                                # Ensure rows are in date order.
                                stock_data = stock_data.sort_values('date')

                                # Attach basic stock info.
                                if stock_code in self.stock_info_map:
                                    stock_data['stock_name'] = self.stock_info_map[stock_code]['stock_name']
                                    stock_data['industry'] = self.stock_info_map[stock_code]['industry']
                                    stock_data['rating'] = self.stock_info_map[stock_code]['rating']
                                else:
                                    stock_data['stock_name'] = '未知'
                                    stock_data['industry'] = '未知'
                                    stock_data['rating'] = '未知'
                                    logging.warning(f"未找到股票代码 {stock_code} 的基本信息")

                                # Keep only stocks that traded on the target date.
                                if target_date_obj in pd.to_datetime(stock_data['date']).dt.date.values:
                                    stock_data_list.append(stock_data)
                        except Exception as e:
                            logging.error(f"获取股票 {stock_code} 数据失败: {str(e)}")
                        pbar.update(1)

            # Step 2: generate signals in batches.
            return self.batch_generate_signals(stock_data_list, target_date_obj)

        # Process both markets in parallel.
        with ThreadPoolExecutor(max_workers=2) as executor:
            sz_future = executor.submit(process_stock_codes, sz_stock_codes_df, "深圳")
            sh_future = executor.submit(process_stock_codes, sh_stock_codes_df, "上海")
            sz_signals = sz_future.result()
            sh_signals = sh_future.result()

        logging.info(f"信号生成完成: 深圳市场 {len(sz_signals)} 个信号, 上海市场 {len(sh_signals)} 个信号")
        return {
            'sz_signals': sz_signals,
            'sh_signals': sh_signals,
            'target_date': target_date_str,
            'batch_size': batch_size,
            'days_offset': days_offset
        }

    def batch_generate_signals(self, stock_data_list: List[pd.DataFrame], target_date_obj: datetime.date) -> List[
        Dict[str, Any]]:
        """生成信号的批量处理"""
        all_signals = []
        if not stock_data_list:
            return all_signals

        # 分批处理以避免内存问题
        for i in range(0, len(stock_data_list), self.batch_size):
            batch = stock_data_list[i:i + self.batch_size]
            try:
                batch_result = self.signal_generator.generate_signals(batch)
                # 过滤目标日期的信号
                for signal in batch_result.get('signals_list', []):
                    try:
                        signal_date = pd.to_datetime(signal['date']).date()
                        if signal_date == target_date_obj:
                            # 统一转换为YYYYMMDD格式
                            signal['date'] = signal_date.strftime('%Y%m%d')
                            all_signals.append(signal)
                    except:
                        continue
            except Exception as e:
                logging.error(f"批量生成信号失败: {str(e)}")

        return all_signals


if __name__ == "__main__":
    # 创建优化版数据管理器
    data_manager = UltraOptimizedStockDataManager(
        db_path="data/stock_year_data.db",
        batch_size=200,
        cache_size_mb=512
    )

    try:
        # 设置目标日期
        previous_day = datetime.now() - timedelta(days=2)
        target_date = previous_day.strftime('%Y%m%d')  # 保持YYYYMMDD格式
        print(f"🎯 开始获取目标日期 {target_date} 的交易信号...")
        start_time = time.time()

        # 调用信号生成方法
        signal_results = data_manager.fetch_and_generate_signals(target_date)

        # 提取结果
        sz_signals = signal_results['sz_signals']
        sh_signals = signal_results['sh_signals']
        target_date_str = signal_results['target_date']  # YYYYMMDD格式

        # 打印结果摘要
        print("\n" + "=" * 60)
        print(f"📊 信号生成完成! 耗时: {time.time() - start_time:.2f}秒")
        print(f"📅 目标日期: {target_date_str}")
        print(f"📈 深圳市场信号数: {len(sz_signals)}")
        print(f"📈 上海市场信号数: {len(sh_signals)}")
        print("=" * 60)

        # 打印示例信号
        if sz_signals:
            print("\n深圳市场信号示例:")
            for i, signal in enumerate(sz_signals[:3]):
                print(f"\n🔹 信号 {i + 1} 🔹")
                print(f"股票: {signal.get('stock_code', '未知代码')}")
                print(f"信号: {signal.get('signal_name', '未知信号')}")
                print(f"日期: {signal.get('date', '未知日期')}")  # 输出为YYYYMMDD格式
                print("-" * 50)

        if sh_signals:
            print("\n上海市场信号示例:")
            for i, signal in enumerate(sh_signals[:3]):
                print(f"\n🔹 信号 {i + 1} 🔹")
                print(f"股票: {signal.get('stock_code', '未知代码')}")
                print(f"信号: {signal.get('signal_name', '未知信号')}")
                print(f"日期: {signal.get('date', '未知日期')}")  # 输出为YYYYMMDD格式
                print("-" * 50)

    except Exception as e:
        logging.error(f"主程序错误: {e}")
    finally:
        data_manager.close()