#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Web API服务器 (SQLAlchemy版本)
提供选股功能的REST API接口，使用SQLAlchemy进行数据库操作
"""

import functools
import hmac
import os
import shutil
import time
from datetime import datetime

from flask import Flask, request, jsonify, session, redirect, send_from_directory
from flask_cors import CORS
import pandas as pd
import numpy as np
from loguru import logger

from Strategy import StockStrategies
from model import DatabaseManager, HSnapshot, Block, BlockInfo, SelectResult, ExecutionCache, FBoard, NewsData, StockInfo, LabelInfo
from model.BaseModel import standardize_date_format
from model.User import User
from model.LoginRecord import LoginRecord, OperationType
# Updated import paths for the new crawler package layout
from crawler.PerformanceForecastParser import PerformanceForecastParser
from crawler.SimpleNewsCollector import SimpleNewsCollector
from crawler.AssetRestructureParser import AssetRestructureParser


app = Flask(__name__, static_folder='static')
# NOTE(review): falls back to a hard-coded dev key — set APP_SECRET_KEY in production
app.secret_key = os.environ.get('APP_SECRET_KEY','dev-secret-key')
CORS(app)

# Initialize the SQLAlchemy database manager (engines + session factories)
db_manager = DatabaseManager()
db_manager.init_databases()

# Attach the shared database manager to every model class
HSnapshot.set_db_manager(db_manager)
Block.set_db_manager(db_manager)
BlockInfo.set_db_manager(db_manager)
SelectResult.set_db_manager(db_manager)
NewsData.set_db_manager(db_manager)
FBoard.set_db_manager(db_manager)
StockInfo.set_db_manager(db_manager)
LabelInfo.set_db_manager(db_manager)
User.set_db_manager(db_manager)
LoginRecord.set_db_manager(db_manager)


# Module-level caches populated by load_cached_data(); None means "not loaded yet"
_cached_stock_data = None
_cached_block_data = None
_cached_blockinfo_data = None

# Working-directory configuration
DATA_DIR = "data"
TEMP_DIR = "temp"

# Make sure the working directories exist before any request handling
os.makedirs(DATA_DIR, exist_ok=True)
os.makedirs(TEMP_DIR, exist_ok=True)

def read_csv_with_encoding(file_path):
    """Read a CSV file, trying several encodings in turn.

    Tries utf-8-sig first so a UTF-8 BOM is stripped instead of being glued
    onto the first column header, then plain utf-8 and the common Chinese
    encodings. Time-like columns are forced to str so values such as
    '09:31:00' are not mangled by type inference.

    Args:
        file_path: path of the CSV file to read.

    Returns:
        A DataFrame with whitespace-stripped column names, or None if every
        encoding fails.
    """
    # utf-8-sig must come before utf-8: decoding a BOM'd file as plain utf-8
    # leaves '\ufeff' in the first column name, which .str.strip() won't remove
    encodings = ['utf-8-sig', 'utf-8', 'gbk', 'gb2312']
    
    # Force the known time fields to be read as strings
    time_dtype = {
        'first_limit_up_time': str,
        'last_limit_up_time': str,
        '首次涨停时间': str,
        '最近封涨时间': str
    }
    
    for encoding in encodings:
        try:
            # sep=None + engine='python' lets pandas sniff the delimiter
            df = pd.read_csv(file_path, sep=None, engine='python', encoding=encoding, dtype=time_dtype)
            df.columns = df.columns.str.strip()
            logger.info(f"成功使用 {encoding} 编码读取文件 {os.path.basename(file_path)}")
            return df
        except UnicodeDecodeError:
            # Wrong encoding: silently try the next candidate
            continue
        except Exception as e:
            logger.warning(f"使用 {encoding} 编码读取文件 {os.path.basename(file_path)} 失败: {str(e)}")
            continue
    
    logger.error(f"无法使用任何编码方式读取文件 {file_path}")
    return None

from StrategyConfig import get_strategy_list, get_chinese_name
# Strategy catalogue resolved once at import time; served by /api/strategies
STRATEGY_LIST = get_strategy_list()

def load_cached_data():
    """Load stock/block/blockinfo tables into the module-level caches at startup.

    Always returns True so the web server can start even when the database is
    empty or the load fails; on failure every cache falls back to an empty
    DataFrame (never None).
    """
    global _cached_stock_data, _cached_block_data, _cached_blockinfo_data
    
    try:
        logger.info("正在加载股票数据到缓存...")
        _cached_stock_data = HSnapshot.load_from_db("stock_data.db")
        
        if _cached_stock_data.empty:
            logger.warning("数据库中暂无股票数据，请先导入数据")
            _cached_stock_data = pd.DataFrame()  # guarantee an empty DataFrame, never None
        else:
            # De-duplicate by (trade_date, security_id), keeping the last row
            original_count = len(_cached_stock_data)
            _cached_stock_data = _cached_stock_data.drop_duplicates(subset=['trade_date', 'security_id'], keep='last')
            deduplicated_count = len(_cached_stock_data)
            logger.info(f"股票数据去重: {original_count} -> {deduplicated_count} 条记录 (去除了 {original_count - deduplicated_count} 条重复记录)")
            
        logger.info("正在加载板块数据到缓存...")
        _cached_block_data = Block.load_from_db("stock_data.db")
        if _cached_block_data.empty:
            logger.warning("数据库中暂无板块数据")
            _cached_block_data = pd.DataFrame()
            
        _cached_blockinfo_data = BlockInfo.load_from_db("stock_data.db")
        if _cached_blockinfo_data.empty:
            logger.warning("数据库中暂无板块关联数据")
            _cached_blockinfo_data = pd.DataFrame()
        
        logger.info(f"数据加载完成 - 股票数据: {len(_cached_stock_data)} 条记录")
        return True
        
    except Exception as e:
        logger.error(f"加载缓存数据失败: {str(e)}")
        # Deliberately return True even on failure so the service can still start
        _cached_stock_data = pd.DataFrame()
        _cached_block_data = pd.DataFrame()
        _cached_blockinfo_data = pd.DataFrame()
        return True

def get_cached_stock_data():
    """Return the cached stock snapshot DataFrame, reloading the cache on first use."""
    global _cached_stock_data
    if _cached_stock_data is not None:
        return _cached_stock_data
    logger.warning("缓存数据未加载，正在重新加载...")
    # load_cached_data() repopulates the module-level cache as a side effect
    return _cached_stock_data if load_cached_data() else pd.DataFrame()

def get_cached_block_data():
    """Return the cached block (sector) DataFrame, reloading the cache on first use."""
    global _cached_block_data
    if _cached_block_data is not None:
        return _cached_block_data
    logger.warning("缓存数据未加载，正在重新加载...")
    # load_cached_data() repopulates the module-level cache as a side effect
    return _cached_block_data if load_cached_data() else pd.DataFrame()

def get_cached_blockinfo_data():
    """Return the cached stock-to-block mapping DataFrame, reloading the cache on first use."""
    global _cached_blockinfo_data
    if _cached_blockinfo_data is not None:
        return _cached_blockinfo_data
    logger.warning("缓存数据未加载，正在重新加载...")
    # load_cached_data() repopulates the module-level cache as a side effect
    return _cached_blockinfo_data if load_cached_data() else pd.DataFrame()

def login_required(func):
    """Decorator: redirect to /admin/login unless the session is authenticated.

    Uses functools.wraps so the wrapped view keeps its __name__, __doc__,
    __module__ and __wrapped__ (the manual __name__ copy only preserved the
    name, which broke introspection and Flask endpoint metadata).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not session.get('admin_logged_in'):
            return redirect('/admin/login')
        return func(*args, **kwargs)
    return wrapper

@app.route('/admin/login')
def admin_login_page():
    """Serve the admin login page (no auth required)."""
    return app.send_static_file('admin/login.html')

@app.route('/admin', methods=['GET'])
@login_required
def admin_home():
    """Serve the admin dashboard home page."""
    return app.send_static_file('admin/index.html')

@app.route('/admin/fundamental')
@login_required
def admin_fundamental():
    """Serve the admin fundamental-data (stock info) page."""
    return app.send_static_file('admin/fundamental/stockinfo.html')

@app.route('/admin/import')
@login_required
def admin_import():
    """Legacy import URL: redirect to the current import page."""
    return redirect('/admin/imps')

@app.route('/admin/imps')
@login_required
def admin_imps():
    """Serve the admin data-import page."""
    return app.send_static_file('admin/data-import/imps.html')

@app.route('/admin/batchs')
@login_required
def admin_batchs():
    """Serve the admin batch-import page."""
    return app.send_static_file('admin/data-import/batchs.html')

@app.route('/admin/news')
@login_required
def admin_news():
    """Serve the admin news-management page."""
    return app.send_static_file('admin/news/news.html')

@app.route('/admin/strategy-config')
@login_required
def admin_strategy_config():
    """Serve the admin strategy-configuration page."""
    return app.send_static_file('admin/strategy/strategy-config.html')

@app.route('/admin/execution-record')
@login_required
def admin_execution_record():
    """Serve the admin strategy execution-record page."""
    return app.send_static_file('admin/strategy/execution-record.html')

@app.route('/admin/result-analysis')
@login_required
def admin_result_analysis():
    """Serve the admin result-analysis page."""
    return app.send_static_file('admin/strategy/result-analysis.html')

@app.route('/admin/data-management')
@login_required
def admin_data_management():
    """Serve the admin fundamental data-management page."""
    return app.send_static_file('admin/fundamental/data-man.html')

@app.route('/admin/label-management')
@login_required
def admin_label_management():
    """Serve the admin label-management page."""
    return app.send_static_file('admin/fundamental/label.html')

@app.route('/admin/strategy')
@login_required
def admin_strategy():
    """Serve the admin strategy overview page."""
    return app.send_static_file('admin/strategy/strategy.html')

@app.route('/admin/user')
@login_required
def admin_user():
    """Serve the admin user section landing page."""
    return app.send_static_file('admin/user/user.html')

@app.route('/admin/user/user-management')
@login_required
def admin_user_management():
    """Serve the admin user-management page."""
    return app.send_static_file('admin/user/user-man.html')

@app.route('/admin/user/login-records')
@login_required
def admin_login_records():
    """Serve the admin login-records page."""
    return app.send_static_file('admin/user/login-records.html')

@app.route('/admin/api/login', methods=['POST'])
def admin_api_login():
    """Authenticate the admin user and mark the session as logged in.

    Expects JSON {"username": ..., "password": ...}. Credentials come from
    the ADMIN_USER / ADMIN_PASS environment variables (default admin/admin).
    On success a login audit record is written best-effort.
    """
    try:
        data = request.get_json() or {}
        username = data.get('username','').strip()
        password = data.get('password','').strip()
        # Simple example: credentials come from env vars, default admin/admin
        user = os.environ.get('ADMIN_USER','admin')
        pwd = os.environ.get('ADMIN_PASS','admin')
        # Constant-time comparison avoids leaking credential info via timing;
        # encode to bytes so non-ASCII input is handled
        ok = (hmac.compare_digest(username.encode('utf-8'), user.encode('utf-8'))
              and hmac.compare_digest(password.encode('utf-8'), pwd.encode('utf-8')))
        if ok:
            session['admin_logged_in'] = True
            session['admin_user'] = username
            # Best-effort login audit record; must never block the login flow
            try:
                ip_addr = request.headers.get('X-Forwarded-For', request.remote_addr)
                ua = request.headers.get('User-Agent', '')
                s = db_manager.get_stock_data_session()
                try:
                    record = LoginRecord(
                        phone=username,  # username stored in the phone field as identifier
                        ip_address=ip_addr,
                        user_agent=ua,
                        status=OperationType.LOGIN
                    )
                    s.add(record)
                    s.commit()
                finally:
                    s.close()  # release the DB session even if commit fails
            except Exception:
                # Auditing failure must not affect the login flow
                pass
            return jsonify({'success': True})
        return jsonify({'success': False, 'error': '用户名或密码错误'})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/logout', methods=['POST'])
def admin_logout():
    """Log the admin out: best-effort audit record, then clear the session."""
    # Write a logout audit row if someone is logged in; failures are ignored
    try:
        username = session.get('admin_user')
        if username:
            ip_addr = request.headers.get('X-Forwarded-For', request.remote_addr)
            ua = request.headers.get('User-Agent', '')
            s = db_manager.get_stock_data_session()
            try:
                record = LoginRecord(
                    phone=username,
                    ip_address=ip_addr,
                    user_agent=ua,
                    status=OperationType.LOGOUT
                )
                s.add(record)
                s.commit()
            finally:
                s.close()  # release the DB session even if commit fails
    except Exception:
        pass
    session.pop('admin_logged_in', None)
    session.pop('admin_user', None)
    return jsonify({'success': True})

@app.route('/admin/api/stats')
@login_required
def admin_api_stats():
    """Return row counts for the core tables in stock_data.db.

    Response JSON: {'success': True, 'stats': {'stockinfo', 'block',
    'blockinfo'}}; a table whose count fails is reported as 0.
    """
    try:
        stats = {}
        stock_data_session = db_manager.get_stock_data_session()
        try:
            # Count each table independently so one failure doesn't zero the rest
            for key, model, label in (
                ('stockinfo', StockInfo, 'StockInfo'),
                ('block', Block, 'Block'),
                ('blockinfo', BlockInfo, 'BlockInfo'),
            ):
                try:
                    stats[key] = stock_data_session.query(model).count()
                except Exception as e:
                    logger.warning(f"获取{label}统计失败: {str(e)}")
                    stats[key] = 0
        finally:
            stock_data_session.close()  # always release the session
        
        return jsonify({
            'success': True,
            'stats': stats
        })
        
    except Exception as e:
        logger.error(f"获取统计数据失败: {str(e)}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500

@app.route('/admin/api/strategy/stats')
@login_required
def api_strategy_stats():
    """Return strategy statistics from stock_selection.db.

    Response JSON stats: execution_count, cache_count, strategy_count
    (size of STRATEGY_LIST) and a simulated success_rate string.
    """
    try:
        stats = {}
        stock_selection_session = db_manager.get_stock_selection_session()
        try:
            # Count each table independently so one failure doesn't zero the rest
            for key, model, label in (
                ('execution_count', SelectResult, 'SelectResult'),
                ('cache_count', ExecutionCache, 'ExecutionCache'),
            ):
                try:
                    stats[key] = stock_selection_session.query(model).count()
                except Exception as e:
                    logger.warning(f"获取{label}统计失败: {str(e)}")
                    stats[key] = 0
        finally:
            stock_selection_session.close()  # always release the session
        
        # Strategy count is fixed, derived from STRATEGY_LIST
        stats['strategy_count'] = len(STRATEGY_LIST)
        
        # Success rate is simulated data, not a real win percentage
        if stats['execution_count'] > 0:
            stats['success_rate'] = f"{(stats['execution_count'] * 0.685):.1f}%"
        else:
            stats['success_rate'] = "0.0%"
        
        return jsonify({
            'success': True,
            'stats': stats
        })
        
    except Exception as e:
        logger.error(f"获取策略统计数据失败: {str(e)}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500

@app.route('/')
def index():
    """Root route: serve the news-center page."""
    return app.send_static_file('news.html')

@app.route('/performance')
def performance():
    """Serve the performance (returns) analysis page."""
    return app.send_static_file('performance.html')

@app.route('/import')
def import_page():
    """Legacy data-import URL: redirect to the admin import page."""
    return redirect('/admin/imps')

@app.route('/kline')
def kline_page():
    """Serve the K-line chart page."""
    return app.send_static_file('kline.html')

@app.route('/news')
def news_page():
    """Serve the news-center page."""
    return app.send_static_file('news.html')

@app.route('/fundamental')
def fundamental_page():
    """Serve the fundamental-information page."""
    return app.send_static_file('fundamental.html')

@app.route('/limit_up_analysis')
def limit_up_analysis_page():
    """Serve the limit-up analysis page."""
    return app.send_static_file('limit_up_analysis.html')

@app.route('/consecutive_limit_up')
def consecutive_limit_up_page():
    """Serve the consecutive limit-up ladder page."""
    return app.send_static_file('consecutive_limit_up.html')



@app.route('/api/strategies', methods=['GET'])
def get_strategies():
    """Return the list of available stock-selection strategies."""
    payload = {'strategies': STRATEGY_LIST}
    return jsonify({'success': True, 'data': payload})

@app.route('/api/execute-strategies', methods=['POST'])
def execute_strategies():
    """Execute the requested stock-selection strategies.

    Request JSON:
        execution_date: 'YYYY-MM-DD' (defaults to today; normalized by
                        standardize_date_format).
        strategies:     list of strategy names to run (must be non-empty).
        clear_cache:    when true, delete stored results and execution cache
                        for this date + strategy set before running.

    Results cached per (execution_date, strategy) are returned as-is; only
    uncached strategies are executed, against snapshot data filtered to
    trade_date <= execution_date. New results are persisted, labeled, and an
    execution-cache entry is written per strategy.
    """
    try:
        data = request.get_json()
        execution_date = standardize_date_format(data.get('execution_date', datetime.now().strftime('%Y-%m-%d')))
        strategy_names = data.get('strategies', [])
        clear_cache = data.get('clear_cache', False)
        
        if not strategy_names:
            return jsonify({'success': False, 'error': '请选择至少一个策略'}), 400
        
        # When cache clearing was requested, purge stored results and execution
        # cache for this execution date + strategy set first
        if clear_cache:
            logger.info(f"清空缓存: {execution_date} - {strategy_names}")
            # Purge via SQLAlchemy (NOTE: this local `session` shadows
            # flask.session inside this function; flask.session is unused here)
            session = db_manager.get_stock_selection_session()
            try:
                # Delete stored selection results
                deleted_results = session.query(SelectResult).filter(
                    SelectResult.execution_date == execution_date,
                    SelectResult.strategy_type.in_(strategy_names)
                ).delete()
                
                # Delete execution-cache entries
                deleted_cache = session.query(ExecutionCache).filter(
                    ExecutionCache.execution_date == execution_date,
                    ExecutionCache.strategy_type.in_(strategy_names)
                ).delete()
                
                session.commit()
                logger.info(f"清空缓存完成: 删除了 {deleted_results} 条选股结果和 {deleted_cache} 条缓存记录")
            except Exception as e:
                logger.error(f"清空缓存失败: {str(e)}")
                session.rollback()
            finally:
                session.close()
        
        # Split strategies into cached and to-be-executed
        cached_results = []
        strategies_to_execute = []
        
        for strategy_name in strategy_names:
            ExecutionCache.set_db_manager(db_manager)  # re-bound each loop; harmless
            cache_info = ExecutionCache.get_cache(execution_date, strategy_name)
            if cache_info:
                logger.info(f"找到缓存: {execution_date} - {strategy_name}")
                # Fetch the cached result rows
                cached_results_objects = SelectResult.get_results(
                    execution_date=execution_date,
                    strategy_types=[strategy_name]
                )
                if cached_results_objects:
                    # Convert to JSON dicts (change percent rounded to 2 decimals)
                    for result in cached_results_objects:
                        cp = result.change_percent if result.change_percent is not None else 0.0
                        cached_results.append({
                            'execution_date': result.execution_date,
                            'strategy_type': result.strategy_type,
                            'security_id': result.security_id,
                            'security_name': result.security_name,
                            'latest_price': result.latest_price,
                            'change_percent': round(cp, 2),
                            'industry': result.industry,
                            'limit_up_20d': result.limit_up_20d,
                            'limit_up_60d': result.limit_up_60d,
                            'last_limit_up_date': result.last_limit_up_date,
                            'net_inflow_3d': result.net_inflow_3d,
                            'big_net_inflow_3d': result.big_net_inflow_3d
                        })
                else:
                    # Cache entry exists but no stored rows: re-run the strategy
                    strategies_to_execute.append(strategy_name)
            else:
                strategies_to_execute.append(strategy_name)
        
        # Execute whatever is not served from cache
        if strategies_to_execute:
            logger.info(f"需要执行的策略: {strategies_to_execute}")
            
            # Use the cached snapshot data, then filter by execution date
            stock_data = get_cached_stock_data()
            if stock_data.empty:
                return jsonify({
                    'success': False, 
                    'error': '数据库中暂无股票数据，请先通过数据导入页面导入股票数据文件'
                }), 400
            
            # Strategies must only see data with trade_date <= execution_date
            if execution_date:
                logger.info(f"执行选股：过滤数据 trade_date <= {execution_date}")
                # Compare as datetimes to avoid string-comparison range errors
                original_count = len(stock_data)
                try:
                    trade_dates = pd.to_datetime(stock_data['trade_date'], errors='coerce')
                    exec_dt = pd.to_datetime(execution_date, format='%Y-%m-%d', errors='coerce')
                    mask = trade_dates <= exec_dt
                    stock_data = stock_data[mask]
                    filtered_count = len(stock_data)
                    logger.info(f"执行选股数据过滤: {original_count} -> {filtered_count} 条记录 (过滤了 {original_count - filtered_count} 条记录)")
                    # Log the resulting date range (normalized to YYYY-MM-DD)
                    if filtered_count > 0:
                        min_date = pd.to_datetime(stock_data['trade_date'], errors='coerce').min()
                        max_date = pd.to_datetime(stock_data['trade_date'], errors='coerce').max()
                        min_str = min_date.strftime('%Y-%m-%d') if pd.notna(min_date) else '-'
                        max_str = max_date.strftime('%Y-%m-%d') if pd.notna(max_date) else '-'
                        logger.info(f"执行选股数据日期范围: {min_str} 到 {max_str}")
                except Exception as _e:
                    # Fall back to the old string-comparison logic so execution can proceed
                    stock_data['trade_date'] = stock_data['trade_date'].astype(str)
                    stock_data = stock_data[stock_data['trade_date'] <= execution_date]
                    filtered_count = len(stock_data)
                    logger.info(f"执行选股数据过滤(兜底): {original_count} -> {filtered_count} 条记录")
            
            # Preprocess the data
            stock_data = HSnapshot.preprocess_data(stock_data)
            # Sanity filter using the canonical database column names
            stock_data = stock_data[
                (stock_data['trade_px'] > 0) & 
                (stock_data['volume_trade'] > 0) &
                (stock_data['trade_px'] < 100000)
            ]
            
            logger.debug("数据预处理成功")
            # Use the cached block (sector) data
            block_data = get_cached_block_data()
            blockinfo_data = get_cached_blockinfo_data()
            
            # Run the strategies
            execution_start_time = time.time()
            new_results = []
            
            for strategy_name in strategies_to_execute:
                try:
                    chinese_name = get_chinese_name(strategy_name)
                    selected_stocks = StockStrategies.apply_strategy_to_all(stock_data, strategy_name)
                    
                    if not selected_stocks.empty:
                        # Look up industry info for the selected stocks
                        security_ids = selected_stocks['security_id'].astype(str).tolist()
                        BlockInfo.set_db_manager(db_manager)
                        industry_dict = BlockInfo.get_industries_by_stocks(security_ids)
                        
                        # Normalize strategy output into a uniform English-keyed
                        # structure so missing Chinese columns cannot KeyError
                        results_data = []
                        for _, row in selected_stocks.iterrows():
                            security_id = str(row.get('security_id', ''))
                            latest_price = float(row.get('close', row.get('trade_px', 0.0)) or 0.0)
                            open_price = float(row.get('open', row.get('open_px', 0.0)) or 0.0)
                            # NOTE(review): change_percent is open->close (intraday),
                            # not change vs previous close — confirm this is intended
                            change_percent = 0.0
                            if open_price and open_price != 0:
                                try:
                                    change_percent = (latest_price - open_price) / open_price * 100.0
                                except Exception:
                                    change_percent = 0.0
                            
                            # Industry for this stock (empty string when unknown)
                            industry = industry_dict.get(security_id, '')
                            
                            result_item = {
                                'execution_date': str(execution_date),
                                'strategy_type': strategy_name,
                                'security_id': security_id,
                                'security_name': str(row.get('security_name', '')) if 'security_name' in row else '',
                                'latest_price': round(latest_price, 3),
                                'change_percent': round(change_percent, 2),
                                'industry': industry,
                                'limit_up_20d': int(row.get('limit_up_20d', 0) or 0),
                                'limit_up_60d': int(row.get('limit_up_60d', 0) or 0),
                                'last_limit_up_date': str(row.get('last_limit_up_date', '')) if 'last_limit_up_date' in row else '',
                                'net_inflow_3d': float(row.get('net_inflow_3d', 0.0) or 0.0),
                                'big_net_inflow_3d': float(row.get('big_net_inflow_3d', 0.0) or 0.0)
                            }
                            results_data.append(result_item)
                            new_results.append(result_item)

                        # Persist the batch of results
                        success = SelectResult.save_batch(results_data)
                        if not success:
                            logger.error(f"保存策略 {strategy_name} 结果失败")
                        else:
                            # Also write labels (strategy's Chinese name as the
                            # label, dated on the execution day); best-effort
                            try:
                                from model.LabelInfo import LabelInfo
                                LabelInfo.set_db_manager(db_manager)
                                label_rows = []
                                for item in results_data:
                                    label_rows.append({
                                        'date': item.get('execution_date', execution_date),
                                        'security_id': item.get('security_id', ''),
                                        'label': chinese_name,
                                        'label_type': 'strategy',
                                        'confidence': 100
                                    })
                                if label_rows:
                                    import pandas as pd
                                    df_labels = pd.DataFrame(label_rows)
                                    LabelInfo.import_from_dataframe(df_labels)
                            except Exception as e:
                                logger.error(f"写入标签失败: {e}")

                        # Record the execution-cache entry for this strategy
                        execution_time = time.time() - execution_start_time
                        ExecutionCache.set_db_manager(db_manager)
                        ExecutionCache.save_cache(
                            execution_date=execution_date,
                            strategy_type=strategy_name,
                            results_count=len(results_data),
                            execution_time=execution_time
                        )
                            
                except Exception as e:
                    logger.error(f"执行策略 {strategy_name} 时出错: {str(e)}")
                    continue
            
            # new_results was already built (English keys) inside the loop above
            
            # Merge cached and freshly computed results
            all_results_json = cached_results + new_results
            
            return jsonify({
                'success': True,
                'data': {
                    'results': all_results_json,
                    'total_count': len(all_results_json),
                    'cached_count': len(cached_results),
                    'new_count': len(new_results),
                    'execution_time': time.time() - execution_start_time
                }
            })
        else:
            # Nothing to execute: return cached results only
            return jsonify({
                'success': True,
                'data': {
                    'results': cached_results,
                    'total_count': len(cached_results),
                    'cached_count': len(cached_results),
                    'new_count': 0,
                    'execution_time': 0
                }
            })
            
    except Exception as e:
        logger.error(f"执行策略失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/selection-results', methods=['GET'])
def get_selection_results():
    """Return stored stock-selection results, optionally filtered by date and strategies.

    Query params: execution_date, repeated strategy_types, limit (default 1000).
    """
    try:
        execution_date = request.args.get('execution_date')
        strategy_types = request.args.getlist('strategy_types') or None
        limit = int(request.args.get('limit', 1000))

        # Query via SQLAlchemy
        rows = SelectResult.get_results(
            execution_date=execution_date,
            strategy_types=strategy_types,
            limit=limit
        )

        def _to_dict(r):
            # latest_price is rounded to 3 decimals; None becomes 0.0
            price = 0.0 if r.latest_price is None else round(r.latest_price, 3)
            return {
                'execution_date': r.execution_date,
                'strategy_type': r.strategy_type,
                'security_id': r.security_id,
                'security_name': r.security_name,
                'latest_price': price,
                'change_percent': r.change_percent,
                'industry': r.industry,
                'limit_up_20d': r.limit_up_20d,
                'limit_up_60d': r.limit_up_60d,
                'last_limit_up_date': r.last_limit_up_date,
                'net_inflow_3d': r.net_inflow_3d,
                'big_net_inflow_3d': r.big_net_inflow_3d
            }

        results_json = [_to_dict(r) for r in rows]

        return jsonify({
            'success': True,
            'data': {'results': results_json, 'total_count': len(results_json)}
        })

    except Exception as e:
        logger.error(f"获取选股结果失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/stock-kline/<security_id>', methods=['GET'])
def get_stock_kline(security_id):
    """Return daily K-line (OHLCV) data for one security.

    Query params:
        limit: maximum number of most-recent bars to return (default 100).

    Data comes from the in-memory snapshot cache; rows are de-duplicated by
    (trade_date, security_id) and sorted by date. change_percent is computed
    against the previous bar's close within the returned window.
    """
    try:
        limit = int(request.args.get('limit', 100))
        logger.info(f"请求K线数据: {security_id}, 限制: {limit}")
        
        # Use the cached snapshot data
        stock_data = get_cached_stock_data()
        if stock_data.empty:
            logger.warning("缓存股票数据为空")
            return jsonify({
                'success': False, 
                'error': '数据库中暂无股票数据，请先导入数据'
            }), 400
        
        logger.info(f"缓存数据总行数: {len(stock_data)}")
        stock_data = stock_data[stock_data['security_id'] == security_id]
        logger.info(f"找到股票 {security_id} 的数据行数: {len(stock_data)}")
        
        if stock_data.empty:
            logger.warning(f"未找到股票 {security_id} 的数据")
            return jsonify({'success': False, 'error': f'未找到股票 {security_id} 的数据'}), 404
        
        # De-duplicate by (trade_date, security_id), keeping the last row
        stock_data = stock_data.drop_duplicates(subset=['trade_date', 'security_id'], keep='last')
        stock_data = stock_data.sort_values('trade_date')
        
        logger.info(f"去重后股票 {security_id} 的数据行数: {len(stock_data)}")
        
        kline_data = []
        prev_close = None
        for _, row in stock_data.tail(limit).iterrows():
            # Coerce every field to a plain float, mapping NaN/missing to 0
            try:
                close_price = float(row.get('trade_px', 0)) if pd.notna(row.get('trade_px', 0)) else 0
                
                # Change vs the previous bar's close; first bar of the window is 0
                change_percent = 0
                if prev_close and prev_close > 0:
                    change_percent = ((close_price - prev_close) / prev_close) * 100
                
                kline_data.append({
                    'date': row['trade_date'].strftime('%Y-%m-%d') if hasattr(row['trade_date'], 'strftime') else str(row['trade_date']),
                    'open': float(row.get('open_px', 0)) if pd.notna(row.get('open_px', 0)) else 0,
                    'high': float(row.get('high_px', 0)) if pd.notna(row.get('high_px', 0)) else 0,
                    'low': float(row.get('low_px', 0)) if pd.notna(row.get('low_px', 0)) else 0,
                    'close': close_price,
                    'volume': float(row.get('volume_trade', 0)) if pd.notna(row.get('volume_trade', 0)) else 0,
                    'turnover': float(row.get('amount_trade', 0)) if pd.notna(row.get('amount_trade', 0)) else 0,
                    'change_percent': round(change_percent, 2)
                })
                
                prev_close = close_price
            except Exception as e:
                logger.error(f"处理K线数据行时出错: {str(e)}, 数据: {row}")
                continue
        
        logger.info(f"返回K线数据: {len(kline_data)} 条记录")
        if kline_data:
            logger.info(f"第一条K线数据示例: {kline_data[0]}")
        
        return jsonify({
            'success': True,
            'data': {'security_id': security_id, 'kline_data': kline_data}
        })
        
    except Exception as e:
        logger.error(f"获取K线数据失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/performance-analysis', methods=['POST'])
def analyze_performance():
    """Analyze the follow-up performance of stored stock-selection results.

    Expects a JSON POST body:
        execution_date (str, optional): selection date, normalized via
            standardize_date_format; defaults to today (YYYY-MM-DD).
        strategy_type (str, optional): restrict to one strategy; empty
            string means all strategies.
        analysis_days (int, optional): number of trading rows after the
            execution date to analyze (default 20).

    Returns:
        JSON {'success': True, 'data': {'results': [...], 'summary': {...}|None}}
        with per-stock return/risk metrics and an aggregate summary, or
        {'success': False, 'error': ...} with HTTP status 500/400 on failure.
    """
    try:
        data = request.get_json()
        execution_date = standardize_date_format(data.get('execution_date', datetime.now().strftime('%Y-%m-%d')))
        strategy_type = data.get('strategy_type', '')
        analysis_days = data.get('analysis_days', 20)
        
        logger.info(f"收益分析请求: execution_date={execution_date}, strategy_type={strategy_type}, analysis_days={analysis_days}")
        
        # Fetch the stored selection results (via the SQLAlchemy model).
        if strategy_type:
            logger.info(f"查询指定策略: {strategy_type}")
            results_objects = SelectResult.get_results(
                execution_date=execution_date, 
                strategy_types=[strategy_type]
            )
        else:
            logger.info("查询所有策略")
            results_objects = SelectResult.get_results(
                execution_date=execution_date
            )
        
        logger.info(f"获取到选股结果: {len(results_objects)} 条记录")
        
        if not results_objects:
            logger.warning(f"没有找到 {execution_date} 的选股结果")
            # Would list other available dates/strategies from the DB here,
            # but those helper methods may not exist on the new models yet,
            # so empty placeholders are logged instead.
            available_dates = []
            available_strategies = []
            logger.info(f"数据库中可用日期: {available_dates}")
            logger.info(f"数据库中可用策略: {available_strategies}")
            return jsonify({'success': True, 'data': {'results': [], 'summary': None}})
        
        # Use the module-level cached snapshot data.
        stock_data = get_cached_stock_data()
        if stock_data.empty:
            return jsonify({
                'success': False, 
                'error': '数据库中暂无股票数据，无法进行收益分析'
            }), 400
        
        # Filter by execution date: performance analysis only needs rows
        # with trade_date >= execution date.
        if execution_date:
            logger.info(f"收益分析：过滤数据 trade_date >= {execution_date}")
            # Compare as strings so both sides have a consistent type.
            stock_data['trade_date'] = stock_data['trade_date'].astype(str)
            original_count = len(stock_data)
            stock_data = stock_data[stock_data['trade_date'] >= execution_date]
            filtered_count = len(stock_data)
            logger.info(f"收益分析数据过滤: {original_count} -> {filtered_count} 条记录 (过滤了 {original_count - filtered_count} 条记录)")
            
            # Log the remaining data's date range for diagnostics.
            if not stock_data.empty:
                min_date = stock_data['trade_date'].min()
                max_date = stock_data['trade_date'].max()
                logger.info(f"收益分析数据日期范围: {min_date} 到 {max_date}")
        
        # Plan B: convert trade_date to datetime on the analysis side so we
        # never compare raw strings against pandas Timestamps below.
        try:
            stock_data['trade_date'] = pd.to_datetime(stock_data['trade_date'], errors='coerce')
        except Exception as _:
            pass

        # Fallback: if the standard column names are absent, derive them from
        # the import-time alias columns so later lookups don't fail.
        if 'close' not in stock_data.columns and 'trade_px' in stock_data.columns:
            stock_data['close'] = pd.to_numeric(stock_data['trade_px'], errors='coerce')
        if 'open' not in stock_data.columns and 'open_px' in stock_data.columns:
            stock_data['open'] = pd.to_numeric(stock_data['open_px'], errors='coerce')
        if 'volume' not in stock_data.columns and 'volume_trade' in stock_data.columns:
            stock_data['volume'] = pd.to_numeric(stock_data['volume_trade'], errors='coerce')
        
        # Per-stock analysis rows plus running totals for the summary.
        analysis_results = []
        total_return = 0
        valid_count = 0
        
        for result in results_objects:
            try:
                # Convert the execution date to datetime (it was normalized
                # earlier to the YYYY-MM-DD format).
                if isinstance(execution_date, str):
                    exec_date = pd.to_datetime(execution_date, format='%Y-%m-%d', errors='coerce')
                else:
                    exec_date = execution_date
                
                # Follow-up price data: rows for this stock starting at the
                # execution date, limited to analysis_days rows.
                future_data = stock_data[
                    (stock_data['security_id'] == result.security_id) &
                    (stock_data['trade_date'] >= exec_date)
                ].sort_values('trade_date').head(analysis_days)
                
                initial_price = result.latest_price
                
                # Detailed per-stock trace logging.
                logger.debug(f"股票 {result.security_id} 分析: 执行日期={execution_date}, 初始价格={initial_price}, 找到后续数据={len(future_data)} 条")
                
                if not future_data.empty:
                    # Follow-up data exists: compute the realized return rate
                    # and the extended risk metrics.
                    # The last row's close is used as the final price.
                    # NOTE(review): a zero/None initial_price would raise here
                    # and fall into the per-stock error branch below — confirm
                    # latest_price is always a positive number.
                    final_price = future_data.iloc[-1]['close']
                    return_rate = ((final_price - initial_price) / initial_price) * 100
                    analysis_days_actual = len(future_data)
                    has_future_data = True
                    
                    # Extended metrics:
                    # 1. Amplitude: (max close - min close) / initial price * 100
                    max_price = future_data['close'].max()
                    min_price = future_data['close'].min()
                    amplitude = ((max_price - min_price) / initial_price) * 100
                    
                    # 2. Daily return series (input for Sharpe ratio and volatility).
                    future_data_sorted = future_data.sort_values('trade_date')
                    daily_returns = future_data_sorted['close'].pct_change().dropna()
                    
                    # 3. Volatility: std of daily returns, annualized by sqrt(252).
                    volatility = daily_returns.std() * (252 ** 0.5) * 100 if len(daily_returns) > 1 else 0.0
                    
                    # 4. Sharpe ratio: (annualized return - risk-free rate) / annualized
                    #    volatility, with an assumed 3% risk-free rate.
                    annual_return = return_rate * (252 / analysis_days_actual) if analysis_days_actual > 0 else 0.0
                    risk_free_rate = 3.0  # assumed risk-free rate of 3%
                    sharpe_ratio = (annual_return - risk_free_rate) / volatility if volatility > 0 else 0.0
                    
                    # 5. Maximum drawdown of the cumulative return curve.
                    cumulative_returns = (1 + daily_returns).cumprod()
                    running_max = cumulative_returns.expanding().max()
                    drawdown = (cumulative_returns - running_max) / running_max
                    max_drawdown = drawdown.min() * 100 if len(drawdown) > 0 else 0.0
                    
                    # 6. Daily win rate: share of up days.
                    up_days = len(daily_returns[daily_returns > 0])
                    total_days = len(daily_returns)
                    win_rate_daily = (up_days / total_days) * 100 if total_days > 0 else 0.0
                    
                    # 7. Mean daily return.
                    avg_daily_return = daily_returns.mean() * 100 if len(daily_returns) > 0 else 0.0
                    
                    # 8. Largest single-day gain and loss.
                    max_single_day_gain = daily_returns.max() * 100 if len(daily_returns) > 0 else 0.0
                    max_single_day_loss = daily_returns.min() * 100 if len(daily_returns) > 0 else 0.0
                    
                    # Detailed trace logging of the computed metrics.
                    logger.debug(f"股票 {result.security_id} 收益计算: 初始价格={initial_price}, 最终价格={final_price}, 收益率={return_rate:.2f}%, 分析天数={analysis_days_actual}")
                    logger.debug(f"股票 {result.security_id} 指标计算: 振幅={amplitude:.2f}%, 波动率={volatility:.2f}%, 夏普比率={sharpe_ratio:.2f}, 最大回撤={max_drawdown:.2f}%")
                    
                    total_return += return_rate
                    valid_count += 1
                else:
                    # No follow-up price data: report zeros for this stock.
                    final_price = initial_price
                    return_rate = 0.0
                    analysis_days_actual = 0
                    has_future_data = False
                    
                    # Default values for the extended metrics.
                    amplitude = 0.0
                    volatility = 0.0
                    sharpe_ratio = 0.0
                    max_drawdown = 0.0
                    win_rate_daily = 0.0
                    avg_daily_return = 0.0
                    max_single_day_gain = 0.0
                    max_single_day_loss = 0.0
                    
                    logger.debug(f"股票 {result.security_id} 无后续数据: 执行日期={execution_date}")
                
                analysis_results.append({
                    'security_id': result.security_id,
                    'security_name': result.security_name,
                    'strategy_type': result.strategy_type,
                    'initial_price': round(initial_price, 3),
                    'final_price': round(final_price, 3),
                    'return_rate': round(return_rate, 2),
                    'analysis_days': analysis_days_actual,
                    'has_future_data': has_future_data,
                    # Extended metrics
                    'amplitude': round(amplitude, 2),  # amplitude
                    'volatility': round(volatility, 2),  # annualized volatility
                    'sharpe_ratio': round(sharpe_ratio, 2),  # Sharpe ratio
                    'max_drawdown': round(max_drawdown, 2),  # maximum drawdown
                    'win_rate_daily': round(win_rate_daily, 1),  # daily win rate
                    'avg_daily_return': round(avg_daily_return, 2),  # mean daily return
                    'max_single_day_gain': round(max_single_day_gain, 2),  # largest single-day gain
                    'max_single_day_loss': round(max_single_day_loss, 2)  # largest single-day loss
                })
                    
            except Exception as e:
                logger.error(f"分析股票 {result.security_id} 时出错: {str(e)}")
                # Append a placeholder row so the stock still appears in the
                # results even when its analysis failed.
                analysis_results.append({
                    'security_id': result.security_id,
                    'security_name': result.security_name,
                    'strategy_type': result.strategy_type,
                    'initial_price': round(result.latest_price, 3) if result.latest_price is not None else 0.0,
                    'final_price': round(result.latest_price, 3) if result.latest_price is not None else 0.0,
                    'return_rate': 0.0,
                    'analysis_days': 0,
                    'has_future_data': False,
                    # Defaults for the extended metrics
                    'amplitude': 0.0,
                    'volatility': 0.0,
                    'sharpe_ratio': 0.0,
                    'max_drawdown': 0.0,
                    'win_rate_daily': 0.0,
                    'avg_daily_return': 0.0,
                    'max_single_day_gain': 0.0,
                    'max_single_day_loss': 0.0
                })
                continue
        
        # Aggregate summary statistics.
        summary = None
        if len(analysis_results) > 0:
            # Statistics over valid rows only (those with follow-up data).
            valid_results = [r for r in analysis_results if r['has_future_data']]
            valid_count = len(valid_results)
            
            if valid_count > 0:
                # Valid data exists: compute actual aggregates.
                avg_return = total_return / valid_count
                positive_count = len([r for r in valid_results if r['return_rate'] > 0])
                win_rate = (positive_count / valid_count) * 100
                
                # Aggregates of the extended metrics.
                avg_amplitude = sum(r['amplitude'] for r in valid_results) / valid_count
                avg_volatility = sum(r['volatility'] for r in valid_results) / valid_count
                avg_sharpe_ratio = sum(r['sharpe_ratio'] for r in valid_results) / valid_count
                avg_max_drawdown = sum(r['max_drawdown'] for r in valid_results) / valid_count
                avg_win_rate_daily = sum(r['win_rate_daily'] for r in valid_results) / valid_count
                avg_daily_return = sum(r['avg_daily_return'] for r in valid_results) / valid_count
                
                # Best and worst performers.
                best_return = max(valid_results, key=lambda x: x['return_rate'])
                worst_return = min(valid_results, key=lambda x: x['return_rate'])
                best_sharpe = max(valid_results, key=lambda x: x['sharpe_ratio'])
                worst_drawdown = min(valid_results, key=lambda x: x['max_drawdown'])
                
            else:
                # No valid data: zero out all aggregates.
                avg_return = 0.0
                positive_count = 0
                win_rate = 0.0
                avg_amplitude = 0.0
                avg_volatility = 0.0
                avg_sharpe_ratio = 0.0
                avg_max_drawdown = 0.0
                avg_win_rate_daily = 0.0
                avg_daily_return = 0.0
                best_return = None
                worst_return = None
                best_sharpe = None
                worst_drawdown = None
            
            summary = {
                'total_stocks': len(results_objects),
                'valid_stocks': valid_count,
                'avg_return': round(avg_return, 2),
                'win_rate': round(win_rate, 1),
                'positive_count': positive_count,
                'analysis_days': analysis_days,
                'no_future_data_count': len(analysis_results) - valid_count,
                # Aggregated extended metrics
                'avg_amplitude': round(avg_amplitude, 2),
                'avg_volatility': round(avg_volatility, 2),
                'avg_sharpe_ratio': round(avg_sharpe_ratio, 2),
                'avg_max_drawdown': round(avg_max_drawdown, 2),
                'avg_win_rate_daily': round(avg_win_rate_daily, 1),
                'avg_daily_return': round(avg_daily_return, 2),
                # Best and worst performers
                'best_return_stock': best_return['security_id'] if best_return else None,
                'best_return_value': round(best_return['return_rate'], 2) if best_return else 0.0,
                'worst_return_stock': worst_return['security_id'] if worst_return else None,
                'worst_return_value': round(worst_return['return_rate'], 2) if worst_return else 0.0,
                'best_sharpe_stock': best_sharpe['security_id'] if best_sharpe else None,
                'best_sharpe_value': round(best_sharpe['sharpe_ratio'], 2) if best_sharpe else 0.0,
                'worst_drawdown_stock': worst_drawdown['security_id'] if worst_drawdown else None,
                'worst_drawdown_value': round(worst_drawdown['max_drawdown'], 2) if worst_drawdown else 0.0
            }
        
        return jsonify({
            'success': True,
            'data': {
                'results': analysis_results,
                'summary': summary
            }
        })
        
    except Exception as e:
        logger.error(f"收益分析失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/import/files', methods=['GET'])
def get_import_files():
    """List importable data files found under DATA_DIR.

    Walks DATA_DIR recursively, collecting every .csv/.xlsx/.xls/.txt file
    with its size, modification time, relative path, and a type guessed
    from keywords in the file name.

    Returns:
        JSON {'success': True, 'data': {'files': [...], 'total_files': int,
        'total_size': int, 'total_size_formatted': str}}, or
        {'success': False, 'error': ...} with HTTP 500 on failure.
    """
    # Keywords checked in priority order: 'blockinfo' must precede 'block',
    # otherwise every blockinfo file would be mis-typed as 'block'.
    type_keywords = ('hsnapshot', 'blockinfo', 'fboard', 'block', 'stock', 'label')
    try:
        import_files = []
        total_size = 0

        if os.path.exists(DATA_DIR):
            for root, _dirs, files in os.walk(DATA_DIR):
                for file in files:
                    # Guard clause: skip anything that is not a data file.
                    if not file.endswith(('.csv', '.xlsx', '.xls', '.txt')):
                        continue

                    file_path = os.path.join(root, file)
                    file_size = os.path.getsize(file_path)
                    file_date = datetime.fromtimestamp(os.path.getmtime(file_path))

                    # Path relative to DATA_DIR, plus its containing directory.
                    rel_path = os.path.relpath(file_path, DATA_DIR)
                    dir_name = os.path.dirname(rel_path)

                    # Lowercase once (the original recomputed file.lower()
                    # for every keyword) and look up the first matching type.
                    name_lower = file.lower()
                    file_type = next(
                        (kw for kw in type_keywords if kw in name_lower),
                        'unknown'
                    )

                    import_files.append({
                        'name': file,
                        'path': rel_path,
                        'directory': dir_name if dir_name != '.' else '',
                        'size': file_size,
                        'size_formatted': format_file_size(file_size),
                        'date': file_date.strftime('%Y-%m-%d %H:%M:%S'),
                        'type': file_type
                    })

                    total_size += file_size

        return jsonify({
            'success': True,
            'data': {
                'files': import_files,
                'total_files': len(import_files),
                'total_size': total_size,
                'total_size_formatted': format_file_size(total_size)
            }
        })

    except Exception as e:
        logger.error(f"获取导入文件列表失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/import/upload', methods=['POST'])
def upload_import_file():
    """Upload and import a single CSV file (hsnapshot or fboard only).

    Validates the multipart upload, saves it into TEMP_DIR, imports its
    rows through the matching model, and always removes the temp file
    afterwards (the original leaked it on read/import failures).

    Returns:
        JSON {'success': True, 'data': {...}} on success, or
        {'success': False, 'error': ...} with HTTP 400/500 on failure.
    """
    try:
        if 'file' not in request.files:
            return jsonify({'success': False, 'error': '没有选择文件'}), 400

        file = request.files['file']
        if file.filename == '':
            return jsonify({'success': False, 'error': '没有选择文件'}), 400

        if not file.filename.lower().endswith('.csv'):
            return jsonify({'success': False, 'error': '只支持CSV文件'}), 400

        # Only hsnapshot / fboard files are accepted on this endpoint.
        filename_lower = file.filename.lower()
        if not ('hsnapshot' in filename_lower or 'fboard' in filename_lower):
            return jsonify({'success': False, 'error': '只支持hsnapshot和fboard文件'}), 400

        # Security fix: strip any directory components from the
        # client-supplied filename to prevent path traversal outside
        # TEMP_DIR (the original joined the raw filename directly).
        safe_name = os.path.basename(file.filename)
        temp_file_path = os.path.join(TEMP_DIR, safe_name)
        file.save(temp_file_path)

        try:
            # The earlier keyword check guarantees exactly one of the two
            # types applies here.
            file_type = 'hsnapshot' if 'hsnapshot' in filename_lower else 'fboard'

            # Read the file content (tries several encodings internally).
            try:
                df = read_csv_with_encoding(temp_file_path)
                if df is None or df.empty:
                    return jsonify({'success': False, 'error': '文件读取失败或为空'}), 400
            except Exception as e:
                return jsonify({'success': False, 'error': f'文件读取失败: {str(e)}'}), 400

            # Import the rows through the matching model.
            if file_type == 'hsnapshot':
                from model.HSnapshot import HSnapshot
                df = df.rename(columns=HSnapshot.aliases)
                import_count = HSnapshot.import_from_dataframe(df)
            else:
                from model.FBoard import FBoard
                df = df.rename(columns=FBoard.aliases)
                import_count = FBoard.import_from_dataframe(df)

            return jsonify({
                'success': True,
                'data': {
                    'filename': file.filename,
                    'type': file_type,
                    'import_count': import_count,
                    'message': f'文件 {file.filename} 导入成功，共导入 {import_count} 条记录'
                }
            })
        finally:
            # Always clean up the temp file, including the early error
            # returns above; cleanup failure is best-effort.
            try:
                os.remove(temp_file_path)
            except OSError:
                pass

    except Exception as e:
        logger.error(f"文件上传导入失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/import/process', methods=['POST'])
def process_import_files():
    """处理数据导入文件"""
    try:
        data = request.get_json()
        selected_files = data.get('files', [])
        
        if not selected_files:
            return jsonify({'success': False, 'error': '请选择要导入的文件'}), 400
        
        processed_files = []
        errors = []
        
        for file_info in selected_files:
            try:
                file_path = os.path.join(DATA_DIR, file_info['path'])
                if not os.path.exists(file_path):
                    errors.append(f"文件不存在: {file_info['name']}")
                    continue
                
                # 根据文件名检测文件类型
                def detect_file_type_by_name(filename: str) -> str:
                    """根据文件名初步判断类型。"""
                    name = filename.lower()
                    if name.startswith("hsnapshot"):
                        return "hsnapshot"
                    elif name.startswith("blockinfo"):
                        return "blockinfo"
                    elif name.startswith("fboard"):
                        return "fboard"
                    elif name.startswith("block"):
                        return "block"
                    elif name.startswith("stock"):
                        return "stock"
                    elif name.startswith("label"):
                        return "label"
                    return "unknown"
                
                # 首先根据文件名检测文件类型
                file_type = file_info.get('type', 'unknown')
                if file_type == 'unknown':
                    file_type = detect_file_type_by_name(file_info['name'])
                
                # 读取文件内容
                file_extension = os.path.splitext(file_info['name'])[1].lower()
                try:
                    if file_extension in ['.xlsx', '.xls']:
                        df = pd.read_excel(file_path)
                        df.columns = df.columns.str.strip()
                    elif file_extension in ['.csv', '.txt']:
                        # 尝试多种编码方式读取文件
                        encodings = ['utf-8', 'gbk', 'gb2312', 'utf-8-sig']
                        df = None
                        
                        for encoding in encodings:
                            try:
                                # 定义时间字段的dtype，确保读取为字符串
                                time_dtype = {
                                    'securityId': str,
                                    '证券代码': str,
                                    'first_limit_up_time': str,
                                    'last_limit_up_time': str,
                                    '首次涨停时间': str,
                                    '最近封涨时间': str
                                }
                                df = pd.read_csv(file_path, sep=None, engine='python', encoding=encoding, dtype=time_dtype)
                                df.columns = df.columns.str.strip()
                                logger.info(f"成功使用 {encoding} 编码读取文件 {file_info['name']}")
                                break
                            except UnicodeDecodeError:
                                continue
                            except Exception as e:
                                logger.warning(f"使用 {encoding} 编码读取文件 {file_info['name']} 失败: {str(e)}")
                                continue
                        
                        if df is None:
                            raise Exception("无法使用任何编码方式读取文件")
                except Exception as e:
                    logger.error(f"读取文件 {file_info['name']} 失败: {str(e)}")
                    errors.append(f"读取文件 {file_info['name']} 失败: {str(e)}")
                    continue

                
                # 记录检测到的文件类型
                logger.info(f"文件 {file_info['name']} 检测到的类型: {file_type}")

                if file_type == 'hsnapshot':
                   
                    df = df.rename(columns = HSnapshot.aliases)
                    
                    df =df[df['trade_date']>0]
                    
 #                   logger.info(f"重新读取后的DataFrame形状: {df.shape}")
  #                  logger.info(f"列名: {list(df.columns)}")
 #                   logger.info(f"前3行数据:")
 #                   logger.info(df.head(3).to_string())
                    
                    # 确保必要的列存在
                    required_columns = ['trade_date', 'security_id']
                    if not all(col in df.columns for col in required_columns):
                        errors.append(f"文件 {file_info['name']} 缺少必要的列: {required_columns}")
                        continue
                    
                    # 转换日期格式 - 先检查数据类型再格式化
  #                  logger.info(f"trade_date列的数据类型: {df['trade_date'].dtype}")
 #                   logger.info(f"trade_date列的前3个值: {df['trade_date'].head(3).tolist()}")
                    
                   
                    # 标准化日期格式
                    if 'trade_date' in df.columns:
                        df['trade_date'] = df['trade_date'].apply(standardize_date_format)
                        # 过滤掉无效日期
                        df = df[df['trade_date'].notna()]
                          
                    # 导入到数据库
                    success_count = HSnapshot.import_from_dataframe(df)
                    
                    if success_count > 0:
                        # 导入成功，移动文件到temp目录
                        temp_file_path = os.path.join(TEMP_DIR, f"imported_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{file_info['name']}")
                        shutil.move(file_path, temp_file_path)
                        
                        processed_files.append({
                            'name': file_info['name'],
                            'type': file_type,
                            'status': 'success',
                            'message': f'成功导入股票数据，共 {success_count} 条记录'
                        })
                    else:
                        processed_files.append({
                            'name': file_info['name'],
                            'type': file_type,
                            'status': 'warning',
                            'message': '导入完成但无数据写入，可能是数据格式问题'
                        })
                    
                elif file_type == 'block':

                    df = df.rename(columns = Block.aliases)

                    # 处理板块数据文件
                    # 手动设置列名（因为文件没有标题行）
                    if len(df.columns) < 3:
                        errors.append(f"文件 {file_info['name']} 列数不足，需要至少3列")
                        continue
                    
                    # 确保必要的列存在
                    required_columns = ['block_code']
                    if not all(col in df.columns for col in required_columns):
                        errors.append(f"文件 {file_info['name']} 缺少必要的列: {required_columns}")
                        continue
                    
                    # 标准化日期格式
                    if 'trade_date' in df.columns:
                        df['trade_date'] = df['trade_date'].apply(standardize_date_format)
                        # 过滤掉无效日期
                        df = df[df['trade_date'].notna()]
                    
                    # 确保category列存在，默认为0
                    if 'category' not in df.columns:
                        df['category'] = 0
                    
                    # 导入到数据库
                    success_count = Block.import_from_dataframe(df)
                    
                    if success_count > 0:
                        # 导入成功，移动文件到temp目录
                        temp_file_path = os.path.join(TEMP_DIR, f"imported_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{file_info['name']}")
                        shutil.move(file_path, temp_file_path)
                        
                        processed_files.append({
                            'name': file_info['name'],
                            'type': file_type,
                            'status': 'success',
                            'message': f'成功导入板块数据，共 {success_count} 条记录'
                        })
                    else:
                        processed_files.append({
                            'name': file_info['name'],
                            'type': file_type,
                            'status': 'warning',
                            'message': '导入完成但无数据写入，可能是数据格式问题'
                        })
                    
                elif file_type == 'blockinfo':

                    df = df.rename(columns = BlockInfo.aliases)
                    # 处理板块关联数据文件
                    # 手动设置列名（因为文件没有标题行）
                    if len(df.columns) < 3:
                        errors.append(f"文件 {file_info['name']} 列数不足，需要至少3列")
                        continue
                    
                    # 确保必要的列存在
                    required_columns = ['block_code', 'security_id']
                    if not all(col in df.columns for col in required_columns):
                        errors.append(f"文件 {file_info['name']} 缺少必要的列: {required_columns}")
                        continue
                    
                    # 标准化日期格式
                    if 'trade_date' in df.columns:
                        df['trade_date'] = df['trade_date'].apply(standardize_date_format)
                        # 过滤掉无效日期
                        df = df[df['trade_date'].notna()]
                    
                    # 导入到数据库
                    success_count = BlockInfo.import_from_dataframe(df)
                    
                    if success_count > 0:
                        # 导入成功，移动文件到temp目录
                        temp_file_path = os.path.join(TEMP_DIR, f"imported_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{file_info['name']}")
                        shutil.move(file_path, temp_file_path)
                        
                        processed_files.append({
                            'name': file_info['name'],
                            'type': file_type,
                            'status': 'success',
                            'message': f'成功导入板块关联数据，共 {success_count} 条记录'
                        })
                    else:
                        processed_files.append({
                            'name': file_info['name'],
                            'type': file_type,
                            'status': 'warning',
                            'message': '导入完成但无数据写入，可能是数据格式问题'
                        })
                    
                elif file_type == 'fboard':
                    
                    # 重命名列
                    df = df.rename(columns=FBoard.aliases)
                    
                    # 确保必要的列存在（包括主键字段）
                    required_columns = ['trade_date', 'security_id', 'type']
                    if not all(col in df.columns for col in required_columns):
                        errors.append(f"文件 {file_info['name']} 缺少必要的列: {required_columns}")
                        continue
                    
                    # 标准化日期格式
                    df['trade_date'] = df['trade_date'].apply(standardize_date_format)
                    # 过滤掉无效日期
                    df = df[df['trade_date'].notna()]
                    
                    # 导入到数据库
                    success_count = FBoard.import_from_dataframe(df)
                    
                    if success_count > 0:
                        # 导入成功，移动文件到temp目录
                        temp_file_path = os.path.join(TEMP_DIR, f"imported_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{file_info['name']}")
                        shutil.move(file_path, temp_file_path)
                        
                        processed_files.append({
                            'name': file_info['name'],
                            'type': file_type,
                            'status': 'success',
                            'message': f'成功导入涨停板数据，共 {success_count} 条记录'
                        })
                    else:
                        processed_files.append({
                            'name': file_info['name'],
                            'type': file_type,
                            'status': 'warning',
                            'message': '导入完成但无数据写入，可能是数据格式问题'
                        })
                    
                elif file_type == 'stock':
                    
                    # 重命名列
                    df = df.rename(columns=StockInfo.aliases)
                    
                    # 确保必要的列存在
                    required_columns = ['security_id']
                    if not all(col in df.columns for col in required_columns):
                        errors.append(f"文件 {file_info['name']} 缺少必要的列: {required_columns}")
                        continue
                    
                    # 确保数值字段的类型正确
                    if 'market' in df.columns:
                        df['market'] = pd.to_numeric(df['market'], errors='coerce').fillna(1)
                    if 'is_active' in df.columns:
                        df['is_active'] = pd.to_numeric(df['is_active'], errors='coerce').fillna(1)
                    if 'public_float_share' in df.columns:
                        df['public_float_share'] = pd.to_numeric(df['public_float_share'], errors='coerce').fillna(0)
                    
                    # 导入到数据库
                    success_count = StockInfo.import_from_dataframe(df)
                    
                    if success_count > 0:
                        # 导入成功，移动文件到temp目录
                        temp_file_path = os.path.join(TEMP_DIR, f"imported_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{file_info['name']}")
                        shutil.move(file_path, temp_file_path)
                        
                        processed_files.append({
                            'name': file_info['name'],
                            'type': file_type,
                            'status': 'success',
                            'message': f'成功导入股票信息数据，共 {success_count} 条记录'
                        })
                    else:
                        processed_files.append({
                            'name': file_info['name'],
                            'type': file_type,
                            'status': 'warning',
                            'message': '导入完成但无数据写入，可能是数据格式问题'
                        })
                    
                elif file_type == 'label':
                     
                     # 重命名列
                     df = df.rename(columns=LabelInfo.aliases)
                     
                     # 确保必要的列存在
                     required_columns = ['date', 'security_id', 'label']
                     if not all(col in df.columns for col in required_columns):
                         errors.append(f"文件 {file_info['name']} 缺少必要的列: {required_columns}")
                         continue
                     
                     # 确保数值字段的类型正确
                     if 'confidence' in df.columns:
                         df['confidence'] = pd.to_numeric(df['confidence'], errors='coerce').fillna(100)
                     
                     # 导入到数据库
                     success_count = LabelInfo.import_from_dataframe(df)
                     
                     if success_count > 0:
                         # 导入成功，移动文件到temp目录
                         temp_file_path = os.path.join(TEMP_DIR, f"imported_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{file_info['name']}")
                         shutil.move(file_path, temp_file_path)
                         
                         processed_files.append({
                             'name': file_info['name'],
                             'type': file_type,
                             'status': 'success',
                             'message': f'成功导入标签信息数据，共 {success_count} 条记录'
                         })
                     else:
                         processed_files.append({
                             'name': file_info['name'],
                             'type': file_type,
                             'status': 'warning',
                             'message': '导入完成但无数据写入，可能是数据格式问题'
                         })
                     
                else:
                     processed_files.append({
                         'name': file_info['name'],
                         'type': file_type,
                         'status': 'warning',
                         'message': f'无法识别文件类型，检测到的列: {list(df.columns)}'
                     })
                    
            except Exception as e:
                logger.error(f"处理文件 {file_info['name']} 失败: {str(e)}")
                errors.append(f"处理文件 {file_info['name']} 失败: {str(e)}")
        
        return jsonify({
            'success': True,
            'data': {
                'processed_files': processed_files,
                'errors': errors,
                'total_processed': len(processed_files),
                'total_errors': len(errors)
            }
        })
        
    except Exception as e:
        logger.error(f"处理导入文件失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

def format_file_size(size_bytes):
    """Return a human-readable size string (B / KB / MB / GB) for a byte count."""
    if size_bytes == 0:
        return "0 B"

    value = float(size_bytes)
    # Walk up the unit ladder; GB is the ceiling, so values >= 1 TB
    # are still reported in GB (e.g. "1024.0 GB").
    for unit in ("B", "KB", "MB"):
        if value < 1024:
            return f"{value:.1f} {unit}"
        value /= 1024.0

    return f"{value:.1f} GB"

@app.route('/api/available-data', methods=['GET'])
def get_available_data():
    """Return execution dates and strategies for which results are stored.

    Uses the SelectResult model helpers; STRATEGY_LIST is the module-level
    catalogue of configured strategies.
    """
    try:
        payload = {
            'available_dates': SelectResult.get_available_dates(),
            'available_strategies': SelectResult.get_available_strategies(),
            'strategy_list': STRATEGY_LIST,
        }
        return jsonify({'success': True, 'data': payload})

    except Exception as e:
        logger.error(f"获取可用数据信息失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/health', methods=['GET'])
def health_check():
    """Health-check endpoint.

    Verifies database connectivity by opening and immediately closing a
    session; returns HTTP 500 with status 'unhealthy' on failure.
    """
    try:
        # Use a distinct name: the previous local `session` shadowed
        # flask.session, which is imported at module level.
        db_session = db_manager.get_stock_selection_session()
        db_session.close()

        return jsonify({
            'success': True,
            'status': 'healthy',
            'timestamp': datetime.now().isoformat()
        })

    except Exception as e:
        logger.error(f"健康检查失败: {str(e)}")
        return jsonify({
            'success': False,
            'status': 'unhealthy',
            'error': str(e),
            'timestamp': datetime.now().isoformat()
        }), 500

# ========================================
# 新闻相关API接口
# ========================================

@app.route('/api/news/crawl', methods=['POST'])
def crawl_news():
    """Crawl news data from Eastmoney.

    Request JSON (all optional):
        tags:      list of tag strings to crawl (default ['年报预期', '业绩预告', 'A股'])
        max_pages: number of pages to fetch per tag and parser (default 2)

    Fix: the original parsed `tags` and `max_pages` but then hard-coded
    `tag='A股', page=1`, ignoring both; the "开始爬取" log was also emitted
    only after the crawl had finished.
    """
    try:
        data = request.get_json() or {}
        tags = data.get('tags', ['年报预期', '业绩预告', 'A股'])
        max_pages = data.get('max_pages', 2)

        logger.info(f"开始爬取东方财富新闻，标签: {tags}, 最大页数: {max_pages}")

        # Use multiple parsers to cover different kinds of news.
        parsers = [
            PerformanceForecastParser(),
            AssetRestructureParser()
        ]

        # Collect data from every parser across all requested tags/pages.
        news_list = []
        for parser in parsers:
            for tag in tags:
                for page in range(1, max_pages + 1):
                    try:
                        parser_news = parser.get_news_list(tag=tag, page=page)
                        if parser_news:
                            news_list.extend(parser_news)
                    except Exception as e:
                        logger.warning(f"解析器 {parser.__class__.__name__} 爬取失败: {str(e)}")
                        continue

        # Fall back to the simplified collector when the parsers return nothing.
        if not news_list:
            logger.info("原始爬虫未获取到数据，尝试使用优化版收集器...")
            collector = SimpleNewsCollector()
            news_list = collector.collect_financial_news(
                sources=['performance_forecast', 'asset_restructure', 'general'],
                total_count=30
            )

        if not news_list:
            return jsonify({
                'success': False,
                'error': '未爬取到新闻数据'
            }), 400

        # Persist to the database (save_news_batch reports how many rows stuck).
        saved_count = NewsData.save_news_batch(news_list)

        logger.info(f"新闻爬取完成，获取 {len(news_list)} 条，保存 {saved_count} 条")

        return jsonify({
            'success': True,
            'data': {
                'total_crawled': len(news_list),
                'total_saved': saved_count,
                'news_preview': news_list[:5]  # first 5 items as a preview
            }
        })

    except Exception as e:
        logger.error(f"爬取新闻失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

def get_stock_price_info(session, stock_code):
    """Return latest price / change info for *stock_code*, or None.

    Looks up the most recent trade date in HSnapshot, then that day's
    snapshot for the stock. Any failure is logged and reported as None.
    """
    try:
        from model.HSnapshot import HSnapshot

        def _rounded(value):
            # Falsy inputs (None or 0) collapse to 0.0, matching the
            # presentation rule used elsewhere in this API.
            return round(value, 2) if value else 0.0

        # Most recent trade date available in the snapshot table.
        latest = (session.query(HSnapshot.trade_date)
                  .order_by(HSnapshot.trade_date.desc())
                  .first())
        if not latest:
            return None

        # Snapshot row for this stock on that date.
        snapshot = (session.query(HSnapshot)
                    .filter(HSnapshot.security_id == stock_code,
                            HSnapshot.trade_date == latest[0])
                    .first())
        if not snapshot:
            return None

        # Percentage change versus previous close (0.0 when unavailable).
        pct_change = 0.0
        if snapshot.pre_close_px and snapshot.pre_close_px != 0:
            pct_change = ((snapshot.trade_px - snapshot.pre_close_px) / snapshot.pre_close_px) * 100

        return {
            'latest_price': _rounded(snapshot.trade_px),
            'change_percent': round(pct_change, 2),
            'pre_close': _rounded(snapshot.pre_close_px),
            'open_price': _rounded(snapshot.open_px),
            'high_price': _rounded(snapshot.high_px),
            'low_price': _rounded(snapshot.low_px),
            'volume': snapshot.volume_trade or 0,
            'amount': _rounded(snapshot.amount_trade),
            'trade_date': snapshot.trade_date
        }
    except Exception as e:
        logger.error(f"获取股票价格信息失败: {str(e)}")
        return None

@app.route('/api/news/list', methods=['GET'])
def get_news_list():
    """Return a paginated news list.

    Query params: limit (default 20), page (default 1), category, search
    (title substring), days_back (default 30, filters on crawl_time).

    Fixes: page/limit are validated before any query (the original computed
    a dead offset first and validated after the count), `import json` is
    hoisted out of the per-row loop, the bare `except:` is narrowed, and the
    session is released in a `finally` instead of a `locals()` check.
    """
    db_session = None
    try:
        import json

        # Query parameters.
        limit = request.args.get('limit', 20, type=int)
        page = request.args.get('page', 1, type=int)
        category = request.args.get('category', None)
        search = request.args.get('search', None)
        days_back = request.args.get('days_back', 30, type=int)

        # Validate paging up front, then compute the offset once.
        if page < 1:
            page = 1
        if limit < 1:
            limit = 20
        offset = (page - 1) * limit

        db_session = db_manager.get_stock_data_session()
        query = db_session.query(NewsData)

        # Optional filters.
        if category:
            query = query.filter(NewsData.category == category)
        if search:
            query = query.filter(NewsData.title.ilike(f'%{search}%'))

        # Restrict to news crawled within the last `days_back` days.
        from datetime import datetime, timedelta
        days_ago = datetime.now() - timedelta(days=days_back)
        query = query.filter(NewsData.crawl_time >= days_ago)

        total_count = query.count()

        # Page of results, newest first.
        news_list = query.order_by(NewsData.crawl_time.desc()).offset(offset).limit(limit).all()

        result_list = []
        for news in news_list:
            news_dict = {
                'id': news.id,
                'news_id': news.news_id,
                'title': news.title,
                'summary': news.summary,
                'url': news.url,
                'source': news.source,
                'author': news.author,
                'publish_time': news.publish_time.strftime('%Y-%m-%d %H:%M:%S') if news.publish_time else None,
                'crawl_time': news.crawl_time.strftime('%Y-%m-%d %H:%M:%S') if news.crawl_time else None,
                'category': news.category,
                'stock_codes': news.stock_codes,
                'stock_name': news.stock_name,
                'sentiment_score': news.sentiment_score,
                'importance_score': news.importance_score
            }

            # stock_codes is stored as a JSON list; use the first code, if any.
            stock_code = None
            if news.stock_codes:
                try:
                    stock_codes_list = json.loads(news.stock_codes)
                    if stock_codes_list:
                        stock_code = stock_codes_list[0]
                except (ValueError, TypeError):
                    pass  # malformed JSON — just skip the price enrichment

            if stock_code:
                stock_price_info = get_stock_price_info(db_session, stock_code)
                if stock_price_info:
                    news_dict['stock_price'] = stock_price_info

            result_list.append(news_dict)

        logger.info(f"新闻列表查询成功: 总数={total_count}, 当前页={page}, 每页={limit}, 总页数={(total_count + limit - 1) // limit}")

        return jsonify({
            'success': True,
            'data': {
                'news_list': result_list,
                'total_count': total_count,
                'page': page,
                'limit': limit,
                'total_pages': (total_count + limit - 1) // limit
            }
        })

    except Exception as e:
        logger.error(f"获取新闻列表失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500
    finally:
        if db_session is not None:
            db_session.close()

@app.route('/api/news/search', methods=['GET'])
def search_news():
    """Search news by keyword and enrich results with stock price info.

    Query params: keyword (required), limit (default 20).

    Fixes: `import json` hoisted out of the loop; bare `except:` narrowed to
    the JSON-decoding failures it is meant to swallow.
    """
    try:
        import json

        keyword = request.args.get('keyword', '').strip()
        limit = request.args.get('limit', 20, type=int)

        if not keyword:
            return jsonify({
                'success': False,
                'error': '请提供搜索关键词'
            }), 400

        # Model-level search returns a list of dicts.
        news_list = NewsData.search_news(keyword=keyword, limit=limit)

        # Attach latest price info for each news item's first stock code.
        db_session = db_manager.get_stock_data_session()
        try:
            for news in news_list:
                stock_code = None
                if news.get('stock_codes'):
                    try:
                        # stock_codes is stored as a JSON-encoded list.
                        stock_codes_list = json.loads(news['stock_codes'])
                        if stock_codes_list:
                            stock_code = stock_codes_list[0]
                    except (ValueError, TypeError):
                        pass  # malformed JSON — skip enrichment for this item

                if stock_code:
                    stock_price_info = get_stock_price_info(db_session, stock_code)
                    if stock_price_info:
                        news['stock_price'] = stock_price_info
        finally:
            db_session.close()

        return jsonify({
            'success': True,
            'data': {
                'news_list': news_list,
                'keyword': keyword,
                'total_count': len(news_list)
            }
        })

    except Exception as e:
        logger.error(f"搜索新闻失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/news/detail/<news_id>', methods=['GET'])
def get_news_detail(news_id):
    """Return the full detail record for one news item.

    Fixes: the original closed the session immediately after the query and
    then read ORM attributes off the detached instance (DetachedInstanceError
    risk for expired/lazy attributes), and opened a second session just for
    the price lookup. Here one session is held open until the response dict
    is fully built and released in `finally`; bare excepts are narrowed.
    """
    db_session = None
    try:
        import json

        db_session = db_manager.get_stock_data_session()
        news = db_session.query(NewsData).filter(NewsData.news_id == news_id).first()

        if not news:
            return jsonify({
                'success': False,
                'error': '新闻不存在'
            }), 404

        # Eastmoney detail pages need special handling; body fetch is skipped.
        if not news.content and news.url:
            logger.warning(f"跳过东方财富新闻详情获取: {news.url}")

        news_dict = {
            'id': news.id,
            'news_id': news.news_id,
            'title': news.title,
            'summary': news.summary,
            'content': news.content,
            'url': news.url,
            'source': news.source,
            'author': news.author,
            'publish_time': news.publish_time.strftime('%Y-%m-%d %H:%M:%S') if news.publish_time else None,
            'crawl_time': news.crawl_time.strftime('%Y-%m-%d %H:%M:%S') if news.crawl_time else None,
            'category': news.category,
            # Performance-forecast related fields.
            'stock_name': news.stock_name,
            'forecast_type': news.forecast_type,
            'forecast_indicator': news.forecast_indicator,
            'forecast_value': news.forecast_value,
            'last_year_value': news.last_year_value,
            'yoy_change': news.yoy_change,
            'qoq_change': news.qoq_change,
            'change_reason': news.change_reason,
            # keywords is normalized to a list below.
            'keywords': [],
            'stock_codes': news.stock_codes,
            'sentiment_score': news.sentiment_score,
            'importance_score': news.importance_score
        }

        # keywords is stored as JSON text; fall back to an empty list.
        if news.keywords:
            try:
                news_dict['keywords'] = json.loads(news.keywords)
            except (ValueError, TypeError):
                news_dict['keywords'] = news.keywords if isinstance(news.keywords, list) else []

        # stock_codes is a JSON list; enrich with the first code's price data.
        stock_code = None
        if news.stock_codes:
            try:
                stock_codes_list = json.loads(news.stock_codes)
                if stock_codes_list:
                    stock_code = stock_codes_list[0]
            except (ValueError, TypeError):
                pass

        if stock_code:
            stock_price_info = get_stock_price_info(db_session, stock_code)
            if stock_price_info:
                news_dict['stock_price'] = stock_price_info

        return jsonify({
            'success': True,
            'data': news_dict
        })

    except Exception as e:
        logger.error(f"获取新闻详情失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500
    finally:
        if db_session is not None:
            db_session.close()

@app.route('/api/news/cleanup', methods=['POST'])
def cleanup_old_news():
    """Delete news older than the requested retention window.

    Request JSON: { "days": <retention in days, default 30> }.
    """
    try:
        payload = request.get_json() or {}
        days = payload.get('days', 30)

        # Model helper removes everything older than the retention window.
        deleted_count = NewsData.cleanup_old_news(days_to_keep=days)

        logger.info(f"清理完成，删除 {deleted_count} 条旧新闻")

        return jsonify({
            'success': True,
            'data': {'deleted_count': deleted_count, 'days': days}
        })

    except Exception as e:
        logger.error(f"清理旧新闻数据失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/news/delete/<int:news_id>', methods=['DELETE'])
def delete_news(news_id):
    """Delete a single news row by primary-key id.

    Fixes: the original leaked the session on the 404 early return and on
    any exception, and never rolled back a failed transaction.
    """
    db_session = None
    try:
        db_session = db_manager.get_stock_data_session()
        news = db_session.query(NewsData).filter(NewsData.id == news_id).first()

        if not news:
            return jsonify({
                'success': False,
                'error': '新闻不存在'
            }), 404

        db_session.delete(news)
        db_session.commit()

        logger.info(f"删除新闻成功，ID: {news_id}")

        return jsonify({
            'success': True,
            'data': {
                'deleted_id': news_id
            }
        })

    except Exception as e:
        if db_session is not None:
            db_session.rollback()
        logger.error(f"删除新闻失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500
    finally:
        if db_session is not None:
            db_session.close()

@app.route('/api/news/batch-delete', methods=['POST'])
def batch_delete_news():
    """Delete multiple news rows in one transaction.

    Request JSON: { "news_ids": [<id>, ...] }.

    Fixes: the original issued one SELECT + DELETE round-trip per id and
    leaked the session on exception. A single bulk DELETE with `IN (...)`
    is used instead; its return value is the deleted row count, matching
    the count the loop accumulated.
    """
    db_session = None
    try:
        data = request.get_json() or {}
        news_ids = data.get('news_ids', [])

        if not news_ids:
            return jsonify({
                'success': False,
                'error': '请选择要删除的新闻'
            }), 400

        db_session = db_manager.get_stock_data_session()

        # Bulk delete; synchronize_session=False is safe because the
        # session is discarded right after the commit.
        deleted_count = (
            db_session.query(NewsData)
            .filter(NewsData.id.in_(news_ids))
            .delete(synchronize_session=False)
        )
        db_session.commit()

        logger.info(f"批量删除新闻成功，删除 {deleted_count} 条")

        return jsonify({
            'success': True,
            'data': {
                'deleted_count': deleted_count,
                'total_requested': len(news_ids)
            }
        })

    except Exception as e:
        if db_session is not None:
            db_session.rollback()
        logger.error(f"批量删除新闻失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500
    finally:
        if db_session is not None:
            db_session.close()

@app.route('/api/news/stats', methods=['GET'])
def get_news_stats():
    """Return total / today / last-7-days news counts.

    Fix: the session is now released in a `finally`; the original leaked it
    whenever one of the count queries raised.
    """
    db_session = None
    try:
        from datetime import datetime, timedelta

        db_session = db_manager.get_stock_data_session()

        # Total rows in the news table.
        total_count = db_session.query(NewsData).count()

        # Crawled today (crawl_time >= today's date).
        today = datetime.now().date()
        today_count = db_session.query(NewsData).filter(
            NewsData.crawl_time >= today
        ).count()

        # Crawled within the last 7 days.
        week_ago = today - timedelta(days=7)
        week_count = db_session.query(NewsData).filter(
            NewsData.crawl_time >= week_ago
        ).count()

        return jsonify({
            'success': True,
            'data': {
                'total_count': total_count,
                'today_count': today_count,
                'week_count': week_count
            }
        })

    except Exception as e:
        logger.error(f"获取新闻统计信息失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500
    finally:
        if db_session is not None:
            db_session.close()

@app.route('/api/fundamental-data', methods=['GET'])
def get_fundamental_data():
    """Return per-stock fundamental data for the latest trade date.

    Query params: only_today_labels, page, page_size, price_min, price_max,
    chg_min, chg_max, industry (all optional filters / paging).

    Fixes: the label-query session was closed only in the `else` branch, so
    it leaked whenever only_today_labels was true; the block/industry session
    leaked on exception; `latest_price` was computed twice per stock.
    """
    try:
        # Latest trade date available in the snapshot table.
        latest_dates = HSnapshot.get_available_dates()
        if not latest_dates:
            return jsonify({
                'success': False,
                'error': '数据库中暂无交易数据'
            }), 400

        latest_date = latest_dates[0]
        logger.info(f"获取基本面数据，最新交易日期: {latest_date}")

        # Security master data; required for names and float share.
        stock_info_list = StockInfo.get_all_stocks()
        if not stock_info_list:
            return jsonify({
                'success': False,
                'error': '数据库中暂无证券基本信息，请先导入证券基本信息数据'
            }), 400

        stock_info_dict = {stock['security_id']: stock for stock in stock_info_list}

        # Snapshot rows for the latest trade date, keyed by security id.
        latest_snapshot_data = HSnapshot.get_data(latest_date)
        if not latest_snapshot_data:
            return jsonify({
                'success': False,
                'error': f'数据库中暂无{latest_date}的交易数据'
            }), 400

        snapshot_dict = {snapshot.security_id: snapshot for snapshot in latest_snapshot_data}

        # Filter: only stocks labeled today.
        only_today_labels = (request.args.get('only_today_labels', '0') in ['1', 'true', 'True'])

        # Paging parameters.
        try:
            page = int(request.args.get('page', 1))
            page_size = int(request.args.get('page_size', 1000))  # default 1000 per page
        except (ValueError, TypeError):
            page = 1
            page_size = 1000

        def _optional_float(name):
            # Parse an optional float query parameter; '', 'null', missing or
            # malformed values are all treated as "no filter" (None).
            raw = request.args.get(name)
            if raw in [None, '', 'null']:
                return None
            try:
                return float(raw)
            except Exception:
                return None

        price_min = _optional_float('price_min')
        price_max = _optional_float('price_max')
        chg_min = _optional_float('chg_min')
        chg_max = _optional_float('chg_max')

        # Labels: security_id -> [labels]; also track which ids were labeled today.
        labels_dict = {}
        today_labeled_security_ids = set()
        try:
            label_session = db_manager.get_stock_data_session()
            try:
                if only_today_labels:
                    # Only today's labels.
                    label_results = label_session.query(LabelInfo).filter(
                        LabelInfo.date == latest_date
                    ).all()
                else:
                    # Today's labels plus always-on STATIC labels.
                    label_results = label_session.query(LabelInfo).filter(
                        (LabelInfo.date == latest_date) | (LabelInfo.date == 'STATIC')
                    ).all()
            finally:
                # Previously closed only in the else branch — leaked otherwise.
                label_session.close()

            for label_info in label_results:
                labels_dict.setdefault(label_info.security_id, []).append(label_info.label)
                if label_info.date == latest_date:
                    today_labeled_security_ids.add(label_info.security_id)
        except Exception as e:
            logger.warning(f"获取标签信息失败: {str(e)}")

        # Industry (category == 2) and concept (category != 2) lookups.
        industry_dict = {}  # security_id -> industry name
        concept_dict = {}   # security_id -> [concept names]
        try:
            block_session = db_manager.get_stock_data_session()
            try:
                industry_query = block_session.query(BlockInfo.security_id, Block.block_name).join(
                    Block, BlockInfo.block_code == Block.block_code
                ).filter(Block.category == 2)

                for result in industry_query:
                    # When a stock maps to several industries, keep the first.
                    industry_dict.setdefault(result.security_id, result.block_name)

                concept_query = block_session.query(BlockInfo.security_id, Block.block_name).join(
                    Block, BlockInfo.block_code == Block.block_code
                ).filter(Block.category != 2)

                for result in concept_query:
                    concept_dict.setdefault(result.security_id, []).append(result.block_name)
            finally:
                # Release even when a query raises (original leaked here).
                block_session.close()
        except Exception as e:
            logger.warning(f"获取行业和概念信息失败: {str(e)}")

        # Optional industry filter.
        industry_filter = request.args.get('industry')

        # Assemble the response rows, applying all filters.
        fundamental_data = []
        for security_id, stock_info in stock_info_dict.items():
            snapshot = snapshot_dict.get(security_id)
            if not snapshot:
                continue  # no trading data for this stock

            if only_today_labels and security_id not in today_labeled_security_ids:
                continue

            # Price-range filter on the latest price.
            latest_price = snapshot.trade_px or 0
            if price_min is not None and latest_price < price_min:
                continue
            if price_max is not None and latest_price > price_max:
                continue

            # Market value = float share * latest price.
            public_float_share = stock_info.get('public_float_share', 0) or 0
            market_value = public_float_share * latest_price

            # Percentage change vs previous close.
            change_percent = 0.0
            if snapshot.pre_close_px and snapshot.pre_close_px != 0:
                change_percent = ((snapshot.trade_px - snapshot.pre_close_px) / snapshot.pre_close_px) * 100

            if chg_min is not None and change_percent < chg_min:
                continue
            if chg_max is not None and change_percent > chg_max:
                continue

            labels = labels_dict.get(security_id, [])

            industry = industry_dict.get(security_id, '-')
            if industry_filter and industry != industry_filter:
                continue
            concepts = concept_dict.get(security_id, [])
            concepts_text = ', '.join(concepts) if concepts else '-'

            fundamental_data.append({
                'security_name': stock_info['security_name'],
                'security_id': stock_info['security_id'],
                'public_float_share': public_float_share,
                'market_value': market_value,
                'change_percent': round(change_percent, 2),
                'latest_price': latest_price,
                'volume': snapshot.volume_trade or 0,
                'turnover': snapshot.amount_trade or 0,
                'labels': labels,
                'industry': industry,
                'concepts': concepts_text
            })

        # Stable ordering by security code.
        fundamental_data.sort(key=lambda x: x['security_id'])

        # In-memory pagination.
        total_count = len(fundamental_data)
        start_index = (page - 1) * page_size
        paginated_data = fundamental_data[start_index:start_index + page_size]

        return jsonify({
            'success': True,
            'data': {
                'fundamental_data': paginated_data,
                'latest_date': latest_date,
                'total_count': total_count,
                'page': page,
                'page_size': page_size,
                'total_pages': (total_count + page_size - 1) // page_size
            }
        })

    except Exception as e:
        logger.error(f"获取基本面数据失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/import/stockinfo', methods=['POST'])
@login_required
def admin_import_stockinfo():
    """Import StockInfo rows from the newest matching CSV in the data directory."""
    try:
        from model.StockInfo import StockInfo
        import glob
        import os

        # Find candidate stock CSVs under data/.
        candidates = glob.glob(os.path.join('data', '*stock*.csv'))
        if not candidates:
            return jsonify({
                'success': False,
                'error': '在data目录下未找到stock相关文件'
            })

        # Newest file by creation time wins.
        latest_file = max(candidates, key=os.path.getctime)
        logger.info(f"导入StockInfo文件: {latest_file}")

        # Encoding-tolerant CSV read.
        df = read_csv_with_encoding(latest_file)
        if df is None or df.empty:
            return jsonify({
                'success': False,
                'error': '文件读取失败或为空'
            })

        count = StockInfo.import_from_dataframe(df)

        return jsonify({
            'success': True,
            'count': count,
            'file': os.path.basename(latest_file)
        })

    except Exception as e:
        logger.error(f"导入StockInfo失败: {str(e)}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500

@app.route('/admin/api/import/block', methods=['POST'])
@login_required
def admin_import_block():
    """Import Block rows from the newest matching CSV in the data directory."""
    try:
        from model.Block import Block
        import glob
        import os

        # Find candidate block CSVs under data/.
        candidates = glob.glob(os.path.join('data', '*block*.csv'))
        if not candidates:
            return jsonify({
                'success': False,
                'error': '在data目录下未找到block相关文件'
            })

        # Newest file by creation time wins.
        latest_file = max(candidates, key=os.path.getctime)
        logger.info(f"导入Block文件: {latest_file}")

        # Encoding-tolerant CSV read.
        df = read_csv_with_encoding(latest_file)
        if df is None or df.empty:
            return jsonify({
                'success': False,
                'error': '文件读取失败或为空'
            })

        count = Block.import_from_dataframe(df)

        return jsonify({
            'success': True,
            'count': count,
            'file': os.path.basename(latest_file)
        })

    except Exception as e:
        logger.error(f"导入Block失败: {str(e)}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500

@app.route('/admin/api/import/blockinfo', methods=['POST'])
@login_required
def admin_import_blockinfo():
    """Import BlockInfo rows from the newest blockinfo CSV in the data directory."""
    try:
        from model.BlockInfo import BlockInfo
        import glob
        import os

        # Candidate CSVs: any file in data/ whose name contains "blockinfo".
        candidates = glob.glob(os.path.join('data', '*blockinfo*.csv'))
        if not candidates:
            return jsonify({
                'success': False,
                'error': '在data目录下未找到blockinfo相关文件'
            })

        # Prefer the most recently created file.
        latest_file = max(candidates, key=os.path.getctime)
        logger.info(f"导入BlockInfo文件: {latest_file}")

        frame = read_csv_with_encoding(latest_file)
        if frame is None or frame.empty:
            return jsonify({
                'success': False,
                'error': '文件读取失败或为空'
            })

        imported = BlockInfo.import_from_dataframe(frame)
        return jsonify({
            'success': True,
            'count': imported,
            'file': os.path.basename(latest_file)
        })

    except Exception as e:
        logger.error(f"导入BlockInfo失败: {str(e)}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500

@app.route('/admin/api/import/hsnapshot', methods=['POST'])
@login_required
def admin_import_hsnapshot():
    """Bulk-import all hsnapshot CSVs found in the data directory, in name order."""
    try:
        from model.HSnapshot import HSnapshot
        import glob
        import os

        # Collect hsnapshot files under data/.
        files = glob.glob(os.path.join('data', '*hsnapshot*.csv'))
        if not files:
            return jsonify({
                'success': False,
                'error': '在data目录下未找到hsnapshot相关文件'
            })

        # Sort by filename so files are processed in date order.
        files.sort()
        logger.info(f"找到 {len(files)} 个hsnapshot文件")

        total_count = 0
        processed_files = 0

        for file_path in files:
            try:
                logger.info(f"处理文件: {os.path.basename(file_path)}")

                frame = read_csv_with_encoding(file_path)
                if frame is None or frame.empty:
                    logger.warning(f"文件为空或读取失败: {file_path}")
                    continue

                # Map CSV column names onto the model's expected field names.
                frame = frame.rename(columns=HSnapshot.aliases)

                count = HSnapshot.import_from_dataframe(frame)
                total_count += count
                processed_files += 1
                logger.info(f"文件 {os.path.basename(file_path)} 导入完成，处理 {count} 条记录")
            except Exception as e:
                # A single bad file must not abort the whole batch.
                logger.error(f"处理文件 {file_path} 失败: {str(e)}")
                continue

        return jsonify({
            'success': True,
            'total_files': len(files),
            'processed_files': processed_files,
            'total_count': total_count
        })

    except Exception as e:
        logger.error(f"批量导入hsnapshot失败: {str(e)}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500

@app.route('/admin/api/import/fboard', methods=['POST'])
@login_required
def admin_import_fboard():
    """Bulk-import all fboard CSVs in the data directory, reporting per-file failures."""
    try:
        from model.FBoard import FBoard
        import glob
        import os

        # Collect fboard files under data/.
        files = glob.glob(os.path.join('data', '*fboard*.csv'))
        if not files:
            return jsonify({
                'success': False,
                'error': '在data目录下未找到fboard相关文件'
            })

        # Sort by filename so files are processed in date order.
        files.sort()
        logger.info(f"找到 {len(files)} 个fboard文件")

        total_count = 0
        processed_files = 0
        failed_files = []

        for file_path in files:
            try:
                logger.info(f"处理文件: {os.path.basename(file_path)}")

                frame = read_csv_with_encoding(file_path)
                if frame is None or frame.empty:
                    logger.warning(f"文件为空或读取失败: {file_path}")
                    failed_files.append({
                        'file': os.path.basename(file_path),
                        'error': '文件为空或读取失败'
                    })
                    continue

                # Map CSV column names onto the model's expected field names.
                frame = frame.rename(columns=FBoard.aliases)

                # Validate that the mandatory columns survived the rename.
                required = ['trade_date', 'security_id', 'market']
                missing = [col for col in required if col not in frame.columns]
                if missing:
                    error_msg = f"缺少必要列: {missing}"
                    logger.error(f"文件 {os.path.basename(file_path)} {error_msg}")
                    failed_files.append({
                        'file': os.path.basename(file_path),
                        'error': error_msg
                    })
                    continue

                count = FBoard.import_from_dataframe(frame)
                total_count += count
                processed_files += 1
                logger.info(f"文件 {os.path.basename(file_path)} 导入完成，处理 {count} 条记录")
            except Exception as e:
                # Record the failure and keep going with the remaining files.
                error_msg = str(e)
                logger.error(f"处理文件 {file_path} 失败: {error_msg}")
                failed_files.append({
                    'file': os.path.basename(file_path),
                    'error': error_msg
                })
                continue

        return jsonify({
            'success': True,
            'total_files': len(files),
            'processed_files': processed_files,
            'failed_files': failed_files,
            'total_count': total_count
        })

    except Exception as e:
        logger.error(f"批量导入fboard失败: {str(e)}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500

# 简单API占位：执行记录列表
@app.route('/admin/api/strategy/records')
@login_required
def admin_api_strategy_records():
    """Return up to the 50 most recent (execution_date, strategy) batches.

    Each record carries result counts from SelectResult plus cached timing
    metadata from ExecutionCache when available.
    """
    try:
        from model.SelectResult import SelectResult
        from model.ExecutionCache import ExecutionCache

        session_sel = db_manager.get_stock_selection_session()
        try:
            # Distinct (date, strategy) pairs; trailing slice keeps last 50.
            rows = session_sel.query(SelectResult.execution_date, SelectResult.strategy_type).distinct().all()
            result = []
            for d, s in rows[-50:]:
                # Number of selection results for this batch.
                # NOTE(review): one count + one cache lookup per batch (N+1
                # queries) — acceptable for <=50 batches, revisit if it grows.
                count = session_sel.query(SelectResult).filter(
                    SelectResult.execution_date == d,
                    SelectResult.strategy_type == s
                ).count()

                cache_info = session_sel.query(ExecutionCache).filter(
                    ExecutionCache.execution_date == d,
                    ExecutionCache.strategy_type == s
                ).first()

                result.append({
                    'execution_time': f"{d} 09:30:00" if d else '',
                    'execution_date': d or '',
                    'strategy': s or '',
                    'result_count': count,
                    'cache_count': cache_info.results_count if cache_info else 0,
                    'new_count': count,  # simplified: should be the newly-executed count
                    'duration': cache_info.execution_time if cache_info else 0
                })
        finally:
            # BUG FIX: close the session even when a query raises;
            # the original only closed it on the success path.
            session_sel.close()
        return jsonify({'success': True, 'records': result})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

# 获取交易日期信息
@app.route('/admin/api/trading-dates')
@login_required
def admin_api_trading_dates():
    """Report the min/max trade date and distinct trading-day count from HSnapshot."""
    try:
        from model.HSnapshot import HSnapshot
        from sqlalchemy import func

        session = db_manager.get_stock_data_session()
        try:
            # Earliest and latest trade dates plus the number of distinct days.
            min_date = session.query(func.min(HSnapshot.trade_date)).scalar()
            max_date = session.query(func.max(HSnapshot.trade_date)).scalar()
            trading_days = session.query(func.count(func.distinct(HSnapshot.trade_date))).scalar()
        finally:
            # BUG FIX: close the session even when a query raises;
            # the original only closed it on the success path.
            session.close()

        # Normalize dates to strings; the column may store either str or date
        # objects depending on how the data was imported.
        if min_date:
            if isinstance(min_date, str):
                min_date_str = min_date
                max_date_str = str(max_date) if max_date else None
            else:
                min_date_str = min_date.strftime('%Y-%m-%d') if hasattr(min_date, 'strftime') else str(min_date)
                max_date_str = max_date.strftime('%Y-%m-%d') if max_date and hasattr(max_date, 'strftime') else str(max_date) if max_date else None
        else:
            min_date_str = None
            max_date_str = None

        return jsonify({
            'success': True,
            'min_date': min_date_str,
            'max_date': max_date_str,
            'trading_days': trading_days
        })
    except Exception as e:
        logger.error(f"获取交易日期信息失败: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500

# 简单API占位：结果明细
@app.route('/admin/api/strategy/analysis')
@login_required
def admin_api_strategy_analysis():
    """Return up to 1000 SelectResult rows, optionally filtered by date and strategy.

    Query params: execution_date, strategy (both optional).
    """
    try:
        d = request.args.get('execution_date', '').strip()
        s = request.args.get('strategy', '').strip()

        from model.SelectResult import SelectResult
        session_sel = db_manager.get_stock_selection_session()
        try:
            q = session_sel.query(SelectResult)
            if d:
                q = q.filter(SelectResult.execution_date == d)
            if s:
                q = q.filter(SelectResult.strategy_type == s)
            rows = q.limit(1000).all()

            # Serialize while the session is still open so detached-instance
            # attribute access cannot fail later.
            results = []
            for r in rows:
                results.append({
                    'security_id': getattr(r, 'security_id', ''),
                    'security_name': getattr(r, 'security_name', ''),
                    'industry': getattr(r, 'industry', ''),
                    'change_percent': getattr(r, 'change_percent', 0),
                    'strategy_type': getattr(r, 'strategy_type', '')
                })
        finally:
            # BUG FIX: close the session even when a query raises;
            # the original only closed it on the success path.
            session_sel.close()
        return jsonify({'success': True, 'results': results})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500



@app.route('/admin/api/labels/latest-date')
@login_required
def api_labels_latest_date():
    """Return the most recent trade date present in HSnapshot (or None)."""
    try:
        from model.HSnapshot import HSnapshot
        session = db_manager.get_stock_data_session()
        try:
            latest = session.query(HSnapshot.trade_date).order_by(HSnapshot.trade_date.desc()).first()
        finally:
            # BUG FIX: close the session even when the query raises;
            # the original only closed it on the success path.
            session.close()
        return jsonify({'success': True, 'latest_date': latest[0] if latest else None})
    except Exception as e:
        logger.error(f"获取最新交易日失败: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/labels/stocks')
@login_required
def api_labels_stocks():
    """获取最新交易日股票列表，分页，包含行业与概念（来源于 Block/BlockInfo）

    Query params: q (keyword on id/name), page (>=1), page_size (10..100).
    Industry = block with category==2; concepts = all other block names.
    """
    try:
        q_kw = (request.args.get('q') or '').strip()
        page = int(request.args.get('page') or 1)
        page_size = int(request.args.get('page_size') or 20)
        # Clamp pagination inputs to sane bounds.
        page = max(page, 1)
        page_size = max(min(page_size, 100), 10)

        from sqlalchemy import func, case
        from model.HSnapshot import HSnapshot
        from model.StockInfo import StockInfo
        from model.Block import Block
        from model.BlockInfo import BlockInfo

        s = db_manager.get_stock_data_session()
        try:
            # Most recent trading day — the listing is for that day only.
            latest_date = s.query(func.max(HSnapshot.trade_date)).scalar()

            # industry: block_name where category==2, one per security.
            industry_sub = (
                s.query(
                    BlockInfo.security_id.label('sid'),
                    func.max(case((Block.category == 2, Block.block_name), else_=None)).label('industry')
                )
                .join(Block, Block.block_code == BlockInfo.block_code)
                .group_by(BlockInfo.security_id)
            ).subquery()

            # concepts: comma-joined block_names where category != 2.
            concepts_sub = (
                s.query(
                    BlockInfo.security_id.label('sid'),
                    func.group_concat(case((Block.category != 2, Block.block_name), else_=None), ',').label('concepts')
                )
                .join(Block, Block.block_code == BlockInfo.block_code)
                .group_by(BlockInfo.security_id)
            ).subquery()

            base_q = (
                s.query(
                    HSnapshot.security_id,
                    StockInfo.security_name,
                    HSnapshot.trade_px,
                    industry_sub.c.industry,
                    concepts_sub.c.concepts,
                )
                .outerjoin(StockInfo, StockInfo.security_id == HSnapshot.security_id)
                .outerjoin(industry_sub, industry_sub.c.sid == HSnapshot.security_id)
                .outerjoin(concepts_sub, concepts_sub.c.sid == HSnapshot.security_id)
                .filter(HSnapshot.trade_date == latest_date)
            )

            if q_kw:
                # Keyword matches either the security id or its display name.
                like = f"%{q_kw}%"
                base_q = base_q.filter(
                    (HSnapshot.security_id.like(like)) | (StockInfo.security_name.like(like))
                )

            total = base_q.count()
            rows = base_q.order_by(HSnapshot.security_id).offset((page - 1) * page_size).limit(page_size).all()
        finally:
            # BUG FIX: close the session even when a query raises;
            # the original only closed it on the success path.
            s.close()

        # Rows are plain column tuples, safe to format after close.
        data = []
        for r in rows:
            data.append({
                'security_id': r[0],
                'security_name': r[1] or '',
                'latest_price': round(float(r[2]), 3) if r[2] is not None else None,
                'industry': r[3] or '',
                # Deduplicate and sort the concatenated concept names.
                'concepts': ','.join(sorted(set((r[4] or '').split(',')))) if r[4] else ''
            })

        return jsonify({'success': True, 'data': data, 'total': total, 'page': page, 'page_size': page_size, 'latest_date': latest_date})
    except Exception as e:
        logger.error(f"获取标签管理股票列表失败: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/labels/list')
@login_required
def api_labels_list():
    """List all LabelInfo rows for a given security_id (empty list if none given)."""
    try:
        sid = (request.args.get('security_id') or '').strip()
        if not sid:
            return jsonify({'success': True, 'labels': []})
        from model.LabelInfo import LabelInfo
        session = db_manager.get_stock_data_session()
        try:
            rows = session.query(LabelInfo).filter(LabelInfo.security_id == sid).all()
            # Serialize while the session is open to avoid detached-instance errors.
            labels = [LabelInfo._to_dict(r) for r in rows]
        finally:
            # BUG FIX: close the session even when the query raises;
            # the original only closed it on the success path.
            session.close()
        return jsonify({'success': True, 'labels': labels})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/labels/upsert', methods=['POST'])
@login_required
def api_labels_upsert():
    """Create or update a long-term ('STATIC'-dated) label for a security.

    JSON body: security_id, label (both required), label_type, confidence.
    """
    try:
        data = request.get_json() or {}
        sid = (data.get('security_id') or '').strip()
        label = (data.get('label') or '').strip()
        label_type = (data.get('label_type') or '').strip()
        confidence = int(data.get('confidence') or 100)
        if not sid or not label:
            return jsonify({'success': False, 'error': 'security_id 与 label 不能为空'}), 400
        from model.LabelInfo import LabelInfo
        session = db_manager.get_stock_data_session()
        try:
            # 'STATIC' marks labels that are not tied to any trading day.
            static_date = 'STATIC'
            existing = session.query(LabelInfo).filter(
                LabelInfo.security_id == sid,
                LabelInfo.label == label,
                LabelInfo.date == static_date
            ).first()
            if existing:
                # Keep the old label_type when the request omits it.
                existing.label_type = label_type or existing.label_type
                existing.confidence = confidence
            else:
                obj = LabelInfo(date=static_date, security_id=sid, label=label, label_type=label_type, confidence=confidence)
                session.add(obj)
            session.commit()
        except Exception:
            # Discard the failed transaction before closing.
            session.rollback()
            raise
        finally:
            # BUG FIX: close the session even when the commit raises;
            # the original leaked it on error.
            session.close()
        return jsonify({'success': True})
    except Exception as e:
        logger.error(f"保存标签失败: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/labels/delete', methods=['POST'])
@login_required
def admin_labels_delete():
    """删除标签

    JSON body: id (LabelInfo primary key). 404 if the label does not exist.
    """
    try:
        data = request.get_json()
        label_id = data.get('id')

        if not label_id:
            return jsonify({'success': False, 'error': '缺少标签ID'}), 400

        session = db_manager.get_stock_data_session()
        try:
            label = session.query(LabelInfo).filter(LabelInfo.id == label_id).first()

            if not label:
                return jsonify({'success': False, 'error': '标签不存在'}), 404

            session.delete(label)
            session.commit()
        except Exception:
            # Discard the failed transaction before closing.
            session.rollback()
            raise
        finally:
            # BUG FIX: close in finally — the original duplicated close() on two
            # paths and leaked the session when a query/commit raised.
            session.close()

        return jsonify({'success': True})

    except Exception as e:
        logger.error(f"删除标签失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/fundamental/stats')
@login_required
def admin_fundamental_stats():
    """Return row counts for the StockInfo, Block and BlockInfo tables."""
    try:
        session = db_manager.get_stock_data_session()
        try:
            stockinfo_count = session.query(StockInfo).count()
            block_count = session.query(Block).count()
            blockinfo_count = session.query(BlockInfo).count()
        finally:
            # BUG FIX: close the session even when a count raises;
            # the original only closed it on the success path.
            session.close()

        return jsonify({
            'success': True,
            'stockinfo_count': stockinfo_count,
            'block_count': block_count,
            'blockinfo_count': blockinfo_count
        })

    except Exception as e:
        logger.error(f"获取基本面统计信息失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/fundamental/import-stockinfo', methods=['POST'])
@login_required
def admin_fundamental_import_stockinfo():
    """导入股票信息 — placeholder endpoint; real import logic not wired in yet."""
    try:
        # Stub response until the actual import pipeline is implemented.
        payload = {
            'success': True,
            'success_count': 0,
            'error_count': 0,
            'message': '股票信息导入功能待实现'
        }
        return jsonify(payload)

    except Exception as e:
        logger.error(f"导入股票信息失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/fundamental/import-block', methods=['POST'])
@login_required
def admin_fundamental_import_block():
    """导入板块信息 — placeholder endpoint; real import logic not wired in yet."""
    try:
        # Stub response until the actual import pipeline is implemented.
        payload = {
            'success': True,
            'success_count': 0,
            'error_count': 0,
            'message': '板块信息导入功能待实现'
        }
        return jsonify(payload)

    except Exception as e:
        logger.error(f"导入板块信息失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/fundamental/import-blockinfo', methods=['POST'])
@login_required
def admin_fundamental_import_blockinfo():
    """导入板块关联信息 — placeholder endpoint; real import logic not wired in yet."""
    try:
        # Stub response until the actual import pipeline is implemented.
        payload = {
            'success': True,
            'success_count': 0,
            'error_count': 0,
            'message': '板块关联信息导入功能待实现'
        }
        return jsonify(payload)

    except Exception as e:
        logger.error(f"导入板块关联信息失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

# 账户管理相关API
@app.route('/admin/api/user/list')
@login_required
def admin_user_list():
    """Paginated user listing, optionally filtered by nickname/phone substring."""
    try:
        page = request.args.get('page', 1, type=int)
        size = request.args.get('size', 10, type=int)
        search = request.args.get('search', '').strip()

        session = db_manager.get_stock_data_session()
        try:
            query = session.query(User)
            if search:
                query = query.filter(
                    (User.nickname.contains(search)) |
                    (User.phone.contains(search))
                )

            total = query.count()
            users = query.offset((page - 1) * size).limit(size).all()

            # BUG FIX: serialize while the session is open — calling to_dict()
            # on detached instances after close() can raise DetachedInstanceError.
            user_dicts = [user.to_dict() for user in users]
        finally:
            # BUG FIX: close the session even when a query raises;
            # the original only closed it on the success path.
            session.close()

        return jsonify({
            'success': True,
            'users': user_dicts,
            'total': total,
            'page': page,
            'size': size
        })

    except Exception as e:
        logger.error(f"获取用户列表失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/user', methods=['POST'])
@login_required
def admin_user_create():
    """Create a user from JSON body {nickname, phone, password}; phone must be unique."""
    try:
        data = request.get_json()
        nickname = data.get('nickname', '').strip()
        phone = data.get('phone', '').strip()
        password = data.get('password', '').strip()

        if not all([nickname, phone, password]):
            return jsonify({'success': False, 'error': '请填写完整信息'}), 400

        session = db_manager.get_stock_data_session()
        try:
            # Reject duplicate phone numbers.
            existing_user = session.query(User).filter(User.phone == phone).first()
            if existing_user:
                return jsonify({'success': False, 'error': '手机号已存在'}), 400

            # SECURITY: password is stored in plaintext here — it should be
            # hashed (e.g. werkzeug.security.generate_password_hash) before
            # persisting. Left as-is to match the login path's expectations.
            new_user = User(
                nickname=nickname,
                phone=phone,
                password=password
            )

            session.add(new_user)
            session.commit()
        except Exception:
            # Discard the failed transaction before closing.
            session.rollback()
            raise
        finally:
            # BUG FIX: close the session on every path; the original leaked it
            # when a query or commit raised.
            session.close()

        return jsonify({'success': True, 'message': '用户创建成功'})

    except Exception as e:
        logger.error(f"创建用户失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/user/<int:user_id>', methods=['PUT'])
@login_required
def admin_user_update(user_id):
    """Update a user's nickname/phone (and password if provided); phone must stay unique."""
    try:
        data = request.get_json()
        nickname = data.get('nickname', '').strip()
        phone = data.get('phone', '').strip()
        password = data.get('password', '').strip()

        if not all([nickname, phone]):
            return jsonify({'success': False, 'error': '请填写完整信息'}), 400

        session = db_manager.get_stock_data_session()
        try:
            user = session.query(User).filter(User.id == user_id).first()
            if not user:
                return jsonify({'success': False, 'error': '用户不存在'}), 404

            # The new phone must not belong to a different user.
            existing_user = session.query(User).filter(
                User.phone == phone,
                User.id != user_id
            ).first()
            if existing_user:
                return jsonify({'success': False, 'error': '手机号已被其他用户使用'}), 400

            user.nickname = nickname
            user.phone = phone
            if password:  # only overwrite when a new password was supplied
                # SECURITY: stored in plaintext — should be hashed in a real deployment.
                user.password = password

            session.commit()
        except Exception:
            # Discard the failed transaction before closing.
            session.rollback()
            raise
        finally:
            # BUG FIX: close in finally — the original duplicated close() on
            # early-return paths and leaked the session when a query raised.
            session.close()

        return jsonify({'success': True, 'message': '用户更新成功'})

    except Exception as e:
        logger.error(f"更新用户失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/user/<int:user_id>', methods=['DELETE'])
@login_required
def admin_user_delete(user_id):
    """Delete the user with the given id; 404 if it does not exist."""
    try:
        session = db_manager.get_stock_data_session()
        try:
            user = session.query(User).filter(User.id == user_id).first()
            if not user:
                return jsonify({'success': False, 'error': '用户不存在'}), 404

            session.delete(user)
            session.commit()
        except Exception:
            # Discard the failed transaction before closing.
            session.rollback()
            raise
        finally:
            # BUG FIX: close in finally — the original leaked the session when
            # the query or commit raised.
            session.close()

        return jsonify({'success': True, 'message': '用户删除成功'})

    except Exception as e:
        logger.error(f"删除用户失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/user/login-records')
@login_required
def admin_login_records_list():
    """Paginated login-record listing with optional phone/date-range/status filters."""
    try:
        page = request.args.get('page', 1, type=int)
        size = request.args.get('size', 20, type=int)
        phone = request.args.get('phone', '').strip()
        start_date = request.args.get('start_date', '').strip()
        end_date = request.args.get('end_date', '').strip()
        status = request.args.get('status', '').strip()

        session = db_manager.get_stock_data_session()
        try:
            query = session.query(LoginRecord)

            if phone:
                query = query.filter(LoginRecord.phone.contains(phone))
            if start_date:
                query = query.filter(LoginRecord.operation_time >= start_date)
            if end_date:
                # Make the end date inclusive for the whole day.
                query = query.filter(LoginRecord.operation_time <= end_date + ' 23:59:59')
            if status:
                query = query.filter(LoginRecord.status == OperationType(status))

            total = query.count()
            records = query.order_by(LoginRecord.operation_time.desc()).offset((page - 1) * size).limit(size).all()

            # BUG FIX: serialize while the session is open — to_dict() on
            # detached instances after close() can raise DetachedInstanceError.
            record_dicts = [record.to_dict() for record in records]
        finally:
            # BUG FIX: close the session even when a query raises;
            # the original only closed it on the success path.
            session.close()

        return jsonify({
            'success': True,
            'records': record_dicts,
            'total': total,
            'page': page,
            'size': size
        })

    except Exception as e:
        logger.error(f"获取登录记录失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/user/login-stats')
@login_required
def admin_login_stats():
    """Aggregate login-record statistics: totals, today's count, active and online users."""
    try:
        from datetime import datetime, timedelta

        session = db_manager.get_stock_data_session()
        try:
            # All login/logout records ever stored.
            total_records = session.query(LoginRecord).count()

            # Records since local midnight.
            today = datetime.now().date()
            today_records = session.query(LoginRecord).filter(
                LoginRecord.operation_time >= today
            ).count()

            # Distinct users with a login in the last 7 days.
            week_ago = datetime.now() - timedelta(days=7)
            active_users = session.query(LoginRecord.phone).filter(
                LoginRecord.operation_time >= week_ago,
                LoginRecord.status == OperationType.LOGIN
            ).distinct().count()

            # Distinct users with a login in the last 30 minutes ("online" heuristic).
            thirty_minutes_ago = datetime.now() - timedelta(minutes=30)
            online_users = session.query(LoginRecord.phone).filter(
                LoginRecord.operation_time >= thirty_minutes_ago,
                LoginRecord.status == OperationType.LOGIN
            ).distinct().count()
        finally:
            # BUG FIX: close the session even when a query raises;
            # the original only closed it on the success path.
            session.close()

        return jsonify({
            'success': True,
            'totalRecords': total_records,
            'todayRecords': today_records,
            'activeUsers': active_users,
            'onlineUsers': online_users
        })

    except Exception as e:
        logger.error(f"获取登录统计信息失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/admin/api/user/overview-stats')
@login_required
def admin_user_overview_stats():
    """User overview statistics: total users, today's logins, active and online users."""
    try:
        from datetime import datetime, timedelta

        session = db_manager.get_stock_data_session()
        try:
            # All registered users.
            total_users = session.query(User).count()

            # Logins since local midnight.
            today = datetime.now().date()
            today_logins = session.query(LoginRecord).filter(
                LoginRecord.operation_time >= today,
                LoginRecord.status == OperationType.LOGIN
            ).count()

            # Distinct users with a login in the last 7 days.
            week_ago = datetime.now() - timedelta(days=7)
            active_users = session.query(LoginRecord.phone).filter(
                LoginRecord.operation_time >= week_ago,
                LoginRecord.status == OperationType.LOGIN
            ).distinct().count()

            # Distinct users with a login in the last 30 minutes ("online" heuristic).
            thirty_minutes_ago = datetime.now() - timedelta(minutes=30)
            online_users = session.query(LoginRecord.phone).filter(
                LoginRecord.operation_time >= thirty_minutes_ago,
                LoginRecord.status == OperationType.LOGIN
            ).distinct().count()
        finally:
            # BUG FIX: close the session even when a query raises;
            # the original only closed it on the success path.
            session.close()

        return jsonify({
            'success': True,
            'totalUsers': total_users,
            'todayLogins': today_logins,
            'activeUsers': active_users,
            'onlineUsers': online_users
        })

    except Exception as e:
        logger.error(f"获取用户概览统计信息失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

# Scheduled-task related APIs.
# NOTE(review): these imports appear mid-file; `datetime`/`timedelta` rebind the
# same names already imported at the top of the module (harmless but redundant).
import json
import threading
from datetime import datetime, timedelta
import schedule

# Global scheduler state. `schedule_config` holds the two daily run times,
# their enable flags, and whether news crawling is active; it is overwritten
# by load_schedule_config() and persisted by save_schedule_config().
schedule_config = {
    'schedule_time1': '08:30:00',
    'schedule_time2': '20:00:00',
    'enabled1': True,
    'enabled2': True,
    'crawl_news': True,

}

# Worker thread running schedule_worker(), and its run flag.
schedule_thread = None
schedule_running = False

def load_schedule_config():
    """Load the scheduler config from schedule_config.json, merging over defaults.

    Missing file or parse errors are logged and leave the current config intact.
    """
    global schedule_config
    try:
        config_file = 'schedule_config.json'
        if os.path.exists(config_file):
            with open(config_file, 'r', encoding='utf-8') as f:
                # BUG FIX: merge into the existing dict instead of rebinding it.
                # Wholesale replacement dropped any default keys absent from the
                # file (causing KeyError later in schedule_worker) and orphaned
                # references held by other modules.
                loaded = json.load(f)
                schedule_config.update(loaded)
                logger.info(f"加载定时任务配置: {schedule_config}")
    except Exception as e:
        logger.error(f"加载定时任务配置失败: {str(e)}")

def save_schedule_config():
    """Persist the current scheduler config to schedule_config.json (UTF-8 JSON)."""
    config_file = 'schedule_config.json'
    try:
        with open(config_file, 'w', encoding='utf-8') as fh:
            json.dump(schedule_config, fh, ensure_ascii=False, indent=2)
            logger.info(f"保存定时任务配置: {schedule_config}")
    except Exception as e:
        logger.error(f"保存定时任务配置失败: {str(e)}")

def scheduled_news_crawl():
    """Scheduled crawl job: fetch performance forecasts, asset restructures and A-share news.

    Each crawler failure is logged and does not abort the others. Crawling is
    skipped entirely when schedule_config['crawl_news'] is false.
    """
    try:
        logger.info("开始执行定时爬取任务")

        # Instantiate the three crawlers.
        forecast_parser = PerformanceForecastParser()
        asset_parser = AssetRestructureParser()
        news_collector = SimpleNewsCollector()

        saved_count = 0

        if not schedule_config.get('crawl_news', True):
            logger.info("新闻爬取已禁用")
        else:
            logger.info("开始爬取新闻数据")

            # Run each crawler independently; tally whatever succeeds.
            # Performance forecasts.
            try:
                forecast_data = forecast_parser.crawl()
                if forecast_data:
                    saved_count += len(forecast_data)
                    logger.info(f"业绩预告爬取成功，获取 {len(forecast_data)} 条数据")
            except Exception as e:
                logger.error(f"业绩预告爬取失败: {str(e)}")

            # Asset restructures.
            try:
                asset_data = asset_parser.crawl()
                if asset_data:
                    saved_count += len(asset_data)
                    logger.info(f"资产重组爬取成功，获取 {len(asset_data)} 条数据")
            except Exception as e:
                logger.error(f"资产重组爬取失败: {str(e)}")

            # A-share news.
            try:
                news_data = news_collector.crawl()
                if news_data:
                    saved_count += len(news_data)
                    logger.info(f"A股新闻爬取成功，获取 {len(news_data)} 条数据")
            except Exception as e:
                logger.error(f"A股新闻爬取失败: {str(e)}")

        logger.info(f"定时爬取任务完成，新闻: {saved_count} 条")

    except Exception as e:
        logger.error(f"定时爬取任务失败: {str(e)}")

def schedule_worker():
    """Worker thread for the crawl scheduler.

    Registers the configured daily jobs with the `schedule` library, then
    polls `schedule.run_pending()` until `schedule_running` is cleared by
    stop_schedule().
    """
    global schedule_running

    # Drop jobs registered by a previous worker run. Without this, every
    # restart (the POST /api/news/schedule handler stops and restarts the
    # worker) adds duplicate daily jobs and each crawl fires multiple times.
    schedule.clear()

    if schedule_config['enabled1']:
        schedule.every().day.at(schedule_config['schedule_time1']).do(scheduled_news_crawl)
        logger.info(f"设置第一次定时爬取: {schedule_config['schedule_time1']}")

    if schedule_config['enabled2']:
        schedule.every().day.at(schedule_config['schedule_time2']).do(scheduled_news_crawl)
        logger.info(f"设置第二次定时爬取: {schedule_config['schedule_time2']}")

    schedule_running = True
    logger.info("定时任务工作线程启动")

    while schedule_running:
        schedule.run_pending()
        # Sleep in short slices so a stop request takes effect within ~1s.
        # The previous 60s sleep meant stop_schedule() + the 2s wait in
        # set_news_schedule() left the old thread alive, start_schedule()
        # then refused to spawn a new one, and the scheduler died silently.
        time.sleep(1)

    logger.info("定时任务工作线程停止")

def start_schedule():
    """Launch the scheduler worker thread unless one is already alive."""
    global schedule_thread, schedule_running

    already_running = schedule_thread is not None and schedule_thread.is_alive()
    if already_running:
        logger.info("定时任务已在运行")
        return

    # Daemon thread: must not keep the process alive on shutdown.
    worker = threading.Thread(target=schedule_worker, daemon=True)
    schedule_thread = worker
    worker.start()
    logger.info("定时任务启动成功")

def stop_schedule():
    """Ask the scheduler worker thread to exit its polling loop."""
    global schedule_running
    # The worker checks this flag between iterations; no join here.
    schedule_running = False
    logger.info("定时任务停止")

@app.route('/api/news/schedule', methods=['GET'])
def get_news_schedule():
    """Return the current crawl-schedule configuration as JSON."""
    try:
        payload = {'success': True, 'data': schedule_config}
        return jsonify(payload)
    except Exception as e:
        logger.error(f"获取定时设置失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/news/schedule', methods=['POST'])
def set_news_schedule():
    """Update the crawl-schedule config, persist it, and restart the worker.

    Accepts a JSON body with optional keys schedule_time1/schedule_time2
    ('HH:MM:SS'), enabled1/enabled2 and crawl_news; missing keys fall back
    to defaults. Returns {'success': True} on success, 500 on failure.
    """
    try:
        # get_json() returns None when the body is absent or not valid JSON;
        # fall back to an empty dict so the defaults below apply instead of
        # raising AttributeError on data.get(...).
        data = request.get_json(silent=True) or {}

        schedule_config['schedule_time1'] = data.get('schedule_time1', '08:30:00')
        schedule_config['schedule_time2'] = data.get('schedule_time2', '20:00:00')
        schedule_config['enabled1'] = data.get('enabled1', True)
        schedule_config['enabled2'] = data.get('enabled2', True)
        schedule_config['crawl_news'] = data.get('crawl_news', True)

        # Persist so the config survives a restart.
        save_schedule_config()

        # Restart the worker so the new times are (re)registered.
        stop_schedule()
        time.sleep(2)  # give the worker loop a chance to exit
        start_schedule()

        logger.info(f"定时设置更新成功: {schedule_config}")
        return jsonify({
            'success': True,
            'message': '定时设置保存成功'
        })

    except Exception as e:
        logger.error(f"设置定时任务失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/news/schedule/status')
def get_schedule_status():
    """Report whether the scheduler is running and when it fires next."""
    # NOTE(review): the file head only imports `datetime` from datetime;
    # import timedelta locally so this handler never hits a NameError.
    from datetime import timedelta

    def _next_run(time_str, now):
        # Next occurrence of 'HH:MM:SS' strictly after `now` (today or tomorrow).
        t = datetime.strptime(time_str, '%H:%M:%S').time()
        candidate = datetime.combine(now.date(), t)
        if candidate <= now:
            candidate += timedelta(days=1)
        return candidate

    try:
        now = datetime.now()

        # Collect the next fire time of each enabled slot; the earliest wins.
        candidates = []
        if schedule_config['enabled1']:
            candidates.append(_next_run(schedule_config['schedule_time1'], now))
        if schedule_config['enabled2']:
            candidates.append(_next_run(schedule_config['schedule_time2'], now))
        next_crawl_time = min(candidates) if candidates else None

        return jsonify({
            'success': True,
            'data': {
                'is_running': schedule_running,
                'next_crawl_time': next_crawl_time.strftime('%Y-%m-%d %H:%M:%S') if next_crawl_time else None,
                'config': schedule_config
            }
        })

    except Exception as e:
        logger.error(f"获取定时状态失败: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500

# 涨停分析相关API
@app.route('/api/limit-up/list')
def get_limit_up_list():
    """List limit-up stocks with pagination, filters and summary statistics.

    Query params: page, limit, trade_date, security_id, security_name,
    consecutive_limit_up, market. Rows are ordered by consecutive limit-up
    count then turnover (both descending); avg/max consecutive and total
    turnover are computed over the whole filtered set, not just the page.
    """
    db_session = None
    try:
        from sqlalchemy import func

        page = request.args.get('page', 1, type=int)
        limit = request.args.get('limit', 20, type=int)
        trade_date = request.args.get('trade_date', '')
        security_id = request.args.get('security_id', '')
        security_name = request.args.get('security_name', '')
        consecutive_limit_up = request.args.get('consecutive_limit_up', '')
        market = request.args.get('market', '')

        # Clamp pagination params to sane values.
        if page < 1:
            page = 1
        if limit < 1:
            limit = 20

        # Named db_session (not `session`) to avoid shadowing flask.session,
        # which is imported at module level.
        db_session = FBoard.get_session()
        query = db_session.query(FBoard)

        if trade_date:
            query = query.filter(FBoard.trade_date == trade_date)
        if security_id:
            query = query.filter(FBoard.security_id.ilike(f'%{security_id}%'))
        if security_name:
            query = query.filter(FBoard.security_name.ilike(f'%{security_name}%'))
        if consecutive_limit_up:
            query = query.filter(FBoard.consecutive_limit_up == int(consecutive_limit_up))
        if market:
            query = query.filter(FBoard.market == int(market))

        total_count = query.count()

        # Page slice.
        offset = (page - 1) * limit
        items = query.order_by(
            FBoard.consecutive_limit_up.desc(),
            FBoard.turnover_amount.desc()
        ).offset(offset).limit(limit).all()

        items_data = [FBoard._to_dict(item) for item in items]

        # Aggregates over the full filtered set (`or 0` covers empty results).
        avg_consecutive = query.with_entities(func.avg(FBoard.consecutive_limit_up)).scalar() or 0
        max_consecutive = query.with_entities(func.max(FBoard.consecutive_limit_up)).scalar() or 0
        total_turnover = query.with_entities(func.sum(FBoard.turnover_amount)).scalar() or 0

        return jsonify({
            'success': True,
            'data': {
                'items': items_data,
                'total_count': total_count,
                'total_pages': (total_count + limit - 1) // limit,
                'current_page': page,
                'page_size': limit,
                'avg_consecutive': float(avg_consecutive),
                'max_consecutive': int(max_consecutive),
                'total_turnover': float(total_turnover)
            }
        })

    except Exception as e:
        logger.error(f"获取涨停股票列表失败: {str(e)}")
        return jsonify({
            'success': False,
            'message': f'获取涨停股票列表失败: {str(e)}'
        })
    finally:
        # Always release the DB session — the original leaked it whenever
        # the query raised before reaching session.close().
        if db_session is not None:
            db_session.close()

@app.route('/api/consecutive-limit-up/tiers')
def get_consecutive_limit_up_tiers():
    """Group consecutive limit-up ('连板') stocks into tiers by streak length.

    Query params: trade_date, security_id, security_name, market. Only rows
    with type '连板' and consecutive_limit_up >= 0 are included. Tiers are
    returned highest streak first; within a tier, stocks are sorted by first
    limit-up time ascending. Also returns overall count/avg/max/turnover.
    """
    db_session = None
    try:
        trade_date = request.args.get('trade_date', '')
        security_id = request.args.get('security_id', '')
        security_name = request.args.get('security_name', '')
        market = request.args.get('market', '')

        # Named db_session (not `session`) to avoid shadowing flask.session,
        # which is imported at module level.
        db_session = FBoard.get_session()
        query = db_session.query(FBoard)

        if trade_date:
            query = query.filter(FBoard.trade_date == trade_date)
        if security_id:
            query = query.filter(FBoard.security_id.ilike(f'%{security_id}%'))
        if security_name:
            query = query.filter(FBoard.security_name.ilike(f'%{security_name}%'))
        if market:
            query = query.filter(FBoard.market == int(market))

        # Only consecutive limit-up stocks (consecutive_limit_up >= 0 and type '连板').
        items = query.filter(
            FBoard.consecutive_limit_up >= 0,
            FBoard.type == '连板'
        ).order_by(
            FBoard.consecutive_limit_up.desc(),
            FBoard.first_limit_up_time.asc()
        ).all()

        # Bucket by streak length while accumulating overall statistics.
        tiers_dict = {}
        total_count = 0
        total_turnover = 0
        max_consecutive = 0
        consecutive_sum = 0

        for item in items:
            consecutive_days = item.consecutive_limit_up or 0  # already +1'd at import time
            tiers_dict.setdefault(consecutive_days, [])

            # Serialize and drop the limit-up status field (not needed here).
            stock_data = FBoard._to_dict(item)
            stock_data.pop('limit_up_status', None)

            tiers_dict[consecutive_days].append(stock_data)
            total_count += 1
            total_turnover += item.turnover_amount or 0
            max_consecutive = max(max_consecutive, consecutive_days)
            consecutive_sum += consecutive_days

        # Build tier entries, highest streak first.
        tiers = []
        for consecutive_days in sorted(tiers_dict.keys(), reverse=True):
            stocks = tiers_dict[consecutive_days]

            # Earliest first limit-up time first within a tier.
            stocks.sort(key=lambda x: x.get('first_limit_up_time', '') or '')

            avg_change = sum(stock.get('daily_change', 0) for stock in stocks) / len(stocks) if stocks else 0
            tier_turnover = sum(stock.get('turnover_amount', 0) for stock in stocks)

            tiers.append({
                'consecutive_days': consecutive_days,
                'stocks': stocks,
                'avg_change': round(avg_change, 2),
                'total_turnover': tier_turnover
            })

        avg_consecutive = consecutive_sum / total_count if total_count > 0 else 0

        return jsonify({
            'success': True,
            'data': {
                'tiers': tiers,
                'total_count': total_count,
                'max_consecutive': max_consecutive,
                'avg_consecutive': round(avg_consecutive, 1),
                'total_turnover': total_turnover
            }
        })

    except Exception as e:
        logger.error(f"获取连板梯队数据失败: {str(e)}")
        return jsonify({
            'success': False,
            'message': f'获取连板梯队数据失败: {str(e)}'
        })
    finally:
        # Always release the DB session — the original leaked it whenever
        # the query raised before reaching session.close().
        if db_session is not None:
            db_session.close()









if __name__ == '__main__':
    # Load cached data before serving; abort startup if it fails.
    if load_cached_data():
        logger.info("数据加载成功，启动Web服务...")

        # Restore the crawl-schedule configuration and start the worker thread.
        load_schedule_config()
        start_schedule()

        # SECURITY: Flask debug mode exposes the Werkzeug interactive debugger
        # (arbitrary code execution) — it must never be hard-coded on together
        # with host='0.0.0.0'. Debug is now opt-in via the FLASK_DEBUG env var.
        debug_mode = os.environ.get('FLASK_DEBUG', '').lower() in ('1', 'true', 'yes')
        app.run(host='0.0.0.0', port=5000, debug=debug_mode)
    else:
        logger.error("数据加载失败，无法启动Web服务")