from flask import Blueprint, jsonify, request, make_response
import os
import pandas as pd
from datetime import datetime
import json
import numpy as np

# Blueprint that groups all stock-code / stock-data API routes
stock_codes = Blueprint('stock_codes', __name__)

def get_stock_codes(is_real=False):
    """
    List every stock code available in the data folder.

    A "code" is simply a CSV file name in the data directory with the
    ``.csv`` suffix stripped.

    Args:
        is_real (bool): when True read the live-trading folder
            (``data/realgo_5m/``), otherwise the historical folder
            (``data/5m/``). Defaults to False.

    Returns:
        list: stock codes sorted alphabetically; empty when the folder
        does not exist.
    """
    data_dir = 'data/realgo_5m/' if is_real else 'data/5m/'

    # A missing directory simply means "no data" rather than an error.
    if not os.path.exists(data_dir):
        return []

    # Keep only CSV files, strip the extension, and return in sorted order.
    return sorted(
        os.path.splitext(name)[0]
        for name in os.listdir(data_dir)
        if name.endswith('.csv')
    )

def remove_nan_values(obj):
    """
    Recursively remove float NaN values from a nested structure.

    Dict entries whose value is NaN are dropped, and NaN items inside
    lists are dropped as well.  (Previously list items were only recursed
    into, so a bare NaN inside a list survived and produced invalid JSON
    downstream — the JSON spec has no NaN literal.)

    Args:
        obj: any nested combination of dicts, lists and scalars.

    Returns:
        The same structure with every float NaN removed; non-container,
        non-NaN values are returned unchanged.
    """
    def _is_nan(value):
        # np.float64 subclasses float, so this covers pandas-produced NaN too.
        return isinstance(value, float) and np.isnan(value)

    if isinstance(obj, dict):
        return {k: remove_nan_values(v) for k, v in obj.items() if not _is_nan(v)}
    if isinstance(obj, list):
        return [remove_nan_values(item) for item in obj if not _is_nan(item)]
    return obj

@stock_codes.route('/stock_codes', methods=['GET'])
def get_all_stock_codes():
    """
    Return every available stock code.

    Returns:
        JSON: two lists of codes, one for the historical environment
        ('history') and one for the live-trading environment ('real').
    """
    payload = {
        'history': get_stock_codes(is_real=False),
        'real': get_stock_codes(is_real=True),
    }
    return jsonify(payload)
    
@stock_codes.route('/check_missing_data', methods=['GET'])
def check_missing_data():
    """
    Report missing-value statistics for one stock's CSV data.

    Query params:
        is_real (bool, default False): inspect the live-trading folder
            instead of the historical one.
        code (str, required): stock code, i.e. the CSV file name.

    Returns:
        JSON with per-column missing counts, percentages and the
        timestamps of the missing rows, plus the file's date range.
    """
    is_real = request.args.get('is_real', 'false').lower() == 'true'
    code = request.args.get('code')

    # The code parameter is mandatory.
    if not code:
        return jsonify({'error': '股票代码是必需的参数'}), 400

    data_dir = 'data/realgo_5m/' if is_real else 'data/5m/'
    file_path = f'{data_dir}{code}.csv'

    if not os.path.exists(file_path):
        return jsonify({'error': f'找不到股票代码 {code} 的数据文件'}), 404

    try:
        frame = pd.read_csv(file_path)
        # Parse timestamps so min/max and strftime work on the date column.
        frame['date'] = pd.to_datetime(frame['date'])

        total = len(frame)
        missing_data = {}
        for col in frame.columns:
            na_mask = frame[col].isna()
            na_count = int(na_mask.sum())
            if na_count:
                missing_data[col] = {
                    'count': na_count,
                    'percentage': float(round(na_count / total * 100, 2)),
                    'dates': frame.loc[na_mask, 'date']
                                  .dt.strftime('%Y-%m-%d %H:%M:%S')
                                  .tolist(),
                }

        payload = {
            'code': code,
            'is_real': is_real,
            'total_records': total,
            'date_range': {
                'start': frame['date'].min().strftime('%Y-%m-%d %H:%M:%S'),
                'end': frame['date'].max().strftime('%Y-%m-%d %H:%M:%S'),
            },
            'missing_data': missing_data,
        }

        # Strip any stray NaN so the payload is always valid JSON.
        return jsonify(remove_nan_values(payload))

    except Exception as e:
        return jsonify({'error': f'检查数据缺失时出错: {str(e)}'}), 500

@stock_codes.route('/stock_data', methods=['GET'])
def get_stock_data():
    """
    Return stock data with optional date-range filtering and pagination.

    Query params:
        is_real (bool, default False): use live-trading data.
        code (str, required): stock code.
        start_date (str, optional): 'YYYY-MM-DD HH:MM:SS' lower bound.
        end_date (str, optional): 'YYYY-MM-DD HH:MM:SS' upper bound.
        page (int, default 1): page number (clamped to >= 1).
        page_size (int, default 100): records per page (clamped to >= 1).

    Returns:
        JSON with pagination metadata and the page's records, newest first.
    """
    is_real = request.args.get('is_real', 'false').lower() == 'true'
    code = request.args.get('code')
    start_date = request.args.get('start_date')
    end_date = request.args.get('end_date')

    # Parse pagination params defensively: a non-numeric value used to
    # raise an unhandled ValueError (HTTP 500); now it is a clean 400.
    try:
        page = int(request.args.get('page', 1))
        page_size = int(request.args.get('page_size', 100))
    except ValueError:
        return jsonify({'error': 'page 和 page_size 必须是整数'}), 400

    # Guard against zero/negative values that would produce negative
    # slice indices below.
    page = max(page, 1)
    page_size = max(page_size, 1)

    print(f"API Request: /stock_data?code={code}&is_real={is_real}&page={page}&page_size={page_size}")

    if not code:
        return jsonify({'error': '股票代码是必需的参数'}), 400

    folder_path = 'data/realgo_5m/' if is_real else 'data/5m/'
    file_path = f'{folder_path}{code}.csv'

    if not os.path.exists(file_path):
        return jsonify({'error': f'找不到股票代码 {code} 的数据文件'}), 404

    try:
        df = pd.read_csv(file_path)
        # Parse timestamps so range filtering and formatting work.
        df['date'] = pd.to_datetime(df['date'])

        if start_date:
            df = df[df['date'] >= pd.to_datetime(start_date)]
        if end_date:
            df = df[df['date'] <= pd.to_datetime(end_date)]

        # Newest records first.
        df = df.sort_values(by='date', ascending=False)

        total_records = len(df)
        total_pages = (total_records + page_size - 1) // page_size

        # Requests past the end fall back to the last page.
        if page > total_pages and total_pages > 0:
            page = total_pages

        start_idx = (page - 1) * page_size
        end_idx = min(start_idx + page_size, total_records)

        # .copy() so the strftime assignment below writes to an owned
        # frame instead of an iloc view (SettingWithCopyWarning / the
        # write silently not sticking).
        page_data = df.iloc[start_idx:end_idx].copy()
        page_data['date'] = page_data['date'].dt.strftime('%Y-%m-%d %H:%M:%S')

        # NaN has no JSON literal; map it to None before serializing.
        records = page_data.replace({np.nan: None}).to_dict('records')

        response_data = {
            'code': code,
            'is_real': is_real,
            'pagination': {
                'page': page,
                'page_size': page_size,
                'total_records': total_records,
                'total_pages': total_pages
            },
            'data': records
        }

        # Belt-and-braces: strip any NaN that survived the replace above.
        response_data = remove_nan_values(response_data)

        response = make_response(jsonify(response_data))
        response.headers['Content-Type'] = 'application/json'

        print(f"API Response: Records={len(page_data)}, Total={total_records}")

        return response

    except Exception as e:
        print(f"API Error: {str(e)}")
        return jsonify({'error': f'处理数据时出错: {str(e)}'}), 500

@stock_codes.route('/check_integrity', methods=['GET'])
def check_integrity():
    """
    Check data completeness against a benchmark instrument's calendar.

    Query params:
        is_real (bool, default True): check live-trading data, otherwise
            historical data.
        start_date (str, optional): only consider bars at/after this time.

    Returns:
        JSON listing, for every CSV in the folder, the timestamps missing
        relative to the benchmark (000016.XSHG) and any duplicated
        timestamps.
    """
    is_real = request.args.get('is_real', 'true').lower() == 'true'
    start_date = request.args.get('start_date')
    folder_path = 'data/realgo_5m/' if is_real else 'data/5m/'
    base_code = '000016.XSHG'  # reference calendar for both environments

    # Load the benchmark's timestamps; without it there is nothing to
    # compare against.
    base_path = f'{folder_path}{base_code}.csv'
    if not os.path.exists(base_path):
        return jsonify({'error': f'基准股票 {base_code} 数据文件不存在'}), 404

    base_df = pd.read_csv(base_path)
    base_df['date'] = pd.to_datetime(base_df['date'])

    # Explicit sentinel instead of relying on a branch-local binding when
    # start_dt is reused in the per-code loop below.
    start_dt = None
    if start_date:
        try:
            start_dt = pd.to_datetime(start_date)
        except Exception as e:
            return jsonify({'error': f'start_date参数格式错误: {str(e)}'}), 400
        base_df = base_df[base_df['date'] >= start_dt]
    base_dates = set(base_df['date'].dt.strftime('%Y-%m-%d %H:%M:%S'))

    files = [f for f in os.listdir(folder_path) if f.endswith('.csv')]
    codes = [os.path.splitext(f)[0] for f in files]

    missing = []
    duplicate = []

    for code in codes:
        df = pd.read_csv(f'{folder_path}{code}.csv')
        df['date'] = pd.to_datetime(df['date'])
        if start_dt is not None:
            df = df[df['date'] >= start_dt]
        df['date_str'] = df['date'].dt.strftime('%Y-%m-%d %H:%M:%S')

        # Missing: timestamps the benchmark has but this code lacks.
        for d in base_dates - set(df['date_str']):
            missing.append({'code': code, 'date': d})

        # Duplicates: timestamps appearing more than once.  Cast the
        # np.int64 count to int — Flask's default JSON provider cannot
        # serialize NumPy integers.
        for d, count in df['date_str'].value_counts().to_dict().items():
            if count > 1:
                duplicate.append({'code': code, 'date': d, 'count': int(count)})

    return jsonify({'missing': missing, 'duplicate': duplicate, 'base_code': base_code, 'base_dates_count': len(base_dates), 'is_real': is_real, 'start_date': start_date})

@stock_codes.route('/sync_5m_to_realgo_5m', methods=['POST'])
def sync_5m_to_realgo_5m():
    """
    Sync every stock's data from data/5m to data/realgo_5m from a given
    date onward, de-duplicating on the date column.

    JSON body:
        start_date (str, required): only rows at/after this time are
            copied from the source folder.

    Returns:
        JSON with a per-code result: rows added and the resulting total,
        or the error message for files that failed.
    """
    # NOTE: removed an unused `import shutil` and a redundant local
    # `import pandas as pd` (the module already imports pandas as pd).
    data = request.get_json() or {}
    start_date = data.get('start_date')
    if not start_date:
        return jsonify({'error': 'start_date为必选参数'}), 400
    try:
        start_dt = pd.to_datetime(start_date)
    except Exception as e:
        return jsonify({'error': f'start_date格式错误: {str(e)}'}), 400

    src_folder = 'data/5m/'
    dst_folder = 'data/realgo_5m/'
    if not os.path.exists(src_folder):
        return jsonify({'error': 'data/5m目录不存在'}), 404
    os.makedirs(dst_folder, exist_ok=True)

    files = [f for f in os.listdir(src_folder) if f.endswith('.csv')]
    result = []
    for f in files:
        code = os.path.splitext(f)[0]
        src_path = os.path.join(src_folder, f)
        dst_path = os.path.join(dst_folder, f)
        try:
            df_src = pd.read_csv(src_path)
            df_src['date'] = pd.to_datetime(df_src['date'])
            df_new = df_src[df_src['date'] >= start_dt]

            # Capture the pre-merge row count when the destination is
            # read, instead of re-checking file existence afterwards.
            if os.path.exists(dst_path):
                df_dst = pd.read_csv(dst_path)
                df_dst['date'] = pd.to_datetime(df_dst['date'])
                before = len(df_dst)
                df_merged = pd.concat([df_dst, df_new], ignore_index=True)
            else:
                before = 0
                df_merged = df_new.copy()

            # De-duplicate on date, keeping the newest (source) row.
            df_merged = df_merged.sort_values('date').drop_duplicates(subset=['date'], keep='last')
            after = len(df_merged)
            df_merged.to_csv(dst_path, index=False)
            result.append({'code': code, 'added': after - before, 'total': after})
        except Exception as e:
            result.append({'code': code, 'error': str(e)})
    return jsonify({'result': result, 'start_date': start_date})