
import glob
import pandas as pd
import json
from datetime import datetime
import os
import numpy as np
import datetime as date
import time

def sanitize_for_json(data):
    """
    Recursively convert *data* into JSON-serializable built-in types.

    Handles nested dicts/lists/tuples/sets, pandas objects (Timestamp,
    Series, DataFrame), numpy scalars/arrays/bools, datetime/date/time,
    Decimal, generic array-likes (anything with ``.tolist()``) and custom
    objects (via ``__dict__``).  Anything else is probed with json.dumps
    and stringified as a last resort.

    BUG FIXES vs. the original:
    - the module level binds ``date``/``time`` to MODULES (``import
      datetime as date``, ``import time``), so ``isinstance(data, (datetime,
      date, time))`` raised TypeError for any value reaching that branch
      (e.g. every plain string); the real classes are used now.
    - ``Decimal`` was never imported (NameError); imported locally so this
      fix is self-contained.
    """
    import datetime as _dt
    from decimal import Decimal

    if isinstance(data, dict):
        return {k: sanitize_for_json(v) for k, v in data.items()}

    elif isinstance(data, (list, tuple, set)):
        return [sanitize_for_json(item) for item in data]

    # pd.Timestamp subclasses datetime, so it must be checked first.
    elif isinstance(data, pd.Timestamp):
        return data.isoformat()

    elif isinstance(data, pd.Series):
        return data.tolist()

    elif isinstance(data, pd.DataFrame):
        return data.to_dict(orient='records')

    # np.integer / np.floating cover all fixed-width numpy scalar variants.
    elif isinstance(data, np.integer):
        return int(data)

    elif isinstance(data, np.floating):
        return float(data)

    elif isinstance(data, np.ndarray):
        return data.tolist()

    elif isinstance(data, np.bool_):
        return bool(data)

    elif isinstance(data, (_dt.datetime, _dt.date, _dt.time)):
        return data.isoformat()

    elif isinstance(data, Decimal):
        return float(data)

    elif hasattr(data, 'tolist'):  # other array-like objects
        return data.tolist()

    elif hasattr(data, '__dict__'):  # custom objects
        return sanitize_for_json(data.__dict__)

    else:
        try:
            json.dumps(data)  # probe serializability
            return data
        except (TypeError, ValueError):
            return str(data)  # last resort: stringify



# Downsample high-frequency bars to a lower frequency.
def high_fre_to_low(dft, types='15min'):
    """
    Resample high-frequency OHLCV bars down to a lower frequency.

    Parameters:
        dft: high-frequency data with columns
             ['code', 'date_time', 'open', 'high', 'low', 'close', 'volume', 'turnover'];
             daily-or-coarser targets additionally require 'turnover_rate' and
             'prev_close'; a 'date' column is used as fallback when 'date_time'
             is absent
        types: target level - '5min' '10min' '15min' '30min' '60min' 'H' 'D'
               'W' 'M' 'Q' 'Y' etc.; H D W M mean hour / day / week / month
               (then quarter, year)
        Reference: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects

    Returns:
        df: low-frequency bars sorted by ('code', 'date_time')
    """

    dft = dft.reset_index(drop=True)
    # Fall back to the 'date' column when 'date_time' is missing.
    if 'date_time' not in dft.columns.tolist():
        dft['date_time'] = dft['date']
    # Stable sort (mergesort) keeps rows with equal keys in original order.
    dft = dft.sort_values(by=['code', 'date_time'], ascending=True, kind='mergesort')
    dft['date_time'] = pd.to_datetime(dft['date_time'])
    # Coerce all present numeric bar columns to float before aggregation.
    pro_col_list = ['open', 'high', 'low', 'close', 'volume', 'turnover', 'turnover_rate', 'prev_close']
    for col in pro_col_list:
        if col in dft.columns.tolist():
            dft[col] = dft[col].astype(float)

    # resample() requires a DatetimeIndex.
    dft.index = dft['date_time']
    if types == '60min':
        # 60-minute bars: morning (<12h) and afternoon (>=12h) rows are
        # resampled separately; the morning pass uses a 09:30 offset so bins
        # align with the session open.  NOTE(review): this looks tailored to
        # the A-share 09:30-11:30 / 13:00-15:00 sessions — confirm.
        dft['hour'] = dft['date_time'].map(lambda r: int(str(r)[11:13]))
        #print(dft.head())
        dft21 = dft[(dft['hour'] < 12)].copy().groupby(['code']).resample(rule=types, closed='right', label='right',
                                                                          offset='0 days 09:30:00').agg(
            {'date_time': 'last', 'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'code': 'first',
             'volume': 'sum', 'turnover': 'sum', }).dropna()
        dft22 = dft[(dft['hour'] >= 12)].copy().groupby(['code']).resample(rule=types, closed='right',
                                                                           label='right', ).agg(
            {'date_time': 'last', 'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'code': 'first',
             'volume': 'sum', 'turnover': 'sum', }).dropna()

        dft2 = pd.concat([dft21, dft22], ignore_index=True)
    elif types in ['D', 'W', 'M', 'Q', 'Y']:
        # Daily-or-coarser levels also carry turnover_rate (summed) and
        # prev_close (first value of the period).
        dft2 = dft.groupby(['code']).resample(rule=types, closed='right', label='right').agg(
            {'date_time': 'last', 'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'code': 'first',
             'volume': 'sum', 'turnover': 'sum', 'turnover_rate': 'sum', 'prev_close': 'first'}).dropna()
    else:
        # Remaining intraday levels (5/10/15/30 min, H, ...).
        dft2 = dft.groupby(['code']).resample(rule=types, closed='right', label='right').agg(
            {'date_time': 'last', 'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'code': 'first',
             'volume': 'sum', 'turnover': 'sum', }).dropna()

    dft2 = dft2.reset_index(drop=True)
    dft2 = dft2.sort_values(by=['code', 'date_time'], ascending=True, kind='mergesort')

    # String views of the bar time: 'YYYY-MM-DD' and 'YYYY-MM-DD HH:MM:SS'.
    dft2['date'] = dft2['date_time'].map(lambda r: str(r)[0:10])
    dft2['date_str'] = dft2['date_time'].map(lambda r: str(r)[0:19])

    if types in ['D', 'W', 'M', 'Q', 'Y']:
        dft2 = dft2[['code', 'date', 'date_time', 'open', 'high', 'low', 'close', 'volume', 'turnover', 'turnover_rate',
                     'prev_close']]
    else:
        dft2 = dft2[['code', 'date', 'date_time', 'open', 'high', 'low', 'close', 'volume', 'turnover', 'date_str']]

    return dft2




def process_dataframe_to_json_files(df, timed, time_col='date_time',
                                    output_dir="/Users/mac/Downloads/2025/save/",
                                    code_col='code'):
    """
    Group *df* by *code_col* and write one JSON file per code, mapping
    {unix_timestamp: bar_object}.

    BUG FIXES vs. the original:
    - output_dir was unconditionally overwritten with a hard-coded path,
      silently ignoring the caller's argument; that path is now just the
      default value.
    - timestamp keys are sorted numerically (string sort mis-orders keys of
      different lengths).

    Parameters:
    df: pandas DataFrame; besides *time_col* and *code_col* it must carry
        'open', 'high', 'low', 'close', 'volume', 'turnover' and 'date_str'
    timed: frequency label used in the file name, e.g. '15min' -> '<code>_15.json'
    time_col: name of the datetime column
    output_dir: output directory (created if missing)
    code_col: grouping column name
    """
    os.makedirs(output_dir, exist_ok=True)

    # Ensure the time column is datetime-typed (mutates caller's frame, as before).
    if not pd.api.types.is_datetime64_any_dtype(df[time_col]):
        df[time_col] = pd.to_datetime(df[time_col])

    # Short output keys: a=turnover amount, c/h/l/o=OHLC, v=volume,
    # y/m/d=year/YYYYMM/YYYYMMDD ints, td=YYYYMMDD, t=time string,
    # timestamp=unix seconds.  pc/sf are hard-coded 0.0/0 here — presumably
    # prev-close and some flag; TODO confirm with the consumer.
    keep_cols = ['a','c','d','h','l','m','o','td','pc','sf','t','v','y','timestamp','code','date_time']

    for code, group in df.groupby(code_col):
        print(f"处理代码: {code}, 数据量: {len(group)}")

        # Build {timestamp: record} for this code.
        result_dict = {}

        for _, row in group.iterrows():
            date_t = row[time_col]
            # Second-resolution unix timestamp (naive times: TZ handling is
            # pandas' default — verify if timestamps must be exchange-local).
            timestamp = int(date_t.timestamp())
            # "2025-08-05 10:00:00" -> year=2025, month=202508, day=20250805
            parts = str(date_t).split(' ')[0].split('-')
            year = int(parts[0])
            month = int(parts[0] + parts[1])
            day = int(parts[0] + parts[1] + parts[2])

            row["y"] = year
            row["m"] = month
            row["d"] = day
            row["td"] = day

            row["t"] = row['date_str']
            row["timestamp"] = timestamp

            row["c"] = row["close"]
            row["h"] = row["high"]
            row["l"] = row["low"]
            row["o"] = row["open"]
            row["v"] = row["volume"]

            row["a"] = row["turnover"]
            row["pc"] = 0.0
            row["sf"] = 0

            # Keep only the short keys, minus the time/code columns.
            object_data = row[keep_cols].drop([time_col, code_col]).to_dict()

            result_dict[str(timestamp)] = object_data

        # Sort numerically; lexicographic order breaks for mixed-width keys.
        sorted_result = {k: result_dict[k] for k in sorted(result_dict.keys(), key=int)}

        # e.g. code '510210.SH', timed '15min' -> '510210_15.json'
        etf_code = code.split(".")[0]
        timeds = timed.split("m")[0]
        filename = f"{etf_code}_{timeds}.json"
        filepath = os.path.join(output_dir, filename)

        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(sorted_result, f, indent=2, ensure_ascii=False)

        print(f"已写入文件: {filepath}")






def process_dataframe_to_json_lines(df, timed, time_col='date_time',
                                    output_dir="/Users/mac/Downloads/2025/save/",
                                    code_col='code'):
    """
    Group *df* by *code_col* and write one JSON-Lines file per code
    (one bar object per line, in group order).

    BUG FIXES vs. the original:
    - output_dir was overwritten with a hard-coded path, ignoring the
      caller's argument; that path is now just the default.
    - the output path was built by raw string concatenation (broken unless
      output_dir ended with '/'); os.path.join is used instead.
    - the summary print reported len(df) (whole frame) instead of the
      group's row count.
    - lines are written as produced instead of accumulating one ever-growing
      string (quadratic behavior).

    Parameters:
    df: pandas DataFrame; same schema requirements as
        process_dataframe_to_json_files ('open'..'turnover', 'date_str')
    timed: frequency label used in the file name, e.g. '15min'
    time_col: name of the datetime column
    output_dir: output directory (created if missing)
    code_col: grouping column name
    """
    os.makedirs(output_dir, exist_ok=True)

    # Ensure the time column is datetime-typed.
    if not pd.api.types.is_datetime64_any_dtype(df[time_col]):
        df[time_col] = pd.to_datetime(df[time_col])

    # Same short-key schema as process_dataframe_to_json_files.
    keep_cols = ['a','c','d','h','l','m','o','td','pc','sf','t','v','y','timestamp','code','date_time']

    for code, group in df.groupby(code_col):
        print(f"处理代码: {code}, 数据量: {len(group)}")

        output_file = os.path.join(output_dir, f"{code}_{timed}.jsonl")
        with open(output_file, 'w', encoding='utf-8') as f:
            for _, row in group.iterrows():
                date_t = row[time_col]
                # Second-resolution unix timestamp.
                timestamp = int(date_t.timestamp())
                # "2025-08-05 ..." -> year=2025, month=202508, day=20250805
                parts = str(date_t).split(' ')[0].split('-')
                year = int(parts[0])
                month = int(parts[0] + parts[1])
                day = int(parts[0] + parts[1] + parts[2])

                row["a"] = row["turnover"]
                row["c"] = row["close"]
                row["h"] = row["high"]
                row["l"] = row["low"]
                row["o"] = row["open"]
                row["v"] = row["volume"]
                row["t"] = row['date_str']

                row["y"] = year
                row["m"] = month
                row["d"] = day
                row["td"] = day
                row["timestamp"] = timestamp
                row["pc"] = 0.0
                row["sf"] = 0

                # Keep only the short keys, minus the time/code columns.
                object_data = row[keep_cols].drop([time_col, code_col]).to_dict()

                # Stream each record immediately.
                f.write(json.dumps(object_data, ensure_ascii=False) + '\n')

        print(f"成功转换编码:{code},一共: {len(group)} 行数据到 {output_file}")

import pandas as pd
from typing import Dict, List


def fast_df_to_jsonl(df: pd.DataFrame,
                     key_mapping: Dict[str, str],
                     timestamp_column: str,
                     output_file: str = 'output.jsonl') -> None:
    """
    Quickly convert a DataFrame to JSON Lines, adding derived date columns.

    Improvement over the original: the timestamp column was re-parsed with
    pd.to_datetime once per derived column (five times); it is parsed once
    and reused.

    Parameters:
        df: input DataFrame
        key_mapping: column rename map {old_name: new_name}
        timestamp_column: timestamp column name (as seen AFTER renaming)
        output_file: output file path
    """
    renamed = df.rename(columns=key_mapping)
    ts = pd.to_datetime(renamed[timestamp_column])  # parse once, reuse below

    renamed.assign(
        year=ts.dt.year,
        month=ts.dt.month,
        day=ts.dt.day,
        ymd_date=ts.dt.strftime('%Y-%m-%d'),
        iso_date=ts.dt.strftime('%Y-%m-%dT%H:%M:%S'),
    ).to_json(output_file, orient='records', lines=True, force_ascii=False)

    print(f"✅ 处理完成！输出文件: {output_file}")


# 示例1：基本使用示例
def example_json(df, types):
    """Example 1: export *df* as per-code JSON files at level *types*."""
    process_dataframe_to_json_files(df, types)
    print("示例1处理完成！")


# 示例1：基本使用示例
def example_jsonl(df, types):
    """Example 1 (JSONL variant): export *df* as per-code JSON-Lines files."""
    process_dataframe_to_json_lines(df, types)
    print("示例1处理完成！")

import utils.df_group_json_prd as dj

def example_jsonl_prd(df, fileNumber, code_list, types):
    """Example 3: production JSON-Lines export via utils.df_group_json_prd."""
    target_dir = "/Users/mac/Downloads/2025/save/jsonl"
    # Long column names -> short record keys used in the output.
    short_keys = {'turnover': 'a', 'close': 'c', 'high': 'h', 'low': 'l',
                  'open': 'o', 'volume': 'v', 'date_str': 't'}

    dj.fast_grouped_processing(df, 'code', short_keys, 'date_time',
                               code_list, target_dir, types)
    print("示例3处理完成！")



# 示例2：处理更复杂的数据结构
def example_2():
    """Example 2: a sample frame satisfying the exporter's full schema.

    BUG FIXES vs. the original: the call omitted the required positional
    *timed* argument (immediate TypeError), and the sample data used a
    'time' column plus price/indicator columns, none of which match what
    process_dataframe_to_json_files reads ('date_time', OHLCV, 'turnover',
    'date_str').  The sample now matches that schema so the example runs.
    """
    timestamps = [
        '2024-01-01 09:00:00', '2024-01-01 10:00:00',
        '2024-01-01 14:00:00', '2024-01-01 15:00:00'
    ]
    data_complex = {
        'date_time': timestamps,
        'date_str': timestamps,
        'code': ['STOCK_A.SH', 'STOCK_A.SH', 'STOCK_B.SH', 'STOCK_B.SH'],
        'open': [100.0, 100.9, 50.0, 51.0],
        'high': [101.0, 101.5, 50.8, 52.0],
        'low': [99.8, 100.5, 49.9, 50.6],
        'close': [100.5, 101.2, 50.3, 51.8],
        'volume': [10000.0, 15000.0, 5000.0, 6000.0],
        'turnover': [1005000.0, 1518000.0, 251500.0, 310800.0],
    }

    df = pd.DataFrame(data_complex)
    process_dataframe_to_json_files(df, '60min', output_dir='stock_data')
    print("示例2处理完成！")


# 示例3：从CSV文件读取并处理
def example_3(csv_file_path):
    """
    从CSV文件读取数据并处理
    """
    try:
        # 读取CSV文件
        df = pd.read_csv(csv_file_path)

        # 检查必要的列是否存在
        required_cols = ['time', 'code']
        if not all(col in df.columns for col in required_cols):
            print(f"CSV文件必须包含列: {required_cols}")
            return

        process_dataframe_to_json_files(df, output_dir='csv_output')
        print("CSV文件处理完成！")

    except Exception as e:
        print(f"处理CSV文件时出错: {e}")


# 增强版本：支持更多选项
def advanced_process_dataframe(df, time_col='time', code_col='code',
                               output_dir='output', timestamp_unit='s',
                               exclude_cols=None, include_cols=None):
    """
    Enhanced exporter: one JSON file per code, keyed by unix timestamp.

    Parameters:
    timestamp_unit: 's' = seconds, 'ms' = milliseconds
    exclude_cols: columns to drop from each record
    include_cols: if given, only these columns are kept

    BUG FIXES vs. the original:
    - the final print contained a placeholder instead of the output path
      (filepath was computed but never used in the message);
    - timestamp keys are sorted numerically; string sort mis-orders keys of
      different lengths (e.g. second vs millisecond precision).
    """

    if exclude_cols is None:
        exclude_cols = []

    os.makedirs(output_dir, exist_ok=True)

    # Ensure datetime dtype (mutates the caller's frame, as before).
    if not pd.api.types.is_datetime64_any_dtype(df[time_col]):
        df[time_col] = pd.to_datetime(df[time_col])

    for code, group in df.groupby(code_col):
        result_dict = {}

        for _, row in group.iterrows():
            # Unix timestamp in the requested unit.
            if timestamp_unit == 'ms':
                timestamp = int(row[time_col].timestamp() * 1000)  # milliseconds
            else:
                timestamp = int(row[time_col].timestamp())  # seconds

            if include_cols:
                # Keep only the requested columns.
                object_data = {col: row[col] for col in include_cols
                               if col not in [time_col, code_col]}
            else:
                # Keep everything except the excluded columns.
                object_data = row.drop([time_col, code_col] + exclude_cols).to_dict()

            result_dict[str(timestamp)] = object_data

        # Numeric sort of the timestamp keys.
        sorted_result = {k: result_dict[k] for k in sorted(result_dict.keys(), key=int)}

        filename = f"{code}.json"
        filepath = os.path.join(output_dir, filename)

        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(sorted_result, f, indent=2, ensure_ascii=False)

        print(f"生成: {filepath} (包含 {len(sorted_result)} 条记录)")


# 文件查看工具函数
def check_output_files(directory='output'):
    """检查生成的JSON文件"""
    if not os.path.exists(directory):
        print(f"目录不存在: {directory}")
        return

    files = [f for f in os.listdir(directory) if f.endswith('.json')]
    print(f"在目录 '{directory}' 中找到 {len(files)} 个文件:")

    for file in files:
        filepath = os.path.join(directory, file)
        with open(filepath, 'r', encoding='utf-8') as f:
            data = json.load(f)

        print(f"  {file}: {len(data)} 条记录")


import pandas as pd
import json


def df_to_jsonl_simple(df, output_file):
    """
    Write *df* to *output_file* as JSON Lines (one record per line).

    NaN values are emitted as JSON null; non-ASCII text (e.g. Chinese) is
    written verbatim thanks to ensure_ascii=False.

    BUG FIX: the original had its own usage example indented INSIDE the
    function body, ending with an unconditional recursive call to
    df_to_jsonl_simple — infinite recursion on every invocation.  The
    example has been removed from the body.
    """
    with open(output_file, 'w', encoding='utf-8') as f:
        for _, row in df.iterrows():
            # Replace NaN with None so json emits null instead of NaN.
            row_dict = row.where(pd.notnull(row), None).to_dict()

            f.write(json.dumps(row_dict, ensure_ascii=False) + '\n')

    print(f"成功转换 {len(df)} 行数据到 {output_file}")




# Input data directory.
datadir = "/Users/mac/Downloads/2025/"
# Output directory for converted data.
savedir = "/Users/mac/Downloads/2025/save/"

# All matching input files (gzipped 5-minute ETF price CSVs).
files = glob.glob(os.path.join(datadir, "etf_5m_price*.csv.gz"), recursive=True)
print(files[0:1])



if __name__ == '__main__':
    print("开始处理数据...")
    # Convert to each target level; valid levels include 5min 10min 15min
    # 30min 60min H D W M Q Y (H D W M = hour / day / week / month).

    # '159755.SZ'
    code_list = ['510210.SH']

    type_list = ['5min', '15min', '30min', '60min']

    for t in type_list:
        for file_path in files:
            print('开始转换:', file_path)
            df = pd.read_csv(file_path)
            df2 = high_fre_to_low(df, types=t)
            # Derive the shard label from the 6th path segment, e.g.
            # .../a_b_c_<p3>_<p4>.csv.gz -> "<p3>_<p4>".
            name_parts = file_path.split("/")[5].split("_")
            fileNumber = name_parts[3] + "_" + name_parts[4].split(".")[0]
            example_jsonl_prd(df2, fileNumber, code_list, types=t)
            print('处理完成文件名:', file_path)
            print('===================================')

    # Other runnable examples:
    # example_2()
    # check_output_files('output')
    # check_output_files('stock_data')

    print("\n所有处理完成！")

    # To process a CSV file, uncomment and supply a path:
    # example_3("your_data.csv")





