import os
import sys

import pandas as pd
import rarfile
from datetime import datetime
from joblib import Parallel, delayed
import os.path

# The 16 selected stocks (Shanghai exchange symbols) to extract from the archives.
stocks = [
    'sh600050', # China Unicom
    'sh600150', # China CSSC Holdings (shipbuilding)
    'sh600276', # Hengrui Medicine
    'sh600309', # Wanhua Chemical
    'sh600519', # Kweichow Moutai
    'sh600668', # China State Construction
    'sh600900', # China Yangtze Power
    'sh601088', # China Shenhua Energy
    'sh601138', # Foxconn Industrial Internet
    'sh601288', # Agricultural Bank of China
    'sh601398', # Industrial and Commercial Bank of China
    'sh601628', # China Life Insurance
    'sh601766', # CRRC Corporation
    'sh601816', # Beijing-Shanghai High-Speed Railway
    'sh601857', # PetroChina
    'sh601899', # Zijin Mining
]

# Under base_path there are 12 monthly sub-directories; each contains one rar
# file per trading day, and each rar archives that day's tick data for every
# stock on the exchange.
base_path = os.path.join(os.path.expanduser('~'), 'Data/tick10')
# Filtered per-stock parquet output is written here.
output_path = os.path.join(os.path.expanduser('~'), 'Data/tick10_16')

# Ensure the output directory exists.  exist_ok=True avoids the race between
# a separate exists() check and makedirs() (and is a no-op if already there).
os.makedirs(output_path, exist_ok=True)

# Chinese column headers exactly as they appear in the raw tick CSV files.
# Note the ordering: depth levels interleave price and volume
# (价1, 量1, 价2, 量2, ...) rather than grouping all prices first.
chinese_fields = [
    '时间', '代码', '市场', '最新价', '最高价', '最低价', '总量', '总金额',
    '挂买价1', '挂买量1', '挂买价2', '挂买量2', '挂买价3', '挂买量3',
    '挂买价4', '挂买量4', '挂买价5', '挂买量5', '挂买价6', '挂买量6',
    '挂买价7', '挂买量7', '挂买价8', '挂买量8', '挂买价9', '挂买量9',
    '挂买价10', '挂买量10', '挂卖价1', '挂卖量1', '挂卖价2', '挂卖量2',
    '挂卖价3', '挂卖量3', '挂卖价4', '挂卖量4', '挂卖价5', '挂卖量5',
    '挂卖价6', '挂卖量6', '挂卖价7', '挂卖量7', '挂卖价8', '挂卖量8',
    '挂卖价9', '挂卖量9', '挂卖价10', '挂卖量10', '总成交笔数', 'IOPV'
]

# English column names, aligned 1:1 (by position) with chinese_fields.
# BUGFIX: the English depth fields must interleave price/volume per level to
# match the Chinese ordering above.  The previous grouped ordering
# (bid_price0..9 then bid_volume0..9, ...) made zip() pair e.g. '挂买量1'
# with 'bid_price1', silently mislabeling every depth column after renaming.
english_fields = [
    'date_time', 'symbol', 'market', 'last', 'high', 'low', 'total_volume', 'total_amount',
    *[f'bid_{kind}{i}' for i in range(10) for kind in ('price', 'volume')],
    *[f'ask_{kind}{i}' for i in range(10) for kind in ('price', 'volume')],
    'total_trades', 'iopv'
]

# Chinese -> English rename mapping for DataFrame.rename(columns=...).
field_mapping = dict(zip(chinese_fields, english_fields))

# Columns kept in the parquet output: the scalar tick fields plus the full
# 10-level order-book depth (prices grouped before volumes, bid before ask).
required_fields = [
    'date_time', 'symbol', 'last', 'high', 'low', 'total_volume', 'total_amount',
] + [
    f'{side}_{kind}{level}'
    for side in ('bid', 'ask')
    for kind in ('price', 'volume')
    for level in range(10)
]

# Session boundaries, parsed once at import time.  The previous version
# re-parsed all four boundary strings on every call, which is wasteful since
# this predicate runs once per tick row via DataFrame.apply.
_MORNING_OPEN = datetime.strptime('09:30:00', '%H:%M:%S').time()
_MORNING_CLOSE = datetime.strptime('11:30:00', '%H:%M:%S').time()
_AFTERNOON_OPEN = datetime.strptime('13:00:00', '%H:%M:%S').time()
_AFTERNOON_CLOSE = datetime.strptime('15:00:00', '%H:%M:%S').time()

def is_within_trading_hours(time_str):
    """Return True if *time_str* falls inside a trading session.

    Args:
        time_str: timestamp formatted as '%Y-%m-%d %H:%M:%S'.

    Returns:
        True when the time-of-day is within 09:30:00-11:30:00 or
        13:00:00-15:00:00 (both ends inclusive), else False.

    Raises:
        ValueError: if *time_str* does not match the expected format.
    """
    t = datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S').time()
    return (_MORNING_OPEN <= t <= _MORNING_CLOSE) or \
        (_AFTERNOON_OPEN <= t <= _AFTERNOON_CLOSE)

# Process a single daily archive.
def process_file(file_path):
    """Extract the tracked stocks' tick CSVs from one daily RAR archive and
    write each as a snappy-compressed parquet file into output_path.

    Output files are named ``<date>_<symbol>.parquet``, where the date comes
    from the archive's base name (the archives appear to be named per trading
    day, e.g. ``20230103.rar`` -- confirm against the data layout).

    Any exception is logged and swallowed so a single corrupt archive does
    not abort a sequential or parallel batch run.
    """
    try:
        with rarfile.RarFile(file_path) as rf:
            # Date stamp for output naming, taken from the archive file name.
            date_str = os.path.basename(file_path).split('.')[0]

            for stock in stocks:
                stock_file = f"{stock}.csv"
                if stock_file in rf.namelist():
                    with rf.open(stock_file) as f:
                        # BUGFIX: report the archive path; the old message
                        # interpolated the RarFile object's repr.
                        print(f"processing {file_path} {stock_file}")
                        df = pd.read_csv(f, encoding='GBK')
                        # Rename the Chinese column headers to English.
                        df.rename(columns=field_mapping, inplace=True)
                        # Keep only rows inside trading hours
                        # (09:30-11:30 and 13:00-15:00).
                        df = df[df['date_time'].apply(is_within_trading_hours)]
                        # Persist the selected columns as parquet.
                        df[required_fields].to_parquet(
                            os.path.join(output_path, f"{date_str}_{stock}.parquet"),
                            index=False, compression='snappy')
    except Exception as e:
        print(f"Error processing {file_path}: {e}")
def process_by_iter():
    """Sequentially process every .rar archive found under base_path.

    Walks the monthly sub-directories and hands each daily archive to
    process_file, which extracts the selected stocks' tick data.
    """
    for dirpath, _dirnames, filenames in os.walk(base_path):
        for filename in filenames:
            if not filename.endswith('.rar'):
                continue
            process_file(os.path.join(dirpath, filename))

def process_by_joblib():
    """Process every .rar archive under base_path in parallel with joblib.

    Collects the archive paths first so the worker pool receives a flat
    task list, then runs process_file on each with one worker per core.
    """
    archives = [
        os.path.join(dirpath, filename)
        for dirpath, _dirnames, filenames in os.walk(base_path)
        for filename in filenames
        if filename.endswith('.rar')
    ]

    # n_jobs=-1: use all available CPU cores; each task handles one archive.
    Parallel(n_jobs=-1)(delayed(process_file)(path) for path in archives)

if __name__ == '__main__':
    # process_by_iter()  # sequential alternative, useful for debugging
    process_by_joblib()



