import dolphindb as ddb
from sklearn.preprocessing import StandardScaler
import time
from joblib import Parallel, delayed
import pandas as pd


# Input/output file locations.
# NOTE(review): hard-coded absolute Windows paths — consider moving these to
# CLI arguments or a config file before sharing/automating this script.
moneyflow_path = r'F:\Personal\data\merged\moneyflow.csv'  # daily money-flow data (large, read in chunks below)
limit_list_path = r'F:\Personal\data\merged\limit_list_d.csv'  # daily limit-up/down list (small)
stk_price_path = r'F:\Personal\data\merged\stk_factor.csv'  # daily price/factor data (large)
listed_company_path = r'F:\Personal\data\merged\listed_company.csv'  # listed-company master table (small)
zhuban_zt_path = r'F:\Personal\data\merged\zhuban_price_path.csv'  # output: merged main-board limit-up dataset

# Load the (relatively small) daily limit-up/down list.
limit_list_df = pd.read_csv(limit_list_path)

# Load the (relatively small) listed-company master table.
listed_company_df = pd.read_csv(listed_company_path)

# Keep main-board stocks only: drop STAR-market (科创板) and ChiNext (创业板) listings.
listed_company_df = listed_company_df[~listed_company_df['listed_sector'].isin(['科创板', '创业板'])]

# Drop ST / *ST (special-treatment) stocks.
# na=False treats a missing abbreviation as "not ST" so boolean indexing
# doesn't fail on NaN values.
listed_company_df = listed_company_df[~listed_company_df['secu_abbr'].str.contains('ST', na=False)]

# Drop Beijing Stock Exchange listings.
# BUGFIX: the original repeated the ST filter here even though the comment
# said "filter out BSE". Tushare-style codes end in '.BJ' for BSE stocks —
# TODO confirm listed_company.csv's 'ts_code' column follows that format.
listed_company_df = listed_company_df[~listed_company_df['ts_code'].str.endswith('.BJ', na=False)]

# Unique (ts_code, trade_date) pairs used to pre-filter the big CSVs below.
filter_keys = limit_list_df[['ts_code', 'trade_date']].drop_duplicates()

def filter_chunk(chunk, filter_keys):
    """Return only the rows of ``chunk`` whose (ts_code, trade_date) pair
    also appears in ``filter_keys`` (i.e. an inner join on those columns)."""
    join_cols = ['ts_code', 'trade_date']
    matched = chunk.merge(filter_keys, on=join_cols)
    return matched

# Read and filter the two large CSVs chunk by chunk, fanning the per-chunk
# inner joins out to all available cores with joblib.
# NOTE(review): every chunk is pickled to a worker process; for pure pandas
# merges the serialization overhead may dominate — profile before keeping
# n_jobs=-1 versus a simple sequential loop.
chunksize = 100000
moneyflow_filtered_chunks = Parallel(n_jobs=-1)(delayed(filter_chunk)(chunk, filter_keys) for chunk in pd.read_csv(moneyflow_path, chunksize=chunksize))
stk_price_filtered_chunks = Parallel(n_jobs=-1)(delayed(filter_chunk)(chunk, filter_keys) for chunk in pd.read_csv(stk_price_path, chunksize=chunksize))

# Stitch the filtered chunks back into single DataFrames (file order preserved).
filtered_moneyflow_df = pd.concat(moneyflow_filtered_chunks, ignore_index=True)
filtered_stk_price_df = pd.concat(stk_price_filtered_chunks, ignore_index=True)

# Price data is needed in full below (lead/lag features must be computed over
# each stock's complete history, not just limit-up days), so read it once here.
stk_price_df = pd.read_csv(stk_price_path)

# Attach the limit-list columns to the money-flow rows (inner join).
# BUGFIX: the original re-read the entire moneyflow and limit-list CSVs and
# merged those, discarding the chunked/parallel filtering done above. Joining
# the pre-filtered frame with the already-loaded limit_list_df yields the
# same inner-join result (same rows, columns, and order) without re-reading
# the large money-flow file.
merged_df = pd.merge(filtered_moneyflow_df, limit_list_df, on=['ts_code', 'trade_date'], how='inner')


def _add_price_features(df):
    """Return ``df`` sorted by (ts_code, trade_date) with lead/lag price
    features added, computed independently for each stock.

    Naming convention (as used by the original t_10/t_15/t_20 columns):
    ``t{k}_*`` / ``t_{k}_*``  = value k trading days in the FUTURE,
    ``tm{k}_*`` / ``tm_{k}_*`` = value k trading days in the PAST.

    Assumes df carries the columns ts_code, trade_date, open, high, low,
    close, pre_close, vol and pct_change — TODO confirm against stk_factor.csv.

    BUGFIX: the original applied shift()/rolling() to the whole stacked
    multi-stock frame, so values leaked across stock boundaries; every
    shift/rolling below is now per ts_code (after sorting by date).
    """
    df = df.sort_values(['ts_code', 'trade_date']).reset_index(drop=True)
    g = df.groupby('ts_code', sort=False)

    # --- future values (negative shift = look ahead within each stock) ---
    df['t1_close'] = g['close'].shift(-1)
    df['t1_vol'] = g['vol'].shift(-1)
    df['t1_pct_change'] = g['pct_change'].shift(-1)
    df['t1_high'] = g['high'].shift(-1)
    df['t1_low'] = g['low'].shift(-1)
    # NOTE(review): the original used shift(+2)/shift(+5)/shift(+6) for the
    # five columns below, which looks BACKWARD and contradicts both the t{k}
    # naming convention (t_10/t_15/t_20 all shift(-k)) and their use as
    # future prices in next_week_return / t1_next_week_return; changed to
    # look ahead — confirm against the strategy's intent.
    df['t2_open'] = g['open'].shift(-2)
    df['t2_pct_change'] = g['pct_change'].shift(-2)
    df['t2_high'] = g['high'].shift(-2)
    df['t5_close'] = g['close'].shift(-5)
    df['t6_close'] = g['close'].shift(-6)
    df['t_10_close'] = g['close'].shift(-10)
    df['t_15_close'] = g['close'].shift(-15)
    df['t_20_close'] = g['close'].shift(-20)

    # --- past values (positive shift = look back) and rolling volume mean ---
    df['tm1_vol'] = g['vol'].shift(1)
    df['tm1_pct_change'] = g['pct_change'].shift(1)
    df['tm_3_close'] = g['close'].shift(3)
    df['tm_5_close'] = g['close'].shift(5)
    df['tm_20_close'] = g['close'].shift(20)
    # 5-day mean volume; first 4 rows of each stock stay NaN (default min_periods).
    df['m5d_vol'] = g['vol'].transform(lambda s: s.rolling(window=5).mean())

    # --- derived returns (ratios of the columns computed above) ---
    df['t1_high_return'] = df['t1_high'] / df['close'] - 1
    df['last_week_return'] = df['close'] / df['tm_5_close']
    df['last_3days_return'] = df['close'] / df['tm_3_close']
    df['last_month_return'] = df['close'] / df['tm_20_close']
    df['next_week_return'] = df['t5_close'] / df['close']
    df['t1_next_week_return'] = df['t6_close'] / df['t1_close']
    df['next_2week_return'] = df['t_10_close'] / df['close']
    df['next_3week_return'] = df['t_15_close'] / df['close']
    df['next_month_return'] = df['t_20_close'] / df['close']
    df['high_return'] = df['high'] / df['pre_close'] - 1
    return df


stk_price_df = _add_price_features(stk_price_df)


merged_df = pd.merge(merged_df, stk_price_df, on=['ts_code', 'trade_date'], how='inner')
merged_df = pd.merge(merged_df, listed_company_df, on=['ts_code'], how='inner')


merged_df.to_csv(zhuban_zt_path, index=False)
# 执行inner join，基于'ts_code'和'trade_date'
# final_df = pd.merge(merged_df, df, on=['ts_code', 'trade_date'], how='inner')

#
# # 对每个secu_code分组，并应用StandardScaler
# for name, group in df.groupby('secu_code'):
#     scaler = StandardScaler()
#     scaled_factor_values = scaler.fit_transform(group['factor_value'].values.reshape(-1, 1))
#     df.loc[group.index, 'factor_value_scaled'] = scaled_factor_values.flatten()
#
# # 绘制标准化后的factor_value折线图，按secu_code分组
# plt.figure(figsize=(10, 6))  # 可选：设置图形大小
# for name, group in df.groupby('secu_code'):
#     plt.plot(group['trading_date'], group['factor_value_scaled'], label=name)
#
# # 设置图例、标签和标题
# plt.legend()
# plt.xlabel('Trading Date')
# plt.ylabel('Factor Value (Scaled)')
# plt.title('Factor Value over '
#           'Time by secu_code (Scaled)')
#
# # 显示图形
# plt.show()
