import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
import torch.nn as nn
import torch.optim as optim
from modules import UsagiNet, LossFunc, WMAPE
import pickle
from sklearn.preprocessing import LabelEncoder
from datetime import datetime, timedelta

print("📊 加载增强特征数据...")
data_ori = pd.read_csv('./fund_enhanced_features.csv')
# Forward-fill missing values. `fillna(method='ffill')` is deprecated since
# pandas 2.1; `DataFrame.ffill()` is the supported equivalent.
data_ori = data_ori.ffill()
# Sort by (date, fund) so that each batch of funds lines up row-for-row with
# the per-fund LLM features loaded below.
data_ori.sort_values([ 'transaction_date','fund_code'], inplace=True)
print(f"✅ 数据加载完成: {data_ori.shape}")
print(f"📋 数据列名: {list(data_ori.columns)}")

print('加载LLM 特征')
with open('llm_features_by_fund.pkl', 'rb') as file:
    data_llm = pickle.load(file)
# Stack each fund's embedding vector into one tensor, preserving the dict's
# insertion order so rows stay aligned with the fund ordering.
features_llm = torch.tensor(
    np.array([entry['embeddings'] for entry in data_llm['features'].values()])
)

seq_len = 30 # length (in days) of the input window fed to the network
num_epochs = 20 # number of training epochs
loss_penalty = torch.linspace(1,1e-2,7) # per-horizon loss weights: day 1 weighted 1.0, decaying to 0.01 at day 7
current_date = datetime(2025,6,30) # last day covered by the training data; predictions start the next day

# Select and classify the model's input columns.
print("\n🔧 处理特征数据...")

# Identifier and target columns are never fed to the model.
exclude_cols = ['fund_code', 'transaction_date', 'apply_amt', 'redeem_amt']
potential_features = [col for col in data_ori.columns if col not in exclude_cols]

print(f"📋 潜在特征数: {len(potential_features)}")

# Bucket candidates into numeric vs. categorical by dtype.
numeric_features = []
categorical_features = []

for name in potential_features:
    is_categorical = data_ori[name].dtype == 'object' or str(data_ori[name].dtype) == 'category'
    (categorical_features if is_categorical else numeric_features).append(name)

print(f"📊 数值特征: {len(numeric_features)} 个")
print(f"📊 分类特征: {len(categorical_features)} 个")

# Label-encode every categorical column so the network gets numeric inputs.
data_processed = data_ori.copy()
label_encoders = {}

for col in categorical_features:
    print(f"🏷️ 编码分类特征: {col}")
    encoder = LabelEncoder()
    # Replace NaNs with a sentinel category first; LabelEncoder rejects NaN.
    data_processed[col] = data_processed[col].fillna('unknown')
    data_processed[col] = encoder.fit_transform(data_processed[col])
    label_encoders[col] = encoder  # kept so predictions can be decoded later

# Final model input columns: numeric first, then the encoded categoricals.
features = numeric_features + categorical_features
print(f"📋 最终特征数: {len(features)}")
print(f"📋 特征示例: {features[:10]}")

# Assemble one [time, 2 + num_features] matrix per fund (targets in the
# first two columns, features after).
print("\n📦 构建时间序列数据...")
data = []

fund_codes = data_processed.fund_code.unique()
print(f"📊 基金数量: {len(fund_codes)}")

for idx, code in enumerate(fund_codes):
    per_fund = data_processed[data_processed.fund_code == code].sort_values('transaction_date')

    # Target columns first, then the feature columns, stacked side by side.
    fund_matrix = np.concatenate(
        [per_fund[['apply_amt', 'redeem_amt']].values,
         per_fund[features].values.astype(float)],
        axis=1,
    )
    data.append(fund_matrix)

    if idx < 3:  # log only the first few funds
        print(f"   基金 {code}: 数据形状 {fund_matrix.shape}")

data = np.array(data)
print(f"📊 最终数据形状: {data.shape}")  # [num funds, time steps, 2 + num features]

# Robust (quartile-based) scaling, per fund and per column.
print("\n⚙️ 数据标准化...")
# Plain min-max scaling is deliberately avoided: outliers inflate the range,
# so the inverse transform of the predictions would be badly biased.
# Percentiles are taken over the time axis (axis=1), yielding one
# (lower bound, IQR) pair per fund per column.
lower_bound = np.percentile(data, 25, axis=1)
upper_bound = np.percentile(data, 75, axis=1)
range_values = upper_bound - lower_bound + 1e-10  # epsilon guards a zero IQR
# Broadcasting over the time axis — numerically identical to the original
# np.tile version, without materializing the tiled copies.
data = (data - lower_bound[:, np.newaxis, :]) / range_values[:, np.newaxis, :]
data = torch.tensor(data, dtype=torch.float32)

# Split the normalized cube into targets (first two columns) and inputs.
targets = data[:,:,0:2]          # apply_amt, redeem_amt
features_tensor = data[:,:,2:]   # all remaining feature columns

print(f"📊 目标变量形状: {targets.shape}")
print(f"📊 特征变量形状: {features_tensor.shape}")

# Hold out the last 7 days as the test target. The test input window must be
# seq_len steps long to match the training windows — the original hard-coded
# 14 steps ([-21:-7]), which disagreed with seq_len = 30.
y_test = targets[:,-7:,:]
X_test = features_tensor[:, -(seq_len + 7):-7, :]

print(f"📊 测试数据 - 输入: {X_test.shape}, 目标: {y_test.shape}")

# Build training pairs with a sliding window: seq_len days of features
# predict the following 7 days of targets.
print("\n🏋️ 构造训练数据...")
len_train = data.shape[1] - 7      # last 7 days are reserved for the test set
window_size = seq_len + 7
X_train = []
y_train = []
current = 0
# A window starting at `current` spans indices [current, current+window_size);
# it is valid as long as its targets end before the held-out test period,
# i.e. current + window_size <= len_train. The original bound
# `< len_train - 1` was off by two and silently dropped the last two windows.
while current + window_size <= len_train:
    window_features = features_tensor[:,current:current+seq_len,:]
    window_targets = targets[:,current+seq_len:current+seq_len+7,:]
    X_train.append(window_features)
    y_train.append(window_targets)
    current += 1

# Stack windows along the batch axis: [num_windows * num_funds, ...].
X_train = torch.cat(X_train)
y_train = torch.cat(y_train)

print(f"📊 训练数据 - 输入: {X_train.shape}, 目标: {y_train.shape}")

train_dataset = TensorDataset(X_train, y_train)
# shuffle=False with batch_size=20 keeps each batch as the 20 funds of one
# window, so batch rows stay aligned with features_llm — this assumes exactly
# 20 funds; TODO confirm against the data.
train_loader = DataLoader(train_dataset, batch_size=20, shuffle=False)

# Model, loss, and optimizer setup.
input_size = features_tensor.shape[2]  # number of input feature columns
print(f"\n🧠 初始化模型 - 输入维度: {input_size}")

# output_size=14 is the flattened 7-day x 2-target forecast.
model = UsagiNet(input_size=input_size, hidden_size=512, output_size=14, do_prob=0.1)

criterion = LossFunc(loss_penalty)                   # horizon-weighted loss
optimizer = optim.Adam(model.parameters(), lr=1e-3)

print(f"\n🚀 开始训练...")

# Training loop: one optimization pass over all sliding windows per epoch,
# then an evaluation on the held-out last-7-days split.
for epoch in range(num_epochs):
    model.train()
    epoch_loss = 0
    num_batches = 0
    
    for batch_X, batch_y in train_loader:
        optimizer.zero_grad()
        # The full LLM feature tensor is passed with every batch; assumes the
        # batch rows line up one-to-one with the per-fund LLM embeddings
        # (batch_size == number of funds) — TODO confirm.
        outputs = model(batch_X, features_llm)
        batch_size = batch_X.shape[0]
        # The model emits 14 values per sample -> reshape to (7 days, 2 targets).
        outputs = outputs.reshape(batch_size, 7, 2).contiguous()
        loss = criterion(outputs, batch_y)
        loss.backward()
        optimizer.step()
        
        epoch_loss += loss.item()
        num_batches += 1
    
    # Evaluate on the held-out window without tracking gradients.
    model.eval()
    with torch.no_grad():
        test_outputs = model(X_test, features_llm)
        num_funds = X_test.shape[0]
        test_outputs = test_outputs.reshape(num_funds, 7, 2).contiguous()
        test_loss = criterion(test_outputs, y_test)
        # WMAPE presumably returns one value per target column (apply, redeem)
        # — verify against modules.WMAPE.
        wmape = WMAPE(y_test, test_outputs)
        score = wmape.sum()/2
    
    avg_train_loss = epoch_loss / num_batches
    print(f'Epoch [{epoch+1}/{num_epochs}], Train Loss: {avg_train_loss:.4f}, Test Loss: {test_loss.item():.4f}')
    print(f'WMAPE of apply {wmape[0]}, WMAPE of reedem {wmape[1]}, WAMPE {score}')
    
    # Per-day error breakdown (kept disabled)
    '''
    if epoch == num_epochs - 1:  # 最后一个epoch显示详细信息
        loss_7days = torch.sum(torch.abs(test_outputs - y_test), dim=0)
        print("📊 每日预测误差:")
        for ii in range(7):
            print(f'   第{ii+1}天: apply_amt={loss_7days[ii,0]:.4f}, redeem_amt={loss_7days[ii,1]:.4f}')
    '''

print(f"\n✅ 训练完成！")
print(f"📈 最终预测结果形状: {test_outputs.shape}")
print(f"🎯 使用了 {len(features)} 个特征进行训练")

# Forecast the 7 days following the last training date. The input window
# must be seq_len steps long to match training — the original hard-coded the
# last 14 steps, which disagreed with seq_len = 30.
X_predict = features_tensor[:, -seq_len:, :]
predict_date = [(current_date+timedelta(days=ii)).strftime('%Y%m%d') for ii in range(1,8)]
model.eval()
with torch.no_grad():
    predict_outputs = model(X_predict, features_llm)
    num_funds = X_predict.shape[0]
    predict_outputs = predict_outputs.reshape(num_funds, 7, 2)

# Invert the robust scaling for the two target columns only:
# x = x_norm * IQR + 25th percentile. Broadcasting replaces the original
# np.tile copies; no detach() needed — the tensor was built under no_grad.
predict_outputs = predict_outputs.numpy()
predict_outputs = predict_outputs * range_values[:, np.newaxis, 0:2] + lower_bound[:, np.newaxis, 0:2]

# Assemble the output frame: one row per (fund, date).
predictions = []
for ii in range(num_funds):
    frame = pd.DataFrame(predict_outputs[ii], columns=['apply_amt','redeem_amt'])
    frame['transaction_date'] = predict_date
    frame['fund_code'] = fund_codes[ii]
    predictions.append(frame)

predictions = pd.concat(predictions)
predictions = predictions[['fund_code','transaction_date','apply_amt','redeem_amt']]
predictions.sort_values([ 'transaction_date','fund_code'], inplace=True)
predictions.to_csv('predict_result.csv', index=False)

