import os
import random
import math

from tqdm import tqdm

os.environ['KMP_DUPLICATE_LIB_OK']='True'
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from sklearn.model_selection import TimeSeriesSplit, train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import f1_score, classification_report
import matplotlib.pyplot as plt
from PIL import Image
import processor_thx as thx
# Seed every RNG source so experiment runs are reproducible.
def set_seed(seed=52):
    """Fix Python, NumPy and PyTorch seeds and force deterministic cuDNN kernels."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    # Trade cuDNN autotuning speed for run-to-run determinism.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False



# Select GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"使用设备: {device}")
# Feature columns fed to the time-series branch: raw OHLCV/turnover plus technical indicators.
ft=['open', 'high', 'low', 'close', 'volume', 'amount',
                        'macd', 'macd_signal','macd_histogram','rsi', 'kdj_k', 'kdj_j','kdj_d', 'ma5', 'ma13', 'ma60']
# Data preprocessing helpers
class StockDataPreprocessor:
    """Scales the tabular indicator features and renders K-line chart tensors.

    The scaler must be fitted on training data (``is_train=True``) before it can
    transform validation/inference data.
    """

    def __init__(self):
        self.scaler = StandardScaler()
        self.features = ft  # module-level feature column list

    def preprocess_time_series(self, df, is_train=True):
        """Standardize the feature columns of ``df``.

        Args:
            df: DataFrame containing every column in ``self.features``.
            is_train: fit the scaler (True) or reuse the previously fitted one.

        Returns:
            np.ndarray of shape (len(df), len(self.features)).

        Raises:
            ValueError: if a required feature column is missing.
        """
        for feature in self.features:
            if feature not in df.columns:
                raise ValueError(f"数据中缺少必要特征: {feature}")

        data = df[self.features].values
        # Fit only on training data; inference reuses the fitted statistics.
        if is_train:
            return self.scaler.fit_transform(data)
        return self.scaler.transform(data)

    def create_sequences(self, data, window_size=90):
        """Slice ``data`` into all overlapping windows of ``window_size`` rows."""
        return np.array([data[i:i + window_size]
                         for i in range(len(data) - window_size + 1)])

    def preprocess_image(self, date):
        """Render the K-line chart for ``date`` and return it as a tensor on ``device``.

        NOTE(review): the symbol is hard-coded to '002536' — confirm intentional.
        Assumes thx returns a numpy ndarray image.
        """
        img = thx.plot_stock_chart_with_actions('002536', date, action=False, retrueImage=True)
        return torch.from_numpy(img).to(device)

# Dataset pairing each time-series window with its K-line chart image
class StockDataset(Dataset):
    def __init__(self, time_series_sequences, image_paths, labels):
        """
        Args:
            time_series_sequences: array of shape (num_samples, window_size, num_features).
            image_paths: one chart-image path per sample.
            labels: per-sample label (0=buy, 1=hold, 2=sell).
        """
        self.time_series = time_series_sequences
        self.image_paths = image_paths
        self.labels = labels
        # Build the image pipeline ONCE here instead of on every __getitem__ call
        # (the original re-created the Compose object per item).
        self.transform = transforms.Compose([
            transforms.Resize((1344, 1344)),
            transforms.RandomAffine(degrees=0, translate=(0.05, 0.05)),  # slight shift augmentation
            transforms.RandomAdjustSharpness(sharpness_factor=1.2, p=0.5),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # ImageNet stats
        ])

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # Time-series window -> float32 tensor
        ts_tensor = torch.FloatTensor(self.time_series[idx])
        # Load, augment and normalize the chart image
        image = Image.open(self.image_paths[idx]).convert('RGB')
        image_tensor = self.transform(image)
        label = self.labels[idx]
        return ts_tensor, image_tensor, torch.tensor(label, dtype=torch.long)

# Time-series branch: Transformer encoder over daily indicator windows
class TimeSeriesTransformer(nn.Module):
    """Encodes a (batch, seq_len, input_dim) window into a 256-dim feature vector."""

    def __init__(self, input_dim, d_model=128, nhead=8, num_layers=6, dropout=0.3):
        super(TimeSeriesTransformer, self).__init__()
        self.d_model = d_model
        # Project raw indicator features into the model dimension.
        self.embedding = nn.Linear(input_dim, d_model)
        # Inject order information before the encoder.
        self.pos_encoder = PositionalEncoding(d_model, dropout)
        # Stack of self-attention encoder layers (batch-first tensors).
        self.transformer_encoder = TransformerEncoder(
            TransformerEncoderLayer(d_model, nhead, dim_feedforward=512,
                                    dropout=dropout, batch_first=True),
            num_layers,
        )
        # Final projection into the shared 256-dim fusion space.
        self.output_projection = nn.Linear(d_model, 256)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """x: (batch, seq_len, input_dim) -> (batch, 256)."""
        hidden = self.pos_encoder(self.embedding(x) * math.sqrt(self.d_model))
        hidden = self.dropout(self.transformer_encoder(hidden))
        pooled = hidden.mean(dim=1)  # average-pool over the time axis
        return self.output_projection(pooled)

# Sinusoidal positional encoding (Vaswani et al., 2017)
class PositionalEncoding(nn.Module):
    """Adds fixed sinusoidal position encodings to a sequence of embeddings.

    BUG FIX: the original tried to detect batch-first input by comparing tensor
    dims against ``pe.shape[0]`` (= max_len), which never matched in practice;
    the fallback then added ``pe[:batch_size]`` to a (batch, seq, d_model)
    tensor, giving every batch item one CONSTANT encoding (its batch index)
    instead of per-position encodings. An explicit ``batch_first`` flag (default
    True, matching the batch-first TimeSeriesTransformer) replaces the heuristic.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000, batch_first=True):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.batch_first = batch_first

        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        # pe shape: (max_len, 1, d_model); even dims get sin, odd dims get cos.
        pe = torch.zeros(max_len, 1, d_model)
        pe[:, 0, 0::2] = torch.sin(position * div_term)
        pe[:, 0, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add position encodings and apply dropout.

        x: (batch, seq, d_model) when batch_first, else (seq, batch, d_model).
        """
        if self.batch_first:
            # (seq, 1, d_model) -> (1, seq, d_model): broadcast over the batch.
            x = x + self.pe[:x.size(1)].transpose(0, 1)
        else:
            x = x + self.pe[:x.size(0)]
        return self.dropout(x)


class DynamicDownsampleConv(nn.Module):
    """Depthwise conv that downsamples an input resolution towards a target one.

    The stride is the ceiling of the input/output size ratio; with the default
    1344 -> 224 mapping (kernel 3, no padding) the output is exactly 224x224.
    """

    def __init__(self, input_height, input_width, output_height=224, output_width=224, in_channels=3, out_channels=3):
        super().__init__()
        # Ceiling division == floor division plus one when there is a remainder,
        # which is exactly the original stride rule.
        self.stride_h = -(-input_height // output_height)
        self.stride_w = -(-input_width // output_width)

        # Depthwise conv (one group per input channel) with the derived strides.
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=(self.stride_h, self.stride_w),
            padding=0,
            groups=in_channels,
        )

    def forward(self, x):
        """x: (batch, in_channels, input_height, input_width) -> downsampled map."""
        return self.conv(x)

# Image branch - Swin Transformer backbone
class ImageBranch(nn.Module):
    """Extracts a 256-dim feature vector from a K-line chart image."""

    def __init__(self, pretrained=True, freeze_layers=4, dropout=0.3):
        super(ImageBranch, self).__init__()
        # Map arbitrary chart resolutions (built for 1344x1344) down to the
        # 3x224x224 input Swin-T expects.
        self.ddc = DynamicDownsampleConv(input_height=1344, input_width=1344)
        self.swin = models.swin_t(pretrained=pretrained)
        # (Removed leftover debug print of the full backbone architecture.)

        # Freeze early pretrained weights for transfer learning.
        # NOTE(review): "freeze_layers * 100" counts parameter TENSORS, not
        # layers — this only approximates freezing the first N stages.
        if freeze_layers > 0:
            for i, param in enumerate(self.swin.parameters()):
                if i < freeze_layers * 100:
                    param.requires_grad = False

        # Replace the classification head with a 256-dim projection.
        in_features = self.swin.head.in_features
        self.swin.head = nn.Sequential(
            nn.Linear(in_features, 512),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(512, 256)
        )

    def forward(self, x):
        # x: (batch, 3, 1344, 1344) chart image, downsampled before the backbone.
        return self.swin(self.ddc(x))

# Cross-attention fusion of the two modality embeddings
class CrossAttentionFusion(nn.Module):
    """Fuses time-series and image features via bidirectional cross-attention.

    Both inputs are (batch, d_model); the output is their concatenation,
    shape (batch, 2 * d_model).
    """

    def __init__(self, d_model=256, nhead=4, dropout=0.2):
        super(CrossAttentionFusion, self).__init__()
        # One attention module is shared by both attention directions.
        self.cross_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=True)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, ts_features, img_features):
        # Treat each feature vector as a length-1 sequence: (batch, 1, d_model).
        ts_seq, img_seq = ts_features.unsqueeze(1), img_features.unsqueeze(1)

        # Direction 1: time-series queries attend over the image features.
        ts_update, _ = self.cross_attn(ts_seq, img_seq, img_seq)
        ts_seq = self.norm1(ts_seq + self.dropout(ts_update))

        # Direction 2: image queries attend over the (already updated) time-series.
        img_update, _ = self.cross_attn(img_seq, ts_seq, ts_seq)
        img_seq = self.norm2(img_seq + self.dropout(img_update))

        # Drop the sequence axis and concatenate along the feature dimension.
        return torch.cat((ts_seq.squeeze(1), img_seq.squeeze(1)), dim=1)

# Full multi-modal model: time-series branch + image branch + cross-attention fusion.
# NOTE: attribute names (ts_branch, img_branch, ...) define the state_dict keys
# used by the saved checkpoints (best_model.pth) — do not rename them.
class MultiModalStockPredictor(nn.Module):
    def __init__(self, input_dim=11, d_model=128, nhead_ts=8, num_layers_ts=6, 
                 freeze_layers_img=4, nhead_cross=4, dropout=0.3):
        """
            Initialize the multi-modal stock prediction model.

            Args:
            input_dim (int): number of time-series input features (open, high, low,
                close, ...). NOTE(review): the default 11 does not match the
                16-entry ``ft`` list main() passes — confirm which is intended.
            d_model (int): Transformer embedding dimension of the time-series branch.
            nhead_ts (int): attention heads in the time-series Transformer.
            num_layers_ts (int): encoder layers in the time-series Transformer.
            freeze_layers_img (int): pretrained image-branch layers to freeze (transfer learning).
            nhead_cross (int): attention heads in the cross-attention fusion module.
            dropout (float): dropout probability used throughout, to limit overfitting.
        """
        super(MultiModalStockPredictor, self).__init__()
        
        # Time-series branch: (batch, seq, input_dim) -> (batch, 256)
        self.ts_branch = TimeSeriesTransformer(
            input_dim=input_dim,
            d_model=d_model,
            nhead=nhead_ts,
            num_layers=num_layers_ts,
            dropout=dropout
        )
        
        # Image branch: chart image -> (batch, 256)
        self.img_branch = ImageBranch(
            pretrained=True,
            freeze_layers=freeze_layers_img,
            dropout=dropout
        )
        
        # Bidirectional cross-attention fusion of the two 256-dim embeddings
        self.cross_attn_fusion = CrossAttentionFusion(
            d_model=256,
            nhead=nhead_cross,
            dropout=dropout
        )
        
        # Final classifier over the fused representation
        self.classifier = nn.Sequential(
            nn.Linear(512, 128),  # 256 + 256 = 512
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(128, 3)  # 3 classes: buy, hold, sell
        )
        
    def forward(self, ts_data, img_data):
        # Time-series feature extraction
        ts_features = self.ts_branch(ts_data)
        
        # Image feature extraction
        img_features = self.img_branch(img_data)
        
        # Cross-attention fusion -> (batch, 512)
        fused_features = self.cross_attn_fusion(ts_features, img_features)
        
        # Class logits (batch, 3)
        output = self.classifier(fused_features)
        
        return output

# Training loop
def train_model(model, train_loader, val_loader, criterion, optimizer, scheduler, 
                num_epochs=15, device=device):
    """Train the model, track per-epoch metrics and keep the best checkpoint.

    Returns:
        (model, history): the model with the best-validation-F1 weights loaded
        (unchanged if no epoch improved), and a dict of per-epoch
        'train_loss'/'train_f1'/'val_loss'/'val_f1' lists.
    """
    import copy  # local import: only needed to snapshot the best weights

    model.to(device)
    best_val_f1 = 0.0
    best_model_weights = None

    # Training history for later plotting/analysis
    history = {
        'train_loss': [],
        'train_f1': [],
        'val_loss': [],
        'val_f1': []
    }

    for epoch in range(num_epochs):
        print(f'Epoch {epoch+1}/{num_epochs}')
        print('-' * 10)

        # ---- training phase ----
        model.train()
        running_loss = 0.0
        all_preds = []
        all_labels = []
        train_pbar = tqdm(enumerate(train_loader), total=len(train_loader), desc=f'Epoch {epoch + 1}/{num_epochs}')

        for i, (ts_data, img_data, labels) in train_pbar:
            ts_data = ts_data.to(device)
            img_data = img_data.to(device)
            labels = labels.to(device, dtype=torch.long)

            optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                outputs = model(ts_data, img_data)
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

            # Weight the loss by batch size so uneven last batches average correctly.
            running_loss += loss.item() * ts_data.size(0)
            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())
            train_pbar.set_description(f'Epoch {epoch+1}/{num_epochs} Loss: {loss.item():.4f}')

        epoch_loss = running_loss / len(train_loader.dataset)
        epoch_f1 = f1_score(all_labels, all_preds, average='weighted')
        history['train_loss'].append(epoch_loss)
        history['train_f1'].append(epoch_f1)
        print(f'Train Loss: {epoch_loss:.4f} F1: {epoch_f1:.4f}')

        # ---- validation phase ----
        model.eval()
        running_loss = 0.0
        all_preds = []
        all_labels = []

        for ts_data, img_data, labels in val_loader:
            ts_data = ts_data.to(device)
            img_data = img_data.to(device)
            labels = labels.to(device)

            with torch.no_grad():
                outputs = model(ts_data, img_data)
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels)

            running_loss += loss.item() * ts_data.size(0)
            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

        epoch_loss = running_loss / len(val_loader.dataset)
        epoch_f1 = f1_score(all_labels, all_preds, average='weighted')
        history['val_loss'].append(epoch_loss)
        history['val_f1'].append(epoch_f1)
        print(f'Val Loss: {epoch_loss:.4f} F1: {epoch_f1:.4f}')

        # LR schedule steps once per epoch
        scheduler.step()

        # Keep the best checkpoint by validation F1. deepcopy is REQUIRED:
        # state_dict() returns references to the live parameter tensors, which
        # subsequent epochs mutate in place — the original kept a stale alias
        # that always equaled the final (not best) weights.
        if epoch_f1 > best_val_f1:
            best_val_f1 = epoch_f1
            best_model_weights = copy.deepcopy(model.state_dict())
            torch.save(best_model_weights, 'best_model.pth')
            torch.save(best_model_weights, f'{best_val_f1}_model.pth')

    # Restore the best checkpoint, if any epoch beat the initial 0.0 F1
    # (the original crashed on load_state_dict(None) when none did).
    if best_model_weights is not None:
        model.load_state_dict(best_model_weights)
    return model, history

# Inference helper
def predict(model, ts_data, img_data, preprocessor, device=device):
    """Predict the trading decision for a single sample.

    Args:
        model: trained MultiModalStockPredictor.
        ts_data: DataFrame of raw indicator features; scaled with the
            preprocessor's already-fitted scaler (is_train=False).
        img_data: forwarded to preprocessor.preprocess_image — per that method's
            signature this is a date, despite the name suggesting an image.
        preprocessor: fitted StockDataPreprocessor instance.
        device: target device for model and tensors.

    Returns:
        dict with 'decision_code', human-readable 'decision' and per-class
        'probabilities'.

    Raises:
        ValueError: if the generated sequences are not 90 days long.

    NOTE(review): create_sequences can yield SEVERAL windows (batch > 1) while
    only one image tensor (batch of 1) is built, and only probabilities[0] is
    reported — confirm callers pass exactly 90 rows of ts_data so batches match.
    """
    model.to(device)
    model.eval()
    
    # Scale with the previously fitted scaler, then window into sequences
    ts_scaled = preprocessor.preprocess_time_series(ts_data, is_train=False)
    ts_sequence = preprocessor.create_sequences(ts_scaled)
    
    # The model was trained on 90-day windows
    if ts_sequence.shape[1] != 90:
        raise ValueError(f"时序数据序列长度应为90，实际为{ts_sequence.shape[1]}")
    
    # To tensors on the target device
    ts_tensor = torch.FloatTensor(ts_sequence).to(device)
    img_tensor = preprocessor.preprocess_image(img_data).unsqueeze(0).to(device)
    
    # Forward pass without gradients
    with torch.no_grad():
        outputs = model(ts_tensor, img_tensor)
        _, preds = torch.max(outputs, 1)
        probabilities = torch.softmax(outputs, dim=1).cpu().numpy()[0]
    
    # Map class index to a human-readable decision
    decision_map = {0: "买入", 1: "不操作", 2: "卖出"}
    decision = preds.item()
    
    return {
        "decision_code": decision,
        "decision": decision_map[decision],
        "probabilities": {
            "买入": float(probabilities[0]),
            "不操作": float(probabilities[1]),
            "卖出": float(probabilities[2])
        }
    }

# Post-processing: veto model decisions that conflict with simple technical rules
def postprocess_decision(decision, latest_data):
    """
    Filter a model decision against the newest technical indicators to reduce
    false-signal risk. Returns the (possibly downgraded-to-hold) decision code.

    latest_data: frame whose LAST row holds the most recent indicator values.
    """
    latest = latest_data.iloc[-1]
    close = latest['close']
    ma10 = latest['ma10']
    rsi = latest['rsi']

    if decision == 0:
        # Confirm buys only when price sits above MA10 AND RSI is oversold (< 40);
        # otherwise downgrade to hold.
        return decision if (close > ma10 and rsi < 40) else 1

    if decision == 2:
        # Confirm sells only when price sits below MA10 AND RSI is overbought (> 60);
        # otherwise downgrade to hold.
        return decision if (close < ma10 and rsi > 60) else 1

    return decision

# Legacy single-symbol data loader (superseded by generate_sample_data)
def generate_sample_data_old(start_date='2021-01-01', days=1000):
    """Load symbol 002536, add indicator columns, and return (df, labels, dates).

    Only the first 1000 rows on/after ``start_date`` are kept. ``days`` is
    accepted for interface compatibility but unused (the cap is hard-coded),
    matching the original behavior.
    """
    # (Removed: the original also read all of F:/stock_data.csv into an unused
    # variable here — pure wasted I/O.)
    df = thx.read_stock_data_by_symbol(symbol='002536')
    df['date'] = pd.to_datetime(df['date'])
    df = df.set_index('date')

    # Same indicator pipeline as generate_sample_data()
    add_features(df)

    # Filter by date FIRST, then take labels from the same window: the original
    # sliced labels from the unfiltered frame, misaligning them with the
    # date-filtered features.
    df = df[df.index >= pd.to_datetime(start_date)]
    df.reset_index(inplace=True)
    df = df[0:1000]
    labels = df['action']
    df = df.drop(columns=['action'])
    date = df['date']

    return df, labels, date

def add_features(df):
    """Append MACD, KDJ, MA and RSI indicator columns to ``df`` in place."""
    df['macd'], df['macd_signal'], df['macd_histogram'] = thx.calculate_macd(df)
    df['kdj_k'], df['kdj_d'], df['kdj_j'] = thx.calculate_kdj(df)
    thx.calculate_ma(df)  # adds the moving-average columns in place
    df['rsi'] = thx.calculate_rsi(df)

def generate_sample_data(window_size=90, action_0_num=5000, action_1_num=5000, action_2_num=5000):
    """Build a class-balanced sample set across all symbols in F:/stock_data.csv.

    For each labeled date, collects the preceding ``window_size`` days of scaled
    features plus the path of the matching K-line chart image, capping each
    action class at its quota.

    Returns:
        (ts_sequences, img_paths, labels) as numpy arrays.
    """
    all_data = pd.read_csv("F:/stock_data.csv", dtype={"symbol": str})
    symbols = all_data['symbol'].unique()
    random.shuffle(symbols)  # randomize symbol order so quotas mix symbols

    all_ts_sequences = []
    all_img_paths = []
    all_labels = []
    n0 = n1 = n2 = 0
    target_total = action_0_num + action_1_num + action_2_num

    for symbol in symbols:
        symbol_daily_data = thx.read_stock_data_by_symbol(symbol=symbol)
        df = symbol_daily_data.copy()
        df['date'] = pd.to_datetime(df['date'])
        # (Removed a no-op: the original called df.set_index('date') and
        # discarded the returned frame.)

        add_features(df)
        print("预处理数据...")
        preprocessor = StockDataPreprocessor()
        ts_data = preprocessor.preprocess_time_series(df)

        results = all_data[all_data['symbol'] == symbol]
        for r in results.itertuples():
            # Locate the daily row for this labeled date; skip dates without a
            # full look-back window (the original produced ragged short
            # sequences here, which breaks np.array stacking).
            index = symbol_daily_data[symbol_daily_data['date'] == r.date].index[0]
            if index < window_size - 1:
                continue

            # Enforce the per-class quotas
            action = r.action
            if action == 0:
                if n0 >= action_0_num:
                    continue
                n0 += 1
            elif action == 1:
                if n1 >= action_1_num:
                    continue
                n1 += 1
            elif action == 2:
                if n2 >= action_2_num:
                    continue
                n2 += 1

            all_ts_sequences.append(ts_data[index - window_size + 1:index + 1])
            all_img_paths.append(f'F:/stock_data/data1/{symbol}/{r.date}.png')
            all_labels.append(r.action)

            # Stop early once every quota is filled
            if len(all_ts_sequences) == target_total:
                return np.array(all_ts_sequences), np.array(all_img_paths), np.array(all_labels)

    return np.array(all_ts_sequences), np.array(all_img_paths), np.array(all_labels)


# Main entry point: end-to-end data prep, training and evaluation
def main():
    set_seed()

    # 1. Build the sample set (replace with real data sources in production)
    print("生成模拟数据...")
    ts_sequences, image_paths, labels = generate_sample_data()

    # 2. Random 80/20 train/test split
    print("划分训练集和测试集...")
    train_indices, test_indices = train_test_split(range(len(ts_sequences)), test_size=0.2, shuffle=True)
    X_train_ts = ts_sequences[train_indices]
    X_train_img = image_paths[train_indices]
    y_train = labels[train_indices]

    X_test_ts = ts_sequences[test_indices]
    X_test_img = image_paths[test_indices]
    y_test = labels[test_indices]

    # 3. Datasets and loaders
    train_dataset = StockDataset(X_train_ts, X_train_img, y_train)
    test_dataset = StockDataset(X_test_ts, X_test_img, y_test)
    train_loader = DataLoader(train_dataset, batch_size=150, shuffle=True, num_workers=4)
    test_loader = DataLoader(test_dataset, batch_size=150, shuffle=False, num_workers=4)

    # 4. Model
    print("创建模型...")
    model = MultiModalStockPredictor(
        input_dim=len(ft),
        d_model=128,
        nhead_ts=8,
        num_layers_ts=6,
        freeze_layers_img=4,
        nhead_cross=4,
        dropout=0.3
    )
    # Resume from a previous checkpoint when one exists (the original loaded it
    # unconditionally and crashed with FileNotFoundError on a fresh run).
    if os.path.exists("best_model.pth"):
        model.load_state_dict(torch.load("best_model.pth"))

    # 5. Loss (with optional class weights), optimizer and LR schedule
    class_weights = torch.FloatTensor([1.0, 1.0, 1.0]).to(device)
    criterion = nn.CrossEntropyLoss(weight=class_weights)
    optimizer = optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-4)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.8)

    # 6. Train
    print("开始训练模型...")
    trained_model, history = train_model(
        model, train_loader, test_loader, criterion, optimizer, scheduler,
        num_epochs=100, device=device
    )

    # 7. Evaluate on the held-out split
    print("\n在测试集上评估模型...")
    trained_model.eval()
    all_preds = []
    all_labels = []

    for ts_data, img_data, labels in test_loader:
        ts_data = ts_data.to(device)
        img_data = img_data.to(device)

        with torch.no_grad():
            outputs = trained_model(ts_data, img_data)
            _, preds = torch.max(outputs, 1)

        all_preds.extend(preds.cpu().numpy())
        all_labels.extend(labels.numpy())

    print("\n分类报告:")
    print(classification_report(all_labels, all_preds, target_names=["买入", "不操作", "卖出"]))

if __name__ == "__main__":
    main()
