import torch
import pandas as pd
from torch import nn
from torch.utils.data import DataLoader, Dataset
import sys
import numpy as np
from sklearn.preprocessing import StandardScaler 
# Select the compute device; use the GPU to accelerate computation when available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Model file name configuration
MODEL_PTH_FILE = "model_stock.pth"     # PyTorch weight (state_dict) file
MODEL_ONNX_FILE = "stock_model.onnx"   # exported ONNX model file

# Custom Dataset wrapping the preprocessed stock CSV
class StockDataset(Dataset):
    """Tabular stock dataset read from a CSV file.

    Assumes the first two columns are Date and the `peek` label and every
    remaining column is a numeric feature — confirm against the CSV layout.
    Features are standardized (zero mean, unit variance) over the whole file.
    """

    def __init__(self, csv_file):
        self.data = pd.read_csv(csv_file)

        # Feature matrix: every column from index 2 onward, as float32
        raw_features = self.data.iloc[:, 2:].values.astype(np.float32)

        # Integer class labels come from the `peek` column
        self.labels = self.data['peek'].values.astype(np.int64)

        # Fit-and-apply standardization on the full dataset in one step
        self.features = StandardScaler().fit_transform(raw_features)

        print(f"数据集大小: {len(self.data)}")
        print(f"特征维度: {self.features.shape[1]}")
        print(f"标签分布: {np.bincount(self.labels)}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        x = torch.tensor(self.features[idx])
        y = torch.tensor(self.labels[idx])
        return x, y

# Load the stock dataset from the preprocessed CSV file
training_data = StockDataset('processed_stock_data.csv')
# All data is used for training — no train/test split, so the "test"
# metrics reported later are training-set metrics (optimistic).
test_data = training_data

# Batch size for both loaders
batch_size = 64

# DataLoaders for training and evaluation (no shuffling)
train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)

# Print basic information about the data
def show_data_info(dataloader):
    """Print shapes and a sample row from the first batch of *dataloader*.

    Does nothing if the dataloader yields no batches.
    """
    for features, labels in dataloader:
        print("Shape of X [N, Features]:", features.shape)
        print("Shape of y:", labels.shape, labels.dtype)
        print("Sample features:", features[0][:10])  # first 10 feature values
        print("Sample label:", labels[0].item())
        return
    
# Print a summary of the first evaluation batch
show_data_info(test_dataloader)


# Fully-connected neural network for tabular data
class NeuralNetwork(nn.Module):
    """MLP classifier mapping an input_size-dim feature vector to 3-class logits.

    The attribute is named `network` so saved state_dicts keep keys of the
    form `network.<idx>.<param>`.
    """

    def __init__(self, input_size):
        super().__init__()
        layers = [
            nn.Linear(input_size, 128), nn.ReLU(), nn.Dropout(0.2),
            nn.Linear(128, 64), nn.ReLU(), nn.Dropout(0.2),
            nn.Linear(64, 32), nn.ReLU(),
            nn.Linear(32, 3),  # logits for the three classes: 0, 1, 2
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        return self.network(x)


# Infer the feature dimensionality from the dataset and build the model
input_size = training_data.features.shape[1]
model = NeuralNetwork(input_size).to(device)
print(f"模型输入特征维度: {input_size}")
loss_fn = nn.CrossEntropyLoss()#loss function for 3-class classification
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)  # Adam optimizer, a good default for tabular data

# Training: one full pass over the dataloader
def train(dataloader, model, loss_fn, optimizer):
    """Run a single training epoch over *dataloader*.

    Args:
        dataloader: yields (features, labels) batches; must wrap a Dataset so
            len(dataloader.dataset) gives the total sample count.
        model: network to optimize; must have at least one parameter.
        loss_fn: criterion comparing logits to integer class labels.
        optimizer: optimizer stepping `model`'s parameters.
    """
    size = len(dataloader.dataset)
    # Bug fix: test() switches the model to eval mode after every epoch,
    # which silently disabled Dropout for all epochs after the first.
    # Explicitly re-enable training mode here.
    model.train()
    # Move batches to wherever the model's parameters live, instead of
    # depending on a module-level `device` global.
    device = next(model.parameters()).device
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # Forward pass and loss
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Periodic progress report (every 100 batches)
        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss:{loss:>7f} [{current:>5d}/{size:>5d}]")
def export_onnx():
    """Reload the trained weights from disk and export them as an ONNX model.

    Reads MODEL_PTH_FILE, rebuilds the network on CPU, and writes
    MODEL_ONNX_FILE with named input/output tensors.
    """
    exported = NeuralNetwork(input_size)
    exported.load_state_dict(torch.load(MODEL_PTH_FILE))
    exported.eval()  # inference mode: disables dropout for a stable graph
    sample = torch.randn(1, input_size)  # dummy batch of one row to trace the graph
    torch.onnx.export(
        exported,
        sample,
        MODEL_ONNX_FILE,
        opset_version=10,        # ONNX operator set version
        input_names=["input"],   # name of the graph input
        output_names=["output"], # name of the graph output
    )
    print(f"ONNX model exported to {MODEL_ONNX_FILE}")
def stock_train():
    """Train the module-level model for a fixed number of epochs, then save it.

    Alternates a training pass and an evaluation pass each epoch and finally
    writes the weights (state_dict) to MODEL_PTH_FILE.
    """
    epochs = 100  # number of full passes over the data
    for epoch in range(epochs):
        print(f"Epoch {epoch + 1}\n------------------")
        train(train_dataloader, model, loss_fn, optimizer)
        test(test_dataloader, model, loss_fn)
    print("Done!")

    # Persist the trained weights so inference/export can reload them
    torch.save(model.state_dict(), MODEL_PTH_FILE)
    print(f"Save PyTorch Model State to {MODEL_PTH_FILE}")
def stock_inference():
    """Load the saved model file and run an inference demonstration.

    Reports overall and per-class accuracy over the test dataloader, then runs
    a single-sample demo with per-class probabilities. Returns early (with a
    hint to train first) if the weight file does not exist.
    """
    # Check that the model file exists before attempting to load it
    import os
    if not os.path.exists(MODEL_PTH_FILE):
        print(f"错误：模型文件 {MODEL_PTH_FILE} 不存在！")
        print("请先运行 stock_train() 进行模型训练")
        return

    # Rebuild the architecture and load the trained weights
    model_inference = NeuralNetwork(input_size).to(device)
    model_inference.load_state_dict(torch.load(MODEL_PTH_FILE))
    model_inference.eval()  # inference mode: disables dropout

    print(f"模型加载成功：{MODEL_PTH_FILE}")
    print(f"模型输入维度：{input_size}")

    # Run inference over the test dataloader
    print("\n开始推理演示...")
    correct_predictions = 0
    total_predictions = 0
    # Per-class hit/total counters. Entries for unseen labels are created
    # lazily below, so an unexpected label value cannot raise KeyError.
    class_predictions = {c: {'correct': 0, 'total': 0} for c in (0, 1, 2)}

    with torch.no_grad():
        for batch_idx, (X, y) in enumerate(test_dataloader):
            X, y = X.to(device), y.to(device)

            # Model inference
            predictions = model_inference(X)
            predicted_labels = predictions.argmax(1)

            # Overall accuracy bookkeeping
            correct_batch = (predicted_labels == y).sum().item()
            correct_predictions += correct_batch
            total_predictions += y.size(0)

            # Per-class accuracy bookkeeping
            for true_label, pred_label in zip(y.tolist(), predicted_labels.tolist()):
                stats = class_predictions.setdefault(true_label, {'correct': 0, 'total': 0})
                stats['total'] += 1
                if true_label == pred_label:
                    stats['correct'] += 1

            # Show detailed results for the first few batches only
            if batch_idx < 3:
                print(f"\n批次 {batch_idx + 1}:")
                print(f"输入形状: {X.shape}")
                print(f"真实标签: {y[:10].cpu().numpy() if y.size(0) >= 10 else y.cpu().numpy()}")
                print(f"预测标签: {predicted_labels[:10].cpu().numpy() if predicted_labels.size(0) >= 10 else predicted_labels.cpu().numpy()}")
                print(f"批次准确率: {correct_batch}/{y.size(0)} = {correct_batch/y.size(0)*100:.2f}%")

    # Overall statistics; guard against an empty dataloader before dividing
    overall_accuracy = (correct_predictions / total_predictions * 100) if total_predictions else 0.0
    print(f"\n=== 推理结果统计 ===")
    print(f"总体准确率: {correct_predictions}/{total_predictions} = {overall_accuracy:.2f}%")

    print(f"\n各类别准确率:")
    for class_id, stats in class_predictions.items():
        if stats['total'] > 0:
            class_accuracy = stats['correct'] / stats['total'] * 100
            print(f"类别 {class_id}: {stats['correct']}/{stats['total']} = {class_accuracy:.2f}%")
        else:
            print(f"类别 {class_id}: 无数据")

    # Single-sample inference demonstration
    print(f"\n=== 单个样本推理演示 ===")
    sample_features, sample_label = test_data[0]
    sample_features = sample_features.unsqueeze(0).to(device)  # add batch dimension

    with torch.no_grad():
        prediction = model_inference(sample_features)
        predicted_probabilities = torch.softmax(prediction, dim=1)
        predicted_class = prediction.argmax(1).item()

    print(f"样本特征形状: {sample_features.shape}")
    print(f"真实标签: {sample_label.item()}")
    print(f"预测类别: {predicted_class}")
    print(f"各类别概率:")
    for i, prob in enumerate(predicted_probabilities[0]):
        print(f"  类别 {i}: {prob.item():.4f} ({prob.item()*100:.2f}%)")

    print(f"\n推理完成！")
# Evaluation function: average loss and accuracy over a dataloader
def test(dataloader, model, loss_fn):
    """Evaluate *model* on *dataloader* and print accuracy and mean loss.

    Switches the model to eval mode and runs without gradient tracking.
    """
    total_samples = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    loss_sum = 0.0
    hits = 0.0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            logits = model(X)
            loss_sum += loss_fn(logits, y).item()
            hits += (logits.argmax(1) == y).type(torch.float).sum().item()
    test_loss = loss_sum / num_batches
    correct = hits / total_samples
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")

if __name__ == '__main__':
    # Train the model
    stock_train()
    # Export the trained model to ONNX
    export_onnx()
    # Run the inference demonstration
    print("\n" + "="*50)
    print("开始测试模型推理功能...")
    print("="*50)
    stock_inference()
