import stockcnn
import torch
import database
from torch.utils.data import TensorDataset, DataLoader
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import matplotlib.pyplot as plt
import time


def showLoss(losses):
    """Plot the recorded per-batch training losses as a line chart.

    Args:
        losses: sequence of loss values collected during training.
    """
    # Draw the loss curve on a dedicated 10x5-inch figure.
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.plot(losses, label="Training Loss")

    # Legend, title and axis labels.
    ax.legend()
    ax.set_title("Model Loss Over Time")
    ax.set_xlabel("Epoch")
    ax.set_ylabel("Loss")
    # Render the chart window.
    plt.show()


def getTrainData():
    """Fetch training samples from the database and wrap them in a DataLoader.

    Returns:
        A shuffled DataLoader yielding (features, labels) batches, with
        features reshaped to (N, 1, 28, 28) and labels to (N, 15).
    """
    # NOTE(review): the first value (ranges) returned by the database is unused here.
    ranges, raw_features, raw_targets = database.getTrainData(0, 50000)

    # assumes each sample flattens to 28*28 floats and each target to 15 — TODO confirm
    # against database.getTrainData's output layout
    feature_tensor = torch.tensor(raw_features, dtype=torch.float32).view(-1, 1, 28, 28)
    target_tensor = torch.tensor(raw_targets, dtype=torch.float32).view(-1, 15)

    # Build the dataset and serve it in shuffled batches of 5120 samples.
    dataset = TensorDataset(feature_tensor, target_tensor)
    return DataLoader(dataset, batch_size=1024 * 5, shuffle=True)


# 训练函数
# Training entry point
def train():
    """Fine-tune the saved CNN checkpoint on data loaded from the database.

    Loads ./cnnmodel-2000.model, trains for `epochs` passes over the
    DataLoader using MSE loss and Adam, then saves the updated model to
    ./cnnmodel.model.
    """
    epochs = 3000  # number of training epochs
    print("开始加载数据...")
    data_loader = getTrainData()

    # Prefer CUDA when available, otherwise fall back to CPU.
    device = "cpu"
    if torch.cuda.is_available():
        print("Cuda is available!")
        device = "cuda"
    # Resume from an earlier checkpoint instead of a fresh model:
    # cnnmodel = stockcnn.StockCnn()
    # NOTE(review): torch.load of a full pickled model can execute arbitrary
    # code — only load checkpoints from a trusted source.
    cnnmodel = torch.load("./cnnmodel-2000.model")
    cnnmodel.to(device)
    # Loss function
    criterion = torch.nn.MSELoss()
    # Optimizer
    optimizer = torch.optim.Adam(cnnmodel.parameters(), lr=0.001)
    print("开始训练...")
    losses = []
    for epoch in range(epochs):
        cnnmodel.train()  # enable training mode (dropout/batch-norm behavior)
        # Track the last batch loss so the summary print below is safe even
        # if the loader yields no batches (previously a NameError).
        last_loss = None
        start_time = time.perf_counter()
        # Iterate over the training batches.
        for data, targets in data_loader:
            data = data.to(device)
            targets = targets.to(device)
            # The model returns a tuple; its first element is the prediction.
            output = cnnmodel(data)[0]
            loss = criterion(output, targets)  # loss between output and targets
            last_loss = loss.item()
            losses.append(last_loss)  # record the loss value
            optimizer.zero_grad()  # clear accumulated gradients
            loss.backward()  # backpropagate to compute parameter gradients
            optimizer.step()  # update weights toward lower loss

        end_time = time.perf_counter()
        # Elapsed time for this epoch, in milliseconds.
        time_diff_ms = (end_time - start_time) * 1000
        print(
            f"Epoch {epoch+1}/{epochs}, Loss: {last_loss} elapse: {time_diff_ms} ms"
        )
    # showLoss(losses)
    torch.save(cnnmodel, "./cnnmodel.model")
    print("训练完成")

# Run training only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    train()