import os
import traceback

import torch
from torch import nn

from dataset_from_csv import DatasetFromCSV
from model.lstm.lstm_model import LSTMModel
from util.constant_util import BTC_CSV_PATH
from torch.utils.tensorboard import SummaryWriter

from util.model_util import save_model, summary_add_image, load_checkpoint_last_file

# Directory that receives TensorBoard event files.
log_dir = 'summar_log'
os.makedirs(log_dir, exist_ok=True)
# Writer that streams scalars/images/graphs to TensorBoard.
writer = SummaryWriter(log_dir=log_dir)

# Prefer the GPU when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# --- Hyperparameters ---
epoch_count = 50
batch_size = 1280 * 2
# Look back one full day (60 * 24 minutes) to predict the following data.
sequence_length = 60 * 24
# sequence_length = 30  # alternative: look back only 30 minutes
hidden_size = 20
output_size = 30 * 5
num_layers = 2
input_size = 6



# Train/test splits read from the same CSV; the split point is fixed at row 406621.
train_data = DatasetFromCSV(BTC_CSV_PATH, sequence_length, 30, training_ratio=0.7, valid_index=406621)
test_data = DatasetFromCSV(BTC_CSV_PATH, sequence_length, 30, training_ratio=0.3, valid_index=406621)

# Mini-batch loaders; shuffle is left at its default (False).
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size)

# Build the LSTM and move its parameters onto the selected device.
model = LSTMModel(input_size, hidden_size, output_size, num_layers).to(device)




# Huber-style loss: quadratic near zero, linear for large errors, so it is
# less sensitive to outliers than plain MSE.
criterion = nn.SmoothL1Loss()
learning_rate = 0.01
optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate)

# Resume from the most recent checkpoint, if one exists.
epoch_index = 0
checkpoint = load_checkpoint_last_file('checkpoint')
if checkpoint is not None:
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch_index = checkpoint['epoch']

# Record the model graph in TensorBoard, using one real batch as example input.
sample_inputs = next(iter(train_loader))[0].to(torch.float32).to(device)
writer.add_graph(model, input_to_model=sample_inputs)

try:
    model.train()  # enable training mode (dropout/batch-norm behavior)
    # Number of batches the loader yields per epoch. Using len(train_loader)
    # (ceil(len(train_data) / batch_size) with the default drop_last=False)
    # fixes the old int(len(train_data) / batch_size), which floored and made
    # global steps collide across epochs when the dataset size is not a
    # multiple of batch_size.
    steps_per_epoch = len(train_loader)
    # Resume at epoch_index when a checkpoint was restored above.
    for epoch in range(epoch_index, epoch_count):
        epoch_loss = 0.0  # running sum of batch losses for this epoch
        for i, data in enumerate(train_loader):
            x_batch = data[0].to(torch.float32).to(device)
            y_batch_all = data[1].to(torch.float32).to(device)
            y_batch = y_batch_all[:, :, 1:]  # drop the first feature (the timestamp)

            # Forward pass.
            outputs = model(x_batch)
            loss = criterion(outputs, y_batch.reshape(-1, outputs.shape[-1]))

            # Backward pass and parameter update.
            optimizer.zero_grad()  # clear accumulated gradients
            loss.backward()
            optimizer.step()       # apply gradients, scaled by the learning rate

            global_step = epoch * steps_per_epoch + i
            writer.add_scalar('train_loss', loss.item(), global_step)
            epoch_loss += loss.item()

            if i % 600 == 0:
                summary_add_image(writer, x_batch, outputs, y_batch_all, global_step)

            if i % 1000 == 0:
                save_model(model, optimizer, epoch, loss)

        # Report the mean training loss for the finished epoch. (Previously
        # epoch_loss was initialized but never accumulated or logged.)
        writer.add_scalar('epoch_mean_loss', epoch_loss / max(steps_per_epoch, 1), epoch)

except Exception as e:
    # Best-effort top-level guard: report the failure without crashing the
    # process, preserving the original behavior.
    print(e)
    traceback.print_exc()





