import os
import time
import traceback

import torch
from torch import nn
from torch.optim.lr_scheduler import StepLR
from tqdm import tqdm

from dataset_from_csv import DatasetFromCSV
from model.lstm.lstm_model import LSTMModel
from util.constant_util import BTC_CSV_PATH
from torch.utils.tensorboard import SummaryWriter

from util.model_util import save_model, summary_add_image, load_checkpoint_last_file
from util.yaml_util import load_yaml

class LSTMPredictionModel(nn.Module):
    """Train an LSTM price-prediction model on CSV-backed BTC data.

    Wires together the dataset/loaders, the wrapped ``LSTMModel``, checkpoint
    resume, TensorBoard logging, and the training loop. The public interface
    (constructor defaults, method names, attribute names) is unchanged for
    existing callers.
    """

    def __init__(self, config_yaml_path=r'model/lstm/config/default_lstm_config.yaml', summar_log_dir='summar_log'):
        """Build the model, datasets and loaders from the YAML config.

        Args:
            config_yaml_path: Path to the YAML hyper-parameter file.
            summar_log_dir: Base directory for TensorBoard logs; a
                timestamped subdirectory is created per run.
        """
        super(LSTMPredictionModel, self).__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # One TensorBoard run directory per launch, keyed by start time.
        start_time = time.strftime("%Y-%m-%d_%H_%M_%S")
        summar_log_dir = os.path.join(summar_log_dir, start_time)
        os.makedirs(summar_log_dir, exist_ok=True)
        self.writer = SummaryWriter(log_dir=summar_log_dir)

        # Hyper-parameters, with fall-back defaults matching the original code.
        self.lstm_config = load_yaml(config_yaml_path)
        self.epoch_count = self.lstm_config.get('epoch_count', 50)
        self.batch_size = self.lstm_config.get('batch_size', 1280 * 2)
        # Window sizes: use the previous day (60 * 24 minutes) of history to
        # predict the following `predicted_length` steps.
        self.sequence_length = self.lstm_config.get('sequence_length', 60 * 24)
        self.predicted_length = self.lstm_config.get('predicted_length', 30)
        self.hidden_size = self.lstm_config.get('hidden_size', 20)
        self.output_size = self.lstm_config.get('output_size', 30 * 5)
        self.num_layers = self.lstm_config.get('num_layers', 2)
        self.input_size = self.lstm_config.get('input_size', 6)
        self.learning_rate = self.lstm_config.get('learning_rate', 0.001)
        self.valid_index = self.lstm_config.get('valid_index', 1070900)  # first usable row in the CSV
        # End index of the data slice; leave None to use everything. Set to a
        # small span (e.g. valid_index + sequence_length + predicted_length +
        # batch_size * 10) only for quick smoke tests.
        self.last_index = None

        self.train_data = DatasetFromCSV(BTC_CSV_PATH, self.sequence_length, self.predicted_length,
                                         training_ratio=0.7, valid_index=self.valid_index,
                                         last_index=self.last_index)
        self.test_data = DatasetFromCSV(BTC_CSV_PATH, self.sequence_length, self.predicted_length,
                                        training_ratio=0.3, valid_index=self.valid_index,
                                        last_index=self.last_index)
        self.train_loader = torch.utils.data.DataLoader(self.train_data, batch_size=self.batch_size)
        self.test_loader = torch.utils.data.DataLoader(self.test_data, batch_size=self.batch_size)

        self.model = LSTMModel(self.input_size, self.hidden_size, self.output_size,
                               self.num_layers).to(self.device)

        # Resume from the most recent checkpoint when one exists.
        self.epoch_index = 0
        self.checkpoint = load_checkpoint_last_file('checkpoint', self.lstm_config)
        if self.checkpoint is not None:
            self.model.load_state_dict(self.checkpoint['model_state_dict'])
            self.epoch_index = self.checkpoint['epoch']

        # Record the model graph using one real batch as the example input.
        self.writer.add_graph(self.model,
                              input_to_model=next(iter(self.train_loader))[0].to(torch.float32).to(self.device))

    def compile(self):
        """Create the loss, optimizer and LR scheduler.

        Restores the optimizer state from the checkpoint when resuming.
        Must be called before ``fit``.
        """
        # SmoothL1 (Huber) loss: a blend of L1 and L2, robust to outliers.
        self.criterion = nn.SmoothL1Loss()
        # 'initial_lr' is required by StepLR when resuming with last_epoch >= 0.
        self.optimizer = torch.optim.RMSprop(
            [{'params': self.model.parameters(), 'initial_lr': self.learning_rate}],
            lr=self.learning_rate)
        if self.checkpoint is not None:
            self.optimizer.load_state_dict(self.checkpoint['optimizer_state_dict'])
        # Decay the learning rate by 10x every 30 epochs.
        self.scheduler = StepLR(self.optimizer, step_size=30, gamma=0.1, last_epoch=self.epoch_index)

    def forward(self, x):
        """Delegate the forward pass to the wrapped LSTM model."""
        return self.model(x)

    def fit(self):
        """Run the training loop, resuming from ``self.epoch_index``.

        Logs the loss every step, emits TensorBoard images every 30 steps and
        checkpoints every 100 steps plus once at the end of each epoch.
        """
        try:
            self.train()  # put the module into training mode
            # Hoisted loop invariant: batches per epoch for the global step.
            steps_per_epoch = int(len(self.train_data) / self.batch_size)
            for epoch in tqdm(range(self.epoch_index, self.epoch_count), desc="Epoch"):
                last_step = None  # stays None if the loader yields no batches
                train_tqdm = tqdm(enumerate(self.train_loader), total=len(self.train_loader), desc="Train")
                for i, data in train_tqdm:
                    x_batch = data[0].to(torch.float32).to(self.device)
                    y_batch_all = data[1].to(torch.float32).to(self.device)
                    y_batch = y_batch_all[:, :, 1:]  # drop the first feature (timestamp)

                    # Forward pass; outputs shape e.g. (batch, predicted_length * features).
                    outputs = self(x_batch)
                    loss = self.criterion(outputs, y_batch.reshape(-1, outputs.shape[-1]))

                    # Backward pass and parameter update.
                    self.optimizer.zero_grad()  # clear gradients so they don't accumulate
                    loss.backward()
                    self.optimizer.step()

                    global_step = epoch * steps_per_epoch + i
                    self.writer.add_scalar('train_loss', loss.item(), global_step)

                    if i % 30 == 0:
                        summary_add_image(self.writer, x_batch, outputs, y_batch_all, global_step)
                    if i % 100 == 0:
                        save_model(self.model, self.optimizer, epoch, loss, self.lstm_config)
                    last_step = global_step

                # Fix: only log/save at epoch end when at least one batch ran;
                # previously x_batch/outputs/loss could be unbound (NameError)
                # on an empty loader.
                if last_step is not None:
                    summary_add_image(self.writer, x_batch, outputs, y_batch_all, last_step)
                    save_model(self.model, self.optimizer, epoch, loss, self.lstm_config)
                self.scheduler.step()

        except Exception as e:
            # NOTE(review): broad catch kept deliberately so a crash mid-training
            # is reported instead of killing the process; consider re-raising.
            print(e)
            traceback.print_exc()





