import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
import time
import network


class BatchIdx:
    """Produces batched index tensors for manual mini-batching on the GPU.

    Splits ``size`` samples into ``size // batch_size_input`` full batches;
    the remainder (``size % batch_size_input``) is dropped each epoch.
    """

    def __init__(self, size, batch_size_input, shuffle=False):
        # Full index pool; must stay intact so that, when shuffling, every
        # sample has a chance to appear in some epoch's batches.
        self.idx = np.arange(size)
        self.size = size
        self.batch_size = batch_size_input
        self.batch_num = size // batch_size_input
        self.shuffle = shuffle

    def get_batch_idx(self):
        """Return a (batch_num, batch_size) index tensor on the GPU."""
        if self.shuffle:
            np.random.shuffle(self.idx)
        # Bug fix: take a truncated *view* instead of reassigning self.idx.
        # Previously self.idx was permanently shortened on the first call, so
        # the dropped tail samples could never re-enter later epochs' shuffles.
        kept = self.idx[:self.batch_size * self.batch_num]
        return torch.from_numpy(kept.reshape((self.batch_num, -1))).cuda()


if __name__ == '__main__':
    # Load training data and reshape inputs to (N, 1, 5, 5) for the CNN.
    in_train = np.load('train_set/grid_array_11000_2024-11-22_13-41.npy')
    out_train = np.load('train_set/s_para_array_11000_2024-11-22_13-41.npy')
    data_size = len(in_train)
    in_train = in_train.reshape(-1, 1, 5, 5)

    # --- use personal dataloader ---
    # Alternative path using BatchIdx: keeps the whole dataset on the GPU
    # up front and indexes into it per batch.
    # in_train = torch.from_numpy(in_train).float().cuda()
    # out_train = torch.from_numpy(out_train).float().cuda()
    # batch_size = 200
    # batch_idx = BatchIdx(data_size, batch_size, shuffle=True)
    # batch_num = batch_idx.batch_num
    # --- use personal dataloader end ---

    # --- use pytorch dataloader ---
    in_train = torch.from_numpy(in_train).float()
    out_train = torch.from_numpy(out_train).float()
    train_dataset = TensorDataset(in_train, out_train)
    batch_size = 100
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    # Fix: take the batch count from the DataLoader itself. With
    # drop_last=False it yields ceil(data_size / batch_size) batches, so
    # data_size // batch_size under-counts (and skews the averaged loss)
    # whenever data_size is not an exact multiple of batch_size.
    batch_num = len(train_dataloader)
    # --- use pytorch dataloader end ---

    # Build the network and move it to the GPU.
    model = network.CnnSim()
    model = model.cuda()

    # Optimizer / loss setup.
    learning_rate = 0.0003
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.99), weight_decay=0.00007)
    epochs = 20000
    loss_fn = nn.MSELoss()

    # Learning-curve log file. Fix: use a context manager so the file is
    # flushed and closed even if training raises mid-run (it was previously
    # only closed on a clean finish); also avoids shadowing builtin `file`.
    time_stamp = time.strftime('%Y-%m-%d_%H-%M', time.localtime())
    with open('learning_curve/' + 'learning_curve_' + time_stamp + '.txt', 'w') as curve_file:

        # Start training.
        model.train()

        for epoch in range(epochs):
            train_loss = 0

            # --- use personal dataloader ---
            # batch_idx_tensors = batch_idx.get_batch_idx()
            # for i in range(batch_num):
            #     batch_idx_tensor = batch_idx_tensors[i]
            #     inputs = in_train[batch_idx_tensor]
            #     targets = out_train[batch_idx_tensor]
            # --- use personal dataloader end ---

            # --- use pytorch dataloader ---
            for inputs, targets in train_dataloader:
                inputs = inputs.cuda()
                targets = targets.cuda()
                # --- use pytorch dataloader end ---

                optimizer.zero_grad()
                outputs = model(inputs)
                loss = loss_fn(outputs, targets)
                loss.backward()
                optimizer.step()

                # Only accumulate on reported epochs: .item() forces a
                # GPU sync, so skipping it elsewhere keeps epochs cheap.
                if (epoch + 1) % 10 == 0 or epoch == 0:
                    train_loss += loss.item()

            # Report the mean per-batch loss every 10 epochs (and epoch 1).
            if (epoch + 1) % 10 == 0 or epoch == 0:
                train_loss = train_loss / batch_num
                print(f'Epoch {epoch + 1}/{epochs}, Train Loss: {train_loss:.6f}')
                curve_file.write(f'Epoch {epoch + 1}/{epochs}, Train Loss: {train_loss:.6f}\n')

            # Periodic checkpoint; intentionally overwrites the same file.
            if (epoch + 1) % 100 == 0:
                torch.save(model.state_dict(), 'model/cnn_sim' + time_stamp + '.pth')
                print('model saved')

    # Final checkpoint after the last epoch.
    torch.save(model.state_dict(), 'model/cnn_sim' + time_stamp + '.pth')
    print('model saved')
    print('training finished')