from prob import *
import torch
from torch import optim
import time
import numpy as np
import loss_lisan
from NN import ann

# Use float64 everywhere: barrier-certificate training is sensitive to
# numerical precision, so all tensors default to double precision.
torch.set_default_dtype(torch.float64)
# NOTE(review): torch.set_default_tensor_type is deprecated in newer PyTorch
# releases (superseded by set_default_dtype / set_default_device); kept as-is
# here to avoid changing behavior on the pinned torch version.
torch.set_default_tensor_type(torch.DoubleTensor)


class weightConstraint():
    """Callable weight clamp for use with ``nn.Module.apply``.

    When invoked on a module that owns a ``weight`` attribute, its weight
    values are clamped into the interval [0.0, 100.0]; modules without a
    ``weight`` attribute are ignored.
    """

    def __init__(self):
        pass

    def __call__(self, module):
        # Only touch modules that actually carry learnable weights.
        if hasattr(module, 'weight'):
            clamped = module.weight.data.clamp(0.0, 100.0)
            module.weight.data = clamped


def train(model, data_init, data_unsafe, data_domain):
    """Train *model* on the sampled init/unsafe/domain data sets.

    Runs up to ROUND_NUMS restart rounds; each round optimizes for up to
    EPOCHS_NUMS epochs with Adam and stops early once the accumulated epoch
    loss drops below LOSS_OPT_FLAG.  The constants (ROUND_NUMS, BATCH_SIZE,
    EPOCHS_NUMS, LOSS_OPT_FLAG, USE_CDINN, VERBOSE) come from ``prob`` via
    the star import at the top of the file.

    Args:
        model: network to train (see NN.ann.gen_nn).
        data_init: samples drawn from the initial set.
        data_unsafe: samples drawn from the unsafe set.
        data_domain: samples drawn from the whole domain.

    Returns:
        (model, train_succ, verify_error): ``train_succ`` is True when the
        loss target was reached; ``verify_error`` is True when it was reached
        already in the very first epoch (flagged as suspicious by the
        original logic).
    """
    train_succ = False
    verify_error = False  # defined up-front so the return can never NameError

    data_init = np.array(data_init)
    data_unsafe = np.array(data_unsafe)
    data_domain = np.array(data_domain)

    # The clamp object is stateless, so build it once instead of once per
    # batch as the original did.
    constraints = weightConstraint()

    # `round_idx` instead of `round`: don't shadow the builtin.
    for round_idx in range(ROUND_NUMS):
        train_succ = False
        verify_error = False
        train_start = time.time()

        optimizer = optim.Adam(model.parameters(), lr=0.01)
        # Whole batches available per data set (floor division makes the
        # truncation explicit instead of float-divide + int32 cast).
        batch_counts = np.array(
            [data_unsafe.shape[0] // BATCH_SIZE,
             data_init.shape[0] // BATCH_SIZE,
             data_domain.shape[0] // BATCH_SIZE],
            dtype=np.int32,
        ).squeeze()
        batch_nums = max(batch_counts)
        # Print about four progress lines per epoch; floor at 1 so the modulo
        # below never divides by zero on tiny data sets.
        print_block = max(1, int(batch_nums / 4))

        # Cycle the smaller data sets so every iteration gets one batch from
        # each set.  E.g. BATCH_SIZE=4, sizes [3872, 6840, 86052] ->
        # batch_counts [968, 1710, 21513] -> batch_nums 21513, and the unsafe
        # indices repeat 0..967.
        unsafe_list = np.arange(batch_nums) % batch_counts[0]
        init_list = np.arange(batch_nums) % batch_counts[1]
        domain_list = np.arange(batch_nums) % batch_counts[2]

        epoch_loss = float("inf")  # well-defined even if EPOCHS_NUMS == 0
        max_ep = 0

        for ep in range(EPOCHS_NUMS):
            epoch_loss = 0

            np.random.shuffle(init_list)
            np.random.shuffle(unsafe_list)
            np.random.shuffle(domain_list)

            for batch_index in range(batch_nums):
                u = unsafe_list[batch_index]
                i = init_list[batch_index]
                d = domain_list[batch_index]
                batch_unsafe = torch.tensor(data=data_unsafe[u * BATCH_SIZE:(u + 1) * BATCH_SIZE, :])
                batch_init = torch.tensor(data=data_init[i * BATCH_SIZE:(i + 1) * BATCH_SIZE, :])
                batch_domain = torch.tensor(data=data_domain[d * BATCH_SIZE:(d + 1) * BATCH_SIZE, :])

                optimizer.zero_grad()
                batch_loss, loss_init, loss_unsafe, loss_domain = loss_lisan.calc_loss(
                    model, batch_init, batch_unsafe, batch_domain)
                curr_batch_loss = batch_loss.item()
                # NOTE(review): retain_graph=True kept from the original --
                # confirm loss_lisan really needs the graph retained; if not,
                # it only wastes memory.
                batch_loss.backward(retain_graph=True)
                optimizer.step()

                if USE_CDINN:
                    # Clamp every hidden/output weight into [0, 100] after the
                    # optimizer step (see weightConstraint above).
                    for layer in model.hidden_layers1:
                        layer.apply(constraints)
                    for layer in model.hidden_layers2:
                        layer.apply(constraints)
                    model._modules['output_layer_linear_prim1'].apply(constraints)
                    model._modules['output_layer_linear_prim2'].apply(constraints)

                epoch_loss += curr_batch_loss

                # Periodic progress report (~4 lines per epoch).
                if VERBOSE and (batch_index + 1) % print_block == 0:
                    print("epoch: %d/%d, batch: %d/%d, " % (ep + 1, EPOCHS_NUMS, batch_index + 1, batch_nums), end="")
                    print("batch loss: %f, epoch_loss: %f\n" % (curr_batch_loss, epoch_loss))

            if epoch_loss < LOSS_OPT_FLAG:
                max_ep = ep + 1
                break

        if epoch_loss < LOSS_OPT_FLAG:
            train_succ = True
            # Converging within a single epoch is treated as suspicious and
            # flagged for re-verification by the caller.
            if max_ep == 1:
                verify_error = True
            break  # success: leave the restart loop
        train_end = time.time()  # per-round wall time (was written to a log file)
    return model, train_succ, verify_error


if __name__ == "__main__":

    model = ann.gen_nn()
    # Optionally warm-start from a checkpoint:
    # model.load_state_dict(torch.load("ann-model/ann-trained_eg2_0-1_relu.pt"), strict=True)
    train_start_time = time.time()
    # NOTE(review): the original called `train(model)` and unpacked two
    # values, but `train` takes (model, data_init, data_unsafe, data_domain)
    # and returns three values -- both raised at runtime.  The data sets are
    # assumed to be exported by `prob` (star-imported above) -- TODO confirm.
    model, train_succ_flag, verify_error_flag = train(model, data_init, data_unsafe, data_domain)
    train_end_time = time.time()

    print("flag: ", train_succ_flag)
    print(f"训练时间是{train_end_time - train_start_time}")