import os
import torch
import random
import datetime
from tqdm.notebook import tqdm
from torch.utils.tensorboard import SummaryWriter
from accelerate import Accelerator



from src.Homo3D_full import Homo3D
from src.Homo3D import Homo3D as H3D
from src.HomoCNN_full import HomoCNN
from src.DataSet_full import *
from src.MPELoss_with_backward_full import *
from src.accuracy_full import *


# TensorBoard run directory, timestamped so successive runs don't overwrite each other.
tbdir = os.path.join('./runs/p0', datetime.datetime.now().strftime("%m-%d@%H-%M-%S"))
writer = SummaryWriter(tbdir)

# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# print(device)

# HuggingFace Accelerate selects the device (GPU when available); `device`
# replaces the manual cuda/cpu selection commented out above.
accelerator = Accelerator()
device = accelerator.device

# hyperparameters
r = 36                # per-axis size passed to the Homo3D solvers
voxel_length = 36     # voxel-grid edge length handed to the data loader
case = 125            # number of cases for the data loader
lr = 3e-4             # Adam learning rate
batch_size = 8
dir_path = '../data/shear/'   # root directory of the shear dataset
net = HomoCNN().to(device)
# net = torch.nn.DataParallel(net)
H = Homo3D(r, r, r, 1, 1, 1)  # solver used inside the loss and accuracy checks
H1 = H3D(r,r,r,1,1,1)         # second solver variant, used only by get_accuracy
train_single_batch = True     # NOTE(review): assigned but never read in this file

### test Ke Fe


train_loader, test_loader = getDataLoader(dir_path, data_type = '_shear', voxel_length = voxel_length, case = case, batch_size = batch_size)
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
loss_func = MPELoss_with_backward.apply  # custom autograd.Function (manual backward)

# NOTE(review): the wrapped `model` and `data` returned by prepare() are never
# used below — train() is called with the raw `net` and `train_loader`.
# Confirm whether the Accelerate-prepared objects were intended instead.
model, optimizer, data = accelerator.prepare(net, optimizer, train_loader)

def train(model, optimizer, loss_func, H, H1, train_loader, device, epochs, pre_train = False):
    """Train `model` on `train_loader`, logging loss/error to the module-level
    TensorBoard `writer` and periodically checkpointing to disk.

    Args:
        model: network mapping the loader's first tensor to a prediction.
        optimizer: optimizer whose param groups must reference `model`'s parameters.
        loss_func: applied as loss_func(output, H, voxels, stiffness/force tensors, index)
            (MPELoss_with_backward.apply in this script).
        H, H1: Homo3D solver objects forwarded to the loss / get_accuracy.
        train_loader: yields 10-tuples; see the unpacking in the loop below.
        device: target torch device.
        epochs: number of passes over the loader.
        pre_train: if True, stop each epoch after step 30 and save a
            pre-training snapshot to ./model_shear_pre.pkl.
    """
    model.to(device)
    steps_per_epoch = len(train_loader)  # hoisted: invariant, used for the global step index
    for e in tqdm(range(epochs)):
        # `inputs` renamed from `input` to avoid shadowing the builtin.
        for step, (inputs, voxels, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force, C_hard, C_soft, jacobi, index) in enumerate(tqdm(train_loader)):

            output = model(inputs).to(device)
            loss = loss_func(output, H, voxels, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force, index)

            global_step = step + e * steps_per_epoch
            writer.add_scalar('Loss', loss.item(), global_step)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print(step, 'loss: ', loss.item())

            if step % 10 == 0 :
                # Periodic accuracy check on the current training batch.
                batch_error_mean, batch_error = get_accuracy(H, H1, C_hard, C_soft, voxels, output, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force,jacobi)
                print('Epoch: ', e, '| train loss: %.4f' % loss.item(), '| test error average: %.4f' % batch_error_mean)
                writer.add_scalar('Error_Avg', batch_error_mean, global_step)
                for i in range(len(batch_error)):
                    writer.add_scalar('Error' + str(i), batch_error[i], global_step)

            if pre_train and step == 30:
                # Pre-training run: snapshot after 31 steps and end this epoch early.
                torch.save(model,'./model_shear_pre.pkl')
                break
        if e % 10 == 1:
            torch.save(model,'./model_shear_' + str(e) +'.pkl')
    # BUGFIX: flush instead of close. This function runs twice (pre-train, then
    # full train) against the same module-level writer; closing it here made
    # every add_scalar call in the second run write to a closed SummaryWriter.
    writer.flush()
    torch.save(model,'model_shear_finish.pkl')
    torch.save({'epoch': epochs + 1, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, "./train_model_single_batch"+str(datetime.datetime.now().strftime("%m-%d@%H-%M-%S"))+'.pth.tar')


def test(model, H, H1, test_loader, device):
    """Evaluate `model` on `test_loader` and print per-batch accuracy errors.

    Args:
        model: trained network; called directly on the loader's voxel tensor.
        H, H1: Homo3D solver objects forwarded to get_accuracy.
        test_loader: yields 7-tuples (voxels, stiffness/force tensors, C_hard, C_soft).
        device: target torch device.
    """
    model.to(device)
    model.eval()  # BUGFIX: switch to inference mode (dropout/batchnorm) for evaluation
    with torch.no_grad():
        for step, (voxels, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force, C_hard, C_soft) in enumerate(
            tqdm(test_loader)):
            # BUGFIX: move the *input* to the model's device; the original moved
            # only the output, which crashes when model is on CUDA and the
            # loader yields CPU tensors.
            output = model(voxels.to(device))
            # NOTE(review): unlike the training loop, this call omits `jacobi`;
            # confirm get_accuracy supports both arities.
            batch_error_mean, batch_error = get_accuracy(H, H1, C_hard, C_soft, voxels, output, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force)
            print('batch: ', step, '| batch_error_mean: %.4f' % batch_error_mean)
            for i in range(len(batch_error)):
                print(i, ' error: %.4f' % batch_error[i])



# Pre-train for one epoch (stops after step 30 and saves ./model_shear_pre.pkl),
# then reload that snapshot and run the full 50-epoch training.
pre_train = True
if pre_train:
    train(net, optimizer=optimizer, loss_func=loss_func, H=H, H1=H1, train_loader=train_loader, device=device, epochs=1, pre_train= True)
# BUGFIX: map_location so the pickle loads even when saved on a different device.
pre_net = torch.load('./model_shear_pre.pkl', map_location=device)
# BUGFIX: rebuild the optimizer over the *loaded* model's parameters. The old
# code reused `optimizer`, whose param groups still pointed at the original
# `net`'s tensors, so stepping it never updated `pre_net` during main training.
optimizer = torch.optim.Adam(pre_net.parameters(), lr=lr)
# train
train(pre_net, optimizer=optimizer, loss_func=loss_func, H=H, H1=H1, train_loader=train_loader, device=device, epochs=50, pre_train= False)

# test error
