import os
import torch
import random
import datetime
from tqdm.notebook import tqdm
from torch.utils.tensorboard import SummaryWriter

from src.Homo3D_full_acc import Homo3D
from src.Homo3D import Homo3D as H3D
from src.HomoCNN_full import HomoCNN
from src.DataSet_full import *
from src.MPELoss_with_backward_full_acc import *
from src.accuracy_full import *


# TensorBoard run directory, timestamped so successive runs don't collide.
tbdir = os.path.join('./runs/p0', datetime.datetime.now().strftime("%m-%d@%H-%M-%S"))
writer = SummaryWriter(tbdir)

# Prefer GPU when available; all tensors/models below are moved to this device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)


# hyperparameters
r = 36                      # homogenization grid resolution per axis
voxel_length = 36           # voxel cube edge length expected by the dataset
case = 125                  # number of cases loaded by getDataLoader — TODO confirm meaning against DataSet_full
lr = 3e-4                   # Adam learning rate
batch_size = 8
dir_path = '../data/shear/' # root of the shear dataset on disk
net = HomoCNN().to(device)
net = torch.nn.DataParallel(net)  # wraps the model for multi-GPU data parallelism
# Full-accuracy homogenization solver; anchor()/indices() precompute its internal state.
H = Homo3D(r, r, r, 1, 1, 1)
H.anchor()
H.indices()
# Second (plain) Homo3D instance, used only inside get_accuracy.
H1 = H3D(r,r,r,1,1,1)
# Module-level flag read by train(); when False the training loop body is skipped entirely.
train_single_batch = True

### test Ke Fe




# NOTE(review): the train loader yields 10-tuples (see train's unpacking) while
# test() below unpacks 7-tuples from test_loader — verify against DataSet_full.
train_loader, test_loader = getDataLoader(dir_path, data_type = '_shear', voxel_length = voxel_length, case = case, batch_size = batch_size)


def train(model, H, H1, train_loader, device, epochs, lr):
    """Train *model* with the custom MPE loss and log progress to TensorBoard.

    Args:
        model: network to optimize (caller wraps it in DataParallel).
        H: full-accuracy Homo3D solver consumed by the loss and by get_accuracy.
        H1: second Homo3D instance, forwarded to get_accuracy only.
        train_loader: yields 10-tuples (inputs, voxels, hard/soft element
            stiffness and force, C_hard, C_soft, jacobi, index) — assumed from
            the unpacking below; confirm against DataSet_full.
        device: torch device to train on.
        epochs: number of full passes over train_loader.
        lr: Adam learning rate.

    Side effects: writes scalars to the module-level ``writer``, prints
    per-step losses, reads the module-level flag ``train_single_batch``
    (when False, every batch is skipped — original behavior preserved),
    and saves the model to ./model_shear.pkl plus a timestamped checkpoint.
    """
    loss_func = MPELoss_with_backward.apply
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model.to(device)
    model.train()  # fix: make training mode explicit (dropout/batch-norm)
    steps_per_epoch = len(train_loader)  # hoisted: loop-invariant
    for e in tqdm(range(epochs)):
        # 'input' renamed to 'inputs' so the builtin is not shadowed.
        for step, (inputs, voxels, hard_element_stiffness, hard_element_force,
                   soft_element_stiffness, soft_element_force,
                   C_hard, C_soft, jacobi, index) in enumerate(tqdm(train_loader)):

            if not train_single_batch:
                continue  # global flag gates the whole loop body (as originally)

            global_step = step + e * steps_per_epoch
            output = model(inputs).to(device)
            loss = loss_func(output, H, voxels,
                             hard_element_stiffness, hard_element_force,
                             soft_element_stiffness, soft_element_force, index)

            writer.add_scalar('Loss', loss.item(), global_step)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print(step, 'loss: ', loss.item())

            # Periodic accuracy evaluation on the current training batch.
            if step % 10 == 0:
                batch_error_mean, batch_error = get_accuracy(
                    H, H1, C_hard, C_soft, voxels, output,
                    hard_element_stiffness, hard_element_force,
                    soft_element_stiffness, soft_element_force, jacobi)
                print('Epoch: ', e, '| train loss: %.4f' % loss.item(),
                      '| test accuracy average: %.4f' % batch_error_mean)
                writer.add_scalar('Accuracy_Avg', batch_error_mean, global_step)
                for i in range(len(batch_error)):
                    scalar_name = 'Accuracy' + str(i)
                    writer.add_scalar(scalar_name, batch_error[i], global_step)
    writer.close()
    # NOTE(review): this pickles the DataParallel wrapper itself; loading will
    # require the same class layout — the state_dict checkpoint below is the
    # portable artifact.
    torch.save(model, './model_shear.pkl')
    torch.save({'epoch': epochs + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()},
               "./train_model_single_batch"
               + datetime.datetime.now().strftime("%m-%d@%H-%M-%S") + '.pth.tar')


def test(model, H, test_loader, device):
    """Evaluate *model* on *test_loader*, printing per-batch accuracy errors.

    Args:
        model: trained network.
        H: Homo3D solver passed through to get_accuracy.
        test_loader: yields 7-tuples (voxels, hard/soft element stiffness and
            force, C_hard, C_soft) — assumed from the unpacking below; note
            this differs from the 10-tuple train loader, verify DataSet_full.
        device: torch device to evaluate on.

    NOTE(review): reads the module-level ``H1`` instead of taking it as a
    parameter (unlike train()); also calls get_accuracy without the ``jacobi``
    argument that train() passes — confirm get_accuracy's signature allows it.
    """
    model.to(device)
    model.eval()  # fix: inference must not run dropout/batch-norm in train mode
    with torch.no_grad():
        for step, (voxels, hard_element_stiffness, hard_element_force,
                   soft_element_stiffness, soft_element_force,
                   C_hard, C_soft) in enumerate(tqdm(test_loader)):
            output = model(voxels).to(device)
            batch_error_mean, batch_error = get_accuracy(
                H, H1, C_hard, C_soft, voxels, output,
                hard_element_stiffness, hard_element_force,
                soft_element_stiffness, soft_element_force)
            print('batch: ', step, '| batch_error_mean: %.4f' % batch_error_mean)
            for i in range(len(batch_error)):
                print(i, ' error: %.4f' % batch_error[i])



# Entry point: a single 1-epoch training run on the shear dataset.
train(net,H,H1,train_loader,device,epochs=1,lr=lr)
# Alternative: resume from a saved pickle instead of training from scratch.
# pre_net = torch.load('./model.pkl')
# pre_net = pre_net['state_dict']
# train(pre_net,H,H1,train_loader,device,epochs=5000,lr=lr)

# # optimizer = torch.optim.SGD(net.parameters(), lr=3e-4, weight_decay=1e-2)
# # optimizer = torch.optim.Adadelta(net.parameters(),lr = 1e-4)
# optimizer = torch.optim.Adam(net.parameters(), lr=1e-5)
# loss_func = MPELoss_with_backward.apply

# # # def get_accuracy():
# # #     return 1.0

# train_single_batch = False

# if (train_single_batch):
#     for step, (voxels, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force, C_hard, C_soft) in enumerate(
#             tqdm(train_loader)):


#         # ### test Ke Fe
#         # Ke, Fe = H.hexahedron(C_hard[0].view(6,6))
#         # print(C_hard[0])
#         # print((Ke - hard_element_stiffness[0]<1e-6).all())
#         # print((Fe - hard_element_force[0]<1e-6).all())

#         # break
        

#         for times in tqdm(range(1, 500)):
#             out = net(voxels).to(device)

#             loss = loss_func(out, H, voxels, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force)
#             # loss = loss_func(H, out, elastic_train, voxels)

#             writer.add_scalar('Loss', loss.item(), times)

#             optimizer.zero_grad()
#             loss.backward()
#             optimizer.step()

#             print(times, loss.item())

#             for name, param in net.named_parameters():
#                 if 'weight' in name:
#                     # print(name)
#                     writer.add_histogram(name + '_grad', param.grad, times)
#                     writer.add_histogram(name + '_weight', param, times)



#             if times % 20 == 0 or times == 1:
#                 output = net(voxels).to(device)
#                 batch_error_mean, batch_error = get_accuracy(H, H1, C_hard.type(torch.float64), C_soft.type(torch.float64), voxels, output, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force)
#                 # accuracy, accuracy_avg = get_accuracy_with_nomralized_force_True(H, elastic_train.type(torch.float64), voxels, output, transform_train, element_force)
#                 print('Epoch: ', times, '| train loss: %.4f' % loss.item(), '| test accuracy average: %.4f' % batch_error_mean)
#                 writer.add_scalar('Accuracy_Avg', batch_error_mean, times)
#                 for i in range(len(batch_error)):
#                     scalar_name = 'Accuracy' + str(i)
#                     writer.add_scalar(scalar_name, batch_error[i], times)
                    
#         break
#     writer.close()
#     torch.save({'epoch': times + 1, 'state_dict': net.state_dict(), 'optimizer': optimizer.state_dict()}, "./train_model_single_batch"+str(datetime.datetime.now().strftime("%m-%d@%H-%M-%S"))+'.pth.tar')
# else:
#     num_epoches = 2