import os
import torch
import random
import datetime
import scipy.io as scio
from tqdm.notebook import tqdm
from torch.utils.tensorboard import SummaryWriter


# print(sys.path)
# sys.path.append('/home/liuan/gitee/homo-cnn/')
# sys.path.append('/home/liuan/gitee/')
# from src.Homo3D_full_acc import Homo3D
# from src.Homo3D import Homo3D as H3D
# # from src.HomoCNN_full import HomoCNN
# from src.DataSet_full import *
# from src.MPELoss_with_backward_full_acc import *
# from src.accuracy_full import *
# from src.U_net import *

from src.UNet import *
from src.Error import *
from src.MPELoss import *
from src.Homo3D import *
from src.DataSet import *
from src.H3D import Homo3D as H3D
from src.DataParallel_my import *


def setup_seed(seed):
    """Seed every RNG used in training (torch CPU/CUDA, numpy, random).

    Also forces cuDNN into deterministic mode so repeated runs with the
    same seed produce identical results (at some speed cost).
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True
# Set the global random seed for reproducibility
setup_seed(100)
# Preprocess the data and train the model
# ...
# ...

torch.set_printoptions(
    precision=4,    # digits shown after the decimal point (default 4)
    threshold=200,  # element count above which tensors are summarized
    edgeitems=100,  # items shown at each edge of a summarized dimension
    linewidth=150,  # max characters per printed line before wrapping (default 80)
    profile=None,
    sci_mode=True  # display values in scientific notation
)


# TensorBoard log directory: one timestamped sub-directory per run
tbdir = os.path.join('./runs/unet', datetime.datetime.now().strftime("%m-%d@%H-%M-%S"))
writer = SummaryWriter(tbdir)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)


# hyperparameters
r = 36              # resolution passed to Homo3D (presumably voxels per axis -- TODO confirm)
voxel_length = 36   # voxel grid edge length forwarded to the data loader
case = 1250         # dataset size parameter forwarded to getDataLoader -- assumed; verify there
lr = 1e-4           # Adam learning rate
batch_size = 8
dir_path = '../data/new/Kelvin/'
# net = HomoCNN().to(device)
net = U_Net().to(device)
net = torch.nn.DataParallel(net)  # wrap for multi-GPU data parallelism
# gpu0_bsz = 12
# acc_grad = 1
# net = U_Net()
# net = BalancedDataParallel(gpu0_bsz // acc_grad, net, dim=0).cuda()


# Homogenization solvers: H is used by the loss, H1 by the error metric
H = Homo3D(r, r, r, 1, 1, 1)
H.anchor()
H.indices()
H1 = H3D(r,r,r,1,1,1)
train_single_batch = True  # NOTE(review): appears unused in this file

### test Ke Fe


# ### test DataParallel_my 
# batch_szie = 8
# gpu0_bsz = 2
# acc_grad = 1
# net = U_Net().to(device)
# net = BalancedDataParallel(gpu0_bsz // acc_grad, net, dim=0).cuda()




# Build train/test loaders over the synthetic ('_syn') dataset
train_loader, test_loader = getDataLoader(dir_path, data_type = '_syn', voxel_length = voxel_length, case = case, batch_size = batch_size)
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# Custom autograd Function; .apply is the callable used as the loss
loss_func = MPELoss_with_backward.apply

def train(model, optimizer, loss_func, H, H1, train_loader, device, epochs, pre_train = False):
    """Train `model` on `train_loader` for `epochs` passes.

    Args:
        model: network to train (moved to `device` here).
        optimizer: optimizer already bound to `model`'s parameters.
        loss_func: callable loss, e.g. ``MPELoss_with_backward.apply``.
        H, H1: homogenization solvers consumed by the loss / error metric.
        train_loader: yields batches of (inputs, voxels, hard/soft element
            stiffness and force tensors, C_hard, C_soft, jacobi, index).
        device: torch device to train on.
        epochs: number of epochs.
        pre_train: when True, stop each epoch after step 30 and save a
            pre-training checkpoint instead of training to completion.

    Side effects: logs scalars to the module-level TensorBoard `writer`,
    prints progress, and saves checkpoints under ./unet/.
    """
    model.to(device)
    for e in tqdm(range(epochs)):
        # NOTE: renamed from `input` to avoid shadowing the builtin
        for step, (inputs, voxels, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force, C_hard, C_soft, jacobi, index) in enumerate(tqdm(train_loader)):

            output = model(inputs)
            loss = loss_func(output, H, voxels, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force, index)

            # global step index across epochs for TensorBoard
            writer.add_scalar('Loss', loss.item(), step + e * len(train_loader))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print(step, 'loss: ', loss.item())

            # Periodically evaluate the homogenization error on the current batch
            if step % 100 == 0:
                batch_error_mean, batch_error = get_error(H, H1, C_hard, C_soft, voxels, output, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force, jacobi)
                print('Epoch: ', e, '| train loss: %.4f' % loss.item(), '| test error average: %.4f' % batch_error_mean)
                writer.add_scalar('Error_Avg', batch_error_mean, step + e * len(train_loader))
                for i in range(len(batch_error)):
                    writer.add_scalar('Error' + str(i), batch_error[i], step + e * len(train_loader))

            # Pre-training mode: run only a 31-step warm-up per epoch,
            # checkpoint, then move to the next epoch.
            if step == 30:
                if pre_train:
                    torch.save(model, './unet/model_syn_pre.pkl')
                    break
        # Checkpoint every 10 epochs (at e = 5, 15, 25, ...)
        if e % 10 == 5:
            torch.save(model, './unet/model_syn_new_' + str(e) + '.pkl')
    writer.close()
    torch.save(model, './unet/model_syn_new_finish_1101.pkl')


# def test(model, H, test_loader, device):
#     model.to(device)
#     with torch.no_grad():
#         for step, (voxels, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force, C_hard, C_soft) in enumerate(
#             tqdm(test_loader)):
#             output = model(voxels).to(device)
#             batch_error_mean, batch_error = get_accuracy(H, H1, C_hard, C_soft, voxels, output, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force)
#             print('batch: ', step, '| batch_error_mean: %.4f' % batch_error_mean)
#             for i in range(len(batch_error)):
#                 print(i, ' error: %.4f' % batch_error[i])

# pre train
# pre_train = False
# if pre_train:
#     train(net, optimizer=optimizer, loss_func=loss_func, H=H, H1=H1, train_loader=train_loader, device=device, epochs=1, pre_train= True)
# pre_net = torch.load('./unet/model_syn_pre.pkl')
# Resume from the epoch-29 checkpoint. The file is a fully pickled model,
# so the src.* modules must be importable, and torch.load executes
# arbitrary pickled code -- only load trusted checkpoints.
pre_net = torch.load('./unet/model_syn_29.pkl')
# train
lr = 1e-4
optimizer = torch.optim.Adam(pre_net.parameters(), lr=lr)
loss_func = MPELoss_with_backward.apply
train(pre_net, optimizer=optimizer, loss_func=loss_func, H=H, H1=H1, train_loader=train_loader, device=device, epochs=20, pre_train= False)



# train(net,H,H1,train_loader,device,epochs=1,lr=lr)
# pre_net = torch.load('./model_shear.pkl')
# train(pre_net, H, H1, train_loader, device, epochs = 40, lr =lr)

# writer = SummaryWriter(tbdir)
# model  = torch.load('./unet/model_syn_29.pkl')
# test_data_path = './test_data/'
# def test(model, H, H1, test_loader, device):
#     model.to(device)
#     with torch.no_grad():
#         for step, (input, voxels, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force, C_hard, C_soft, jacobi, index) in enumerate(
#             tqdm(test_loader)):
#             # save test data to txt
# #             file_name = test_data_path + 'test_data_' + str(step)
# #             scio.savemat(file_name, {'inputs':input,'voxels':voxels,'hard_element_stiffness':hard_element_stiffness,'hard_element_force':hard_element_force,'soft_element_stiffness':soft_element_stiffness,'soft_element_force':soft_element_force,'C_hard':C_hard,'C_soft':C_soft,'jacobi':jacobi,'index':index})
#             # start_t = time()
#             output = model(input)
#             # end_t  =time()
#             # print(end_t-start_t)
#             # c = format((end_t-start_t)/8,'.6f')
#             # print(c)
# #             file_name = test_data_path + 'test_data_testtesttest_' + str(step)+'.mat'
# #             scio.savemat(file_name, {'output':output.cpu().numpy(),'inputs':input.cpu().numpy(),'voxels':voxels.cpu().numpy()})
#             # writer.add_scalar('costime',c,step)
#             batch_error_mean, batch_error = get_error(H, H1, C_hard, C_soft, voxels, output, hard_element_stiffness, hard_element_force, soft_element_stiffness, soft_element_force, jacobi)
#             writer.add_scalar('test error avg', batch_error_mean, step)
# #             print('batch: ', step, '| batch_error_mean: %.4f' % batch_error_mean)
#             for i in range(len(batch_error)):
#                 print(i, ' error: %.4f' % batch_error[i])
#                 scalar_name = 'test error ' + str(i)
#                 writer.add_scalar(scalar_name, batch_error[i], step)

# test(model,H,H1,train_loader,device)
# writer.close()

