import torch
from tqdm import tqdm
from utils.dataloader import apply_heatmap
from utils.utils import get_lr
from nets.centernet_training import focal_loss, reg_l1_loss,l1_loss,anti_l1_loss
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
def _batch_to_tensors(batch, cuda):
    # Convert the leading numpy arrays of `batch` to float tensors; the last
    # three entries (up boxes, down boxes, image paths) stay as plain Python
    # objects and are read directly by the caller.
    tensors = []
    for item in batch[:len(batch) - 3]:
        tensor = torch.from_numpy(item).type(torch.FloatTensor)
        if cuda:
            tensor = tensor.cuda()
        tensors.append(tensor)
    return tensors


def _peak_distance_loss(numpy_hm1, numpy_hm2, batch_up_boxes, batch_down_boxes):
    """Average anti_l1_loss between heat-map peak locations, per image.

    For every (up_box, down_box) pair the peak positions (argmax) of the two
    predicted heat-maps are located inside the corresponding crop;
    anti_l1_loss between the two peak index arrays is averaged per image and
    then over the batch.  Peaks are found with numpy, so the returned value
    carries no gradient — it only shifts the reported loss magnitude.

    NOTE(review): the original loop iterated ``range(len(numpy_hm1[0]))``
    (the channel count) while indexing the per-image box lists with the same
    index; it now iterates the batch dimension, which is what the box
    indexing implies was intended — confirm against the dataloader.
    """
    total = 0
    num_images = len(numpy_hm1)
    for i in range(num_images):
        up_peaks = []
        down_peaks = []
        # squeeze(axis=0) assumes a single-channel heat-map — TODO confirm.
        one_hm1 = np.squeeze(numpy_hm1[i], axis=0)
        one_hm2 = np.squeeze(numpy_hm2[i], axis=0)
        for up_box, down_box in zip(batch_up_boxes[i], batch_down_boxes[i]):
            crop1 = one_hm1[up_box[0]:up_box[2], up_box[1]:up_box[3]]
            crop2 = one_hm2[down_box[0]:down_box[2], down_box[1]:down_box[3]]
            # Boxes that fall outside the heat-map yield empty crops; skip them.
            if crop1.size == 0 or crop2.size == 0:
                continue
            up_peaks.append(np.argwhere(crop1 == np.max(crop1)))
            down_peaks.append(np.argwhere(crop2 == np.max(crop2)))
        if not up_peaks or not down_peaks:
            continue
        image_loss = 0
        for up_peak, down_peak in zip(up_peaks, down_peaks):
            image_loss += anti_l1_loss(torch.from_numpy(up_peak),
                                       torch.from_numpy(down_peak))
        total += image_loss / len(up_peaks)
    return total / num_images


def fit_one_epoch(model_train, model, loss_history, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, cuda, backbone):
    """Train and validate the model for a single epoch, then checkpoint it.

    Parameters
    ----------
    model_train : the (possibly wrapped) module used for forward/backward.
    model       : the raw module whose ``state_dict`` is saved to ``logs/``.
    loss_history: object with an ``append_loss`` method used for logging.
    optimizer   : torch optimizer stepped once per training batch.
    epoch, Epoch: current epoch index and total epoch count (display only).
    epoch_step, epoch_step_val : number of batches per train / val epoch.
    gen, gen_val: train / validation generators.  Each batch yields the numpy
                  arrays listed in the shape comment below, followed by
                  up boxes, down boxes and image paths as the last 3 entries.
    cuda        : move tensors to the GPU when True.
    backbone    : "resnet50" selects the multi-head branch; anything else is
                  treated as a model returning a list of output dicts with
                  "hm" / "wh" / "reg" keys.

    Returns the accumulated (un-normalised) validation loss.
    """
    total_r_loss = 0
    total_c_loss = 0
    total_loss = 0
    val_loss = 0
    head_loss = 0
    legs_loss = 0

    model_train.train()
    print('Start Train')
    with tqdm(total=epoch_step, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen):
            if iteration >= epoch_step:
                break

            with torch.no_grad():
                batchs = _batch_to_tensors(batch, cuda)

            # -------------------------------------------------------------------------#
            # Ground-truth bundle for one batch of images:
            # batch_images        = [batch, 1/4 input_shape, 1/4 input_shape, 3]
            # batch_hms/_hms1/_hms2 = [batch, 1/4 input_shape, 1/4 input_shape, 3]
            # batch_whs           = [batch, 1/4 input_shape, 1/4 input_shape, 2]
            # batch_regs          = [batch, 1/4 input_shape, 1/4 input_shape, 2]
            # batch_reg_masks     = [batch, 1/4 input_shape, 1/4 input_shape]
            # batch_up/down_boxes = [batch, n, 4]
            # -------------------------------------------------------------------------#
            batch_images, batch_hms, batch_hms1, batch_hms2, batch_whs, batch_regs, batch_reg_masks, batch_masks = batchs
            batch_up_boxes = batch[-3]
            batch_down_boxes = batch[-2]

            # Clear gradients before the forward pass.
            optimizer.zero_grad()

            if backbone == "resnet50":
                # hm/hm1/hm2 = [batch, class_num, 1/4 input_shape, 1/4 input_shape]
                # wh/offset  = [batch, 2,         1/4 input_shape, 1/4 input_shape]
                hm, hm1, hm2, wh, offset, mask = model_train(batch_images)

                # detach() replaces the original .cuda().data.cpu() round-trip:
                # same values, no needless device hop, and it also works when
                # cuda is False.
                numpy_hm1 = hm1.detach().cpu().numpy()
                numpy_hm2 = hm2.detach().cpu().numpy()
                # The original accumulated distance_loss across iterations and
                # re-divided the running total every batch; compute a fresh
                # per-batch value instead.
                distance_loss = _peak_distance_loss(numpy_hm1, numpy_hm2,
                                                    batch_up_boxes, batch_down_boxes)

                c_loss  = focal_loss(hm, batch_hms)
                c_loss1 = focal_loss(hm1, batch_hms1)
                c_loss2 = focal_loss(hm2, batch_hms2)

                # NOTE(review): training masks wh with batch_masks while the
                # validation loop uses batch_reg_masks — confirm which mask is
                # intended; left as in the original.
                wh_loss   = 0.1 * reg_l1_loss(wh, batch_whs, batch_masks)
                off_loss  = reg_l1_loss(offset, batch_regs, batch_reg_masks)
                mask_loss = l1_loss(batch_masks, mask)

                loss = c_loss + wh_loss + off_loss + c_loss1 + c_loss2 + mask_loss + distance_loss
                total_loss   += loss.item()
                total_c_loss += c_loss.item()
                total_r_loss += wh_loss.item() + off_loss.item()
                head_loss    += c_loss1.item()
                legs_loss    += c_loss2.item()
            else:
                # Stacked-output models (e.g. hourglass): sum the loss over
                # every intermediate output, report the per-output mean.
                outputs = model_train(batch_images)
                loss = 0
                c_loss_all = 0
                r_loss_all = 0
                index = 0
                for output in outputs:
                    hm, wh, offset = output["hm"].sigmoid(), output["wh"], output["reg"]
                    c_loss   = focal_loss(hm, batch_hms)
                    wh_loss  = 0.1 * reg_l1_loss(wh, batch_whs, batch_reg_masks)
                    off_loss = reg_l1_loss(offset, batch_regs, batch_reg_masks)

                    loss += c_loss + wh_loss + off_loss

                    c_loss_all += c_loss
                    r_loss_all += wh_loss + off_loss
                    index += 1
                total_loss   += loss.item() / index
                total_c_loss += c_loss_all.item() / index
                total_r_loss += r_loss_all.item() / index

            loss.backward()
            optimizer.step()

            pbar.set_postfix(**{'total_r_loss': total_r_loss / (iteration + 1),
                                'total_c_loss': total_c_loss / (iteration + 1),
                                'lr': get_lr(optimizer)})
            pbar.update(1)

    print('Finish Train')

    model_train.eval()
    print('Start Validation')
    with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen_val):
            if iteration >= epoch_step_val:
                break
            with torch.no_grad():
                batchs = _batch_to_tensors(batch, cuda)
                batch_images, batch_hms, batch_hms1, batch_hms2, batch_whs, batch_regs, batch_reg_masks, batch_masks = batchs

                if backbone == "resnet50":
                    hm, hm1, hm2, wh, offset, mask = model_train(batch_images)

                    c_loss  = focal_loss(hm, batch_hms)
                    c_loss1 = focal_loss(hm1, batch_hms1)
                    c_loss2 = focal_loss(hm2, batch_hms2)

                    wh_loss   = 0.1 * reg_l1_loss(wh, batch_whs, batch_reg_masks)
                    off_loss  = reg_l1_loss(offset, batch_regs, batch_reg_masks)
                    mask_loss = l1_loss(batch_masks, mask)
                    loss = c_loss + wh_loss + off_loss + c_loss1 + c_loss2 + mask_loss

                    val_loss += loss.item()
                else:
                    outputs = model_train(batch_images)
                    index = 0
                    loss = 0
                    for output in outputs:
                        hm, wh, offset = output["hm"].sigmoid(), output["wh"], output["reg"]
                        c_loss   = focal_loss(hm, batch_hms)
                        wh_loss  = 0.1 * reg_l1_loss(wh, batch_whs, batch_reg_masks)
                        off_loss = reg_l1_loss(offset, batch_regs, batch_reg_masks)

                        loss += c_loss + wh_loss + off_loss
                        index += 1
                    val_loss += loss.item() / index

                pbar.set_postfix(**{'total_loss': val_loss / (iteration + 1)})
                pbar.update(1)
    print('Finish Validation')

    loss_history.append_loss(total_loss / epoch_step, val_loss / epoch_step_val, total_r_loss / epoch_step, total_c_loss / epoch_step, head_loss / epoch_step, legs_loss / epoch_step)
    print('Epoch:' + str(epoch + 1) + '/' + str(Epoch))
    print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val))
    torch.save(model.state_dict(), 'logs/ep%03d-loss%.3f-val_loss%.3f.pth' % (epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val))
    return val_loss