import torch
from tqdm import tqdm
from Fast_RCNN_Loss import Fast_RCNN_Loss
from Fast_RCNN_Extractor import get_device
from Fast_RCNN_Dateset import Train_dataloader
from Fast_RCNN_Main_Model import Fast_RCNN_Model
save_path = '/model_dir/'  # directory where model checkpoints are written — NOTE(review): absolute root path; confirm it exists/is writable
device = get_device()  # compute device (CPU/GPU) selected by the project helper


# 根据输入 返回中点坐标和长度
def make_forms(tup):
    """Convert a corner-coordinate box tuple into center/size form.

    Assumes the input is ordered ``(x_max, x_min, y_max, y_min)`` —
    this matches how callers pack ``ct_boxes`` (TODO confirm against
    the dataset code).

    :param tup: 4-tuple of coordinates (numbers or tensors).
    :return: ``(center_x, center_y, width, height)``.
    """
    x_max, x_min, y_max, y_min = tup
    center_x = (x_min + x_max) * 0.5
    center_y = (y_min + y_max) * 0.5
    width = x_max - x_min
    height = y_max - y_min
    return center_x, center_y, width, height


def train_epoch(dataloader, model, optimizer, device):
    """Run one training epoch of the Fast R-CNN model.

    For each proposal in each batch the model predicts classification
    logits (``Logits[0]``) and box-regression offsets (``Logits[1]``);
    the offsets are applied to the proposal box and the decoded box is
    compared against the ground-truth ``loc_label``.

    :param dataloader: yields dicts with 'Model_INPUT' (feature map,
        class label, positive box coords) and 'CT_Boxes' (proposal boxes).
    :param model: the Fast R-CNN model (train mode is set here).
    :param optimizer: optimizer over ``model.parameters()``.
    :param device: torch device to run on.
    """
    model.train()
    # Progress bar over the batches of this epoch.
    book = tqdm(dataloader, total=len(dataloader))
    total_loss = 0.0

    for bi, dictionary in enumerate(book):
        model_input = dictionary['Model_INPUT']  # (feature-map region, class label, positive box coords)
        ct_boxes = dictionary['CT_Boxes']  # generated candidate (proposal) box coordinates
        for i in range(len(model_input)):
            input_ids = model_input[i][0].to(device)  # feature map for this proposal
            cla_label = model_input[i][1].to(device)  # classification target
            # loc_label is the (scaled-down) ground-truth box — the regression target.
            loc_label = torch.Tensor(model_input[i][2]).unsqueeze(0).to(device)

            # Clear gradients from the previous step.
            model.zero_grad()
            Logits = model(input_ids)

            # R-CNN regression parameterization (inverse applied below):
            #   t_x = (d_x - p_x) / p_w
            #   t_y = (d_y - p_y) / p_h
            #   t_w = log(d_w / p_w)
            #   t_h = log(d_h / p_h)
            # Predicted offsets for center (x, y) and size (w, h).
            (d_x, d_y, d_w, d_h) = Logits[1].squeeze(0)

            # Proposal box in center/size form; only positive proposals
            # contribute to the regression loss (handled in Fast_RCNN_Loss).
            p_x, p_y, p_w, p_h = make_forms(ct_boxes[i])
            p_x, p_y, p_w, p_h = p_x.to(device), p_y.to(device), p_w.to(device), p_h.to(device)

            # Decode the predicted offsets into an absolute box t_u.
            # BUG FIX: the original `torch.Tensor(t_u)` re-materializes the
            # values and DETACHES them from the autograd graph, so gradients
            # could never reach d_x/d_y/d_w/d_h. torch.stack keeps the graph.
            t_u = torch.stack((p_w * d_x + p_x,
                               p_h * d_y + p_y,
                               p_w * torch.exp(d_w),
                               p_h * torch.exp(d_h)))
            loss = Fast_RCNN_Loss(Logits[0], cla_label, t_u, loc_label)
            # BUG FIX: retain_graph=True was unnecessary (the graph is rebuilt
            # every iteration) and retained memory across steps.
            loss.backward()

            optimizer.step()
            optimizer.zero_grad()
            # BUG FIX: accumulate a Python float, not the loss tensor —
            # keeping the tensor would pin every step's graph in memory.
            total_loss += loss.item()

    # NOTE(review): this averages over batches, not over individual proposals.
    print('avg loss : {0:.2f}'.format(total_loss / len(dataloader)))


def fit(dataloader, model, device, epochs, learning_rate):
    """Full training procedure: run ``epochs`` epochs and checkpoint after each.

    :param dataloader: training dataloader consumed by ``train_epoch``.
    :param model: the model to train and save.
    :param device: torch device passed through to ``train_epoch``.
    :param epochs: number of training epochs.
    :param learning_rate: learning rate for the AdamW optimizer.
    :return: None; the whole model object is saved to ``save_path`` each epoch.
    """
    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    for i in range(epochs):
        print(f'epoch:{i+1}')
        print('train')
        train_epoch(dataloader, model, optimizer, device)
        # Checkpoint after every epoch; the same file is overwritten each time.
        # (fixed: the original used an f-string with no placeholders)
        torch.save(model, save_path + "Fast_RCNN_Model.pth")


if __name__ == '__main__':
    # Script entry point: train the project-provided model on the project
    # dataloader for 3 epochs at lr=0.002, saving a checkpoint per epoch.
    fit(Train_dataloader, Fast_RCNN_Model, device, epochs=3, learning_rate=0.002)