from tqdm import tqdm
import paddle
from paddle.vision.transforms import Compose, Resize, ToTensor
import os
import shutil
from utils.metric import compute_iou
from utils.dataset import LaneDataset
import paddle.nn.functional as F
from utils.loss import MySoftmaxCrossEntropyLoss, DiceLoss
from model.deeplabv3p import DeepLabv3p
from model.multi_res_unet import MultiResUnet
from config import Config
import paddle.distributed as dist
from paddle.static import InputSpec


# os.environ["CUDA_VISIBLE_DEVICES"] = "7"

# GPU ids referenced by the (commented-out) torch code paths below; kept for reference.
device_list = [0]


def train_epoch(net, epoch, dataLoader, optimizer, trainF, config):
    """Run one training epoch over ``dataLoader`` and log the mean batch loss.

    Args:
        net: paddle Layer (possibly DataParallel-wrapped); put into train mode here.
        epoch: epoch index, used only for progress/log messages.
        dataLoader: iterable of (image, mask) batches.
        optimizer: paddle optimizer updating ``net``'s parameters.
        trainF: open text file; one summary line is appended per epoch.
        config: provides NUM_CLASSES for the cross-entropy loss.
    """
    net.train()
    total_mask_loss = 0.0
    # Instantiate the loss once instead of rebuilding it on every batch.
    criterion = MySoftmaxCrossEntropyLoss(nbclasses=config.NUM_CLASSES)
    dataprocess = tqdm(dataLoader)
    # Iterate the tqdm wrapper: the original looped over dataLoader directly,
    # so the progress bar never advanced.
    for batch_item in dataprocess:
        image, mask = batch_item[0], batch_item[1].astype('int64')
        # Clear accumulated gradients before the new batch.
        optimizer.clear_grad()
        out = net(image)
        mask_loss = criterion(out, mask)
        total_mask_loss += mask_loss.numpy().sum()
        # Backward pass.
        mask_loss.backward()
        # Apply the gradient update.
        optimizer.step()
        # Same live progress reporting as test().
        dataprocess.set_description_str("epoch:{}".format(epoch))
        dataprocess.set_postfix_str("mask_loss:{:.4f}".format(mask_loss.numpy().sum()))
    print("Epoch:{}, mask loss is {:.4f} \n".format(epoch, total_mask_loss / len(dataLoader)))
    trainF.write("Epoch:{}, mask loss is {:.4f} \n".format(epoch, total_mask_loss / len(dataLoader)))
    trainF.flush()


def test(net, epoch, dataLoader, testF, config):
    """Evaluate ``net`` on the validation loader; log loss and per-class IoU.

    Args:
        net: paddle Layer; put into eval mode here.
        epoch: epoch index, used only for progress/log messages.
        dataLoader: iterable of (image, mask) batches.
        testF: open text file receiving per-class IoU lines and the epoch loss.
        config: provides NUM_CLASSES for the loss and the IoU table size.
    """
    net.eval()
    total_mask_loss = 0.0
    # Instantiate the loss once instead of rebuilding it on every batch.
    criterion = MySoftmaxCrossEntropyLoss(nbclasses=config.NUM_CLASSES)
    dataprocess = tqdm(dataLoader)
    # Generalized from the hard-coded 8 classes; "TA" starts at 1 per class so the
    # IoU division below never hits zero for a class absent from the data.
    num_classes = config.NUM_CLASSES
    result = {"TP": {i: 0 for i in range(num_classes)}, "TA": {i: 1 for i in range(num_classes)}}
    # Evaluation needs no autograd bookkeeping; no_grad saves memory/compute.
    with paddle.no_grad():
        for batch_item in dataprocess:
            image, mask = batch_item[0], batch_item[1].astype('int64')
            out = net(image)
            mask_loss = criterion(out, mask)
            total_mask_loss += mask_loss.numpy().sum()
            pred = paddle.argmax(F.softmax(out, axis=1), axis=1)
            # Accumulate IoU statistics over the whole dataset.
            result = compute_iou(pred, mask, result)
            dataprocess.set_description_str("epoch:{}".format(epoch))
            dataprocess.set_postfix_str("mask_loss:{:.4f}".format(mask_loss.numpy().sum()))
    testF.write("Epoch:{}\n".format(epoch))
    for i in range(num_classes):
        result_string = "{}: {:.4f} \n".format(i, result["TP"][i]/result["TA"][i])
        print(result_string)
        testF.write(result_string)
    testF.write("Epoch:{}, mask loss is {:.4f} \n".format(epoch, total_mask_loss / len(dataLoader)))
    testF.flush()


def adjust_lr(optimizer, epoch):
    """Step-wise learning-rate schedule keyed on the epoch number.

    Warm-up at 1e-3, raise to 1e-2 at epoch 2, then decay at epochs 100 and 150.
    Epochs not listed leave the current rate untouched.

    Args:
        optimizer: paddle optimizer whose learning rate is adjusted in place.
        epoch: current epoch index.
    """
    lr_schedule = {0: 1e-3, 2: 1e-2, 100: 1e-3, 150: 1e-4}
    if epoch not in lr_schedule:
        return
    # Paddle optimizers expose set_lr(); the original iterated
    # optimizer.param_groups, which is the PyTorch API and raises
    # AttributeError on a paddle optimizer.
    optimizer.set_lr(lr_schedule[epoch])

def load_model(lane_config, model, opt):
    """Restore the most recent checkpoint from ``lane_config.SAVE_PATH``, if any.

    Scans the save directory for ``laneNet_<epoch>.pdparams`` files, loads the
    newest model/optimizer state pair into ``model``/``opt``, and returns that
    epoch number, or -1 when no checkpoint exists (nothing is loaded then).
    """
    checkpoint_dir = os.path.join(os.getcwd(), lane_config.SAVE_PATH)
    latest_epoch = -1
    for fname in os.listdir(checkpoint_dir):
        stem, ext = os.path.splitext(fname)
        if ext != '.pdparams':
            continue
        # Checkpoints are named laneNet_<epoch>; keep the highest epoch seen.
        parts = stem.split('_')
        if len(parts) > 1 and 'laneNet' in stem:
            latest_epoch = max(latest_epoch, int(parts[1]))
    if latest_epoch == -1:
        return latest_epoch
    print('last_save_model', latest_epoch)
    model.set_state_dict(paddle.load(os.path.join(checkpoint_dir, "laneNet_{}.pdparams".format(latest_epoch))))
    opt.set_state_dict(paddle.load(os.path.join(checkpoint_dir, "adam_{}.pdopt".format(latest_epoch))))
    return latest_epoch

def main():
    """Train DeepLabv3p on the lane dataset, checkpointing after every epoch.

    Resumes from the newest checkpoint in ``Config.SAVE_PATH`` when one exists,
    writes per-epoch train/val logs to train.txt / test.txt, and saves final
    weights as finalNet.* when all epochs finish.
    """
    # Initialize the parallel environment required by paddle.DataParallel.
    dist.init_parallel_env()
    lane_config = Config()
    # NOTE(review): Resize(size=(1024, 384)) — confirm (h, w) vs (w, h) ordering
    # against LaneDataset / paddle.vision.transforms.Resize semantics.
    train_dataset = LaneDataset("train.csv", transform=Compose([Resize(size=(1024, 384)), ToTensor()]))
    val_dataset = LaneDataset("val.csv", transform=Compose([Resize(size=(1024, 384)), ToTensor()]))
    # exist_ok makes the pre-check redundant; create unconditionally.
    os.makedirs(lane_config.SAVE_PATH, exist_ok=True)
    train_data_batch = paddle.io.DataLoader(train_dataset, batch_size=4, shuffle=True, drop_last=True)
    val_data_batch = paddle.io.DataLoader(val_dataset, batch_size=1, shuffle=False, drop_last=False)
    net = paddle.DataParallel(DeepLabv3p(lane_config))
    # Adam with L2-style weight decay taken from the config.
    optimizer = paddle.optimizer.Adam(learning_rate=lane_config.BASE_LR,
                                      parameters=net.parameters(),
                                      weight_decay=lane_config.WEIGHT_DECAY)
    # Resume from the newest checkpoint, if any (-1 means fresh start).
    last_save_model = load_model(lane_config, net, optimizer)
    save_dir = os.path.join(os.getcwd(), lane_config.SAVE_PATH)
    # Context managers guarantee the log files are closed even if training
    # raises (the original closed them manually and leaked them on error).
    with open(os.path.join(lane_config.SAVE_PATH, "train.txt"), 'w') as trainF, \
            open(os.path.join(lane_config.SAVE_PATH, "test.txt"), 'w') as testF:
        for epoch in range(lane_config.EPOCHS):
            # Continue epoch numbering after the restored checkpoint.
            epoch_count = epoch if last_save_model == -1 else last_save_model + epoch + 1
            train_epoch(net, epoch_count, train_data_batch, optimizer, trainF, lane_config)
            # Checkpoint model and optimizer state every epoch.
            paddle.save(net.state_dict(), os.path.join(save_dir, "laneNet_{}.pdparams".format(epoch_count)))
            paddle.save(optimizer.state_dict(), os.path.join(save_dir, "adam_{}.pdopt".format(epoch_count)))
            test(net, epoch_count, val_data_batch, testF, lane_config)
    paddle.save(net.state_dict(), os.path.join(save_dir, "finalNet.pdparams"))
    paddle.save(optimizer.state_dict(), os.path.join(save_dir, "finalNet.pdopt"))



# Script entry point; run directly (or via paddle.distributed.launch) to start training.
if __name__ == "__main__":
    main()
