# -*- coding: utf-8 -*-
"""
Created on Fri Apr  8 10:45:25 2022

@author: 04566
"""
import os
import numpy as np
import paddle
import paddle.nn.functional as F
from data import dataset
from paddle.io import DataLoader
from paddle import optimizer
from layers import BackprojectDepth,Project3D,SSIM,disp_to_depth,get_smooth_loss
import networks
from mobencoder import mobilenet_encoder
from utils import setup_seed
import time
class Trainer:
    """Self-supervised stereo depth-estimation trainer (monodepth2-style).

    Trains a MobileNet encoder + depth decoder by minimizing the photometric
    reprojection error between the left image and the right image warped into
    the left view, plus an edge-aware disparity smoothness term.
    """

    def __init__(self, dirpath):
        """Build datasets, networks, loss helpers and the optimizer.

        Args:
            dirpath: root directory of the training/validation image data.
        """
        # Rank of this process under paddle.distributed (0 in single-process
        # runs); used to restrict logging and checkpointing to one process.
        self.rank = paddle.distributed.get_rank()
        setup_seed(210)  # fixed seed for reproducibility
        self.width = 640
        self.height = 192

        self.dirpath = dirpath
        self.img_dataset_source = 'train'
        train_dataset = dataset(dirpath=self.dirpath,
                                img_dataset_source=self.img_dataset_source,
                                width=self.width, height=self.height)
        self.img_dataset_source = 'val'
        val_dataset = dataset(dirpath=self.dirpath,
                              img_dataset_source=self.img_dataset_source,
                              width=self.width, height=self.height)
        self.batch_size = 12
        # drop_last=True because BackprojectDepth/Project3D are built for a
        # fixed batch size and must never see a smaller final batch.
        self.train_loader = DataLoader(train_dataset, batch_size=self.batch_size,
                                       shuffle=True, num_workers=0,
                                       drop_last=True, worker_init_fn=setup_seed)
        self.val_loader = DataLoader(val_dataset, batch_size=self.batch_size,
                                     shuffle=True, num_workers=0,
                                     drop_last=True, worker_init_fn=setup_seed)
        self.scales = [0, 1, 2, 3]
        self.ssim = SSIM()
        self.backproject_depth = BackprojectDepth(self.batch_size, self.height, self.width)
        self.project_3d = Project3D(self.batch_size, self.height, self.width)
        self.models = {}
        self.parameters_to_train = []
        #self.models["encoder"] =networks.ResnetEncoder(18, 'paddle_pretrained')
        self.models["encoder"] = mobilenet_encoder()
        self.parameters_to_train += list(self.models["encoder"].parameters())
        # Channel counts below must match the encoder's feature pyramid.
        self.models["depth"] = networks.DepthDecoder(np.array([64, 64, 128, 256, 512]), self.scales)
        self.parameters_to_train += list(self.models["depth"].parameters())
        self.parameters_to_train = paddle.nn.ParameterList(self.parameters_to_train)
        self.time = time.time()

        # optimizer setup
        self.learning_rate = 1e-5
        self.scheduler_step_size = 5
        self.num_epochs = 10
        self.save_frequency = 5
        self.model_lr_scheduler = optimizer.lr.StepDecay(self.learning_rate, self.scheduler_step_size, 0.1)
        # BUGFIX: the scheduler must be passed to Adam as its learning rate;
        # previously Adam got the raw float, so scheduler.step() had no effect.
        self.model_optimizer = optimizer.Adam(learning_rate=self.model_lr_scheduler,
                                              parameters=self.parameters_to_train)
        self.retrain = True
        self.log_path = './log'
        if self.retrain:
            self.load_model()

    def set_train(self):
        """Put every sub-network into training mode."""
        for m in self.models.values():
            m.train()

    def set_eval(self):
        """Put every sub-network into evaluation mode."""
        for m in self.models.values():
            m.eval()

    def train(self):
        """Run the full training schedule.

        Checkpoints every `save_frequency` epochs and whenever the validation
        loss improves; only rank 0 writes checkpoints.
        """
        self.epoch = 0
        self.step = 0
        best_val_loss = float("inf")
        for self.epoch in range(self.num_epochs):
            self.run_epoch()
            # rank guard keeps multi-process runs from all writing the same
            # files (consistent with the best-checkpoint save below)
            if (self.epoch + 1) % self.save_frequency == 0 and self.rank == 0:
                self.save_model(self.epoch)

            val_loss = self.val()
            print(f'In epoch {self.epoch}, the validation loss is {val_loss}.')
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                if self.rank == 0: self.save_model("best")

    def run_epoch(self):
        """
        Run a single epoch of training
        """
        if self.rank == 0: print("Training")
        self.set_train()

        for batch_idx, inputs in enumerate(self.train_loader):
            print(time.time()-self.time)  # seconds spent on the previous batch
            outputs, losses = self.process_batch(inputs)

            self.model_optimizer.clear_grad()
            losses["loss"].backward()
            print('loss:',losses["loss"])
            self.model_optimizer.step()
            self.time = time.time()
            self.step += 1

        # BUGFIX: decay the learning rate after the epoch completes; stepping
        # before the first epoch skipped the initial learning rate entirely.
        self.model_lr_scheduler.step()

    def process_batch(self, inputs):
        """
        Pass a minibatch through the network and generate images and losses
        """
        features = self.models["encoder"](inputs[('img_raw_l',0)])
        outputs = self.models["depth"](features)
        self.generate_images_pred(inputs, outputs)
        losses = self.compute_losses(inputs, outputs)

        return outputs, losses

    def val(self):
        """
        Validate the model on the validation set; returns the mean total loss.
        """
        self.set_eval()
        if self.rank == 0: print("Validating")

        with paddle.no_grad():
            loss_list = []
            for batch_idx, inputs in enumerate(self.val_loader):
                outputs, losses = self.process_batch(inputs)
                # float() handles both 0-d and 1-element tensors across Paddle
                # versions (.numpy()[0] fails on 0-d tensors in newer Paddle)
                loss_list.append(float(losses["loss"]))

        return sum(loss_list) / len(loss_list)

    def generate_images_pred(self, inputs, outputs):
        """
        Generate the warped (reprojected) color images for a minibatch.

        Per scale: upsample disparity to full resolution, convert to depth,
        back-project with the inverse intrinsics, re-project through the
        stereo transform T, and warp the right image with the resulting grid.
        Generated images are saved into the `outputs` dictionary.
        """
        for scale in self.scales:
            disp = outputs[("disp", scale)]
            disp = F.interpolate(disp, [self.height, self.width], mode="bilinear", align_corners=False)

            # fixed depth range [0.1, 100] (monodepth convention)
            _, depth = disp_to_depth(disp, 0.1, 100)
            outputs[("depth", scale)] = depth

            cam_points = self.backproject_depth(depth, inputs['inv_K'])
            pix_coords = self.project_3d(cam_points, inputs['K'], inputs['T'])

            outputs[("sample", scale)] = pix_coords
            img = inputs[('img_raw_r',0)].clone()
            img.stop_gradient = False  # let gradients flow through the warp
            outputs[("color", scale)] = F.grid_sample(img, outputs[("sample", scale)], padding_mode="border")

    def compute_reprojection_loss(self, pred, target):
        """Per-pixel photometric error: 0.85 * SSIM + 0.15 * L1 (channel mean)."""
        abs_diff = paddle.abs(target - pred)
        l1_loss = abs_diff.mean(1, True)
        ssim_loss = self.ssim(pred, target).mean(1, True)
        reprojection_loss = 0.85 * ssim_loss + 0.15 * l1_loss
        return reprojection_loss

    def compute_losses(self, inputs, outputs):
        """Total loss averaged over scales.

        Per scale: per-pixel min of the warped-image reprojection error and
        the identity (un-warped right image) error — monodepth2's automasking
        of stationary/occluded pixels — plus edge-aware disparity smoothness.
        """
        losses = {}
        total_loss = 0
        for scale in self.scales:
            disp = outputs[("disp", scale)]
            color = inputs[('img_raw_l', scale)]
            target = inputs[('img_raw_l', 0)]

            pred = outputs[("color", scale)]
            reprojection_loss = self.compute_reprojection_loss(pred, target)

            pred = inputs[('img_raw_r', 0)]
            identity_reprojection_loss = self.compute_reprojection_loss(pred, target)

            # add random numbers to break ties with the reprojection loss
            identity_reprojection_loss += paddle.randn(identity_reprojection_loss.shape) * 0.00001

            combined = paddle.concat((identity_reprojection_loss, reprojection_loss), axis=1)
            if combined.shape[1] == 1:
                to_optimise = combined
            else:
                # per-pixel minimum implements the automasking
                to_optimise = paddle.min(combined, axis=1)

            loss = to_optimise.mean()

            # normalize disparity before smoothing so the penalty is
            # invariant to the overall disparity magnitude
            mean_disp = disp.mean(2, True).mean(3, True)
            norm_disp = disp / (mean_disp + 1e-7)
            smooth_loss = get_smooth_loss(norm_disp, color)

            loss += 1e-3 * smooth_loss / (2 ** scale)
            total_loss += loss
            losses["loss/{}".format(scale)] = loss

        total_loss /= len(self.scales)
        losses["loss"] = total_loss
        return losses

    def save_model(self, suffix):
        """
        Save model weights (plus optimizer/scheduler state) to
        <log_path>/models/weights_<suffix>/.
        """
        save_folder = os.path.join(self.log_path, "models", "weights_{}".format(suffix))
        os.makedirs(save_folder, exist_ok=True)

        for model_name, model in self.models.items():
            save_path = os.path.join(save_folder, "{}.pdparams".format(model_name))
            to_save = model.state_dict()
            paddle.save(to_save, save_path)

        save_path = os.path.join(save_folder, "{}.pdopt".format("adam"))
        paddle.save(self.model_optimizer.state_dict(), save_path)
        save_path = os.path.join(save_folder, "{}.pdopt".format("StepDecay"))
        paddle.save(self.model_lr_scheduler.state_dict(), save_path)

    def load_model(self):
        """
        Load network weights and optimizer/scheduler state from weights_best.

        Raises AssertionError if the checkpoint folder does not exist.
        """
        self.load_weights_folder = os.path.join(self.log_path, "models", "weights_best")

        assert os.path.isdir(self.load_weights_folder), \
            "Cannot find folder {}".format(self.load_weights_folder)
        print("loading model from folder {}".format(self.load_weights_folder))
        self.models_to_load = ["encoder", "depth"]
        for n in self.models_to_load:
            print("Loading {} weights...".format(n))
            path = os.path.join(self.load_weights_folder, n + '.pdparams')
            pretrained_dict = paddle.load(path)
            self.models[n].set_state_dict(pretrained_dict)

        # load the LR scheduler state
        scheduler_load_path = os.path.join(self.load_weights_folder, "StepDecay.pdopt")
        if os.path.isfile(scheduler_load_path):
            print("Loading StepDecay weights")
            self.model_lr_scheduler.set_state_dict(paddle.load(scheduler_load_path))
        else:
            # BUGFIX: this branch previously printed the Adam message
            print("Cannot find StepDecay state so the scheduler starts fresh")

        # load the Adam state
        optimizer_load_path = os.path.join(self.load_weights_folder, "adam.pdopt")
        if os.path.isfile(optimizer_load_path):
            print("Loading Adam weights")
            optimizer_dict = paddle.load(optimizer_load_path)
            self.model_optimizer.set_state_dict(optimizer_dict)
        else:
            print("Cannot find Adam weights so Adam is randomly initialized")