import os

# Pin the native math libraries to one thread each. This must happen
# before torch / numpy-backed modules are imported below, presumably so
# DataLoader worker processes don't oversubscribe the CPU — confirm.
os.environ["MKL_NUM_THREADS"] = "1"  # noqa F402
os.environ["NUMEXPR_NUM_THREADS"] = "1"  # noqa F402
os.environ["OMP_NUM_THREADS"] = "1"  # noqa F402


import torch
import torchvision.transforms as transforms
import torch.nn.functional as F
import flow_transforms
import models
import datasets
import matplotlib.pyplot as plt
import time
from multiscaleloss import one_scale, realEPE
from util import sec_to_hm_str,flow2rgb
from tensorboardX import SummaryWriter
import datetime
from tqdm import tqdm

# 256-entry 'plasma' colormap. NOTE(review): not referenced anywhere in
# this file's visible code — presumably used by plotting code elsewhere,
# or dead; verify before removing.
_DEPTH_COLORMAP = plt.get_cmap('plasma', 256)  # for plotting

class Trainer:
    """FlowNet-style optical-flow training harness.

    Wires together model construction, data loading, optimizer / LR
    schedule and TensorBoard logging, then drives the
    train -> validate -> checkpoint loop via :meth:`train`.
    """

    def __init__(self, options):
        self.opt = options

        self.models = {}               # name -> nn.Module
        self.parameters_to_train = []  # flat parameter list fed to the optimizer
        self.device = torch.device("cpu" if self.opt.no_cuda else "cuda")
        self.flownetarch = self.opt.arch
        self.sparse = self.opt.sparse  # sparse ground-truth flow (e.g. KITTI)

        self.initial_model()
        self.inital_data()  # NOTE: method name keeps its historical typo
        self.initial_optimizer()
        self.initial_loginfo()

    def initial_loginfo(self):
        """Create the run directory and one SummaryWriter per mode.

        Layout: <log_dir>/<dataset>/<MM-DD-HH:MM>/<arch>,<solver>/{train,val}
        """
        log_dir = self.opt.log_dir
        save_path = '{},{}'.format(
            self.opt.arch,
            self.opt.solver,
            )

        timestamp = datetime.datetime.now().strftime("%m-%d-%H:%M")
        save_path = os.path.join(timestamp, save_path)
        save_path = os.path.join(log_dir, self.opt.dataset, save_path)
        print('=> will save everything to {}'.format(save_path))
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        self.save_path = save_path
        self.writers = {}
        for mode in ["train", "val"]:
            self.writers[mode] = SummaryWriter(os.path.join(save_path, mode))

    def initial_optimizer(self):
        """Build the optimizer ('adam' or 'sgd') and a halving MultiStepLR schedule.

        Raises:
            ValueError: if ``opt.solver`` is neither 'adam' nor 'sgd'.
                (Previously an unknown solver silently left the optimizer
                unset and crashed later with an AttributeError.)
        """
        if self.opt.solver == 'adam':
            self.model_optimizer = torch.optim.Adam(
                self.parameters_to_train, self.opt.lr,
                betas=(self.opt.momentum, self.opt.beta))
        elif self.opt.solver == 'sgd':
            self.model_optimizer = torch.optim.SGD(
                self.parameters_to_train, self.opt.lr,
                momentum=self.opt.momentum)
        else:
            raise ValueError("unknown solver '{}'".format(self.opt.solver))

        # LR is halved at every milestone epoch.
        self.model_lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            self.model_optimizer, milestones=self.opt.milestones, gamma=0.5)

    def inital_data(self):
        """Build datasets, transforms and the train/val data loaders.

        (Method name keeps its historical typo so external callers,
        if any, keep working.)
        """
        # Images: to tensor, scale to [0, 1], subtract per-channel mean.
        input_transform = transforms.Compose([
            flow_transforms.ArrayToTensor(),
            transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
            transforms.Normalize(mean=[0.45, 0.432, 0.411], std=[1, 1, 1])
        ])
        # Ground-truth flow is scaled down by div_flow for training.
        target_transform = transforms.Compose([
            flow_transforms.ArrayToTensor(),
            transforms.Normalize(mean=[0, 0], std=[self.opt.div_flow, self.opt.div_flow])
        ])

        # KITTI ground truth is sparse: skip the geometric augmentations
        # (translate/rotate) that would interpolate invalid flow values.
        if 'KITTI' in self.opt.dataset:
            self.sparse = True
        if self.sparse:
            co_transform = flow_transforms.Compose([
                flow_transforms.RandomCrop((320, 448)),
                flow_transforms.RandomVerticalFlip(),
                flow_transforms.RandomHorizontalFlip()
            ])
        else:
            co_transform = flow_transforms.Compose([
                flow_transforms.RandomTranslate(10),
                flow_transforms.RandomRotate(10, 5),
                flow_transforms.RandomCrop((320, 448)),
                flow_transforms.RandomVerticalFlip(),
                flow_transforms.RandomHorizontalFlip()
            ])

        print("=> fetching img pairs in '{}'".format(self.opt.data_path))
        train_set, test_set = datasets.__dict__[self.opt.dataset](
            self.opt.data_path,
            transform=input_transform,
            target_transform=target_transform,
            co_transform=co_transform,
            split=self.opt.split_file if self.opt.split_file else self.opt.split_value
        )
        print('{} samples found, {} train samples and {} test samples '.format(
            len(test_set) + len(train_set), len(train_set), len(test_set)))
        self.train_loader = torch.utils.data.DataLoader(
            train_set, batch_size=self.opt.batch_size,
            num_workers=self.opt.workers, pin_memory=True, shuffle=True)

        self.val_loader = torch.utils.data.DataLoader(
            test_set, batch_size=self.opt.batch_size,
            num_workers=self.opt.workers, pin_memory=True, shuffle=False)
        self.val_iter = iter(self.val_loader)  # kept for compatibility; unused here
        self.num_total_steps = len(train_set) // self.opt.batch_size * self.opt.num_epochs

    def initial_model(self):
        """Instantiate the flow network, optionally from a checkpoint.

        When ``opt.pretrained`` is set, the checkpoint dictates the
        architecture via its 'arch' entry.
        """
        if self.opt.pretrained:
            network_data = torch.load(self.opt.pretrained)
            self.flownetarch = network_data['arch']
            print("=> using pre-trained model '{}'".format(self.flownetarch))
        else:
            network_data = None
            print("=> creating model '{}'".format(self.flownetarch))
        self.models["FlowNet"] = models.__dict__[self.flownetarch](network_data).to(self.device)
        self.parameters_to_train += list(self.models["FlowNet"].parameters())

    def set_train(self):
        """Convert all models to training mode
        """
        for m in self.models.values():
            m.train()

    def set_eval(self):
        """Convert all models to testing/evaluation mode
        """
        for m in self.models.values():
            m.eval()

    def train(self):
        """Run the full loop: one epoch of training, then validate and checkpoint."""
        self.epoch = 0
        self.step = 0
        self.start_time = time.time()
        for self.epoch in range(self.opt.num_epochs):
            self.run_epoch()
            self.val()
            self.save_model()

    def run_epoch(self):
        """Train over the whole train_loader once and step the LR scheduler."""
        print("Training")
        self.set_train()
        for batch_idx, (inputs, target) in enumerate(self.train_loader):
            before_op_time = time.time()
            target = target.to(self.device)
            # The two RGB frames arrive as a list; concatenate on channels.
            inputs = torch.cat(inputs, 1).to(self.device)

            losses, output = self.process_batch(inputs, target, is_train=True)

            # End-point error of the finest prediction, rescaled to pixels.
            flow2_EPE = self.opt.div_flow * realEPE(output[0], target, sparse=self.sparse)

            self.model_optimizer.zero_grad()
            losses["loss"].backward()
            self.model_optimizer.step()

            duration = time.time() - before_op_time
            if batch_idx % self.opt.log_frequency == 0:
                self.log_time(batch_idx, duration, losses["loss"].cpu().data, flow2_EPE.cpu().data)
                self.log("train", inputs, output, target, losses)
            self.step += 1
        self.model_lr_scheduler.step()

    def save_model(self):
        """Save every model's state_dict plus the optimizer under save_path/models.

        The folder name embeds the epoch and the last validation EPE, so
        :meth:`val` must run before this.
        """
        save_folder = os.path.join(
            self.save_path, "models",
            "weights_{}_{}".format(self.epoch, round(self.val_EPE.cpu().item(), 2)))
        if not os.path.exists(save_folder):
            os.makedirs(save_folder)
        for model_name, model in self.models.items():
            save_path = os.path.join(save_folder, "{}.pth".format(model_name))
            torch.save(model.state_dict(), save_path)

        # NOTE: file is named "adam.pth" even when the sgd solver is used;
        # kept as-is so existing checkpoint-loading code keeps working.
        save_path = os.path.join(save_folder, "{}.pth".format("adam"))
        torch.save(self.model_optimizer.state_dict(), save_path, _use_new_zipfile_serialization=False)
        print("Model saved at " + save_folder)  # BUG FIX: message had no space

    def val(self):
        """Run one full validation pass; stores the mean EPE in self.val_EPE."""
        print("Validation")
        self.set_eval()
        loss_total = 0
        EPE = 0
        num_batches = 0
        with torch.no_grad():
            for inputs, target in self.val_loader:
                target = target.to(self.device)
                inputs = torch.cat(inputs, 1).to(self.device)
                losses, output = self.process_batch(inputs, target, is_train=False)
                flow2_EPE = self.opt.div_flow * realEPE(output[0], target, sparse=self.sparse)
                loss_total += losses["loss"].cpu().data
                EPE += flow2_EPE.cpu().data
                num_batches += 1
        # BUG FIX: previously divided by the last batch *index* (off by
        # one, and ZeroDivisionError for a single-batch val loader).
        num_batches = max(num_batches, 1)
        loss_total = loss_total / num_batches
        EPE = EPE / num_batches

        print_string = "Epoch {:>3} | val_loss {:.5f} | val_EPE: {:.5f}"
        print(print_string.format(self.epoch, loss_total, EPE))
        self.val_EPE = EPE
        self.set_train()

    def log(self, mode, input, output, target, losses):
        """Write loss scalars and qualitative flow images to TensorBoard."""
        writer = self.writers[mode]
        for name, value in losses.items():
            writer.add_scalar("{}".format(name), value, self.step)
        finest = 0  # index of the highest-resolution prediction
        # De-normalisation offset matching inital_data's input_transform.
        mean_values = torch.tensor([0.45, 0.432, 0.411], dtype=input.dtype).view(3, 1, 1)
        # BUG FIX: clamp to the actual batch size — the final batch of an
        # epoch can hold fewer than opt.batch_size samples.
        for j in range(min(4, self.opt.batch_size, input.size(0))):
            writer.add_image("GroundTruth", flow2rgb(self.opt.div_flow * target[j], max_value=10), self.step)
            writer.add_image('InputsA', (input[j, :3].cpu() + mean_values).clamp(0, 1), self.step)
            writer.add_image('InputsB', (input[j, 3:].cpu() + mean_values).clamp(0, 1), self.step)
            writer.add_image('FlowNet Outputs', flow2rgb(self.opt.div_flow * output[finest][j], max_value=10), self.step)

    def process_batch(self, inputs, target, is_train=False):
        """Forward one batch and compute the multi-scale loss.

        ``is_train`` is accepted for interface compatibility but is
        currently unused.
        Returns (losses dict, list of per-scale flow predictions).
        """
        flownet_output = self.models["FlowNet"](inputs)

        if self.sparse:
            # Since target pooling is not very precise when sparse,
            # take the highest resolution prediction and upsample it
            # instead of downsampling the target.
            h, w = target.size()[-2:]
            flownet_output = [F.interpolate(flownet_output[0], (h, w)), *flownet_output[1:]]

        # BUG FIX: the sparse flag was previously dropped here, so
        # compute_losses always took its dense (sparse=False) path even
        # on KITTI, while realEPE elsewhere did receive the flag.
        losses = self.compute_losses(flownet_output, target, sparse=self.sparse)

        return losses, flownet_output

    def compute_losses(self, network_output, target_flow, weights=None, sparse=False):
        """Weighted multi-scale flow loss, averaged over scales.

        weights defaults to the per-scale weights of the original
        FlowNet article; one entry per prediction scale.
        """
        losses = {}
        total_loss = 0
        if type(network_output) not in [tuple, list]:
            network_output = [network_output]
        if weights is None:
            weights = [0.005, 0.01, 0.02, 0.08, 0.32]  # as in original article
        assert (len(weights) == len(network_output))
        num_scale = len(weights)

        for scale in range(num_scale):
            loss = weights[scale] * one_scale(network_output[scale], target_flow, sparse)
            losses['flow_net/{}'.format(scale)] = loss
            total_loss += loss
        total_loss /= num_scale
        losses["loss"] = total_loss
        return losses

    def log_time(self, batch_idx, duration, loss, EPE):
        """Print a one-line progress statement with throughput and ETA."""
        samples_per_sec = self.opt.batch_size / duration
        time_sofar = time.time() - self.start_time
        # Linear extrapolation of remaining time from steps done so far.
        training_time_left = (self.num_total_steps / self.step - 1.0) * time_sofar if self.step > 0 else 0
        print_string = "epoch {:>3} | batch {:>6} | examples/s: {:5.1f}" + \
                       " | loss: {:.5f} | EPE: {:.5f} |time elapsed: {} | time left: {}"
        print(print_string.format(self.epoch, batch_idx, samples_per_sec, loss, EPE,
                                  sec_to_hm_str(time_sofar), sec_to_hm_str(training_time_left)))