import os
import sys
import time
import math
import json
import array
from glob import glob

import threading
import concurrent.futures

import numpy as np
import torch
import torchvision
import torch.nn.functional as F
from argparse import ArgumentParser
from arguments import ModelParams, PipelineParams, OptimizationParams, ModelHiddenParams, get_combined_args
from utils.general_utils import safe_state
from scene import Scene
from gaussian_renderer import GaussianModel
from tqdm import tqdm

import dearpygui.dearpygui as dpg

from gaussian_renderer import render

def getProjectionMatrix(znear, zfar, fovX, fovY):
    """Build a 4x4 perspective projection matrix on the CPU.

    Uses the renderer's row-assignment convention (camera forward is +z,
    depth mapped into [0, 1] between znear and zfar).

    Args:
        znear: near clipping plane distance.
        zfar: far clipping plane distance.
        fovX: horizontal field of view in radians.
        fovY: vertical field of view in radians.

    Returns:
        A (4, 4) torch tensor holding the projection matrix.
    """
    tan_half_x = math.tan(fovX * 0.5)
    tan_half_y = math.tan(fovY * 0.5)
    depth_range = zfar - znear

    proj = torch.zeros(4, 4)
    proj[0, 0] = 1.0 / tan_half_x
    proj[1, 1] = 1.0 / tan_half_y
    proj[2, 2] = zfar / depth_range
    proj[2, 3] = -(zfar * znear) / depth_range
    proj[3, 2] = 1.0
    return proj

def getWorld2View2(R, t, translate=None, scale=1.0):
    """Build the 4x4 world-to-view matrix from rotation R and translation t,
    optionally recentering (`translate`) and rescaling (`scale`) the camera
    position, following the standard 3DGS convention.

    Bug fix: the original computed the translated/scaled camera-to-world
    matrix but then returned the untouched `Rt`, so `translate` and `scale`
    were silently ignored. We now invert the adjusted C2W back, which matches
    the upstream 3DGS helper; behavior is unchanged for the defaults
    (translate = (0, 0, 0), scale = 1.0).

    The original also built the default `translate` tensor on CUDA at import
    time (a mutable default that crashes module import on CPU-only machines);
    it is now created lazily inside the function.

    Args:
        R: (3, 3) rotation tensor (world-from-camera).
        t: (3,) translation tensor.
        translate: optional (3,) offset added to the camera center.
        scale: scalar applied to the (offset) camera center.

    Returns:
        A (4, 4) CUDA tensor holding the world-to-view transform.
    """
    if translate is None:
        translate = torch.tensor([0.0, 0.0, 0.0]).cuda()

    Rt = torch.zeros((4, 4)).cuda()
    Rt[:3, :3] = R.transpose(0, 1)
    Rt[:3, 3] = t
    Rt[3, 3] = 1.0

    # Adjust the camera center in camera-to-world space, then invert back so
    # the translate/scale actually affect the returned view matrix.
    C2W = Rt.inverse()
    cam_center = C2W[:3, 3]
    cam_center = (cam_center + translate) * scale
    C2W[:3, 3] = cam_center
    return C2W.inverse()

def safe_normalize(x: torch.Tensor, eps=1e-15):
    """Normalize `x` by its global L2 norm, guarding against division by zero.

    The epsilon is added to the norm (not clamped), so a zero vector maps to
    a zero vector instead of raising or producing NaNs.
    """
    norm = torch.sqrt(torch.sum(x * x))
    return x / (norm + eps)

def rotate_for_axis(vec, axis, angle):
    """Rotate `vec` about `axis` by `angle` degrees (Rodrigues' axis-angle
    rotation matrix, assuming `axis` is unit length).

    Improvements over the original: the loop-invariant `cos(theta)` /
    `sin(theta)` are computed once instead of ~20 times, and the rotation
    matrix is allocated on `vec`'s device instead of hard-coded `.cuda()`
    (a no-op for the existing CUDA callers, but CPU tensors now work too).

    Args:
        vec: (3,) tensor to rotate.
        axis: unit-length rotation axis, indexable as axis[0..2].
        angle: rotation angle in degrees.

    Returns:
        The rotated (3,) tensor on the same device as `vec`.
    """
    theta = angle * math.pi / 180.0
    c = math.cos(theta)
    s = math.sin(theta)
    one_c = 1.0 - c

    n_x, n_y, n_z = axis[0], axis[1], axis[2]

    rot_mat = torch.zeros(3, 3, device=vec.device)
    rot_mat[0, 0] = n_x * n_x * one_c + c
    rot_mat[0, 1] = n_x * n_y * one_c - n_z * s
    rot_mat[0, 2] = n_x * n_z * one_c + n_y * s
    rot_mat[1, 0] = n_x * n_y * one_c + n_z * s
    rot_mat[1, 1] = n_y * n_y * one_c + c
    rot_mat[1, 2] = n_y * n_z * one_c - n_x * s
    rot_mat[2, 0] = n_x * n_z * one_c - n_y * s
    rot_mat[2, 1] = n_y * n_z * one_c + n_x * s
    rot_mat[2, 2] = n_z * n_z * one_c + c

    return (rot_mat @ vec.unsqueeze(-1)).squeeze()


def look_at(campos, target):
    """Compute the camera rotation for a camera at `campos` aimed at `target`.

    Convention is NOT OpenGL: camera forward aligns with +z, so
    right = cross(up, forward) and the final up = cross(forward, right).
    The world reference "up" is hard-coded to -z.

    Improvement over the original: the reference up vector and the result are
    created on the inputs' device/dtype instead of hard-coded `.cuda()` calls
    (a no-op for the existing CUDA callers; CPU tensors now also work).

    Args:
        campos: (3,) camera position tensor.
        target: (3,) look-at target tensor (same device/dtype as campos).

    Returns:
        A (3, 3) rotation whose columns are (right, up, forward).
    """
    forward_vector = safe_normalize(target - campos)
    up_vector = torch.tensor([0.0, 0.0, -1.0],
                             dtype=forward_vector.dtype,
                             device=forward_vector.device)
    # It is not opengl, camera forward aligns with +z, (right_vector = cross(up_vector, forward_vector))
    right_vector = safe_normalize(torch.cross(up_vector, forward_vector))
    up_vector = safe_normalize(torch.cross(forward_vector, right_vector))
    return torch.stack([right_vector, up_vector, forward_vector], dim=1)

def multithread_gpu_to_cpu_to_list(data: torch.Tensor):
    """Copy a (potentially large) tensor to host memory and convert it to a
    nested Python list, transferring chunks of the first dimension on worker
    threads; output order matches input order.

    Improvements over the original: the executor is used as a context manager
    (equivalent to the explicit `shutdown()`, which waits by default), the
    redundant tail special-case is dropped (slicing past the end already
    truncates), and the manual while-index loop is a `range` loop.

    Args:
        data: tensor whose dim-0 is split into chunks of 100k rows.

    Returns:
        `data.tolist()`-equivalent nested list.
    """
    def gpu_to_cpu(chunk):
        return chunk.cpu().tolist()

    st = time.time()
    batchsize = 100000
    tasks = []
    # Shutdown-on-exit waits for all submitted transfers to finish.
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        for idx in range(0, data.shape[0], batchsize):
            tasks.append(executor.submit(gpu_to_cpu, data[idx:idx + batchsize]))
    print(f"multit 0: {time.time() - st}")

    datas = []
    for task in tasks:
        datas.extend(task.result())
    print(f"multit 1: {time.time() - st}")

    return datas

class MiniCamera:
    """Lightweight interactive camera holding the transforms the renderer needs.

    Matrices are stored in the transposed (row-vector) convention used
    elsewhere in this file: the camera position is read from row 3 of the
    inverse world-view transform, and the full projection is
    `world_view_transform @ projection_matrix`.
    """
    def __init__(self, width, height, world_view_transform, projection_matrix, time, \
                 znear, zfar, fovx, fovy, R, T):
        """Copy a training camera's intrinsics/extrinsics onto the GPU.

        R and T are expected as numpy arrays (converted via torch.from_numpy);
        the transform arguments as tensor-convertible array-likes.
        """
        self.image_width = width
        self.image_height = height
        # Timestamp passed to the dynamic renderer (slider-controlled later).
        self.time = time
        self.znear = znear
        self.zfar = zfar
        self.FoVx = fovx
        self.FoVy = fovy
        self.R = torch.from_numpy(R).float().cuda()
        self.T = torch.from_numpy(T).float().cuda()

        # It is not opengl, camera forward aligns with +z, (right_vector = cross(up_vector, forward_vector))
        self.right_vector = self.R[:,0]
        self.up_vector = self.R[:,1]
        self.forward_vector = self.R[:,2]

        self.world_view_transform = torch.tensor(world_view_transform).cuda()
        self.projection_matrix = torch.tensor(projection_matrix).cuda()
        self.full_proj_transform = self.world_view_transform @ self.projection_matrix
        # Row 3 of the camera-to-world matrix holds the camera position
        # (transposed-storage convention).
        self.camera_center = self.world_view_transform.inverse()[3, :3]

        # Accumulated rotation angles in degrees (see rotate_* below).
        self.yaw = 0
        self.pitch = 0
        self.roll = 0

    
    def move(self, dx, dy, dz):
        """Translate the camera by (dx, dy, dz) along its forward/right/up
        axes, then re-aim it at the world origin via look_at."""
        self.camera_center += safe_normalize(self.forward_vector) * dx
        self.camera_center += safe_normalize(self.right_vector) * dy
        self.camera_center += safe_normalize(self.up_vector) * dz

        # self.world_view_transform[3,:3] = -self.R.transpose(0,1) @ self.camera_center.unsqueeze(-1)
        # self.world_view_transform[3,:3] = (-self.R.transpose(0,1) @ self.camera_center.unsqueeze(-1)).squeeze()

        # NOTE(review): always re-targets the origin, so any prior yaw/pitch/
        # roll orientation is discarded after a move — confirm intended.
        self.R = look_at(self.camera_center, torch.zeros(3).cuda())
        self.world_view_transform[:3, :3] = self.R
        self.world_view_transform[3,:3] = (-self.R.transpose(0,1) @ self.camera_center.unsqueeze(-1)).squeeze()
        self.full_proj_transform = self.world_view_transform @ self.projection_matrix
    
    def RT_update_after_rotate(self):
        """Re-orthonormalize the (right, up, forward) frame and rebuild the
        view/projection transforms and the cached camera center."""
        self.forward_vector = safe_normalize(self.forward_vector)
        self.up_vector = safe_normalize(self.up_vector)
        self.right_vector = torch.cross(self.up_vector, self.forward_vector)

        self.R = torch.stack([self.right_vector, self.up_vector, self.forward_vector], dim=1)
        # self.world_view_transform = torch.tensor(getWorld2View2(self.R, self.T)).transpose(0, 1).cuda()
        self.world_view_transform[:3, :3] = self.R
        self.full_proj_transform = self.world_view_transform @ self.projection_matrix
        self.camera_center = self.world_view_transform.inverse()[3, :3]

    def rotate_yaw(self, dyaw):
        """Rotate around the camera's up axis by dyaw degrees (accumulated).

        NOTE(review): the ACCUMULATED self.yaw is applied to the
        already-rotated forward vector; for an incremental rotation one
        would expect only `dyaw` here — confirm intended behavior.
        """
        self.yaw = (self.yaw + dyaw) % 360.0
        self.forward_vector = rotate_for_axis(self.forward_vector, self.up_vector, self.yaw)
        self.right_vector = torch.cross(self.up_vector, self.forward_vector)
        self.RT_update_after_rotate()

    def rotate_pitch(self, dpitch):
        """Rotate around the camera's right axis by dpitch degrees.

        NOTE(review): same accumulated-angle pattern as rotate_yaw.
        """
        self.pitch = (self.pitch + dpitch) % 360.0
        self.forward_vector = rotate_for_axis(self.forward_vector, self.right_vector, self.pitch)
        self.up_vector = torch.cross(self.forward_vector, self.right_vector)
        self.RT_update_after_rotate()

    def rotate_roll(self, droll):
        """Rotate around the camera's forward axis by droll degrees.

        NOTE(review): same accumulated-angle pattern as rotate_yaw.
        """
        self.roll = (self.roll + droll) % 360.0
        self.up_vector = rotate_for_axis(self.up_vector, self.forward_vector, self.roll)
        self.right_vector = torch.cross(self.up_vector, self.forward_vector)
        self.RT_update_after_rotate()
        

class OrbitCamera(MiniCamera):
    """Camera orbiting a fixed center on a sphere, parameterized by
    (radius, phi, theta) in degrees; drives the main interactive view."""
    def __init__(self, width, height, world_view_transform, projection_matrix, time, \
                 znear, zfar, fovx, fovy, R, T):
        """Initialize like MiniCamera, then derive spherical coordinates from
        the initial camera center and snap the camera onto the orbit."""
        super(OrbitCamera, self).__init__(width, height, world_view_transform, projection_matrix, time, znear, zfar, fovx, fovy, R, T)

        # Orbit center; the mouse handlers only change radius/phi/theta.
        self.spherical_center = torch.tensor([0., 0., 0.]).cuda()

        # Placeholder values — immediately overwritten from camera_center below.
        self.radius = 2
        self.phi = 0
        self.theta = 0

        self.radius, self.phi, self.theta = OrbitCamera.cartesian_to_spherical_coord(*tuple(self.camera_center.cpu()))
    
        self.update_camera()

    def get_camera_center(self):
        """Return the camera position formatted as '[x, y, z]' for the GUI."""
        x, y, z = tuple(self.camera_center.cpu())
        return f'[{x:.2f}, {y:.2f}, {z:.2f}]'

    def update_camera(self):
        """Recompute camera_center and the view/projection transforms from
        the current (radius, phi, theta)."""
        # 180-degree offsets match the convention produced by
        # cartesian_to_spherical_coord + the drag handlers — the combination
        # keeps the initial pose consistent with the loaded camera.
        theta = self.theta - 180.0
        phi = self.phi - 180.0

        self.camera_center = torch.tensor(OrbitCamera.spherical_to_cartesian_coord(self.radius, phi, theta)).cuda() + self.spherical_center
        self.world_view_transform = torch.eye(4).float().cuda()
        self.world_view_transform[:3, :3] = look_at(self.camera_center, self.spherical_center)
        # Write the camera position into row 3 of C2W (transposed-storage
        # convention), then invert back to get the world-to-view transform.
        c2w = self.world_view_transform.inverse()
        c2w[3, :3] = self.camera_center
        self.world_view_transform = c2w.inverse()
        self.full_proj_transform = self.world_view_transform @ self.projection_matrix


    @staticmethod
    def spherical_to_cartesian_coord(radius, phi, theta):
        """Convert spherical (radius, phi, theta) in degrees to cartesian
        (x, y, z); physics convention with phi measured from +z."""
        phi = phi * math.pi / 180.0
        theta = theta * math.pi / 180.0
        x = radius * math.sin(phi) * math.cos(theta)
        y = radius * math.sin(phi) * math.sin(theta)
        z = radius * math.cos(phi)
        return x, y, z

    @staticmethod
    def cartesian_to_spherical_coord(x, y, z):
        """Convert cartesian (x, y, z) to spherical (radius, phi, theta) in
        degrees; phi in [0, 180], theta in [-180, 180].

        NOTE(review): divides by radius — raises ZeroDivisionError if the
        camera sits exactly at the origin.
        """
        radius = math.sqrt(x**2 + y**2 + z**2)
        phi = math.acos(z / radius)
        theta = math.atan2(y, x)
        phi = phi * 180.0 / math.pi
        theta = theta * 180.0 / math.pi
        return radius, phi, theta





class Viewer:
    """Dear PyGui viewer for a trained Gaussian-splatting scene.

    Renders a main orbit-camera view, optionally a second "contrast" model
    side-by-side, and ground-truth images for selected train/test cameras.
    NOTE: construction blocks inside gui_registry()'s render loop until the
    viewport is closed.
    """
    def __init__(self, args, dataset: ModelParams, hyperparam: ModelHiddenParams, iteration: int, pipeline: PipelineParams):
        self.dataset = dataset
        self.hyperparam = hyperparam
        self.iteration = iteration
        self.pipeline = pipeline

        # Load the main model; optionally a second model for comparison.
        self.exist_contrast_model = False
        self.gaussians = GaussianModel(dataset.sh_degree, hyperparam)
        self.gaussians.use_octree = args.model_use_octree
        self.scene = Scene(dataset, self.gaussians, load_iteration=iteration, shuffle=False)
        if dataset.contrast_model_path != "":
            self.exist_contrast_model = True
            self.contrast_gaussians = GaussianModel(dataset.sh_degree, hyperparam)
            self.contrast_gaussians.use_octree = args.contrast_model_use_octree
            # NOTE(review): dataset.model_path is mutated in place so the
            # contrast Scene loads from the contrast path — confirm nothing
            # later relies on the original model_path.
            dataset.model_path = dataset.contrast_model_path
            self.contrast_scene = Scene(dataset, self.contrast_gaussians, load_iteration=iteration, shuffle=False)
        bg_color = [1,1,1] if self.dataset.white_background else [0, 0, 0]
        self.background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")

        # Seed the interactive orbit camera from the first training camera.
        first_camera = self.scene.getTrainCameras()[0]
        
        self.camera = OrbitCamera(first_camera.image_width, first_camera.image_height, \
                                 first_camera.world_view_transform, first_camera.projection_matrix, \
                                 first_camera.time, \
                                 first_camera.znear, first_camera.zfar, first_camera.FoVx, first_camera.FoVy, \
                                 first_camera.R, first_camera.T)
        # Last train/test camera rendered through the Metric panel, if any.
        self.test_train_camera = None

        self.H = first_camera.image_height
        self.W = first_camera.image_width
        # Previous drag deltas; the orbit handlers apply only the increment.
        self.dphi = 0
        self.dtheta = 0

        # HxWx3 float numpy arrays fed to dpg raw textures each frame.
        self.render_img = None
        self.contrast_render_img = None
        self.gt_render_img = self.scene.getTrainCameras()[0].original_image.permute(1, 2, 0).contiguous().clamp(0,1).contiguous().detach().cpu().numpy()
        self.scaling_modifier = 1.0

        # One of "default" (free orbit), "train", or "test".
        self.rendering_camera_type = "default"
        self.current_train_camera_idx = 0
        self.current_test_camera_idx = 0

        self.default_save_path = "output/view_output"
        self.terminal_message = ""
        self.is_show_gt_window = False

        dpg.create_context()
        # Enters the GUI render loop; does not return until the window closes.
        self.gui_registry(self.H, self.W)

    def get_terminal_now_time(self):
        """Return the local time formatted as '[H:M:S]' for log messages.

        NOTE(review): fields are not zero-padded (e.g. '[9:5:3]');
        time.strftime('[%H:%M:%S]') would give fixed-width output.
        """
        localtime = time.localtime()
        time_show = '[' + str(localtime.tm_hour) + ':' + str(localtime.tm_min) + ':' + str(localtime.tm_sec) + ']'
        return time_show

    def update_texture(self, train_camera=None, test_camera=None):
        """Re-render the main (and optional contrast) image into the numpy
        buffers the GUI loop uploads to the dpg textures.

        With no arguments, renders from the free orbit camera; passing a
        train/test camera switches the camera type and also refreshes the
        ground-truth image. If both are given, test_camera wins.
        """
        camera = self.camera
        self.rendering_camera_type = "default"
        if train_camera is not None:
            self.rendering_camera_type = "train"
            self.test_train_camera = train_camera
            self.gt_render_img = train_camera.original_image.permute(1, 2, 0).contiguous().clamp(0,1).contiguous().detach().cpu().numpy()
            camera = train_camera
        if test_camera is not None:
            self.rendering_camera_type = "test"
            self.test_train_camera = test_camera
            self.gt_render_img = test_camera.original_image.permute(1, 2, 0).contiguous().clamp(0,1).contiguous().detach().cpu().numpy()
            camera = test_camera
        temp_render_img = render(camera, self.gaussians, self.pipeline, self.background, scaling_modifier=self.scaling_modifier)["render"]
        self.render_img = temp_render_img.permute(1, 2, 0).contiguous().clamp(0,1).contiguous().detach().cpu().numpy()
        if self.exist_contrast_model:
            temp_contrast_render_img = render(camera, self.contrast_gaussians, self.pipeline, self.background, scaling_modifier=self.scaling_modifier)["render"]
            self.contrast_render_img = temp_contrast_render_img.permute(1, 2, 0).contiguous().clamp(0,1).contiguous().detach().cpu().numpy()
        self.terminal_message = self.get_terminal_now_time() + " rendering successfully" + f" ({camera.time:.2f}s)"

    def save_metric(self, path, name):
        """Save a 2x2 comparison grid to <path>/<name>.png.

        Layout: top row = main render | contrast render (zeros if no contrast
        model); bottom row = ground truth twice (zeros for the free camera).
        """
        if self.rendering_camera_type == "default":
            render_img = render(self.camera, self.gaussians, self.pipeline, self.background, scaling_modifier=self.scaling_modifier)["render"]

            contrast_render_img = render(self.camera, self.contrast_gaussians, self.pipeline, self.background, scaling_modifier=self.scaling_modifier)["render"] \
                if self.exist_contrast_model else \
                torch.zeros_like(render_img).cuda()
        else:
            render_img = render(self.test_train_camera, self.gaussians, self.pipeline, self.background, scaling_modifier=self.scaling_modifier)["render"]
            contrast_render_img = render(self.test_train_camera, self.contrast_gaussians, self.pipeline, self.background, scaling_modifier=self.scaling_modifier)["render"] \
                if self.exist_contrast_model else \
                torch.zeros_like(render_img).cuda()
        
        if self.rendering_camera_type == "train":
            gt_img = self.scene.getTrainCameras()[self.current_train_camera_idx].original_image.cuda()
        elif self.rendering_camera_type == "test":
            gt_img = self.scene.getTestCameras()[self.current_test_camera_idx].original_image.cuda()
        else:
            gt_img = torch.zeros_like(render_img).cuda()

        print('saving name = ', name)
        # Concatenate along width (dim=2) then height (dim=1): CHW tensors.
        temp_up_img = torch.cat([render_img, contrast_render_img], dim=2)
        temp_down_img = torch.cat([gt_img, gt_img], dim=2)
        img = torch.cat([temp_up_img, temp_down_img], dim=1)
        torchvision.utils.save_image(img, os.path.join(path, name+'.png'))

    def gui_registry(self, H, W):
        """Build all dpg windows/widgets/handlers, then run the render loop.

        Blocks until the viewport is closed; each frame re-uploads the cached
        render buffers and refreshes the status texts.
        """
        st_time = time.time()
        self.update_texture()
        print(f'render time: {time.time() - st_time}')

        # Raw textures backing the three image windows.
        with dpg.texture_registry(show=False):
            dpg.add_raw_texture(width=W, height=H, default_value=self.render_img, format=dpg.mvFormat_Float_rgb, tag="_main_texture")
            if self.exist_contrast_model:
                dpg.add_raw_texture(width=W, height=H, default_value=self.contrast_render_img, format=dpg.mvFormat_Float_rgb, tag="_contrast_texture")
            dpg.add_raw_texture(width=W, height=H, default_value=self.gt_render_img, format=dpg.mvFormat_Float_rgb, tag="_gt_texture")





        # Shower window
        with dpg.window(label="contrast_shower", width=W, height=H, pos=[W//2,H//2], show=self.exist_contrast_model, no_scrollbar=True, tag="_contrast_shower_window"):
            if self.exist_contrast_model:
                dpg.add_image("_contrast_texture")

        with dpg.window(label="shower", width=W, height=H, pos=[0,0], no_scrollbar=True, tag="_shower_window"):
            dpg.add_image("_main_texture")

        # Hidden until a train/test camera is rendered.
        with dpg.window(label="ground_truth", width=W, height=H, pos=[W//2+W//4,H//2+H//4], show=False, no_scrollbar=True, tag="_gt_window"):
            dpg.add_image("_gt_texture")





        # Control window
        with dpg.window(label="control", width=400, height=500, pos=[W,0], tag="_control_window"):
            # Status panel: FPS, camera position/type, log line.
            with dpg.child_window(width=400, height=150, menubar=True):
                with dpg.menu_bar():
                    dpg.add_menu(label="Detail")
                dpg.add_text("FPS: 0", tag="_fps_text")
                dpg.add_text(f"camera pos: {self.camera.get_camera_center()}", tag="_camera_pos")
                dpg.add_text(f"camera type: {self.rendering_camera_type}", tag="_camera_type_message")
                
                with dpg.child_window(width=400, height=150):
                    dpg.add_text(self.terminal_message, tag="_terminal_message")

            def modify_time(sender, app_data):
                # Slider callback: scrub the dynamic scene's timestamp.
                self.camera.time = app_data
                self.update_texture()
            def modify_scaling_modifier(sender, app_data):
                # Slider callback: global Gaussian scale multiplier.
                self.scaling_modifier = app_data
                self.update_texture()

            with dpg.child_window(width=400, height=100, menubar=True):
                with dpg.menu_bar():
                    dpg.add_menu(label="Rendering")
                dpg.add_slider_float(label="time", min_value=0, max_value=self.scene.maxtime, callback=modify_time)
                dpg.add_slider_float(label="scaling modifier", min_value=0.1, max_value=1, default_value=1, callback=modify_scaling_modifier)
            
            with dpg.child_window(width=400, height=150, menubar=True):
                with dpg.menu_bar():
                    dpg.add_menu(label="Metric")
                
                def render_train(sender, app_data):
                    # Render the selected training camera and show its GT.
                    idx = dpg.get_value("_train_camera_render_index")
                    self.current_train_camera_idx = idx
                    self.update_texture(train_camera=self.scene.getTrainCameras()[idx])
                    dpg.configure_item("_gt_window", show=True)
                
                def render_test(sender, app_data):
                    # Render the selected test camera and show its GT.
                    idx = dpg.get_value("_test_camera_render_index")
                    self.current_test_camera_idx = idx
                    self.update_texture(test_camera=self.scene.getTestCameras()[idx])
                    dpg.configure_item("_gt_window", show=True)

                def saving_metric(sender, app_data):
                    # NOTE(review): any user-entered name other than "default"
                    # is replaced with the literal "myself", discarding the
                    # typed text — confirm this is intended.
                    name = dpg.get_value("_saving_name")
                    if name == "default":
                        name = self.rendering_camera_type + '_' + \
                            (str(self.current_train_camera_idx) if self.rendering_camera_type == "train" else \
                            str(self.current_test_camera_idx))
                    else:
                        name = "myself"
                    self.save_metric(self.default_save_path, name)
                    self.terminal_message = self.get_terminal_now_time() + " saving successfully"
                
                def clear_files(sender, app_data):
                    # Delete all saved PNGs in the output directory.
                    # NOTE(review): the success message overwrites any failure
                    # message set in the except branch.
                    png_files = glob(os.path.join(self.default_save_path, "*.png"))
                    for file_path in png_files:
                        try:
                            os.remove(file_path)
                        except Exception as e:
                            self.terminal_message = self.get_terminal_now_time() + f" clear failed because of {e}"
                    self.terminal_message = self.get_terminal_now_time() + " clear successfully"
                    

                with dpg.group(horizontal=True):
                    dpg.add_input_int(label=f"(0-{len(self.scene.getTrainCameras())})", width=100, min_value=0, max_value=len(self.scene.getTrainCameras()), tag="_train_camera_render_index")
                    dpg.add_button(label="render_train", callback=render_train)
                with dpg.group(horizontal=True):
                    dpg.add_input_int(label=f"(0-{len(self.scene.getTestCameras())})", width=100, min_value=0, max_value=len(self.scene.getTestCameras()), tag="_test_camera_render_index")
                    dpg.add_button(label="render_test", callback=render_test)
                dpg.add_input_text(label="saving name", default_value="default", tag="_saving_name")


                with dpg.theme(tag="_save_button_theme"):
                    with dpg.theme_component(dpg.mvButton):
                        dpg.add_theme_color(dpg.mvThemeCol_Button, (128, 0, 0))
                        dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (180, 100, 100))
                        dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (200, 0, 0))
                        dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)
                        dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)

                with dpg.group(horizontal=True):
                    dpg.add_button(label="save", width=100, height=30, callback=saving_metric)
                    dpg.bind_item_theme(dpg.last_item(), "_save_button_theme")
                    dpg.add_button(label="clear", width=100, height=30, callback=clear_files)
                    dpg.bind_item_theme(dpg.last_item(), "_save_button_theme")
        

        # Mouse handlers: left-drag orbits theta, right-drag orbits phi,
        # wheel zooms radius. The *_clicking handlers reset the per-drag
        # accumulated delta; the *_downing handlers apply only the increment
        # (drag events report total displacement since the drag started).

        def drag_main_view_with_orbit_theta_while_clicking(sender, app_data):
            if not dpg.is_item_focused("_shower_window"):
                return
            
            self.dtheta = 0

        def drag_main_view_with_orbit_theta_while_downing(sender, app_data):
            if not dpg.is_item_focused("_shower_window"):
                return
            
            # app_data[1] is the horizontal drag distance; scale and invert.
            app_data[1] = -app_data[1] / 6

            self.camera.theta = (self.camera.theta + app_data[1] - self.dtheta) % 360.0
            self.dtheta = app_data[1]
            
            dpg.set_value("_camera_pos", f"camera pos: {self.camera.get_camera_center()}")

            self.camera.update_camera()
            self.update_texture()

        def drag_main_view_with_orbit_phi_while_clicking(sender, app_data):
            if not dpg.is_item_focused("_shower_window"):
                return
            
            self.dphi = 0

        def drag_main_view_with_orbit_phi_while_downing(sender, app_data):
            if not dpg.is_item_focused("_shower_window"):
                return
            
            # app_data[2] is the vertical drag distance; scale it down.
            app_data[2] = app_data[2] / 6

            self.camera.phi = (self.camera.phi + app_data[2] - self.dphi) % 360.0
            self.dphi = app_data[2]
            
            dpg.set_value("_camera_pos", f"camera pos: {self.camera.get_camera_center()}")

            self.camera.update_camera()
            self.update_texture()

        def drag_main_view_with_orbit_radius(sender, app_data):
            if not dpg.is_item_focused("_shower_window"):
                return
            
            # Wheel up (positive app_data) zooms in by shrinking the radius.
            self.camera.radius = self.camera.radius - app_data / 8

            dpg.set_value("_camera_pos", f"camera pos: {self.camera.get_camera_center()}")

            self.camera.update_camera()
            self.update_texture()


        with dpg.handler_registry():
            dpg.add_mouse_click_handler(button=dpg.mvMouseButton_Left, callback=drag_main_view_with_orbit_theta_while_clicking)
            dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Left, callback=drag_main_view_with_orbit_theta_while_downing)
            dpg.add_mouse_click_handler(button=dpg.mvMouseButton_Right, callback=drag_main_view_with_orbit_phi_while_clicking)
            dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Right, callback=drag_main_view_with_orbit_phi_while_downing)
            dpg.add_mouse_wheel_handler(callback=drag_main_view_with_orbit_radius)

            # dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Left, callback=drag_main_view)
            # dpg.add_key_down_handler(callback=move_main_view)

        dpg.create_viewport(title='Custom Title', width=1920, height=1080, x_pos=0, y_pos=0)
        dpg.setup_dearpygui()
        dpg.show_viewport()

        # Manual render loop (instead of dpg.start_dearpygui) so the textures
        # and status texts can be refreshed every frame.
        while dpg.is_dearpygui_running():
            st_time = time.time()
            dpg.set_value("_main_texture", self.render_img)
            if self.exist_contrast_model:
                dpg.set_value("_contrast_texture", self.contrast_render_img)
            dpg.set_value("_gt_texture", self.gt_render_img)

            dpg.set_value("_camera_type_message", f"camera type: {self.rendering_camera_type}")
            dpg.set_value("_terminal_message", self.terminal_message)
            dpg.render_dearpygui_frame()
            # FPS from this frame's wall time (upload + render included).
            dpg.set_value("_fps_text", f"FPS: {1.0 / (time.time() - st_time):.1f}")

    def __del__(self):
        # Tear down the Dear PyGui context when the viewer is garbage-collected.
        dpg.destroy_context()

if __name__ == "__main__":
    # Assemble the command-line interface: the shared parameter groups
    # register their own options, followed by viewer-specific flags.
    arg_parser = ArgumentParser(description="Testing script parameters")
    model_group = ModelParams(arg_parser, sentinel=True)
    pipeline_group = PipelineParams(arg_parser)
    hidden_group = ModelHiddenParams(arg_parser)
    arg_parser.add_argument("--iteration", default=-1, type=int)
    arg_parser.add_argument("--configs", type=str)
    for flag in ("--quiet", "--model_use_octree", "--contrast_model_use_octree"):
        arg_parser.add_argument(flag, action="store_true")
    args = get_combined_args(arg_parser)

    if args.configs:
        # Optional config file: merge its hyperparameters over the CLI args.
        import mmcv
        from utils.params_utils import merge_hparams
        config = mmcv.Config.fromfile(args.configs)
        args = merge_hparams(args, config)

    # Initialize system state (RNG)
    safe_state(args.quiet)

    # Constructing the Viewer opens the GUI and blocks in its render loop.
    viewer = Viewer(args, model_group.extract(args), hidden_group.extract(args),
                    args.iteration, pipeline_group.extract(args))