import os
import pickle
import time
import dnnlib
import legacy

import click
import paramiko
import torch
from torch.utils.tensorboard import SummaryWriter

from metrics import metric_main


class LinuxFile:
    """Thin SFTP wrapper for copying files to and from a remote Linux host."""

    def __init__(self, ip, port, username, password):
        """Open an SFTP session to ``ip:port`` using password authentication.

        Propagates whatever paramiko raises on connection or auth failure.
        (The original wrapped this in ``try: ... except Exception as e: raise e``,
        which is a no-op and has been removed.)
        """
        self.ip = ip
        self.port = port
        self.username = username
        self.password = password
        # paramiko.Transport expects a (host, port) tuple with explicit types.
        self.transport = paramiko.Transport((str(self.ip), int(self.port)))
        self.transport.connect(username=self.username, password=self.password)
        self.sftp = paramiko.SFTPClient.from_transport(self.transport)

    def up_file(self, localhost_file, server_file):
        """Upload a local file to the server.

        :param localhost_file: path of the local source file
        :param server_file: destination path on the server
        """
        self.sftp.put(localhost_file, server_file)

    def down_file(self, localhost_file, server_file):
        """Download a file from the server to the local machine.

        NOTE(review): despite the parameter names, the FIRST argument is the
        REMOTE path and the SECOND is the LOCAL destination — paramiko's
        ``SFTPClient.get(remotepath, localpath)``. The call sites in this file
        rely on that order, so the argument order is kept as-is and only the
        documentation is corrected.

        :param localhost_file: path of the file on the server (remote source)
        :param server_file: local destination path
        """
        self.sftp.get(localhost_file, server_file)

    def close(self):
        """Close the SSH transport (and with it the SFTP session)."""
        self.transport.close()


# Seconds to wait between polls of the training run's snapshot log.
sleep_time = 60*3
# Best FID observed so far, and the nimg (images shown) at which it occurred.
best_fid = 1e10
best_nimg = -1
# Line count of snapshot.txt at the previous poll; growth signals a new snapshot.
pre_length = 0
update = False
# Metric key passed to metric_main.calc_metric and read back from the results.
metric = 'fid50k_full'

# SECURITY NOTE(review): SSH credentials are hard-coded in source. Move them to
# environment variables or a config file kept out of version control.
ip = '202.38.247.163'
port = 2013
username = 'cike'
password = 'asdfjkl;'


def check_update(localhost, ssh, run_dir):
    """Return True iff the training run has produced a new snapshot.

    The trainer appends one line to ``<run_dir>/snapshot.txt`` per snapshot,
    so a growth in the file's line count (tracked in the module-level
    ``pre_length``) means a new checkpoint is ready for evaluation.

    :param localhost: if True, first refresh snapshot.txt from the remote
        host via ``ssh`` (same path on both ends)
    :param ssh: LinuxFile instance; only used when ``localhost`` is True
    :param run_dir: directory holding snapshot.txt
    :return: True when a new snapshot is available, else False
    """
    if localhost:
        ssh.down_file(f'{run_dir}/snapshot.txt', f'{run_dir}/snapshot.txt')

    try:
        with open(f'{run_dir}/snapshot.txt', 'r') as log:
            length = len(log.readlines())
    except FileNotFoundError:
        # Robustness fix: on the very first poll the trainer may not have
        # written snapshot.txt yet — report "nothing new" instead of crashing
        # the polling loop.
        return False

    global pre_length
    # A longer log than last time means at least one new snapshot was written.
    if length > pre_length:
        pre_length = length
        return True

    return False

@click.command()
@click.option('--run_dir', help='Where the snapshot from', required=True, metavar='DIR')
@click.option('--eval_on', help='Eval G on which set', required=True, metavar='DIR')
@click.option('--localhost', help='Do eval on PC', is_flag=True)
def main(localhost, run_dir, eval_on):
    """Poll a training run for new snapshots, compute FID, and keep the best one.

    Loops forever: whenever check_update() reports a new snapshot, loads
    ``<run_dir>/network-snapshot-tmp.pkl``, evaluates the configured metric on
    the ``eval_on`` dataset, logs the FID to TensorBoard, and renames the
    snapshot to ``network-snapshot-best.pkl`` when it beats the best so far.

    :param localhost: run on this PC, fetching snapshots over SFTP
    :param run_dir: directory containing (or receiving) snapshot files
    :param eval_on: dataset path to evaluate the generator against
    """
    global best_fid
    global best_nimg
    ssh = None

    if localhost:
        ssh = LinuxFile(ip, port, username, password)

        if not os.path.exists(run_dir):
            os.makedirs(run_dir)

    writer = SummaryWriter(run_dir)

    while True:
        if not check_update(localhost, ssh, run_dir):
            time.sleep(sleep_time)
            continue

        if localhost:
            # Pull the fresh snapshot straight into the -tmp name
            # (remote source first, local destination second).
            ssh.down_file(f'{run_dir}/network-snapshot.pkl', f'{run_dir}/network-snapshot-tmp.pkl')
        else:
            # BUGFIX: this rename must only happen for locally-produced
            # snapshots. In --localhost mode the file is downloaded directly
            # under the -tmp name and '<run_dir>/network-snapshot.pkl' never
            # exists locally, so the previously unconditional rename raised
            # FileNotFoundError. The rename also prevents a snapshot written
            # during evaluation from being promoted to "best" by mistake.
            os.rename(f'{run_dir}/network-snapshot.pkl', f'{run_dir}/network-snapshot-tmp.pkl')

        with dnnlib.util.open_url(f'{run_dir}/network-snapshot-tmp.pkl') as f:
            snapshot_data = legacy.load_network_pkl(f)
            G_ema = snapshot_data['G_ema'].requires_grad_(False).cuda()  # type: ignore
            training_set_kwargs = snapshot_data['training_set_kwargs']
            # Evaluate against the requested dataset rather than the training path.
            training_set_kwargs['path'] = eval_on
            nimg = snapshot_data['nimg']
            del snapshot_data  # drop other pickled modules before the slow metric pass

        # Single-GPU evaluation of the configured metric.
        result_dict = metric_main.calc_metric(metric=metric, G=G_ema,
            dataset_kwargs=training_set_kwargs, num_gpus=1, rank=0, device=torch.device('cuda'))

        print(result_dict)
        fid = result_dict.results.fid50k_full
        total_time = result_dict.total_time_str

        if fid < best_fid:
            best_fid = fid
            best_nimg = nimg

            # Promote this snapshot to the "best" slot.
            os.rename(f'{run_dir}/network-snapshot-tmp.pkl', f'{run_dir}/network-snapshot-best.pkl')

        print(f'best_fid: {best_fid:.3f} at {best_nimg/1e3:.3f}k | cur: {fid:.3f} at {nimg/1e3:.3f}k | time: {total_time}')
        writer.add_scalar('fid', fid, nimg)

if __name__ == '__main__':
    main()
