# -*- coding: utf-8 -*-

"""
分析随机初始化和随机batch对结果的影响

2022/09/26
未解之谜：
    固定batch和初始化之后，在本地有相同的结果，云计算平台之后又出现莫名的偏差，我服了。
    目前分析是云端动态分配内存的影响。
2022/09/28
    破案了，是cudnn的原因。
"""

"""
Created on 03/23/2022
main.
@author: Kang Xiatao (kangxiatao@gmail.com)
"""
import torch

from models.model_base import ModelBase
from models.base.init_utils import weights_init
from configs import *
from utils.network_utils import get_network
from utils.data_utils import get_dataloader
from pruner.pruning import *
from train_test import *
import random


def init_seed(seed):
    """Seed every RNG and disable cudnn to maximize reproducibility.

    Args:
        seed (int): seed applied to Python's, NumPy's and torch's
            (CPU and all CUDA devices) random number generators.
    """
    # Bug fix: the original `torch.cuda.cudnn_enabled = False` only created
    # an unused attribute and cudnn stayed active — the real switch lives
    # under torch.backends.cudnn (and cudnn was the confirmed source of the
    # nondeterminism, per the module changelog).
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # no autotuner-picked kernels
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # seed every GPU, not just device 0


def main():
    """Run the experiment three times back to back to compare run-to-run variance.

    Flags in ``config.debug`` control what is held fixed across the runs:
      - 'batch': build the dataloaders once, with ``trainset_shuffle=False``,
        so every run iterates over identical batches.
      - 'init': save the first run's freshly initialized weights/masks to a
        checkpoint and reload them for runs 2 and 3, so every run starts
        from the same initialization.
    """
    config = init_config()
    logger, writer = init_logger(config)
    # init_seed(2022)

    # Flips to True once the first run's init checkpoint has been saved;
    # later iterations then take the load branch below.
    ori_init = False

    # ===== get dataloader =====
    trainloader, testloader = None, None
    if 'batch' in config.debug:
        # Shared, unshuffled loaders: all three runs see the same batch order.
        trainloader, testloader = get_dataloader(config.dataset, config.batch_size, 256, 4, root=config.dp, trainset_shuffle=False)

    # Run the experiment three times in a row.
    for i in range(3):
        state = None
        # ===== build/load model =====
        # if config.pretrained:
        if ori_init:
            # Reload the init checkpoint written by the first iteration.
            # state = torch.load(config.pretrained)
            path = os.path.join(config.checkpoint_dir, 'train_%s_init.pth.tar' % config.exp_name)
            state = torch.load(path)
            model = state['net']
            masks = state['mask']
            # config.send_mail_str += f"use pre-trained mode -> acc:{state['acc']} epoch:{state['epoch']}\n"
            # Restore the hyper-parameters saved alongside the checkpoint so
            # this run is configured exactly like the one that produced it.
            config.network = state['args'].network
            config.depth = state['args'].depth
            config.dataset = state['args'].dataset
            config.batch_size = state['args'].batch_size
            config.learning_rate = state['args'].learning_rate
            config.weight_decay = state['args'].weight_decay
            config.epoch = state['args'].epoch
            config.target_ratio = state['args'].target_ratio
            print('load model finish')
            print(state['args'])
        else:
            # Fresh network with this project's weight initialization scheme.
            model = get_network(config.network, config.depth, config.dataset, use_bn=config.get('use_bn', True))
            model.apply(weights_init)
            masks = None

        mb = ModelBase(config.network, config.depth, config.dataset, model)
        mb.cuda()

        if 'init' in config.debug:
            if not ori_init:
                # First run: snapshot the untrained weights/masks so the
                # remaining runs can start from the identical initialization.
                state = {
                    'net': mb.model,
                    'acc': 0,
                    'epoch': 0,
                    'args': config,
                    'mask': mb.masks,
                    # 'ratio': mb.get_ratio_at_each_layer()
                }
                path = os.path.join(config.checkpoint_dir, 'train_%s_init.pth.tar' % config.exp_name)
                torch.save(state, path)
                ori_init = True

        # ===== get dataloader =====
        if 'batch' not in config.debug:
            # Fresh (shuffled) loaders per run when batch order is not fixed.
            trainloader, testloader = get_dataloader(config.dataset, config.batch_size, 256, 4, root=config.dp)

        # Sanity print: batch statistics and the first conv/linear layer's
        # weight statistics, to eyeball whether runs really start identical.
        print('='*10)
        inputs, targets = fetch_data(trainloader, 10, 1, mode=1)
        # print(targets)
        print(torch.mean(inputs), torch.var(inputs))
        for idx, layer in enumerate(mb.model.modules()):
            if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
                print(torch.mean(layer.weight.data), torch.var(layer.weight.data))
                break

        # ===== train =====
        tr_str, print_inf = train_once(mb, trainloader, testloader, config, writer, logger, state, config.lr_mode, config.optim_mode)
        config.send_mail_str += print_inf
        config.send_mail_str += tr_str
        # if 'test' not in config.exp_name:
        #     QQmail = mail_log.MailLogs()
        #     QQmail.sendmail(config.send_mail_str, header=config.send_mail_head)


# Script entry point.
if __name__ == '__main__':
    main()
