# 1-------------------------------------------------------

import sys

import numpy as np
import torch
from transformers import AdamW, get_linear_schedule_with_warmup
from torch_ema import ExponentialMovingAverage

torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark=True

# 2-------------------------------------------------------

# Module-level history accumulators, shared by every train/test call in this
# run. Each entry records per-step metrics ([loss, accuracy] style); the
# train/test routines append to these in place.

list_loss          =[]
list_accuracy_Y        =[]
list_accuracy_location =[]

# 3-------------------------------------------------------

from model_one.param import get_param_one
from model_one.train import train_one
from model_siamese.param import get_param_siamese
from model_siamese.train import train_siamese
from myutils.model_save_load import model_save
from myutils.test_save import test_save



from tqdm import tqdm
def train(device,model_name,pre_model_name,
    epochs,batch_size,lr,print_margin_batch,save_margin_epoch,
    is_test,use_mydata,use_transformer,use_siamese,use_location,use_shape):
    """Run either evaluation only (``is_test=True``) or the full training loop.

    Args:
        device: ``torch.device`` the model and data are placed on.
        model_name: identifier used when saving checkpoints.
        pre_model_name: backbone name forwarded to ``get_param_one`` (e.g. 'resnet34').
        epochs: number of training epochs (unused when ``is_test`` is True).
        batch_size: mini-batch size.
        lr: learning rate for the Adadelta optimizer.
        print_margin_batch: progress-print interval (in batches) for training.
        save_margin_epoch: evaluate on the test set and save a checkpoint
            every this many epochs.
        is_test: if True, only evaluate the already-trained model and save
            the predictions via ``test_save``.
        use_mydata, use_transformer, use_siamese, use_location, use_shape:
            feature flags forwarded to ``get_param_one`` / ``get_param_siamese``.

    Side effects: metric histories accumulate in the module-level lists
    ``list_loss``, ``list_accuracy_Y``, ``list_accuracy_location``;
    checkpoints are written via ``model_save``.
    """

    if is_test and use_siamese:  # testing a model trained with the siamese network
        import warnings
        warnings.warn('test:正在使用孪生姊妹网络训练的模型')

    if is_test:  # evaluation only
        with torch.no_grad():
            (dataset_one, dataLoader_one, model_one, loss_one) = get_param_one(
                model_name, pre_model_name, batch_size, device, True,
                use_mydata, use_transformer, use_shape, use_location)
            model_one.eval()
            np_Y, np_Y_hat = train_one(
                dataLoader_one, model_one, loss_one, None, None, None,
                True, len(dataLoader_one), 'test', device,
                list_loss, list_accuracy_Y, list_accuracy_location)
            test_save(np_Y, np_Y_hat)

    else:  # training
        (dataset_one, dataLoader_one, model_one, loss_one) = get_param_one(
            model_name, pre_model_name, batch_size, device, False,
            use_mydata, use_transformer, use_shape, use_location)
        (_, dataLoader_one_test, _, _) = get_param_one(
            model_name, pre_model_name, batch_size, device, True,
            use_mydata, use_transformer, use_shape, use_location)
        if not use_mydata:
            # Extra test loader that forces use_mydata=True, so the model is
            # also evaluated on "my data" even when it trains on other data.
            (_, dataLoader_one_test_use_mydata, _, _) = get_param_one(
                model_name, pre_model_name, batch_size, device, True,
                True, use_transformer, use_shape, use_location)

        if use_siamese:  # wrap the base model/loss in the siamese network
            (_, dataLoader, model, loss, _) \
                = get_param_siamese(dataset_one, model_one, loss_one, batch_size, device)
            train_func = train_siamese
        else:  # plain single-branch training
            (_, dataLoader, model, loss) = (dataset_one, dataLoader_one, model_one, loss_one)
            train_func = train_one

        # NOTE(review): the optimizer is always built from model_one's
        # parameters even when the siamese wrapper is trained — this assumes
        # the wrapper shares model_one's weights; confirm in get_param_siamese.
        optimizer = torch.optim.Adadelta(model_one.parameters(), lr=lr, eps=1e-6)
        scheduler = None
        ema = None
        # Alternative schedules/EMA kept for reference (disabled):
        # scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer,lr/20,lr,60,40,'triangular',0.95,cycle_momentum=False)
        # ema = ExponentialMovingAverage(model_one.parameters(), decay=0.995)
        # scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=epochs*len(dataLoader)//4,
        #                                         num_training_steps=epochs*len(dataLoader))

        # Training loop: train one epoch, then periodically evaluate and checkpoint.
        for i in range(1, epochs + 1):
            model.train()
            epoch_info = f'{i}/{epochs}'
            train_func(dataLoader, model, loss, optimizer, scheduler, ema,
                       False, print_margin_batch, epoch_info, device,
                       list_loss, list_accuracy_Y, list_accuracy_location)
            if i % save_margin_epoch == 0:  # evaluate on the test set and save
                with torch.no_grad():
                    model_one.eval()
                    train_one(dataLoader_one_test, model_one, loss_one, None, None, None,
                              True, len(dataLoader_one_test), 'test', device,
                              list_loss, list_accuracy_Y, list_accuracy_location)

                model_save(model_one, model_name, len(dataLoader_one_test), list_accuracy_Y)

                if not use_mydata:
                    with torch.no_grad():
                        model_one.eval()
                        train_one(dataLoader_one_test_use_mydata, model_one, loss_one, None, None, None,
                                  True, len(dataLoader_one_test_use_mydata), 'test', device,
                                  list_loss, list_accuracy_Y, list_accuracy_location)


# 4-------------------------------------------------------


import platform
from myutils.config import parse,get_use



# 5-------------------------------------------------------



if __name__ == '__main__':

    # Hyperparameters come from hard-coded values on Windows (local debugging)
    # and from the command line (myutils.config.parse) on Linux (server runs).
    if platform.system().lower() == 'windows':
        device = torch.device('cuda')
        model_number       = 4
        pre_model_name     = 'resnet34'
        epochs             = 10
        batch_size         = 20
        lr                 = 1e-1
        print_margin_batch = 1
        save_margin_epoch  = 1
        is_test      = False
        use_mydata   = False

        # Echo the effective configuration, mirroring the Linux branch's
        # print(args.__dict__).
        args = {
            'model_number'      : model_number,
            'pre_model_name'    : pre_model_name,
            'epochs'            : epochs,
            'batch_size'        : batch_size,
            'lr'                : lr,
            'print_margin_batch': print_margin_batch,
            'save_margin_epoch' : save_margin_epoch,
            'is_test'           : is_test,
            'use_mydata'        : use_mydata,
        }
        print(args)

    elif platform.system().lower() == 'linux':
        device = torch.device('cuda', 0)
        args = parse()
        print(args.__dict__)
        model_number       = args.model_number
        pre_model_name     = args.pre_model
        epochs             = args.epochs
        batch_size         = args.batch_size
        lr                 = args.lr
        print_margin_batch = args.pm
        save_margin_epoch  = args.sm
        is_test      = args.is_test
        use_mydata   = args.use_mydata
    else:
        raise Exception('unknown os')  # fixed typo in the error message

    # model_number selects which feature flags this model variant uses.
    use_dict = get_use(model_number)
    print(f'model_number={model_number},use_dict={use_dict}')

    use_transformer = use_dict['use_transformer']
    use_siamese  = use_dict['use_siamese']
    use_location = use_dict['use_location']
    use_shape    = use_dict['use_shape']

    model_name = f"model{model_number}_{pre_model_name}"

    train(device, model_name, pre_model_name,
        epochs, batch_size, lr, print_margin_batch, save_margin_epoch,
        is_test, use_mydata, use_transformer, use_siamese, use_location, use_shape)
