import torch
from torch.utils.data import DataLoader
import os
import argparse
import torch_optimizer as optim
from tensorboardX import SummaryWriter
from utils.trainer import Trainer
import torch.nn as nn
from datasets.ad_ds import AD_Dataset, load_data
import os
from torch.utils.data.sampler import WeightedRandomSampler
from sklearn.model_selection import StratifiedKFold
import time
import copy
import warnings
import numpy as np

from models.loss.focal_loss import FocalLoss
from models.loss.contrastive_loss2 import Contrastive_Loss
import xgboost as xgb
from models.dnns import DNNs
import os
import torch
import moxing as mox
# import pickle
# import joblib
from tqdm import tqdm

warnings.filterwarnings("ignore")
if __name__ == "__main__":
    # Entry point: train per-fold XGBoost heads on top of frozen DNN features
    # and (optionally) sync the resulting artifacts back to OBS via moxing.
    # python -m visdom.server
    # mp.set_start_method('spawn')

    # Inputs have a fixed size, so let cuDNN autotune the fastest kernels.
    torch.backends.cudnn.benchmark = True
    parser = argparse.ArgumentParser()
    parser.add_argument("--epoch", type=int, default=150, help="epoch")
    parser.add_argument("--batch_size", type=int, default=128, help="batch size")
    parser.add_argument("--learning_rate", type=float, default=0.001, help="learning_rate")
    parser.add_argument("--log_path", type=str, default='log/tensorboard/',
                        help="log_path")
    parser.add_argument("--data_path", type=str, default='./train/train',
                        help="data_path")
    parser.add_argument("--label_path", type=str, default=r'./train/train_open.csv',
                        help="label_path")
    # The *_url / init_method / boot_file / log_file arguments are injected by
    # the ModelArts platform; they must remain accepted even where unused here.
    parser.add_argument("--data_url", type=str, default='',
                        help="data_url")
    parser.add_argument("--train_url", type=str, default='',
                        help="train_url")
    parser.add_argument("--log_url", type=str, default='',
                        help="log_url")
    parser.add_argument("--init_method", type=str, default='',
                        help="init_method")
    parser.add_argument("--save_name", type=str, default='xgb2',
                        help="save_name")
    parser.add_argument("--app_url", type=str, default='',
                        help="save_name")
    parser.add_argument("--boot_file", type=str, default='',
                        help="save_name")
    parser.add_argument("--log_file", type=str, default='',
                        help="save_name")
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    opt = parser.parse_args()
    print(str(opt))
    BATCH_SIZE = opt.batch_size
    EPOCH = opt.epoch
    learning_rate = opt.learning_rate
    # No --pretrain_weight_path argument is declared above, so this always
    # falls back to '' — getattr makes that explicit (the original membership
    # test on the Namespace was always False for the same reason).
    pretrain_w_path = getattr(opt, 'pretrain_weight_path', '')
    n_samples = BATCH_SIZE * 20

    # TensorBoard event writer; flush every 2 s so log syncing picks up events
    # quickly on the platform.
    writer = SummaryWriter(os.path.join(opt.log_path, opt.save_name), comment=opt.save_name,
                           flush_secs=2)
    save_path = os.path.join('save', opt.save_name)

    # On ModelArts the dataset lives in OBS; pull it down once if not cached
    # locally. (mox is already imported at module top.)
    if not os.path.exists(opt.data_path):
        mox.file.copy_parallel(opt.data_url, './train/')
        print('数据已加载')

    x, y = load_data(opt.data_path, opt.label_path)
    # Upstream parsing can leave NaN/Inf in the raw features; zero them out
    # before computing the normalization statistics.
    x = np.nan_to_num(x, nan=0.0, posinf=0, neginf=0)
    mean = np.mean(x, axis=0)
    std = np.std(x, axis=0)
    # Z-score normalize. Constant columns have std == 0, which implies every
    # deviation (x - mean) is exactly 0 there — dividing by 1 instead of 0
    # gives the identical result (0) without divide-by-zero warnings.
    x = (x - mean) / np.where(std == 0, 1.0, std)
    # Second pass kept as a safety net against any residual non-finite values.
    x = np.nan_to_num(x, nan=0.0, posinf=0, neginf=0)

    folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2021).split(x, y)

    # Per-fold bookkeeping lists. NOTE(review): nothing in this script appends
    # to them — presumably leftovers from the training variant of this file.
    loss_list = []

    max_auc_list = []
    chkp_list = []

    from models.dnn2 import DNN

    # Pre-trained per-fold DNN checkpoints: fetched from this OBS bucket into
    # pth_root_path on demand by the fold loop below.
    obs_root_path = r'obs://henry-ad-competition/save/V0052'
    pth_root_path = r'../save/dnn0_3layers'
    # One relative checkpoint path per CV fold (index == fold number); the
    # filenames encode epoch and validation F1 of the saved model.
    pth_list = [
        r'f0/ckpt_27_f1_0.603.pth',
        r'f1/ckpt_36_wf1_0.597.pth',
        r'f2/ckpt_36_f1_0.618.pth',
        r'f3/ckpt_55_f1_0.646.pth',
        r'f4/ckpt_23_wf1_0.599.pth'
    ]
    for fold, (trn_idx, val_idx) in enumerate(folds):
        # Resolve this fold's pre-trained DNN checkpoint, fetching it from
        # OBS when it is not cached locally.
        pth_name = pth_list[fold]
        pth_list[fold] = os.path.join(pth_root_path, pth_name)
        if not os.path.exists(pth_list[fold]):
            # mox.file.copy does not create the local target directory tree.
            os.makedirs(os.path.dirname(pth_list[fold]), exist_ok=True)
            obs_path = os.path.join(obs_root_path, pth_name)
            print('copying', obs_path)
            mox.file.copy(obs_path, pth_list[fold])

        model = DNN(28169, 4096, 512, 3, dropout_p=0.4, all=False)
        # map_location keeps GPU-saved checkpoints loadable on CPU-only hosts.
        model.load_state_dict(torch.load(pth_list[fold], map_location='cpu'))
        model = nn.DataParallel(model)
        model = model.to(device)
        # Feature extraction must run in eval mode: with dropout_p=0.4 the
        # train-mode forward would randomly zero activations and corrupt the
        # features fed to the XGBoost heads.
        model.eval()

        # NOTE(review): only the held-out fold (val_idx) is used to fit the
        # boosters — presumably an OOF/stacking scheme; trn_idx is unused here.
        val_data = AD_Dataset(x, y, val_idx, device)
        val_data_loader = DataLoader(val_data, batch_size=BATCH_SIZE, shuffle=False,
                                     num_workers=8, pin_memory=True)

        # Collect the two intermediate representations emitted by the DNN.
        xgb_input1 = []
        xgb_input2 = []
        xgb_labels = []
        for batch in tqdm(val_data_loader):
            inputs, labels, idx = batch
            inputs = inputs.to(device)
            with torch.no_grad():
                x1, x2 = model(inputs)
            xgb_input1.append(x1.cpu().numpy())
            xgb_input2.append(x2.cpu().numpy())
            xgb_labels.append(labels.numpy())
        xgb_input1 = np.concatenate(xgb_input1, axis=0)
        xgb_input2 = np.concatenate(xgb_input2, axis=0)
        xgb_labels = np.concatenate(xgb_labels, axis=0)

        # One shared booster configuration for both feature sets.
        # NOTE(review): `min_child_samples` is a LightGBM parameter name —
        # XGBoost's equivalent is `min_child_weight` — so this kwarg is most
        # likely ignored by XGBClassifier. Kept as-is to preserve behavior;
        # confirm intent before renaming.
        xgb_params = dict(
            max_depth=6, learning_rate=0.05, n_estimators=4000,
            objective='binary:logistic', tree_method='gpu_hist',
            subsample=0.8, colsample_bytree=0.8,
            min_child_samples=3, eval_metric='auc', reg_lambda=0.5,
        )
        xgb1 = xgb.XGBClassifier(**xgb_params)
        xgb2 = xgb.XGBClassifier(**xgb_params)
        xgb1.fit(xgb_input1, xgb_labels)
        xgb2.fit(xgb_input2, xgb_labels)

        os.makedirs(save_path, exist_ok=True)
        model_save_path = os.path.join(save_path, 'xgboost_%d_1.json' % fold)
        xgb1.save_model(model_save_path)
        model_save_path = os.path.join(save_path, 'xgboost_%d_2.json' % fold)
        xgb2.save_model(model_save_path)

    if opt.train_url != '':
        # Push the trained artifacts (normalization stats + XGB models) back
        # to OBS. ModelArts passes s3:// URLs; moxing expects the obs://
        # scheme, hence the prefix rewrite. (mox already imported at top.)
        train_url = 'obs:' + opt.train_url.replace('s3:', '')
        print('Start to save model to', train_url, 'from', save_path)
        # Persist the normalization statistics so inference can reproduce the
        # exact (x - mean) / std transform used here.
        np.save('./mean.npy', mean)
        np.save('./std.npy', std)
        mox.file.copy('./mean.npy', train_url + '/mean.npy')
        mox.file.copy('./std.npy', train_url + '/std.npy')
        mox.file.copy_parallel(save_path, train_url)
