#coding=utf8
from __future__ import division
from torch import nn
import torch
import torch.utils.data as torchdata
from torchvision import datasets,transforms
import os,time
import pandas as pd
import numpy as np
import torch.optim as optim
from torch.autograd import Variable
from torch.optim import lr_scheduler
from utils.train import train,trainlog
from sklearn.preprocessing import LabelEncoder
from torch.nn import CrossEntropyLoss
from twdata.twdataset import TWdata
from models.Resnet_1d import resnet20_1d
from models.Resnext_1d import resnext20_type1
from models.densenet_1d import densenet119
from sklearn.model_selection import train_test_split
from models.xception_1d import xception
from models.LeNet import LeNet,MLPNet
import logging
from twdata.twaugment import Compose, AddNoise, RandomAmplitude, DownSample, FlowNormalize, \
                        AddAxis, CenterCrop, RandomShiftCrop,ShiftCrop

from utils.ensemble_tools import prepare_data_split,oversample_by_num, class_to_idx_map, \
                         pred_aug, pred_single, my_makedirs
from sklearn.model_selection import KFold, StratifiedKFold
from glob import glob


class TWAug(object):
    """Training-time augmentation pipeline: noise + amplitude jitter,
    then normalization and a channel axis."""

    def __init__(self):
        steps = [
            AddNoise(A=0.1),
            RandomAmplitude(l=0.9, h=1.1),
            # RandomShiftCrop(),  # intentionally disabled
            FlowNormalize(),
            AddAxis(),
        ]
        # Public attribute: the training script logs `.augment.transforms`.
        self.augment = Compose(steps)

    def __call__(self, spct):
        """Run the full augmentation chain on one sample."""
        return self.augment(spct)

class TWAugVal(object):
    """Validation-time transform: deterministic normalize + add channel axis
    (no random augmentation)."""

    def __init__(self):
        steps = [
            FlowNormalize(),
            AddAxis(),
        ]
        # Public attribute: the training script logs `.augment.transforms`.
        self.augment = Compose(steps)

    def __call__(self, spct):
        """Apply the deterministic pipeline to one sample."""
        return self.augment(spct)


class TWAugTest(object):
    """Test-time transform: identical to TWAugVal — normalize then add a
    channel axis, with no randomness."""

    def __init__(self):
        steps = [
            FlowNormalize(),
            AddAxis(),
        ]
        self.augment = Compose(steps)

    def __call__(self, spct):
        """Apply the deterministic pipeline to one sample."""
        return self.augment(spct)


# Pin the process to one physical GPU (it appears as cuda:0 in-process).
os.environ["CUDA_VISIBLE_DEVICES"] = "2"

# --- run configuration -------------------------------------------------------
n_splits = 5      # stratified CV folds for stage-1 stacking
epoch_num = 50    # epochs per fold
bs = 64*3         # batch size passed to train() / pred_single()
usecuda = 1       # run on GPU
save_inter = 5    # checkpoint interval -- forwarded to train()


# --- resume support ----------------------------------------------------------
k_start = 0       # first fold to train (folds before this are skipped)
start_epoch = 0   # epoch to resume from within the first trained fold
resume = None     # optional checkpoint path loaded for the first fold only



# --- data locations ----------------------------------------------------------
rawdata_root = '/media/gserver/data/tianwen/rawdata'
train_data_root = os.path.join(rawdata_root, 'first_train_data')
test_data_root = os.path.join(rawdata_root, 'first_test_data')

# Fixed random_state keeps the split reproducible across runs.
train_pd, val_pd, test_pd = prepare_data_split(rawdata_root, test_size=0.1, random_state=42)
# Reset to a 0..n-1 range index so the positional indices produced by
# StratifiedKFold can safely be used with .loc in the CV loop below.
train_pd.index = range(train_pd.shape[0])

save_dir = '/media/gserver/models/tianwen/stacking/xception_ori/'
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
logfile = '/media/gserver/models/tianwen/stacking/xception_ori/trainlog.log'
trainlog(logfile)


# k fold prepare

num_crop = 8  # NOTE(review): unused in this file -- possibly left over



# Seeded shuffled K-fold so fold membership is reproducible across runs.
kf = StratifiedKFold(n_splits=n_splits, random_state=37, shuffle=True)
k = 0  # running fold counter

# --- stage-1 cross-validation loop: one model per fold -----------------------
for train_index, test_index in kf.split(train_pd, y=train_pd['type']):
    # Out-of-fold buffers. Each fold fills only its own held-out rows; the
    # per-fold .npy files are summed after the loop to rebuild the complete
    # arrays (folds are disjoint, so the element-wise sum is exact).
    stage1_train_pred = np.zeros((train_pd.shape[0], 4), dtype=np.float32)
    # FIX: `np.int` was removed in NumPy 1.24; the builtin `int` yields the
    # identical platform-default integer dtype.
    stage1_train_true = np.zeros(train_pd.shape[0], dtype=int)

    # Resume support: skip folds already completed in a previous run.
    if k < k_start:
        k += 1
        continue

    logging.info('===' * 40)
    logging.info('fold-%d' % k)

    # .loc with KFold's positional indices is safe here only because
    # train_pd.index was reset to range(n) right after the data split.
    train_fold = train_pd.loc[train_index, :]
    test_fold = train_pd.loc[test_index, :]

    # Carve a small stratified validation split out of the training fold.
    train_fold_train, train_fold_val = train_test_split(train_fold,
                                         test_size=0.1, random_state=42,
                                         stratify=train_fold['type'])

    # Balance class frequencies in the training portion only (never in val).
    train_fold_train = oversample_by_num(train_fold_train)

    # Per-fold checkpoint directory.
    save_dir_k = os.path.join(save_dir, 'stage1-fold%d' % k)
    if not os.path.exists(save_dir_k):
        os.makedirs(save_dir_k)

    logging.info('train_fold_train label value_counts')
    logging.info(train_fold_train.type.value_counts())
    logging.info('train_fold_val label value_counts')
    logging.info(train_fold_val.type.value_counts())

    data_set = {}
    data_set['train'] = TWdata(index_pd=train_fold_train,
                               data_root=train_data_root,
                               classes=class_to_idx_map.keys(),
                               transform=TWAug(),
                               )

    data_set['val'] = TWdata(index_pd=train_fold_val,
                             data_root=train_data_root,
                             classes=class_to_idx_map.keys(),
                             transform=TWAugVal(),
                             )

    # Log the exact augmentation configuration for reproducibility.
    logging.info('train augment:')
    for item in data_set['train'].transform.augment.transforms:
        logging.info('  %s %s' % (item.__class__.__name__, item.__dict__))

    logging.info('val augment:')
    for item in data_set['val'].transform.augment.transforms:
        logging.info('  %s %s' % (item.__class__.__name__, item.__dict__))

    data_loader = {}
    data_loader['train'] = torchdata.DataLoader(data_set['train'], 32 * 3, num_workers=4,
                                                shuffle=True, pin_memory=True)
    data_loader['val'] = torchdata.DataLoader(data_set['val'], batch_size=128 * 3, num_workers=4,
                                              shuffle=False, pin_memory=True)

    # Fresh model for every fold; 4 output classes.
    model = xception(num_classes=4)

    model = torch.nn.DataParallel(model)
    if resume:
        logging.info('resuming finetune from %s' % resume)
        model.load_state_dict(torch.load(resume))
        resume = None  # only the first (resumed) fold loads a checkpoint

    model = model.cuda()

    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-5)
    criterion = CrossEntropyLoss()
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.5)

    best_acc, best_model_wts = train(model,
                                     epoch_num,
                                     bs,
                                     start_epoch,
                                     optimizer,
                                     criterion,
                                     exp_lr_scheduler,
                                     data_set,
                                     data_loader,
                                     usecuda,
                                     save_inter,
                                     save_dir_k,
                                     print_inter=400,
                                     val_inter=4000)

    start_epoch = 0  # subsequent folds always start from epoch 0

    # Predict this fold's held-out rows with the best weights found above.
    model.load_state_dict(best_model_wts)
    # NOTE(review): TWAugTest is passed as a *class* here while the datasets
    # above receive instances (TWAug()) -- verify that pred_single
    # instantiates it internally.
    test_fold_pred, test_fold_true = pred_single(model,
                                              test_fold,
                                              train_data_root,
                                              class_to_idx_map,
                                              TWAugTest,
                                              bs,
                                              usecuda=1)

    stage1_train_pred[test_index] = test_fold_pred
    stage1_train_true[test_index] = test_fold_true

    dir1 = my_makedirs(os.path.join(save_dir, 'stage1-train_pred'))
    save_path = os.path.join(dir1, 'train_pred-%s.npy' % k)
    np.save(save_path, stage1_train_pred)
    logging.info('saved stage1_train_pred to %s' % save_path)

    dir2 = my_makedirs(os.path.join(save_dir, 'stage1-train_true'))
    save_path = os.path.join(dir2, 'train_true-%s.npy' % k)
    np.save(save_path, stage1_train_true)
    logging.info('saved stage1_train_true to %s' % save_path)

    # Predict the external test set with this fold's model; the per-fold
    # files are averaged after the loop.
    test_pred_k, _ = pred_single(model,
                              test_pd,
                              test_data_root,
                              class_to_idx_map,
                              TWAugTest,
                              bs,
                              usecuda=1)

    dir3 = my_makedirs(os.path.join(save_dir, 'stage1-test'))
    save_path = os.path.join(dir3, 'test-%d.npy' % k)
    np.save(save_path, test_pred_k)
    logging.info('saved test_pred_k to %s' % save_path)

    k += 1


# --- merge per-fold out-of-fold predictions ----------------------------------
# Each fold's file is zero everywhere except its own held-out rows, so the
# element-wise sum over disjoint folds reconstructs the full OOF matrix.
stage1_train_pred = np.zeros((train_pd.shape[0], 4), dtype=np.float32)
# FIX: `np.int` was removed in NumPy 1.24; the builtin `int` yields the same
# platform-default integer dtype.
stage1_train_true = np.zeros(train_pd.shape[0], dtype=int)


for file_path in glob(os.path.join(save_dir, 'stage1-train_pred', '*.npy')):
    stage1_train_pred += np.load(file_path)

save_path = os.path.join(save_dir, 'stage1-train_pred.npy')
np.save(save_path, stage1_train_pred)
logging.info('saved stage1_train_pred to %s' % save_path)



# Rebuild the full out-of-fold label vector by summing the per-fold files
# (folds are disjoint: every non-fold entry is zero in each file).
true_files = glob(os.path.join(save_dir, 'stage1-train_true', '*.npy'))
for fold_file in true_files:
    stage1_train_true = stage1_train_true + np.load(fold_file)

save_path = os.path.join(save_dir, 'stage1-train_true.npy')
np.save(save_path, stage1_train_true)
logging.info('saved stage1_train_true to %s' % save_path)



# --- average the per-fold test-set predictions -------------------------------
stage1_test = np.zeros((test_pd.shape[0], 4), dtype=np.float32)
i = 0.0  # number of fold files found
for file_path in glob(os.path.join(save_dir, 'stage1-test', '*.npy')):
    stage1_test += np.load(file_path)
    i += 1.0

# FIX: the original did `test_pd = test_pd / i`, dividing the test DataFrame
# instead of the accumulated predictions, so the *un-averaged* sum was saved.
# Average the prediction matrix itself (guarding against no files found).
if i > 0:
    stage1_test /= i
save_path = os.path.join(save_dir, 'stage1-test.npy')
np.save(save_path, stage1_test)
logging.info('saved stage1_test to %s' % save_path)