#coding=utf8
from __future__ import division
from torch import nn
import torch
from torch.utils.data import TensorDataset, DataLoader
import torch.utils.data as torchdata
from torchvision import datasets,transforms
import os,time
import pandas as pd
import numpy as np
import torch.optim as optim
from torch.autograd import Variable
from torch.optim import lr_scheduler
from utils.train import train,trainlog
from sklearn.preprocessing import LabelEncoder
from torch.nn import CrossEntropyLoss
from twdata.twdataset import TWdata
from models.Resnext_1d import resnext20_type1
from models.densenet_1d import densenet119
from sklearn.model_selection import train_test_split
from models.LeNet import LeNet,MLPNet
import logging
from twdata.twaugment import Compose, AddNoise, RandomAmplitude, DownSample, FlowNormalize, \
                        AddAxis, CenterCrop, RandomShiftCrop, ShiftCrop
from collections import OrderedDict
import torch.nn.functional as F
from math import ceil
from sklearn.metrics import f1_score, classification_report
from utils.train import dt
import copy

# Mapping from astronomical object class name to the integer label id used
# by the models ('unknown' is the catch-all 4th class).
class_to_idx_map = OrderedDict({'galaxy': 0,
             'qso': 1,
             'star': 2,
             'unknown': 3})

# Inverse mapping: integer label id back to the class name, used when
# decoding predictions for reports/output files.
idx_to_class_map = OrderedDict({0: 'galaxy',
             1: 'qso',
             2: 'star',
             3: 'unknown'})

def my_makedirs(dir1):
    """Create directory *dir1* (including parents) if missing; return dir1.

    Uses EAFP instead of the original exists()+makedirs() pair to avoid the
    race where another process creates the directory between the check and
    the call (which would raise OSError).
    """
    import errno  # local import keeps this edit self-contained

    try:
        os.makedirs(dir1)
    except OSError as e:
        # Already existing is fine; re-raise anything else (permissions, ...).
        if e.errno != errno.EEXIST:
            raise

    return dir1


def prepare_data_split(rawdata_root, test_size=0.1, random_state=42):
    """Read the competition index CSVs and build a stratified train/val split.

    Args:
        rawdata_root: directory containing the first_*_index_20180131.csv files.
        test_size: fraction of the labelled data held out for validation.
        random_state: seed for the reproducible stratified split.

    Returns:
        (index_train, index_val, test_index) DataFrames; test-set labels are
        unknown, so their 'type' column is set to a 0 placeholder.
    """
    train_csv = os.path.join(rawdata_root, 'first_train_index_20180131.csv')
    test_csv = os.path.join(rawdata_root, 'first_test_index_20180131.csv')
    train_index = pd.read_csv(train_csv)
    test_index = pd.read_csv(test_csv)

    # Encode class names as integer ids (KeyError on an unexpected class name,
    # same as the original lambda lookup).
    train_index['type'] = train_index['type'].apply(class_to_idx_map.__getitem__)
    test_index['type'] = 0

    # Stratify on the encoded labels so the class balance is preserved.
    index_train, index_val = train_test_split(
        train_index,
        test_size=test_size,
        random_state=random_state,
        stratify=train_index['type'],
    )
    return index_train, index_val, test_index



def oversample_by_num(index_train):
    """Balance classes by integer-factor repetition of minority-class rows.

    Each class c is repeated floor(max_count / count_c) times, so every class
    ends up with roughly as many rows as the majority class.

    Args:
        index_train: DataFrame with an integer 'type' label column.

    Returns:
        A new DataFrame (fresh index) with the oversampled rows, grouped by
        class label in ascending label order.
    """
    class_sample_count = index_train.type.value_counts()
    max_count = class_sample_count.max()

    pieces = []
    # Iterate the labels actually present (sorted), rather than assuming they
    # are exactly 0..k-1: a missing class id no longer raises KeyError.
    for label in sorted(class_sample_count.index):
        repeat = int(max_count // class_sample_count[label])
        subset = index_train[index_train.type == label]
        pieces.append(pd.concat([subset] * repeat, ignore_index=True))

    if not pieces:
        # Empty input -> empty frame, matching the original behavior.
        return pd.DataFrame()

    # Single concat at the end avoids the quadratic copying of repeatedly
    # concatenating inside the loop.
    return pd.concat(pieces, axis=0, ignore_index=True)


def pred_aug( model,test_pd,data_root,class_map,TWAugTest , bs,usecuda=1,num_crop=8,crop_size=0.8):
    """Test-time augmentation: average softmax scores over evenly-spaced crops.

    Runs the model on *num_crop* crops of length int(2600 * crop_size) whose
    start points span the sequence, then averages the per-crop class scores.
    NOTE(review): assumes the raw sequences are 2600 samples long — confirm
    against TWdata.

    Returns:
        (scores_pred, test_true): averaged scores of shape (len(test_pd), 4)
        and the integer labels read back from the dataset, in loader order.
    """
    n_samples = test_pd.shape[0]
    test_true = np.zeros(n_samples, dtype=int)
    scores_pred = np.zeros((num_crop, n_samples, 4), dtype=np.float32)
    crop_len = int(2600 * crop_size)
    # Evenly spaced crop start points from 0 up to the last valid offset.
    start_points = np.linspace(0, 2600 - crop_len, num_crop, dtype=int)

    model.train(False)  # evaluation mode
    for aug_cnt, crop_start in enumerate(start_points):
        dataset = TWdata(index_pd=test_pd,
                         data_root=data_root,
                         classes=class_map,
                         transform=TWAugTest(start_point=crop_start,
                                             end_point=crop_start + crop_len),
                         )
        loader = torchdata.DataLoader(dataset, batch_size=bs, num_workers=4,
                                      shuffle=False, pin_memory=True)

        idx = 0
        for batch_cnt, (inputs, labels) in enumerate(loader):
            print('aug %d, %d/%d' % (aug_cnt, batch_cnt, len(dataset) // bs))

            inputs = Variable(inputs.cuda()) if usecuda else Variable(inputs)

            # Forward pass; softmax turns logits into per-class scores.
            score = F.softmax(model(inputs))

            n = score.size(0)
            scores_pred[aug_cnt, idx:idx + n, :] = score.data.cpu().numpy()
            test_true[idx:idx + n] = labels.numpy()
            idx += n

    # Average over the crop axis -> (len(test_pd), 4)
    return scores_pred.mean(axis=0), test_true


def pred_single( model,test_pd,data_root,class_map,TWAugTest , bs,usecuda=1):
    """Single-pass prediction (no test-time augmentation).

    Builds one TWdata dataset with the default TWAugTest transform and runs
    the model over it once, collecting softmax scores.

    Returns:
        (scores_pred, test_true): scores of shape (len(test_pd), 4) and the
        integer labels read back from the dataset, in loader order.
    """
    n_samples = test_pd.shape[0]
    test_true = np.zeros(n_samples, dtype=int)
    scores_pred = np.zeros((n_samples, 4), dtype=np.float32)

    model.train(False)  # evaluation mode

    dataset = TWdata(index_pd=test_pd,
                     data_root=data_root,
                     classes=class_map,
                     transform=TWAugTest(),
                     )
    loader = torchdata.DataLoader(dataset, batch_size=bs, num_workers=4,
                                  shuffle=False, pin_memory=True)

    idx = 0
    for batch_cnt, (inputs, labels) in enumerate(loader):
        print('predicting test %d/%d' % (batch_cnt, len(dataset) // bs))

        inputs = Variable(inputs.cuda()) if usecuda else Variable(inputs)

        # Forward pass; softmax turns logits into per-class scores.
        score = F.softmax(model(inputs))

        n = score.size(0)
        scores_pred[idx:idx + n, :] = score.data.cpu().numpy()
        test_true[idx:idx + n] = labels.numpy()
        idx += n

    return scores_pred, test_true


def simple_train_npy(model, x_train, y_train, x_val, y_val,
                     start_epoch, epoch_num,
                     optimizer, criterion, exp_lr_scheduler,
                     bs_train=32*3, bs_val=32*3,val_inter=200):
    """Train *model* on in-memory numpy arrays, validating every *val_inter* steps.

    Args:
        model: network to train (inputs are moved to GPU via .cuda()).
        x_train, y_train, x_val, y_val: numpy feature / integer-label arrays.
        start_epoch, epoch_num: trains for epochs in range(start_epoch, epoch_num).
        optimizer: torch optimizer over the model's parameters.
        criterion: loss function (e.g. CrossEntropyLoss).
        exp_lr_scheduler: only queried for logging here — stepping it is
            left to the caller.
        bs_train, bs_val: train / validation batch sizes.
        val_inter: run a validation pass every this many training steps.

    Returns:
        (best_model_wts, best_acc): a deep copy of the state dict that achieved
        the best validation macro-F1, and that best macro-F1 value.
    """
    tensor_x_train = torch.from_numpy(x_train).float()
    tensor_y_train = torch.from_numpy(y_train).long()
    tensor_x_val = torch.from_numpy(x_val).float()
    tensor_y_val = torch.from_numpy(y_val).long()

    data_set = {}
    data_set['train'] = TensorDataset(tensor_x_train, tensor_y_train)
    data_set['val'] = TensorDataset(tensor_x_val, tensor_y_val)

    data_loader = {}
    data_loader['train'] = DataLoader(data_set['train'], bs_train, num_workers=4, shuffle=True, pin_memory=True)
    data_loader['val'] = DataLoader(data_set['val'], bs_val, num_workers=4, shuffle=False, pin_memory=True)

    step = -1
    # Deep-copy the initial weights so the fallback snapshot is not a live
    # view that later optimizer steps would silently overwrite.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(start_epoch, epoch_num):
        for batch_cnt, bc_data in enumerate(data_loader['train']):
            step += 1
            # BUGFIX: re-enable train mode every step. The validation phase
            # below switches the model to eval mode and the original code
            # never switched it back, so after the first validation (step 0)
            # all remaining training ran with dropout disabled and batchnorm
            # statistics frozen.
            model.train(True)

            pred_score, labels = bc_data
            pred_score = Variable(pred_score.cuda())
            labels = Variable(labels.cuda())

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward / backward / update
            outputs = model(pred_score)
            loss = criterion(outputs, labels)
            _, preds = torch.max(F.softmax(outputs), 1)

            loss.backward()
            optimizer.step()

            if step % val_inter == 0:
                model.train(False)  # eval mode for the validation pass

                val_loss = 0
                val_corrects = 0
                val_size = ceil(len(data_set['val']) / data_loader['val'].batch_size)
                # Pre-fill with 100 (an impossible class id) so any slot the
                # loop failed to write would stand out in the report.
                val_preds = np.zeros(len(data_set['val'])) + 100
                val_true = np.zeros(len(data_set['val'])) + 100

                idx = 0
                for batch_cnt_val, data_val in enumerate(data_loader['val']):
                    inputs, labels = data_val
                    inputs = Variable(inputs.cuda())
                    labels = Variable(labels.cuda())

                    # forward only — no gradient step in validation
                    outputs = model(inputs)
                    _, preds = torch.max(F.softmax(outputs), 1)
                    loss = criterion(outputs, labels)

                    # statistics
                    val_loss += loss.data[0]
                    batch_corrects = torch.sum((preds == labels)).data[0]

                    val_preds[idx:(idx + labels.size(0))] = preds.data.cpu().numpy()
                    val_true[idx:(idx + labels.size(0))] = labels.data.cpu().numpy()

                    val_corrects += batch_corrects
                    idx += labels.size(0)

                val_loss = val_loss / val_size
                val_f1 = f1_score(val_true, val_preds, average='macro')
                # list(...) keeps this valid whether keys() is a list (py2)
                # or a view (py3).
                val_report = classification_report(val_true, val_preds,
                                                   target_names=list(class_to_idx_map.keys()))
                unique, counts = np.unique(val_preds, return_counts=True)

                print('epoch-%d--------------' % epoch)
                print('current lr:%s' % exp_lr_scheduler.get_lr())
                print('%s' % val_report)
                print('pred unique: %s' % unique)
                print('pred count: %s' % counts)
                print('%s epoch[%d]-val-loss: %.4f ||val-f1@1 : %.5f'
                             % (dt(), epoch, val_loss, val_f1))
                print('--' * 30)

                # Keep the weights with the best validation macro-F1.
                if val_f1 > best_acc:
                    best_acc = val_f1
                    best_model_wts = copy.deepcopy(model.state_dict())
    return best_model_wts, best_acc





def simple_npy_predict(model, x_test, y_test, bs, usecuda=1):
    """Run *model* over numpy arrays and collect per-sample predictions.

    Args:
        model: trained network producing 4-class logits.
        x_test: numpy feature array, first axis is the sample axis.
        y_test: numpy integer label array (copied through for convenience).
        bs: batch size for the (non-shuffled) DataLoader.
        usecuda: when truthy, move inputs to GPU before the forward pass.

    Returns:
        (scores_pred, class_pred, test_true): softmax scores (N, 4), argmax
        class ids (N,), and the labels read back from the loader (N,), all
        in dataset order.
    """
    n_samples = x_test.shape[0]
    test_true = np.zeros(n_samples, dtype=int)
    class_pred = np.zeros(n_samples, dtype=int)
    scores_pred = np.zeros((n_samples, 4), dtype=np.float32)

    model.train(False)  # evaluation mode

    dataset = TensorDataset(torch.from_numpy(x_test).float(),
                            torch.from_numpy(y_test).long())
    loader = DataLoader(dataset, bs, num_workers=4, shuffle=False, pin_memory=True)

    offset = 0
    for batch in loader:
        inputs, batch_labels = batch

        inputs = Variable(inputs.cuda()) if usecuda else Variable(inputs)

        # Forward pass; softmax turns logits into per-class scores and the
        # argmax over classes gives the hard prediction.
        probs = F.softmax(model(inputs))
        _, top = torch.max(probs, 1)

        n = probs.size(0)
        scores_pred[offset:offset + n, :] = probs.data.cpu().numpy()
        class_pred[offset:offset + n] = top.data.cpu().numpy()
        test_true[offset:offset + n] = batch_labels.numpy()
        offset += n

    return scores_pred, class_pred, test_true



