#coding=utf8
from __future__ import division
from torch import nn
import torch
from torch.utils.data import TensorDataset, DataLoader
import torch.utils.data as torchdata
from torchvision import datasets,transforms
import os,time
import pandas as pd
import numpy as np
import torch.optim as optim
from torch.autograd import Variable
from torch.optim import lr_scheduler
from utils.train import train,trainlog
from sklearn.preprocessing import LabelEncoder
from torch.nn import CrossEntropyLoss
from twdata.twdataset import TWdata
from models.Resnext_1d import resnext20_type1
from models.densenet_1d import densenet119
from sklearn.model_selection import train_test_split
from models.LeNet import LeNet,MLPNet
import logging
from twdata.twaugment import Compose, AddNoise, RandomAmplitude, DownSample, FlowNormalize, \
                        AddAxis, CenterCrop, RandomShiftCrop, ShiftCrop
from collections import OrderedDict
import torch.nn.functional as F
from math import ceil
from sklearn.metrics import f1_score, classification_report
from utils.train import dt
import copy

def pred_unknown( model,test_pd,data_root,class_map,TWAugTest , bs,usecuda=1, num_classes=3):
    """Score every test sample and return its maximum softmax probability.

    The max class probability is typically used as a confidence value for
    flagging unknown / out-of-distribution samples (a low max probability
    suggests the sample belongs to none of the known classes).

    Args:
        model: trained torch module; switched to eval mode here.
        test_pd: pandas DataFrame indexing the test samples (one row each).
        data_root: root directory containing the raw sample files.
        class_map: class mapping passed through to TWdata as ``classes``.
        TWAugTest: zero-argument callable returning the test-time transform.
        bs: DataLoader batch size.
        usecuda: nonzero to move inputs onto the GPU.
        num_classes: number of model output classes. Defaults to 3, which
            was the original hard-coded value, so existing callers are
            unaffected.

    Returns:
        1-D numpy array of length ``test_pd.shape[0]`` holding each
        sample's maximum softmax score, in test_pd row order.
    """
    test_true = np.zeros(test_pd.shape[0], dtype=int)
    scores_pred = np.zeros((test_pd.shape[0], num_classes), dtype=np.float32)

    # Eval mode: freeze dropout / batch-norm running statistics.
    model.train(False)

    data_set = {}

    data_set['test'] = TWdata(index_pd=test_pd,
                              data_root=data_root,
                              classes=class_map,
                              transform=TWAugTest(),
                              )

    data_loader = {}
    # shuffle=False keeps batch order aligned with test_pd rows, so the
    # idx-based slicing below writes each score to the right row.
    data_loader['test'] = torchdata.DataLoader(data_set['test'], batch_size=bs, num_workers=4,
                                               shuffle=False, pin_memory=True)

    idx = 0
    for batch_cnt, data in enumerate(data_loader['test']):

        inputs, labels = data

        if usecuda:
            inputs = Variable(inputs.cuda())
        else:
            inputs = Variable(inputs)

        # Forward pass. Softmax explicitly over the class dimension:
        # the original implicit-dim F.softmax call is deprecated and
        # ambiguous for >2-D outputs.
        outputs = model(inputs)
        score = F.softmax(outputs, dim=1)

        scores_pred[idx:idx + score.size(0), :] = score.data.cpu().numpy()
        test_true[idx:idx + score.size(0)] = labels.numpy()
        idx = idx + score.size(0)

    # Confidence = highest class probability per sample.
    scores_pred_max = np.max(scores_pred, axis=1)

    return scores_pred_max

def add_samples(train_index,rawdata_root,select):
    """Append selected rows of the first-round training index to train_index.

    Reads ``first_train_index_20180131.csv`` from ``rawdata_root``, rewrites
    its ``id`` column into absolute paths under
    ``rawdata_root/first_train_data``, keeps only rows whose ``type`` is in
    ``select``, and concatenates them onto ``train_index``.

    Args:
        train_index: pandas DataFrame with at least ``id`` and ``type``
            columns (the current training index).
        rawdata_root: directory containing the extra index CSV and the
            ``first_train_data`` sample folder.
        select: container of ``type`` values to keep from the extra index.

    Returns:
        New DataFrame with the extra rows appended and a fresh 0..n-1 index.
    """
    train_data_root = os.path.join(rawdata_root, 'first_train_data')
    add_index = pd.read_csv(os.path.join(rawdata_root, 'first_train_index_20180131.csv'))
    add_index['id'] = add_index['id'].apply(lambda x: os.path.join(train_data_root, str(x)))
    # isin() replaces the redundant `True if x in select else False` lambda.
    add_index = add_index[add_index['type'].isin(select)]

    # Single-argument print works identically under Python 2 and 3
    # (the original `print 'ori:', ...` statements were py2-only).
    print('ori: %s' % (train_index.shape,))
    print('add: %s' % (add_index.shape,))
    train_index = pd.concat([train_index, add_index], axis=0, ignore_index=True)
    # ignore_index already renumbers; kept for byte-identical index dtype
    # with the original behaviour (plain range).
    train_index.index = range(train_index.shape[0])

    return train_index
