#coding=utf8
from __future__ import division
from torch import nn
import torch
import torch.utils.data as torchdata
import os,time
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from torch.nn import CrossEntropyLoss
from twdata.twdataset import TWdata
from sklearn.model_selection import train_test_split
from models.LeNet import LeNet,MLPNet
import logging
from twdata.twaugment import Compose, AddNoise, RandomAmplitude, DownSample, FlowNormalize, \
                        AddAxis, CenterCrop, RandomShiftCrop

os.environ["CUDA_VISIBLE_DEVICES"] = "2"


class TWAug(object):
    """Preprocessing pipeline applied to every spectrum sample.

    For this resampling script only flux normalization is enabled; no
    noise/crop augmentation is applied so the dumped arrays stay
    deterministic.
    """

    def __init__(self):
        # Single-step pipeline: normalize the flux values of each spectrum.
        steps = [FlowNormalize()]
        self.augment = Compose(steps)

    def __call__(self, spct):
        """Apply the pipeline to one spectrum and return the result."""
        return self.augment(spct)


# ---------------------------------------------------------------------------
# Data preparation: load the spectra index CSVs, encode string class labels
# to integers, and carve out a stratified validation split.
# NOTE: this is a Python 2 script (print statements) and relies on
# era-specific pandas / sklearn / imblearn behavior.
# ---------------------------------------------------------------------------
rawdata_root = '/media/gserver/data/tianwen/rawdata'

# data prepare
train_data_root = os.path.join(rawdata_root, 'first_train_data')
test_data_root = os.path.join(rawdata_root, 'first_test_data')

# Index CSVs map sample 'id' -> class label 'type'.
train_index = pd.read_csv(os.path.join(rawdata_root, 'first_train_index_20180131.csv'))
test_index = pd.read_csv(os.path.join(rawdata_root, 'first_test_index_20180131.csv'))


# Encode string labels to contiguous integers 0..K-1 (LabelEncoder sorts the
# classes); test-set labels are unknown, so fill with a 0 placeholder.
le = LabelEncoder()
train_index['type'] = le.fit_transform(train_index['type'])
test_index['type'] = 0
print le.classes_

ids = train_index['id'].tolist()


# 10% stratified validation split; fixed seed for reproducibility.
index_train, index_val= train_test_split(train_index,
                                         test_size=0.1, random_state=42,
                                         stratify=train_index['type'])



# Inverse-frequency class weights (majority_count / class_count, truncated to
# int), printed for inspection only -- not used further in this script.
# NOTE(review): class_sample_count[x] is label-based Series indexing; this
# assumes every encoded label 0..K-1 appears in the training split -- confirm.
class_sample_count = index_train.type.value_counts()
weights =  [int(class_sample_count.max()/class_sample_count[x]) for x in range(len(class_sample_count))]
print weights


#
# Build the training Dataset/DataLoader.  shuffle=False is deliberate: the
# loader is only used below to dump every sample, in original order, into one
# big numpy array for SMOTE -- it is not used for SGD training here.
data_set = {}
data_set['train'] = TWdata(index_pd = index_train,
                           data_root=train_data_root,
                           classes = le.classes_,
                            transform=TWAug()
                           )


data_loader = {}
# Batch size 384 (128*3); 4 worker processes read the raw spectrum files.
data_loader['train'] = torchdata.DataLoader(data_set['train'], 128*3, num_workers=4,
                                            shuffle=False, pin_memory=True)



# Materialize the whole (transformed) training set into flat numpy arrays.
# Each sample is assumed to be a 1-D float vector of length 2600 after the
# TWAug pipeline -- TODO confirm against TWdata/FlowNormalize.
all_data = np.zeros((len(data_set['train']), 2600),dtype=np.float32)
# NOTE(review): np.int is deprecated/removed in modern numpy; this pins the
# script to an old numpy version.
all_label = np.zeros(len(data_set['train']),dtype=np.int)

# Copy batch by batch into the preallocated arrays; idx tracks the write
# offset so partial final batches are handled correctly.
idx=0
for batch_cnt, batch_data in enumerate(data_loader['train']):
    inputs, labels = batch_data

    inputs = inputs.numpy()
    labels = labels.numpy()

    all_data[idx : idx + inputs.shape[0]] = inputs
    all_label[idx : idx + inputs.shape[0]] = labels
#
    idx = idx + inputs.shape[0]
    # break
#
#
print all_data.shape

# Per-class frequency table.  NOTE(review): scipy.stats.itemfreq was removed
# in scipy 1.3 (use np.unique(..., return_counts=True) in newer code).
from scipy.stats import itemfreq
print itemfreq(all_label)

# NOTE(review): 'a' is never used afterwards -- looks like leftover debug code.
a= all_data[all_label==2]

# SMOTE oversampling: synthesize minority-class samples until every class
# matches the majority class count.  NOTE(review): fit_sample is the
# pre-0.4 imbalanced-learn API (renamed fit_resample later).
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=42,n_jobs=8)

all_data, all_label = sm.fit_sample(all_data, all_label)



# Keep only the (resampled) minority classes; label 2 is treated as the
# majority class throughout this script and its original raw files are
# re-used instead -- TODO confirm against le.classes_ printed above.
resampled_minority_data = all_data[all_label!=2]
resampled_minority_label = all_label[all_label!=2]


# save as txt
# Dump each resampled minority spectrum to its own .npy file, named by its
# row index within the resampled arrays.
save_dir = '/media/gserver/data/tianwen/rawdata/first_train_data_sm'
if not os.path.exists(save_dir):
    os.mkdir(save_dir)
for i in range(resampled_minority_label.shape[0]):
    print '%d/%d'%(i,resampled_minority_label.shape[0])
    file_name = '%s.npy'%(i)
    file_path = os.path.join(save_dir,file_name)
    np.save(file_path, resampled_minority_data[i])

# Rebuild an index CSV for the resampled set: synthetic minority rows point
# at the freshly written .npy files, while majority-class (type 2) rows keep
# their original raw .txt paths.  (The trailing .str[:] is a no-op slice.)
resampled_minority_csv = pd.DataFrame({'id': range(resampled_minority_label.shape[0]),
                                       'type': resampled_minority_label})
resampled_minority_csv['id'] = 'first_train_data_sm/'+resampled_minority_csv['id'].astype(str).str[:]+'.npy'
ori_majority_csv = index_train[index_train['type']==2].copy()
ori_majority_csv['id'] = 'first_train_data/'+ori_majority_csv['id'].astype(str).str[:]+'.txt'

resampled_csv = pd.concat([resampled_minority_csv, ori_majority_csv],axis=0,ignore_index=True)

# print resampled_csv
print resampled_csv['type'].value_counts()

# Persist the final index CSVs consumed by the downstream training code.
# Each frame's 'id' column is rewritten in place as a path relative to
# rawdata_root before writing.

def _prefix_ids(frame, folder, ext):
    # Turn raw numeric ids into relative file paths: "<folder><id><ext>".
    frame['id'] = folder + frame['id'].astype(str) + ext

# train: both the SMOTE-balanced index and the plain (un-resampled) one
resampled_csv.to_csv('/media/gserver/data/tianwen/rawdata/first_train_resample.csv', index=False)
_prefix_ids(index_train, 'first_train_data/', '.txt')
index_train.to_csv('/media/gserver/data/tianwen/rawdata/first_train.csv', index=False)

# val
_prefix_ids(index_val, 'first_train_data/', '.txt')
index_val.to_csv('/media/gserver/data/tianwen/rawdata/first_val.csv', index=False)

# test
_prefix_ids(test_index, 'first_test_data/', '.txt')
test_index.to_csv('/media/gserver/data/tianwen/rawdata/first_test.csv', index=False)