# coding=utf8
from __future__ import division
import os
import torch
import torch.utils.data as data
import numpy as np
import pandas as pd
from twdata.twaugment import Compose, AddNoise, RandomAmplitude, DownSample, FlowNormalize, \
                        AddAxis, CenterCrop, RandomShiftCrop

# Module-level side effect: pins all CUDA work in any importing process to
# GPU 0. NOTE(review): consider moving this into the training entry point so
# merely importing this data module doesn't constrain device selection.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"


class TWdata(data.Dataset):
    """Spectrum dataset: one comma-separated ``<id>.txt`` file per sample.

    Args:
        index_pd: DataFrame with an 'id' column (file stem, no extension)
            and a 'type' column (integer label).
        data_root: directory containing the ``<id>.txt`` spectrum files.
        classes: sequence of class names (stored for reference).
        transform: optional callable applied to the raw numpy spectrum.
    """

    def __init__(self, index_pd, data_root, classes, transform=None):
        self.data_root = data_root
        self.classes = classes
        self.transform = transform
        self.ids = index_pd['id'].tolist()
        self.labels = index_pd['type'].tolist()

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, item):
        sample_id = self.ids[item]
        target = self.labels[item]
        path = os.path.join(self.data_root, '%s.txt' % sample_id)

        spectrum = np.loadtxt(path, delimiter=',')  # raw shape (2600,)

        if self.transform is not None:
            spectrum = self.transform(spectrum)  # e.g. (C, 2600) after AddAxis

        return torch.from_numpy(spectrum).float(), target


class TWdata2(data.Dataset):
    """Spectrum dataset where 'id' is a full file name (``.txt`` or binary).

    ``.txt`` files are parsed as comma-separated text; anything else is
    loaded with ``np.load`` (e.g. ``.npy``).

    Args:
        index_pd: DataFrame with 'id' (file name incl. extension) and
            'type' (integer label) columns.
        data_root: directory containing the spectrum files.
        classes: sequence of class names (stored for reference).
        transform: optional callable applied to the raw numpy spectrum.
    """

    def __init__(self, index_pd, data_root, classes, transform=None):
        self.ids = index_pd['id'].tolist()
        self.labels = index_pd['type'].tolist()
        self.data_root = data_root
        self.classes = classes
        self.transform = transform

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, item):
        sample_id = self.ids[item]  # renamed: `id` shadowed the builtin
        label = self.labels[item]

        data_path = os.path.join(self.data_root, sample_id)

        # os.path.splitext only looks at the final extension; the previous
        # `data_path.split('.')[-1]` returned the whole path for dotless
        # names and misread paths with dots in directory components.
        if os.path.splitext(data_path)[1] == '.txt':
            spct = np.loadtxt(data_path, delimiter=',')  # (2600,)
        else:
            spct = np.load(data_path)

        if self.transform is not None:
            spct = self.transform(spct)  # (C, 2600)

        return torch.from_numpy(spct).float(), label



class TWdata3(data.Dataset):
    """Class-balanced dataset: every item yields one sample per class.

    Index ``item`` walks deterministically over the minority class; for
    every other class a sample is drawn uniformly at random, so each
    "item" is a balanced group of ``len(classes)`` spectra. Epoch length
    equals the minority-class sample count. Intended for use with
    ``collate_fn`` below, which flattens the per-item lists.

    Args:
        index_pd: DataFrame with 'id' (file name incl. extension) and
            'type' (integer label) columns.
        data_root: directory containing the spectrum files.
        classes: sequence of class names; its length fixes the group size.
        minority: integer label of the minority class (default 1).
        transform: optional callable applied to each raw numpy spectrum.
    """

    def __init__(self, index_pd, data_root, classes, minority=1, transform=None):
        self.labels = index_pd['type'].tolist()
        self.data_root = data_root
        self.classes = classes
        self.transform = transform
        self.minority = minority

        # Per-class id lists keyed by integer label. (The original code
        # first stored the flat id list in self.ids and then immediately
        # overwrote it with this dict — the flat assignment was dead code.)
        self.ids = {i: index_pd[index_pd['type'] == i]['id'].tolist()
                    for i in range(len(classes))}

    def __len__(self):
        # One epoch = one full pass over the minority-class samples.
        return sum(1 for label in self.labels if label == self.minority)

    def __getitem__(self, item):
        spcts = []
        labels = []
        for cls in range(len(self.classes)):
            if cls == self.minority:
                choice = item  # deterministic walk over minority samples
            else:
                choice = np.random.randint(len(self.ids[cls]))

            sample_id = self.ids[cls][choice]
            data_path = os.path.join(self.data_root, sample_id)

            # splitext is robust to dots elsewhere in the path, unlike
            # the previous split('.')[-1] check.
            if os.path.splitext(data_path)[1] == '.txt':
                spct = np.loadtxt(data_path, delimiter=',')  # (2600,)
            else:
                spct = np.load(data_path)

            if self.transform is not None:
                spct = self.transform(spct)  # (C, 2600)

            spcts.append(torch.from_numpy(spct).float())
            labels.append(cls)

        return spcts, labels


class TWdata_smooth(data.Dataset):
    """TWdata variant that label-smooths one designated class.

    Samples of ``smooth_class`` get a soft target: ``1 - smoothing`` on
    their own class and ``smoothing / (n_classes - 1)`` on every other
    class. All other samples get a plain one-hot target.

    Args:
        index_pd: DataFrame with 'id' (file stem) and 'type' (int label).
        data_root: directory containing the ``<id>.txt`` spectrum files.
        classes: sequence of class names; its length is the target size.
        transform: optional callable applied to the raw numpy spectrum.
        smooth_class: integer label whose targets are smoothed (default 3,
            matching the originally hard-coded class).
        smoothing: total probability mass moved off the true class
            (default 0.4; with 4 classes this reproduces the original
            0.6 / 0.4-over-3 split exactly — the old code's ``0.4/3``
            denominator only summed to 1 for exactly 4 classes).
    """

    def __init__(self, index_pd, data_root, classes, transform=None,
                 smooth_class=3, smoothing=0.4):
        self.ids = index_pd['id'].tolist()
        self.labels = index_pd['type'].tolist()
        self.data_root = data_root
        self.classes = classes
        self.transform = transform
        self.smooth_class = smooth_class
        self.smoothing = smoothing

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, item):
        sample_id = self.ids[item]
        label = self.labels[item]
        data_path = os.path.join(self.data_root, '%s.txt' % sample_id)

        spct = np.loadtxt(data_path, delimiter=',')  # (2600,)

        if self.transform is not None:
            spct = self.transform(spct)  # (C, 2600)

        n = len(self.classes)
        if label == self.smooth_class:
            # Spread the smoothing mass uniformly over the other classes
            # so the target always sums to 1 regardless of n.
            label_oh = np.full(n, self.smoothing / (n - 1), dtype=np.float32)
            label_oh[label] = 1.0 - self.smoothing
        else:
            label_oh = np.zeros(n, dtype=np.float32)
            label_oh[label] = 1.0

        return torch.from_numpy(spct).float(), torch.from_numpy(label_oh).float()



def collate_fn(batch):
    """Flatten a batch of group samples into one tensor batch.

    Each sample is a (list-of-spectrum-tensors, list-of-int-labels) pair
    as produced by ``TWdata3``; the per-sample lists are concatenated and
    the spectra stacked along a new leading batch dimension.

    Returns:
        (stacked spectra tensor, LongTensor of labels)
    """
    flat_spcts = [tensor for sample in batch for tensor in sample[0]]
    flat_labels = [label for sample in batch for label in sample[1]]
    return torch.stack(flat_spcts, 0), torch.LongTensor(flat_labels)



if __name__ == '__main__':
    # Smoke test: build the label encoder, inspect the class balance of the
    # pre-made train/val/test splits, and pull one batch through TWdata3.
    # NOTE: the original used Python-2 print statements and imported the
    # Python-2-only `cPickle` (plus train_test_split/PCA/TWAug) without
    # using them; single-argument print() calls behave identically on
    # Python 2 and 3.
    from sklearn.preprocessing import LabelEncoder

    rawdata_root = '/media/gserver/data/tianwen/rawdata'

    # data prepare
    train_data_root = os.path.join(rawdata_root, 'first_train_data')
    train_index = pd.read_csv(os.path.join(rawdata_root, 'first_train_index_20180131.csv'))

    # Encode string class names into integer codes 0..n-1.
    le = LabelEncoder()
    train_index['type'] = le.fit_transform(train_index['type'])
    print(le.classes_)

    index_train = pd.read_csv(os.path.join(rawdata_root, 'first_train.csv'))
    index_val = pd.read_csv(os.path.join(rawdata_root, 'first_val.csv'))
    index_test = pd.read_csv(os.path.join(rawdata_root, 'first_test.csv'))

    # Inverse-frequency integer weights (e.g. for a WeightedRandomSampler).
    class_sample_count = index_train.type.value_counts()
    weights = [int(class_sample_count.max() / class_sample_count[x])
               for x in range(len(class_sample_count))]
    print(weights)

    print(index_train.type.value_counts())
    print(index_val.type.value_counts())
    print(index_test.type.value_counts())

    data_set = {}
    data_set['train'] = TWdata3(index_pd=index_train,
                                data_root=rawdata_root,
                                classes=le.classes_,
                                transform=AddAxis())

    data_loader = {}
    data_loader['train'] = data.DataLoader(data_set['train'], 3, num_workers=4,
                                           shuffle=False, pin_memory=True,
                                           collate_fn=collate_fn)

    for batch in data_loader['train']:
        print(batch[1])
        break