import csv
import numpy
import random
import time

from sklearn import preprocessing


class DataPreprocessor(object):
    """Load a tab-delimited clinical dataset and build train/test index sets.

    Expected file layout (see ``loadFromFile``): two header rows, then one
    row per patient with columns
    ``age, 'Case'/'Control', 'M'/'F', <unused id>, measurement, measurement, ...``

    Attributes (populated by ``loadFromFile``):
        mat    -- float matrix of measurements, one row per patient
        gender -- array: 0 indicates 'male', 1 indicates 'female'
        age    -- int array of ages
        case   -- array: 0 indicates 'control', 1 indicates 'case'
    """

    def __init__(self):
        # float matrix of data
        self.mat = None
        # 0 indicates 'male', 1 indicates 'female'
        self.gender = None
        # int array of ages
        self.age = None
        # 0 indicates 'control', 1 indicates 'case'
        self.case = None

    def loadFromFile(self, filename):
        '''Parse a tab-delimited data file into mat/gender/age/case arrays.
        :param filename: path to the tab-delimited input file
        :type filename: str
        '''
        # Text mode with newline='' is what the csv module requires under
        # Python 3; the old 'rb' mode raised there. `with` guarantees the
        # handle is closed.
        with open(filename, 'r', newline='') as fh:
            reader = csv.reader(fh, delimiter='\t')
            matWithLabels = numpy.array(list(reader))

        # skip the two header rows and four label columns for the float data
        self.mat = matWithLabels[2:, 4:].astype('float')

        # gender: male = 0, female = 1
        self.gender = numpy.zeros((self.mat.shape[0]))
        femaleInd = numpy.where(matWithLabels[2:, 2] == 'F')
        self.gender[femaleInd] = 1

        # age as int array
        self.age = matWithLabels[2:, 0].astype('int')

        # case/control: control = 0, case = 1
        self.case = numpy.zeros((self.mat.shape[0]))
        caseInd = numpy.where(matWithLabels[2:, 1] == 'Case')
        self.case[caseInd] = 1

    def normalize(self, mat, method='old'):
        '''Return a normalized version of `mat` without modifying it.
        :param mat: matrix to normalize
        :type mat: numpy.ndarray
        :param method: 'l1', 'l2', 'scale', or anything else for the
                       default divide-by-global-mean normalization
        :type method: str
        :return: normalized matrix
        :rtype: numpy.ndarray
        '''
        # NOTE: an earlier per-protein (transposed) normalization was tried
        # and appeared worse than dividing by the element-wise mean.
        if method == 'l1':
            return preprocessing.normalize(mat, norm='l1')
        elif method == 'l2':
            return preprocessing.normalize(mat, norm='l2')
        elif method == 'scale':
            return preprocessing.scale(mat)
        else:
            # Divide by the global mean, returning a NEW array.  The old
            # `mat /= normalizer` silently mutated the caller's matrix.
            return mat / numpy.mean(mat)

    def selectTrainingSet(self, pct=0.75, rseed=-1):
        '''Returns array of indices for training set
        :param pct: percentage of case/control data to use for training [0,1]
        :type pct: float
        :param rseed: seed for random number generator; if rseed<0, use time
        :type rseed: int
        :return: row indices for training set
        :rtype: numpy.array
        '''
        # extract indices of case and control
        caseInd = numpy.where(self.case == 1)[0]
        controlInd = numpy.where(self.case == 0)[0]

        # seed random number generator (time-based unless caller fixed it)
        if rseed < 0:
            rseed = int(time.time() * 100)
        random.seed(rseed)

        # randomly shuffle the indices
        random.shuffle(caseInd)
        random.shuffle(controlInd)

        # find last indices for training sets based on 'pct'
        endIndexCase = int(len(caseInd) * pct)
        endIndexControl = int(len(controlInd) * pct)

        # return concatenated array of case and control indices
        return numpy.concatenate((caseInd[:endIndexCase],
                                  controlInd[:endIndexControl]))

    def selectBalancedTrainingSet(self, pct=0.75, rseed=-1):
        '''Returns array of indices for balanced training set
        :param pct: percentage of case/control data to use for training [0,1]
        :type pct: float
        :param rseed: seed for random number generator; if rseed<0, use time
        :type rseed: int
        :return: row indices for balanced training set
        :rtype: numpy.array
        '''
        # extract indices of case and control
        caseInd = numpy.where(self.case == 1)[0]
        controlInd = numpy.where(self.case == 0)[0]

        # seed random number generator (time-based unless caller fixed it)
        if rseed < 0:
            rseed = int(time.time())
        # fixed paren placement: the old `print(...).format(rseed)` raised
        # AttributeError under Python 3
        print('USING RSEED: {}'.format(rseed))
        random.seed(rseed)

        # randomly shuffle the indices
        random.shuffle(caseInd)
        random.shuffle(controlInd)

        # choose end index based on size of smaller case/control array
        if len(controlInd) < len(caseInd):
            endIndex = int(len(controlInd) * pct)
        else:
            endIndex = int(len(caseInd) * pct)

        # return concatenated array of indices (equal counts of each class)
        return numpy.concatenate((caseInd[:endIndex],
                                  controlInd[:endIndex]))

    def selectBalancedCaseControl(self, unbalancedIndices):
        '''Returns array of indices for a uniform case/control distribution
        :param unbalancedIndices: row indices for an unbalanced set of case/control
        :type unbalancedIndices: numpy.array
        :return: row indices for a balanced set
        :rtype: numpy.array
        '''
        unbalancedIndices = numpy.asarray(unbalancedIndices).astype('int')

        # Partition into case/control while PRESERVING the incoming
        # (shuffled) order.  The previous numpy.intersect1d approach
        # returned SORTED indices, which undid the caller's shuffle and
        # always kept only the lowest-index cases when truncating.
        isCase = self.case[unbalancedIndices] == 1
        userCaseInd = unbalancedIndices[isCase]
        userControlInd = unbalancedIndices[~isCase]

        # truncate the larger group so both classes contribute equally
        if len(userCaseInd) > len(userControlInd):
            return numpy.concatenate((userCaseInd[:len(userControlInd)],
                                      userControlInd))
        else:
            return numpy.concatenate((userCaseInd,
                                      userControlInd[:len(userCaseInd)]))
