#!/usr/bin/env python
# encoding: utf-8
"""
QandG.py

Created by Brant Faircloth on 2007-10-08.
Copyright (c) 2007 Brant C. Faircloth. All rights reserved.

An estimator of the Queller and Goodnight R and code to run permutation tests
to evaluate the "significance" of this value of R.

"""

import numpy,csv,pdb

# Path to the input CSV (line 1: individual count, line 2: locus count,
# line 3: remaining header, then one genotype row per individual).
# Empty by default -- must be set before calling read().
infile = ''

class excelSingleSpace:
    """CSV dialect description for the input files: Excel-style fields,
    records terminated by a bare carriage return.

    Passed as ``dialect=`` to ``csv.reader``, which reads these attributes
    directly off the class."""
    delimiter = ','
    quotechar = '"'
    escapechar = None
    doublequote = True
    skipinitialspace = False
    lineterminator = '\r'
    # made explicit: the csv module would otherwise silently fall back to
    # this default for a dialect object with no ``quoting`` attribute
    quoting = csv.QUOTE_MINIMAL


def read():
    '''
    Read the genotype CSV named by the module-level ``infile`` into a
    2-D integer numpy array (one row per individual).

    Expected layout: line 1 holds the individual count, line 2 the locus
    count, line 3 the remaining header; every following line is a row of
    integers (ID, sex, group, then pairs of allele columns).
    '''
    # 'rU' keeps universal-newline handling for the '\r'-terminated files
    # this dialect describes (NOTE: 'rU' was removed in Python 3.11)
    with open(infile, 'rU') as handle:
        c = csv.reader(handle, delimiter=',', dialect=excelSingleSpace)
        indiv = int(next(c)[0])  # declared individual count (currently unused)
        loci = int(next(c)[0])   # declared locus count (currently unused)
        # skip remaining header line
        next(c)
        # accumulate rows in a list and build the array once; the original
        # vstack-ed inside the loop, copying the growing array every pass
        rows = [[int(i) for i in row] for row in c]
    # bug fix: the file handle was never closed in the original
    return numpy.array(rows)

def allFreq(genotypes):
    '''
    Compute the allele frequencies of the sampled population.

    genotypes -- 2-D integer array whose columns come in pairs: columns
    i and i+1 hold the two alleles of one locus. Allele code 0 marks
    missing data and is excluded from the frequencies.

    Returns a nested dict: {locus column index: {allele code: frequency}}.
    '''
    fD = {}
    for i in range(0, genotypes.shape[1], 2):
        # pool both allele columns for this locus and count occurrences;
        # this replaces the original pad-then-add bincount juggling
        locus = numpy.bincount(
            numpy.concatenate((genotypes[:, i], genotypes[:, i + 1])))
        # allele codes actually present at this locus ...
        indices = locus.nonzero()[0]
        # ... minus code 0, which marks missing data
        indices = indices[indices != 0]
        alleleCount = locus[indices]
        total = float(alleleCount.sum())
        frequencies = alleleCount / total
        fD[i] = dict(zip(indices, frequencies))
    return fD


def group(data, gSlice):
    '''
    Compute the relatedness value for every group.

    data -- full genotype array (one row per individual)
    gSlice -- array of row offsets at which each group starts
    Returns an array with one relatedness value per group.

    For each group: slice its members out, build the "everyone else"
    array, and hand both to mainFreq.
    '''
    results = []
    last = len(gSlice) - 1
    for idx in range(len(gSlice)):
        if idx == last:
            # the final group runs to the end of the data
            members = data[gSlice[idx]:, :]
            rest = numpy.delete(data, numpy.s_[gSlice[idx]:], axis=0)
        else:
            # interior groups are bounded by the next offset
            # (the original had an identical, duplicated branch for idx == 0,
            # which also crashed with an IndexError when only one group existed)
            members = data[gSlice[idx]:gSlice[idx + 1], :]
            rest = numpy.delete(
                data, numpy.s_[gSlice[idx]:gSlice[idx + 1]], axis=0)
        results.append(mainFreq(members, rest))
    return numpy.array(results)

def estimator(sample, group, ind):
    '''
    Accumulate the numerator and denominator terms of the Queller and
    Goodnight (1989, p. 264) relatedness estimator for one individual.

    sample -- {locus: {allele: freq}} for the population minus the group
    group  -- {locus: {allele: freq}} for the group minus the individual
    ind    -- {locus: {allele: freq}} for the individual itself

    Returns (numerator_sum, denominator_sum) over all loci/alleles.
    Allele code 0 (missing data) is skipped; alleles absent from
    ``sample`` or ``group`` contribute a frequency of 0.0.
    '''
    qgEstimatorNumerator = []
    qgEstimatorDenominator = []
    for locus in ind:
        for allele, pijm in ind[locus].items():
            if allele == 0:
                # allele code 0 marks missing data -- not a real allele
                continue
            # .get chains replace the original try/except KeyError blocks;
            # like the original, a missing locus OR allele yields 0.0
            pi_jm = group.get(locus, {}).get(allele, 0.0)
            tp_im = sample.get(locus, {}).get(allele, 0.0)
            qgEstimatorNumerator.append(pi_jm - tp_im)
            qgEstimatorDenominator.append(pijm - tp_im)
    # sums across all loci/alleles for this individual
    return sum(qgEstimatorNumerator), sum(qgEstimatorDenominator)


def mainFreq(group, groupless):
    '''
    Compute the Queller-Goodnight R for one group.

    group     -- rows of the individuals in the group under observation
    groupless -- rows of everyone else in the population
    Columns 0-2 are ID/sex/group labels; genotypes start at column 3.

    Sums the per-individual estimator terms over the whole group and
    returns numerator / denominator.
    '''
    groupNum = []
    groupDenom = []
    # frequency of alleles in the population excluding the group under obs.
    grouplessFreq = allFreq(groupless[:, 3:])
    for individual in range(len(group)):
        # frequency of alleles in the group excluding the individual under obs.
        individualless = numpy.delete(group, numpy.s_[individual], axis=0)
        individuallessFreq = allFreq(individualless[:, 3:])
        # the individual's own "frequencies": each of the two alleles at a
        # locus contributes 0.5 (a homozygote ends up with 1.0).
        # This replaces the original try/except construction, whose A-branch
        # was missing an else and only worked because fD starts empty.
        genotype = group[individual, 3:]
        fD = {}
        for locus in range(0, genotype.shape[0], 2):
            A = genotype[locus]
            B = genotype[locus + 1]
            fD[locus] = {A: 0.5}
            fD[locus][B] = fD[locus].get(B, 0.0) + 0.5
        num, denom = estimator(grouplessFreq, individuallessFreq, fD)
        groupNum.append(num)
        groupDenom.append(denom)
    try:
        return sum(groupNum) / sum(groupDenom)
    except ZeroDivisionError:
        # a zero denominator means no informative loci; keep the original
        # workflow's debugger stop rather than inventing a return value
        # (narrowed from the original bare except, which hid other errors)
        pdb.set_trace()
    
def groupSlice(data, groups):
    '''
    Build the array of row offsets at which each group begins.

    data   -- unused; retained for interface compatibility with callers
    groups -- 1-D array of per-group member counts
    Returns array([0, g0, g0+g1, ...]) with the grand total excluded, so
    consecutive entries can serve as slice bounds. A single group yields
    an empty array (preserving the original's edge-case behavior).
    '''
    if len(groups) < 2:
        # historical edge case: fewer than two groups produced no offsets
        return numpy.array([])
    # cumulative start offsets replace the original manual running-sum loop
    return numpy.concatenate(([0], numpy.cumsum(groups[:-1])))

def randomize(data):
    '''
    Build one permuted copy of the data for the null distribution.

    Keeps the group-label column (2) attached to its rows, substitutes
    synthetic individual IDs for column 0, shuffles the sex column (1)
    as a whole, and permutes every allele column (3+) independently.

    Returns an array with the same shape as ``data``.
    '''
    indivs, loci = data.shape
    # group labels stay with their rows so group sizes are preserved
    groupLabels = data[:, 2]
    # synthetic IDs stand in for the original individual IDs
    iD = numpy.arange(indivs)
    # shuffle the sex column (permutation copies; the source is untouched)
    sex = numpy.random.permutation(data[:, 1])
    columns = [iD, sex, groupLabels]
    # permute each allele column independently; collect and stack once
    # instead of the original's O(columns^2) repeated column_stack calls
    for i in range(3, loci):
        columns.append(numpy.random.permutation(data[:, i]))
    return numpy.column_stack(columns)
    
        
def randomLoop(data, relatedness, permute=1000):
    '''
    Permutation test for the observed relatedness values.

    data        -- genotype array
    relatedness -- observed per-group R values (from real())
    permute     -- number of permutations to run

    Returns an array with, for each group, the number of permutations in
    which the observed R exceeded the permuted R (divide by ``permute``
    for an empirical p-value).
    '''
    print('random data relatedness values\n')
    prob = numpy.zeros(relatedness.shape[0])
    for i in range(permute):
        print('permutation %s \n' % i)
        rData = randomize(data)
        # per-group sizes from the label column; drop the count of label 0
        groups = numpy.bincount(rData[:, 2])[1:]
        gSlice = groupSlice(rData, groups)
        randRelatedness = group(rData, gSlice)
        # 1 where the observed value beats this permutation, else 0
        # (.astype replaces numpy.cast, which was removed in numpy 2.0)
        prob += (relatedness > randRelatedness).astype('int32')
    # bug fix: the counts were computed but never returned -- the original
    # ended in pdb.set_trace() and fell off the end
    return prob

def real(data):
    '''
    Compute the observed relatedness value for every group in ``data``.

    Returns the array produced by group(): one R value per group.
    '''
    # number of individuals in each group, dropping the count for label 0
    groups = numpy.bincount(data[:, 2])[1:]
    # row offsets used to slice each group out of the data array
    gSlice = groupSlice(data, groups)
    # bug fix: removed the unconditional pdb.set_trace() that halted
    # every single run in the debugger
    return group(data, gSlice)

def main():
    '''Entry point: read the data, compute observed per-group R values,
    then run the permutation test against them.'''
    # print(...) form works identically under Python 2 and 3
    print('reading data\n')
    data = read()
    print('real data relatedness values\n')
    relatedness = real(data)
    randomLoop(data, relatedness, 1000)


# run the full analysis only when executed as a script, not on import
if __name__ == '__main__':
    main()

