import codecs
import csv
import numpy as np
import math
import random
import argparse

def _match(a, b, depth = 5):
    """ Say if two rows are matching are not, perform 'depth' tests"""
    #selects what columns to match in sequence.
    if a[5] == b[5]: # Gender
        if a[3] == b[3]: # School
            if a[4] == b[4]: # Class
                if a[8] == b[8]: # SES
                    if a[7] == b[7]: #Verbal
                        return True
    return False

	#select all the columns you would like to compare, same on line 52
def match(a, b, columns = [5, 3, 4, 8]): 
    """ [5, 3, 6, 8, 7] is Gender, School, Ethnicity number, SES, Verbal
    
    Matches a and b according to the column indices given in columns.
    Return True if they match, False else.
    """
    for idx in columns:
        if a[idx] != b[idx]:
            return False
    return True

def match_group_A_to_B(arows, brows):
    """Exact matching between two groups with all default constraints.

    Every pair (arow, brow) satisfying match() contributes brow's ID
    (column 2) to the list stored under arow's ID (column 2).

    Returns a dict mapping a_ID -> list of matching b_IDs.
    """
    matches = {}
    for candidate_a in arows:
        for candidate_b in brows:
            if not match(candidate_a, candidate_b):
                continue
            matches.setdefault(candidate_a[2], []).append(candidate_b[2])
    return matches

def fuzzy_group_match_group_A_to_B(arows, brows, min_matches, matching_columns):
    """Match with progressively relaxed constraints.

    Starts by matching on all of ``matching_columns`` and drops the last
    column on each pass until at least ``min_matches`` distinct a-IDs
    have matches.  Returns (number of constraints used, the constraint
    columns, dict mapping a_ID -> list of matching b_IDs).

    NOTE(review): ``res`` accumulates across relaxation levels, so a
    pair that matched under stricter constraints is appended again on
    each looser pass.
    """
    res = {}
    total_constraints = len(matching_columns)

    # Relax: use the strictest prefix of the columns first.
    for n_active in range(total_constraints, 0, -1):
        active_columns = matching_columns[0:n_active]
        for arow in arows:
            for brow in brows:
                if match(arow, brow, columns=active_columns):
                    res.setdefault(arow[2], []).append(brow[2])
        # Stop relaxing once each group has enough matches.
        if len(res) >= min_matches:
            return n_active, active_columns, res
    return n_active, active_columns, res


def clean(row):
    """Normalise the numeric columns of a CSV row, in place.

    Columns 6 and 15 are cast to int; the sentinel '#NULL!' is first
    mapped to -99 (the file's missing-value convention -- None would be
    cleaner, TODO).  Indices beyond the row length are skipped silently,
    matching the original enumerate-based scan.

    Mutates and returns ``row``.
    """
    for i in (6, 15):
        if i >= len(row):
            continue
        if row[i] == '#NULL!':
            row[i] = -99
        row[i] = int(row[i])
    return row


def main():
    """Command-line entry point: per-age fuzzy-matching report.

    For each age 9-15 (except 13, which has no data), rank children by
    the trust score in column 10, slice off the top/bottom percentile
    groups, fuzzy-match one group against the other on the demographic
    columns given on the command line, and print the results to stdout.

    NOTE(review): Python 2 syntax (print statements) and the
    long-deprecated np.float alias (removed in NumPy 1.24).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--min_matches_per_group',
                        help='The minimum number of members of a group.',
                        default=5,
                        type=int)
    parser.add_argument('--data_filename', 
                        help='for instance ../data/matching_2.csv)',
                        required=True)
    # NOTE(review): the two options below are required=True, so their
    # defaults are dead code.  Also, a literal '%' in argparse help text
    # must be escaped as '%%' -- running with '-h' would raise here.
    parser.add_argument('--top_cutoff_percent',help='for instance .15 \
        to get the top 15% of the population when ranked by the trust score',
        type=float, default=.15, required=True)
    parser.add_argument('--bottom_cutoff_percent',help='for instance .10 \
        to get the bottom 10% of the population when ranked by the trust score',
        type=float, default=.10, required=True)
    parser.add_argument('--matching_columns', 
                        help='the columns used for matching, for instance 5 3',
                        type=int, nargs='+', required=True)
        
    args = parser.parse_args()

    #rows = csv.reader(codecs.open('keri.csv', 'rU', 'utf-16'))
    # NOTE(review): this hard-coded open() always runs even though
    # --data_filename is required and overrides it just below; it raises
    # IOError whenever ../data/matching_2.csv is absent.
    rows = csv.reader(open('../data/matching_2.csv'),delimiter=',')
    if args.data_filename:
        rows = csv.reader(open(args.data_filename),delimiter=',')
    # data = np.genfromtxt('keri.csv', unpack=True, delimiter=',')

    # Clean every row except the header row (i == 0).
    data = []
    i=0
    for row in rows:
        if i>0:
            cleaned = clean(row)
            data.append(cleaned)
        i+=1


    # Mixed-type rows, so every cell becomes a string in the array.
    ar = np.array(data)

    np.set_printoptions(threshold = 10000 , edgeitems = 100000)

    rand_idx = random.randint(0,1) 
    # We want a blind experiment, so we should NOT know whether we are
    # looking at the top group or the bottom group.


    for age in range(9,16):
        if age != 13: # no data for age 13 and lazy to do proper 
                      # checking in the code
            # Column 10 is the trust score; to use a different scale,
            # change the column index here and in the slicing below.
            idx =  np.logical_and( ar[:,1] == str(age) , ar[:,10] != '-99' ) # -99 is missing value

            subset = ar[idx]
            # NOTE(review): 'sorted' shadows the builtin, and np.float
            # was removed in NumPy 1.24 (use float or np.float64).
            sorted = subset[np.argsort(subset[:, 10].astype(np.float))] 
            # we sort according to column 10 by ascending order
            n_rows = len(sorted)
           
            print 'age', age
            print '\n'
            print 'Matching is done on:%s' % args.matching_columns
            bottom_start = 0
            bottom_end = int(math.ceil(len(sorted)*args.bottom_cutoff_percent))
            bottom_rows = sorted[bottom_start:bottom_end,:]
            # NOTE(review): IndexError here if a percentile slice comes
            # out empty (very small age group) -- TODO confirm inputs.
            bottom_cutoff = bottom_rows[-1][10]

            top_start =  int(len(sorted) * (1-args.top_cutoff_percent))
            top_end =  int(math.ceil(len(sorted)))
            top_rows = sorted[top_start:top_end,:]
            top_cutoff = top_rows[-1][10]

            max_trust_score =  sorted[-1][10].astype(np.float) # the maximum value
            print 'For age %s maximum score is: %s' % (age, max_trust_score)
            print 'Number of children of age %s is : %s' % (age, n_rows)

            print 'bottom cutoff score is: %s' % bottom_cutoff
            print 'number of children in bottom group is: %s' % len(bottom_rows)
            print 'top cutoff score is: %s' % top_cutoff
            print 'number of children in top group is: %s' % len(top_rows)

            min_matches_per_group = args.min_matches_per_group
            # it is accepted to have no matches, this makes fuzzy_matching 
            # behave like regular matching
            number_constraints, constraints, match_top = \
            fuzzy_group_match_group_A_to_B(top_rows, bottom_rows, 
                                            min_matches_per_group,
                                            args.matching_columns)
            # NOTE(review): number_constraints/constraints printed below
            # come from this second call only; the two directions may
            # have relaxed to different depths.
            number_constraints, constraints, match_bottom = \
            fuzzy_group_match_group_A_to_B(bottom_rows, top_rows,
                                            min_matches_per_group,
                                            args.matching_columns)

            picked_group = [match_top, match_bottom][rand_idx]
            print 'Number of constraints, constraints'
            print number_constraints, constraints
            print 'Number of matches', len(picked_group)

            # NOTE(review): 'id' is a string (clean() only casts columns
            # 6 and 15) while s is an int, so under Python 2 this
            # inequality is always true and every match is printed.
            # 'id' also shadows the builtin.
            for id in picked_group:
                var = picked_group[id]
                s = map(str, var)   
                s = ''.join(s)          
                s = int(s)
                if id != s:
                    print 'Original ID, Matched IDS', id, picked_group[id]   
            nkeys = len(picked_group)
            uniq = []
            for x in picked_group.values():
                uniq += x
            nunique_matches = len(list(set(uniq)))
            print 'Unique IDS in both groups : %s + %s = %s ' % \
                (nkeys, nunique_matches, nkeys+nunique_matches)
            print '---------------------------'
# Guard the entry point so importing this module does not run the
# whole analysis (the unconditional call also broke any import).
if __name__ == '__main__':
    main()
