### Session Data Analyzer by Tecumseh Fitch version 0.5 9 Feb 2011 ####
# Reads in a CSV file of bird data from CogLab, and calculates statistics #
# second version fixes median function
#version7:fixed median for odd numbers.gwf
#version8: writes out to a .csv file
#9: opens all files within a specified folder and does analyses on each individual file, saving the results to a separate directory
###  statistics functions #######
from GeneralTools import *

## Distribution calculators adapted from others' code (NETLIB, JSci, as used by Mark Hale, MIT); should check accuracy against R ##



#### CSV Reading Code ######

def ExtractNamedColumn(csvFile, columnName, directory=None):
    '''Extract a named column of a CSV file and return it as a list of ints.

    csvFile    -- name of the CSV file to read
    columnName -- header name of the column to extract
    directory  -- folder containing csvFile; defaults to the module-level
                  `indirectory` chosen via the folder dialog (kept for
                  backward compatibility -- the original version ignored the
                  csvFile parameter entirely and read the globals
                  `indirectory` and `infilename` instead)

    Raises KeyError if columnName is missing from the CSV header, and
    ValueError if a cell in the column is not an integer.
    '''
    import csv
    import os
    if directory is None:
        directory = indirectory  # module-level global set by askdirectory()
    datafile = open(os.path.join(directory, csvFile), "r")
    try:
        # DictReader keys each row by the header line, so we can pull the
        # requested column by name regardless of its position.
        return [int(row[columnName]) for row in csv.DictReader(datafile)]
    finally:
        datafile.close()  # guarantee the handle is released even on bad data


##### Main Code: extract the designated columns of data from a CSV file, and do statistics on them ####
import os
import Tkinter
from tkFileDialog import askdirectory
top = Tkinter.Tk()
indirectory = askdirectory()
need_header = True
out_directory = "Summary_Statistics/"
if not os.path.isdir("./"+ out_directory):
    os.mkdir (out_directory)
for subdir, dirs, files in os.walk(indirectory):
    for infilename in files:
        if ".DS_Store" in infilename:
            continue
        else:
            outfile = open(out_directory + infilename.split(".")[0] + "_Statistics.csv", 'w')#for each csv file read, a separate file is written
            omnibus_outfile = open(out_directory+ "/Statistics_Summary.csv", "a+")#additionally, the analysis is APPENDED to an omnibus file for the data from all files.
            columnNameList = [ 'session_length','correct', 'correct_train', 'correct_test',"correct_probes", 'correct_train_3n', 'correct_train_2n','correct_test_3n','correct_test_2n', 'correct_train_large', 'correct_train_small',
                               'correct_test_large','correct_test_small'] # <- can add whatever column names you want here, it will do stats on each
            
            outfile.write("Participant, Test, Category, Total, Mean, SD, Min, Median, Max, Lower_CI, Upper_CI\n")
            if need_header ==True:
                    omnibus_outfile.write("Participant, Test, Category, Total, Mean, SD, Min, Median, Max, Lower_CI, Upper_CI\n")
                    need_header = False
            for columnName in columnNameList:
                dataList = ExtractNamedColumn(infilename, columnName)
                dataTotal = total(dataList)
                dataMean = mean(dataList)
                dataSD = stdDev(dataList)
                dataMedian = median(dataList)
                maximum = max(dataList)
                minimum = min(dataList)
                CI = confidenceInterval(dataList, 0.05)
                upper_CI = dataMean + CI
                lower_CI = dataMean-CI
                participant = infilename.split("_")[0]
                testwithending = infilename.split("_")[2]
                test = testwithending.split(".")[0]
                outfile.write(str(participant)+ "," + str(test) + ",")
                outfile.write(str(columnName)+ ",")
                
                outfile.write(str(dataTotal) + "," +str(dataMean) +","+str(dataSD) + ","+ str(minimum)+","+ str(dataMedian)+","+ str(maximum) +  "," +str( lower_CI) +","+ str(upper_CI) + "\n")
                omnibus_outfile.write(str(participant)+ "," + str(test) + ",")
                omnibus_outfile.write(str(columnName)+ ",")
                omnibus_outfile.write(str(dataTotal) + "," +str(dataMean) +","+str(dataSD) + ","+ str(minimum)+","+ str(dataMedian)+","+ str(maximum) +  "," +str( lower_CI) +","+ str(upper_CI) + "\n")

outfile.close()

omnibus_outfile.close()
print "Analysis completed"
