import numpy
import copy
import pylab
import cPickle
import find_rc
from general_neuro.fast_thresh_detect import fast_thresh_detect

print "loading in cPickle"
data_set_list = cPickle.load(open('IPC_data_full.cPickle', 'r'))
print 'data loaded..'

def plot_data_set(data):
    """Plot every trace in a data-set dictionary on one figure.

    Traces are laid out on a near-square grid of subplots, ordered by
    increasing injection current.  Each subplot is labelled with the
    injection current on x and the tail of the trace name on y; the
    figure title is the common prefix (before '--') of the trace names.

    data -- dict mapping trace name -> {'data': sequence,
            'inject': float, ...}
    """
    pylab.figure()
    pylab.clf()
    if not data:
        # Nothing to plot.  Also avoids the NameError the original code
        # hit at the suptitle line when the loop below never ran.
        return
    # Choose a near-square grid big enough to hold every trace.
    num_data = len(data)
    num_rows = int(numpy.sqrt(num_data))
    num_cols = int(numpy.sqrt(num_data)) + 1
    if num_rows * num_cols < num_data:
        num_rows += 1

    # Collect traces, currents and titles, then sort by current.
    # (Loop variable renamed from 'data' so the parameter is not shadowed.)
    data_list = []
    inject_list = []
    title_list = []
    for data_title, entry in data.items():
        data_list.append(entry['data'])
        inject_list.append(entry['inject'])
        title_list.append(data_title)
    indexes = numpy.argsort(inject_list)

    plot_count = 0
    for i in indexes:
        plot_count += 1
        pylab.subplot(num_rows, num_cols, plot_count)
        pylab.plot(data_list[i])
        pylab.xticks('')
        pylab.yticks('')
        pylab.xlabel('%1.2f nA' % inject_list[i])
        pylab.ylabel(title_list[i][-10:])
    # i is the last-plotted trace; titles are assumed to share the
    # prefix before '--' -- TODO confirm against the naming convention.
    pylab.suptitle(title_list[i].split('--')[0])

def find_duplicates_and_average(data_set, count):
    """Average repeated recordings made at the same injection current.

    Traces in data_set that share an 'inject' value are truncated to the
    shortest recording, averaged sample-wise, and the result is stored
    back into data_set under the key 'average_%1.3f' % current -- but
    only when the stimulus onset/offset indexes (detected from the
    injected-current trace) agree across all repeats.  Progress and any
    mismatches are reported on stdout.

    data_set -- dict mapping trace name -> {'data': sequence,
                'inject': float, 'Iinj': sequence, ...}; mutated in
                place when an average is added.
    count    -- unused; kept for call-site compatibility (only the
                commented-out diagnostic plotting below referenced it).
    """
    # Group the traces by injection current.
    injects = {}
    for name, data in data_set.items():
        if data['inject'] in injects.keys():
            injects[data['inject']].append(data)
        else:
            injects[data['inject']] = [data]
    for current, data_list in injects.items():
        if len(data_list) > 1:
            # average the data and add it to the data_set
            data_lengths = []
            for data in data_list:
                data_lengths.append(len(data['data']))
            # Every length equals the mean iff all lengths are identical,
            # so this flags whether the repeats have equal durations.
            all_same_length = True
            for data_length in data_lengths:
                if data_length != numpy.average(data_lengths):
                    all_same_length = False
            if all_same_length:
                    print "found %d data of length %d to average." % (len(data_lengths), data_lengths[0])
                    print "  current = %2.4f" % current
            # find out if they have the same pulse onset and offset times
            ps = []   # stimulus-onset indexes, one per repeat
            ns = []   # stimulus-offset indexes, one per repeat
            same = True
            datas = []
            for data in data_list:
                # Detect the pulse edges by thresholding the injected-
                # current trace at half the nominal amplitude.
                p, n = fast_thresh_detect(data['Iinj'], threshold=current/2.0)
                # Truncate each repeat to the shortest recording so the
                # traces can be stacked and averaged below.
                datas.append(data['data'][:min(data_lengths)])
                if ps:
                    if p[0] not in ps:
                        print "not same stim on or off"
                        print "   ON/OFF indexes = %s" % (str(ps))
                        same = False
                if ns:
                    if n[0] not in ns:
                        print "--not same stim on or off"
                        print "   ON/OFF indexes = %s" % (str(ns))
                        same = False
                ps.append(p[0])
                ns.append(n[0])
            if same:
                print "same onset and offset times for all data, even if different recording durations."
                print "  ON/OFF indexes = %s %s" % (str(ps), str(ns))
                print "  current = %2.4f" % current
                # find the average and add it to the data_set
                #print datas
                all_data = numpy.vstack(datas)
                avg_data = numpy.average(all_data, axis=0)
                #print avg_data.shape
                #print avg_data

                #count += 1
                #pylab.figure(count)
                #for tdata in datas:
                #    pylab.plot(tdata, linewidth = 0.3, color = 'black')
                #pylab.plot(avg_data, linewidth = 1.5, color='red')

                # now add the average to the data set
                # Deep-copy the metadata of a matching trace, then swap
                # in the averaged waveform.  NOTE(review): inserting
                # into data_set while iterating items() is only safe
                # because Python 2 items() returns a snapshot list.
                for name, data in data_set.items():
                    if len(data['data']) == min(data_lengths) and data['inject'] == current:
                        data_set['average_%1.3f' % current] = copy.deepcopy(data)
                        data_set['average_%1.3f' % current]['data'] = copy.deepcopy(avg_data)


            else:
                print "not the same onset and offset times for this set of data, cannot average"
                print "  ON/OFF indexes = %s %s" % (str(ps), str(ns))
                print "  data lengths = %s" % str(data_lengths)
                print "  current = %2.4f" % current

def fit_rc_data_set_list(data_set_list):
    count = 0
    abort_me = False
    for data_set in data_set_list:
        fit_in_set = False
        plot_data_set(data_set)
        response = raw_input('Fit RC constant for this data set? y/n/a')
        if response == 'y':
            fit_in_set = True
        if response == 'a':
            abort_me = True
        if abort_me:
            break
        count += 1
        if fit_in_set:
            for name, data in data_set.items():
                if data['inject'] < 0.11:
                    print "Data with name %s." % name
                    response = raw_input('Fit RC constant?  y/n/a ')
                    valid_responses = ['y','n','a']
                    while response not in valid_responses:
                        response = raw_input('Fit RC constant?  y/n/a ')
                    if response == 'y':
                        v_series = data['data']
                        len_v = len(v_series)
                        t_series = numpy.arange(0,len_v*0.1,0.1)
                        fit_data = find_rc.find_rc(data['data'],t_series, data['inject'])
                        # assume we fit things well, lets store the info
                        data['g'] = fit_data.g
                        data['C'] = fit_data.C
                        data['tau1'] = fit_data.tau1
                        data['tau2'] = fit_data.tau2
                        data['avg_tau'] = fit_data.avg_tau
                    if response == 'n':
                        try:
                            del(data['g'])
                            del(data['C'])
                            del(data['tau1'])
                            del(data['tau2'])
                            del(data['avg_tau'])
                        except:
                            print "no rc fit data to delete."
                            continue
                    if response == 'a':
                        abort_me = True
                        break

#fit_rc_data_set_list(data_set_list)
# Driver: average duplicate-current recordings in every data set and
# re-plot any set that gained new (averaged) entries.
count = 0
for data_set in data_set_list:
    count += 100
    before_keys = len(data_set.keys())
    find_duplicates_and_average(data_set, count)
    after_keys = len(data_set.keys())
    # Fix: use '!=' rather than 'is not' -- identity comparison of ints
    # only works by accident of CPython's small-int caching and gives
    # wrong answers for larger key counts.
    if before_keys != after_keys:
        plot_data_set(data_set)