import smap.archiver.client
import time
import scipy.interpolate
from numpy import subtract, histogram, std
import csv
import calendar, datetime
from operator import itemgetter


# Output files: f1 gets the deviation histogram summary, f2 the raw
# per-timestamp sanity-check dump.  Change these paths to relocate output.
summary_path = '/home/cbe/.gvfs/bsg on 169.229.130.74/New York Times/Analysis/setpointsOct16-Jan10.csv'
data_path = '/home/cbe/.gvfs/bsg on 169.229.130.74/New York Times/Analysis/setpointsData.csv'
f1 = open(summary_path, 'wb')
f2 = open(data_path, 'wb')
testsout = csv.writer(f1, dialect='excel')
testsdata = csv.writer(f2, dialect='excel')

# Archiver connection; the private key grants access to the NYT streams.
archiver_base = 'http://new.openbms.org/backend'
archiver_key = 'SA2nYWuHrJxmPNK96pdLKhnSSYQSPdALkvnA'
c = smap.archiver.client.SmapClient(base=archiver_base, key=archiver_key, private=True)

#get the uuids for both room_temp and ctl_setpt
uuids = []   # uuids[0]: ROOM_TEMP uuids; uuids[1]/uuids[2] appended below (CTL_STPT uuids, FPB names)
stpt = []    # CTL_STPT uuids, collected while scanning uuids[0] in reverse
paths = []   # FPB identifiers (third path segment), same reverse scan order
uuids.append(c.query('select distinct uuid where Path like \'/nytimes/FPB%/ROOM_TEMP\''))
# Scan backwards so entries without a matching CTL_STPT stream can be deleted
# in place without shifting the indices still to be visited.
for x in reversed(xrange(len(uuids[0]))):
    uuid = uuids[0][x]
    # repr(...).strip("[u'']") peels the list/unicode wrapper off the
    # single-element query result to recover the bare path string.
    path = repr(c.query('select distinct Path where uuid = \'%s\'' % uuid)).strip('[u\'\']')
    fpb = path.split('/')[2]
    ctl = c.query('select distinct uuid where Path like \'/nytimes/%s/CTL_STPT\'' % fpb)
    if (ctl): #make sure that there is a bms point that corresponds--if not, throw out this fpb
        stpt.extend(ctl)
        paths.append(path.split('/')[2])
    else:
        del uuids[0][x]

# stpt/paths were filled in reverse scan order; reversing re-aligns them with
# the (forward) order of the surviving uuids[0] entries.
uuids.append(reversed(stpt))
uuids.append(reversed(paths)) 
# Transpose to (room_uuid, stpt_uuid, fpb_name) triples, sort by FPB name,
# transpose back so all three parallel lists share the sorted order.
uuids = zip(*sorted(zip(*uuids), key=itemgetter(2)))



#input a time range for setpoint analysis (UTC epoch seconds)
start_time = int(calendar.timegm(time.strptime('2012-01-01 00:00:00', '%Y-%m-%d %H:%M:%S')))
end_time = int(calendar.timegm(time.strptime('2012-01-03 00:00:00', '%Y-%m-%d %H:%M:%S')))

tmbuffer = 0    #seconds trimmed from each end of the range (interpolation data buffer)
interval = 900  #resample interval in seconds
smsample = 1    #samples to average for smoothing before resampling (1 = no smoothing)

new_time = []     #resample instants surviving the weekday/working-hours filter
intrpl_rm = []    #room-temp series interpolated onto new_time
intrpl_stpt = []  #setpoint series interpolated onto new_time
deviations = []   #room temp minus setpoint, per stream

#resample grid in sMAP milliseconds, shrunk by tmbuffer on each side to
#account for missing data at the edges
new_timefm = [t * 1000 for t in range(start_time + tmbuffer, end_time - tmbuffer + 1, interval)]

rmtmp_d = c.data_uuid(uuids[0], start_time, end_time)
stpt_d = c.data_uuid(uuids[1], start_time, end_time)

#keep only weekday samples whose local hour is in [6, 18) -- i.e. 6am-5:59pm
#(UTC offset is -4h before the Nov 6 2011 DST switch, -5h after)
for tm in new_timefm:
    if tm // 1000 < 1320523200:  #before the Nov 6 DST change: EDT
        utc_offset = 3600 * 4
    else:                        #after the change: EST
        utc_offset = 3600 * 5
    local = time.gmtime((tm // 1000) - utc_offset)
    #local[6] is tm_wday (0=Mon, so <5 means Mon-Fri); local[3] is tm_hour
    if local[6] < 5 and 6 <= local[3] < 18:
        new_time.append(tm)

#NOTE(review): this smooths over a fixed number of samples, not a time span,
#so missing-data gaps skew the window; averaging over a time range would be
#more robust.
def smoothList(rdata, degree):
    """Return rdata smoothed with a centered moving average.

    Each interior point is replaced by the mean of the window
    rdata[i - degree//2 : i + degree//2 + 1], which holds degree samples when
    degree is odd and degree + 1 when it is even (matching the original
    divisor choice).  Points too close to either end are copied unchanged.

    rdata  -- indexable numeric sequence (list or 1-D numpy column slice)
    degree -- window size in samples; 1 yields a float copy of rdata
    """
    half = degree // 2  # explicit floor division: correct on Python 2 and 3
    smoothed = [0] * len(rdata)
    for i in range(len(smoothed)):
        # Boundary guard (kept as in the original: slightly conservative for
        # odd degree, and it also rules out negative slice indices).
        if (i < degree - 1) or (i > len(rdata) - degree):
            smoothed[i] = rdata[i]
        else:
            window = rdata[i - half:i + half + 1]
            # len(window) is degree for odd degree and degree + 1 for even,
            # so dividing by the actual window length reproduces both of the
            # original even/odd branches without special-casing.
            smoothed[i] = sum(window) / float(len(window))
    return smoothed
invalids = []
fpb_names = []
for x in xrange(len(rmtmp_d)):  #for each stream
    #print sorted(paths)[x]
    if len(rmtmp_d[x][:,0]) > 0: #check that there is data
        if smsample > 1:
            smdata = smoothList(rmtmp_d[x][:,1],smsample)
        else:
            smdata = rmtmp_d[x][:,1]
        f_data = scipy.interpolate.interp1d(rmtmp_d[x][:,0], smdata, bounds_error=False) #create an interpolation table
        if std(f_data(new_time)) == 0:     #check to make sure that the rm_temperaure data isn't constant (if constant, don't include in analysis)
            invalids.append(x)
            print "exluded", sorted(paths)[x]  
        else:
            intrpl_rm.append(f_data(new_time)) #interpolate to interval time
            fpb_names.append(uuids[0][x]) 
testsdata.writerow(fpb_names)

#resample the setpoint streams the same way, skipping any stream whose
#room-temperature partner was rejected so the two lists stay index-aligned
for idx in xrange(len(stpt_d)):
    if idx in invalids:
        continue  #room-temp partner was excluded above
    if len(stpt_d[idx][:, 0]) == 0:
        continue  #no data for this stream
    if smsample > 1:
        smdata = smoothList(stpt_d[idx][:, 1], smsample)
    else:
        smdata = stpt_d[idx][:, 1]
    #interpolation table over raw timestamps (NaN outside the data range)
    f_data = scipy.interpolate.interp1d(stpt_d[idx][:, 0], smdata, bounds_error=False)
    intrpl_stpt.append(f_data(new_time))  #interpolate to interval time

#deviation = room temp - setpoint, per stream, on the common time grid
#(zip truncates to the shorter of the two lists, as the index loop did)
for rm_series, stpt_series in zip(intrpl_rm, intrpl_stpt):
    if len(rm_series) == len(stpt_series):
        deviations.append(subtract(rm_series, stpt_series))
#histogram over all deviations; numpy flattens the list of per-stream arrays
chart = histogram(deviations, bins=20, range=(-10, 10))
#pair each bin's left edge with its percentage share; zip drops the final
#right edge (there are bins+1 edges but only bins counts)
chart = zip(chart[1], chart[0] / float(sum(chart[0])) * 100)
for row in chart:
    testsout.writerow(row)
    
#sanity check: dump every retained timestamp with each stream's room temp,
#setpoint, and deviation side by side to confirm things line up as expected
intrpl_rmz = zip(*intrpl_rm)      #transpose so rows are timestamps
intrpl_stptz = zip(*intrpl_stpt)
deviationsz = zip(*deviations)
for i in xrange(len(new_time)):
    secs = new_time[i] // 1000
    #convert back to local time, honoring the Nov 6 DST switch
    if secs < 1320523200:
        local_offset = 3600 * 4  #EDT
    else:
        local_offset = 3600 * 5  #EST
    row = [time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(secs - local_offset))]
    for j in xrange(min(len(intrpl_rmz[i]), len(intrpl_stptz[i]))):
        row.append(intrpl_rmz[i][j])
        row.append(intrpl_stptz[i][j])
        row.append(deviationsz[i][j])
    testsdata.writerow(row)

#flush and release both output files
for handle in (f1, f2):
    handle.close()


