import smap.archiver.client
import scipy.interpolate
import csv
import calendar
from datetime import datetime
from dateutil import tz

#change the name and location of the output file
# Output CSV opened in binary mode ('wb') -- the Python 2 csv-module idiom.
# NOTE(review): path points at a mounted network share; will raise IOError
# if the share is not mounted.
f1 = open('/home/cbe/.gvfs/bsg on 169.229.130.74/!Projects/HVAC/UFAD/NYT_CPB - 2011/Analysis/DH/test.csv', 'wb')
testsout = csv.writer(f1,dialect='excel') 

#input stream ids
# sMAP archiver client used for both the bulk data pull and the per-uuid
# metadata queries below.
# NOTE(review): API key is hard-coded in source -- consider moving it to an
# environment variable or config file before sharing this script.
c = smap.archiver.client.SmapClient(base='http://new.openbms.org/backend', key='SA2nYWuHrJxmPNK96pdLKhnSSYQSPdALkvnA', private=True)

#insert local times
# Parse the naive wall-clock strings, tag them as US Eastern (EST5EDT), and
# convert to integer Unix seconds via timegm() on the UTC time tuple.
dt_format = '%Y-%m-%d %H:%M:%S'
time_zone = tz.gettz('EST5EDT')
start_time = int(calendar.timegm(datetime.strptime('2012-01-09 12:38:00', dt_format).replace(tzinfo=time_zone).utctimetuple()))
end_time = int(calendar.timegm(datetime.strptime('2012-01-09 12:52:00', dt_format).replace(tzinfo=time_zone).utctimetuple())) 
new_time = []
interpolated = []
tmbuffer = 0 #data buffer for interpolation
interval = 60 #number of seconds for resample interval
smsample = 4 #number of samples to average for smoothing (before resampling)
# Build the common resample grid in milliseconds (sMAP timestamps are ms),
# stepping every `interval` seconds between the (buffered) start and end.
x = start_time + tmbuffer
while x <= end_time - tmbuffer: #shrink total time interval by one interval length on each side to account for missing data 
    new_time.append(x*1000)
    x = x + interval
#newdata = c.data('Path = \'/new_york_times/m32/c2\'',start_time, end_time)
# Bulk pull of every matching stream. newdata appears to be a pair of
# parallel sequences: [0] uuids, [1] per-stream (timestamp, value) arrays --
# see the indexing below; verify against the smap client docs.
newdata = c.data('Metadata/Extra/DeviceName = \'TreeEastPer\' or Metadata/Extra/DeviceName = \'TreeWestPer\' or Metadata/Extra/DeviceName = \'TreeSouthPer\' or Metadata/Extra/DeviceName = \'TreeNorthPer\' or Metadata/Extra/DeviceName = \'TreeWestInt\' or Metadata/Extra/DeviceName = \'TreeEastInt\' or Metadata/Extra/DeviceName = \'TreeSouthInt\' or Metadata/Extra/DeviceName = \'TreeNorthInt\'',start_time, end_time)
#diag = c.query('select distinct uuid where Metadata/Extra/DeviceName = \'Plenum temp\'')
#print diag
#print newdata

#note this is a dumb smoothing function. It should really be averaging over a time range, not number of samples--as # of samples is subject to missing data errors
#note this is a dumb smoothing function. It should really be averaging over a time range, not number of samples--as # of samples is subject to missing data errors
def smoothList(rdata, degree):
    """Return a centered moving-average copy of *rdata*.

    Samples too close to either edge for a full window (the first
    degree-1 and the last degree-1 positions) are passed through
    unchanged.  For an odd *degree* the window holds exactly `degree`
    samples; for an even *degree* the symmetric slice holds degree+1
    samples, so the divisor is degree+1 to keep it a true mean.

    rdata  -- indexable numeric sequence (list or 1-D numpy column)
    degree -- window size in samples (>= 1)
    """
    smoothed = [0] * len(rdata)
    # Integer half-window.  The original used `degree/2`, which is only an
    # int under Python 2; `//` keeps the slice bounds integral on Py2 and Py3.
    half = degree // 2
    for i in range(len(smoothed)):
        if (i < degree - 1) or (i > len(rdata) - degree):
            # Not enough neighbors for a full window: keep the raw sample.
            smoothed[i] = rdata[i]
        elif (degree & 1) == 1:
            # Odd window: [i-half, i+half] is exactly `degree` samples.
            smoothed[i] = sum(rdata[i - half:i + half + 1]) / float(degree)
        else:
            # Even window: symmetric slice is degree+1 samples wide.
            smoothed[i] = sum(rdata[i - half:i + half + 1]) / float(degree + 1)
    return smoothed

# For each fetched stream: smooth the values, build an interpolation table
# over the stream's own timestamps, and sample it on the common resample
# grid (new_time).  Empty streams are skipped entirely.
for row in newdata[1]:  #for each stream
    # NOTE(review): leftover debug print (Python 2 statement form) --
    # consider removing before reuse.
    print type(row)
    if len(row[:,0]) > 0: #check that there is data
        smdata = smoothList(row[:,1],smsample)
        # bounds_error=False makes interp1d return NaN outside the stream's
        # time range instead of raising.
        f_data = scipy.interpolate.interp1d(row[:,0], smdata, bounds_error=False) #create an interpolation table  
        interpolated.append(f_data(new_time)) #interpolate to interval time and average x number of samples
    #else:
        #interpolated.append('no_data')
# Transpose so each output row is one timestamp across all streams.
transposed = zip(*interpolated)

# Build three header rows: uuid, human-readable name, and grid location.
headings = []
headings.append(['uuid'])
headings.append(['name'])
headings.append(['grid']) 
for x in xrange(len(newdata[0])):
    uuid = newdata[0][x]
    headings[0].extend([uuid])
    # NOTE(review): uuids are appended unconditionally, but name/grid (and
    # the data columns above) only for non-empty streams -- header columns
    # can mis-align with data columns when a stream returns no data; verify.
    if len(newdata[1][x][:,0]) > 0:
        headings[2].extend(c.query('select distinct Metadata/Extra/GridLocation where uuid = \'%s\'' % uuid))
        # repr(...).strip(...) strips any leading/trailing '[', 'u', "'",
        # ']' characters from the repr of the query result -- a fragile way
        # to unwrap a one-element unicode list; breaks if the name itself
        # starts/ends with those characters.
        headings[1].append(repr(c.query('select distinct Metadata/Extra/DeviceName where uuid = \'%s\'' % uuid)).strip('[u\'\']') + repr(c.query('select distinct Metadata/Extra/SensorLocation where uuid = \'%s\'' % uuid)).strip('[u\'\']'))
        #names.append(c.query('select distinct Path where uuid = \'%s\'' % uuid))
for row in headings:
    testsout.writerow(row)

# One CSV data row per resample timestamp: local time string first, then
# each stream's interpolated value at that instant (new_time is in ms,
# hence the /1000 back to seconds for fromtimestamp).
for x in xrange(len(transposed)):
    row = [datetime.fromtimestamp(new_time[x]/1000, time_zone).strftime(dt_format)]
    row.extend(transposed[x])
    testsout.writerow(row)

f1.close()


