"""
Copyright Joshua Barron, Barron Software 2010.
hecate-profiler:

profiler.py
================================
Main module of the profiling system.  Provides APIs to track data object uses as well as to query
statistics about usage.

_recalculate_coordinate and _points2distance taken from http://bitbucket.org/bartekgorny/python-geography/src/tip/haversine.py
under the GPL license.
"""
from sqlalchemy.ext.sqlsoup import SqlSoup
from sqlalchemy import select
from sqlalchemy.sql.expression import func, extract
import math, hashlib, cPickle, time
from numpy import array, sqrt, mean
from scipy.cluster.vq import kmeans, vq
from numpy.numarray.nd_image import variance
from datetime import datetime, timedelta


class Location:
    """
    A geographic point (decimal-degree latitude/longitude) with helpers for
    great-circle distance calculations.

    _recalculate_coordinate and _points2distance taken from
    http://bitbucket.org/bartekgorny/python-geography/src/tip/haversine.py
    under the GPL license.
    """

    def _recalculate_coordinate(self, val, _as=None):
        """
        Accepts a coordinate as a tuple (degree, minutes, seconds).
        You can give only one of them (e.g. only minutes as a floating point
        number) and it will be duly recalculated into degrees, minutes and
        seconds.
        Return value can be specified as 'deg', 'min' or 'sec'; default return
        value is a proper (degrees, minutes, seconds) tuple.
        """
        deg, minutes, sec = val
        # Pass outstanding whole units from right to left (sec -> min -> deg).
        # Floor division keeps the carry integral on both Python 2 and 3; the
        # original "/" silently became true division on Python 3, which leaked
        # fractions into the degree field and corrupted the conversion.
        minutes = (minutes or 0) + int(sec) // 60
        sec = sec % 60
        deg = (deg or 0) + int(minutes) // 60
        minutes = minutes % 60
        # Pass the decimal part from left to right (deg -> min -> sec).
        dfrac, dint = math.modf(deg)
        minutes = minutes + dfrac * 60
        deg = dint
        mfrac, mint = math.modf(minutes)
        sec = sec + mfrac * 60
        minutes = mint
        if _as:
            # Collapse the normalized tuple to a single scalar in the unit
            # requested by the caller.
            sec = sec + minutes * 60 + deg * 3600
            if _as == 'sec': return sec
            if _as == 'min': return sec / 60.0
            if _as == 'deg': return sec / 3600.0
        return deg, minutes, sec

    def _points2distance(self, start, end):
        """
        Calculate distance (in kilometers) between two points given as
        (long, latt) pairs based on the Haversine formula
        (http://en.wikipedia.org/wiki/Haversine_formula).
        Implementation inspired by the JavaScript implementation from
        http://www.movable-type.co.uk/scripts/latlong.html
        Accepts coordinates as tuples (deg, min, sec), but coordinates can be
        given in any form - e.g. you can specify only minutes:
        (0, 3133.9333, 0)
        is interpreted as
        (52.0, 13.0, 55.998000000008687)
        which, not accidentally, is the lattitude of Warsaw, Poland.
        """
        start_long = math.radians(self._recalculate_coordinate(start[0], 'deg'))
        start_latt = math.radians(self._recalculate_coordinate(start[1], 'deg'))
        end_long = math.radians(self._recalculate_coordinate(end[0], 'deg'))
        end_latt = math.radians(self._recalculate_coordinate(end[1], 'deg'))
        d_latt = end_latt - start_latt
        d_long = end_long - start_long
        # Haversine: a is the square of half the chord length between the
        # points; c is the angular distance in radians.
        a = math.sin(d_latt/2)**2 + math.cos(start_latt) * math.cos(end_latt) * math.sin(d_long/2)**2
        c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
        # 6371 km = mean radius of the Earth.
        return 6371 * c

    def __init__(self, latitude, longitude):
        # Decimal degrees; note the (lat, long) argument order.
        self.lat = latitude
        self.long = longitude

    def WithinArea(self, radial_distance, origin):
        """
        WithinArea: Checks whether or not the distance between this location and the
        origin is less than the (kilometers) distance specified (within a circle, essentially).
        """
        # First, calculate the distance between this location and the origin
        # using the Haversine formula to account for curvature of the earth.
        # _points2distance takes (long, latt) pairs of (deg, min, sec) tuples.
        d = self._points2distance(((self.long, 0, 0), (self.lat, 0, 0)),
                                  ((origin.long, 0, 0), (origin.lat, 0, 0)))
        # d is now the distance in km between this location and the origin.
        return d <= radial_distance
        
class HecateProfiler:
	"""
	Tracks uses of application data objects and answers statistical queries
	about them (tag popularity, location clustering, time-of-day clustering).
	All state lives in a PostgreSQL database accessed through SqlSoup
	(tables: objects, object_tags, object_uses), scoped by the app_id
	supplied at construction time.
	"""

	def PopularObjectsForTags(self, tags):
		"""
		Return the ids of this app's objects carrying any of the given tags,
		ordered from most-used to least-used (one entry per (tag, object) pair).
		"""
		sel = select([self.db.object_tags.tag, self.db.object_uses.obj_id, func.count(self.db.object_uses.obj_id).label('count')],
					 (self.db.object_uses.obj_id == self.db.object_tags.obj_id) & (self.db.objects.app_id == self.app_id) &
					 (self.db.object_uses.obj_id == self.db.objects.id) & (self.db.object_tags.tag.in_(tags)),
					 group_by=[self.db.object_tags.tag, self.db.object_uses.obj_id],
					 order_by='count desc')
		rs = self.db.engine.execute(sel)
		results = [] 
		for r in rs:
			results.append(r[1])
		return results
			

	def MostPopularTags(self, *args, **kwargs):
		"""
		MostPopularTags: discovers and returns the most popular tags for this app.
		Kwargs list:
		================================================================
		location, distance = specify a location and radial distance for tag use
		time_of_day, time_interval = specify a time of day (datetime.time)
			and a time interval (datetime.timedelta) on either side of said time
		day_of_week = specify a day of week (0=Sunday, 6=Saturday)

		Returns a list of (tag, use_count) rows ordered by descending count.
		"""
		#optional filters; each narrows the set of usage rows counted below
		location = kwargs.get('location', None)
		distance = kwargs.get('distance', None)
		time_of_day = kwargs.get('time_of_day', None)
		time_interval = kwargs.get('time_interval', None)
		day_of_week = kwargs.get('day_of_week', None)
		
		# NOTE(review): this select filters objects.app_id but never joins
		# objects to object_uses/object_tags, so the app filter may act as a
		# cross join -- confirm the intended join condition.
		sel = select([self.db.object_tags.tag, func.count(self.db.object_tags.tag).label('count')],
                     (self.db.objects.app_id == self.app_id) & (self.db.object_uses.obj_id == self.db.object_tags.obj_id),
                     group_by=[self.db.object_tags.tag],
                     order_by='count desc')
		if location and distance:
			#this is a little expensive, but we need to pull the locations out and perform distance calculations
			#on them in order to get a list of datetime objects to filter the locations by
			locations = select([self.db.object_uses], order_by='obj_id')
			valid_datetimes = []
			locations = self.db.engine.execute(locations)
			for loc in locations:
				#row layout assumed: (obj_id, latitude, longitude, datetime) -- TODO confirm schema
				if location.WithinArea(distance, Location(loc[1], loc[2])):
					valid_datetimes.append(loc[3])
			#now we have a list of valid datetimes (datetimes are OK to filter by since we assume they are unique)
			sel = sel.where(self.db.object_uses.datetime.in_(valid_datetimes))
		if time_of_day and time_interval:
			#again, we have to pull out valid datetimes.
			dts = self.db.engine.execute("select datetime, extract(hour from datetime), extract (minute from datetime), extract(seconds from datetime) from object_uses")
			valid_datetimes = []
			for dt in dts:
				# NOTE(review): timedelta's positional args are (days, seconds,
				# microseconds), not (hours, minutes, seconds); both t and tod
				# are skewed the same way, so the comparison is distorted but
				# self-consistent -- confirm intent.
				t = datetime.today() + timedelta(dt[1], dt[2], dt[3]) #hour/minute/second
				tod = datetime.today() + timedelta(time_of_day.hour, time_of_day.minute, time_of_day.second)
				if abs(t-tod) <= time_interval:
					valid_datetimes.append(dt[0]) #append the datetime to the OK times
			sel = sel.where(self.db.object_uses.datetime.in_(valid_datetimes))
		if day_of_week:
			#this one is easier, we just need to make sure the day of the week is the same
			# NOTE(review): day_of_week == 0 (Sunday) is falsy and silently skips this filter
			sel = sel.where(extract('dow', self.db.object_uses.datetime) == day_of_week)
		rs = self.db.engine.execute(sel)
		tagset = [r for r in rs] #convert to a permanent list
		return tagset

	def _variance(self, centroid, points):
		"""
		Mean squared deviation of `points` from the given `centroid`, assuming
		an equal probability distribution.  Works element-wise when the inputs
		are numpy arrays.
		"""
		var = 0
		avg = centroid
		for point in points:
			var += (avg - point) ** 2
		var = var / len(points) #equal probability distribution
		# NOTE(review): numpy arrays have no 'append', so this unwrap only
		# fires for list-like inputs -- confirm which types reach here.
		if hasattr(var, 'append') and len(var) == 1:
			var = var[0]
		return var
	def LocationWeighting(self, dispersion):
		"""
		LocationWeighting: converts a location dispersion value into a 0-1 weight.
		Dispersions up to 18 get full weight; larger ones decay as 18/dispersion.
		"""
		if dispersion <= 18:
			return 1
		# NOTE(review): under Python 2 this is integer division when
		# dispersion is an int > 18, yielding 0 -- confirm dispersion is float.
		return 18 / dispersion
	
	def LocationSatisfiesThreshold(self, location, tag, threshold):
		"""
		True when `location` lies within `threshold` km (scaled down by the
		tag's dispersion weight) of the nearest location centroid for `tag`.
		"""
		dispersion, centroids = self.LocationBound(tag, True)
		#collapse the dictionaries down to this single tag's entries
		dispersion = dispersion[tag]
		centroids = centroids[tag]
		nearest_distance = 100000000 #arbitrarily large
		#find the haversine distance to the closest centroid
		for centroid in centroids:
			c = Location(centroid[0][0], centroid[0][1])
			distance = Location(0,0)._points2distance(((c.lat, 0, 0), (c.long, 0, 0)),
													  ((location.lat, 0,0), (location.long, 0, 0)))
			distance = abs(distance)
			if distance < nearest_distance:
				nearest_distance = distance
		#tight clusters (weight 1) keep the full threshold; dispersed ones shrink it
		dispersion_weight = self.LocationWeighting(dispersion)
		weighted_threshold = threshold - (threshold * (1 - dispersion_weight))
		return nearest_distance <= weighted_threshold

	def LocationBound(self, tags=None, ret_centroids=False):
		"""
		LocationBound: discovers if supplied tag(s) is/are location bound.
		=======================
		tags: a single tag or a list of tags to be checked; if not supplied, all tags are checked
		ret_centroids: when True, also return the per-tag centroid data

		Returns a dictionary indexed by tags indicating the dispersion distance value (smaller = tight clusters).
		"""
		#normalize a single tag into a one-element list
		if tags and not hasattr(tags, "append"):
			tags = [tags]

		#first, we need to get all locations by tag
		rs = select([self.db.object_tags.tag, self.db.object_uses.latitude, self.db.object_uses.longitude, self.db.objects.id],
					(self.db.object_uses.obj_id == self.db.object_tags.obj_id) & (self.db.objects.app_id == self.app_id) & 
					(self.db.objects.id == self.db.object_uses.obj_id),
					order_by=self.db.object_tags.tag)
		if tags:
			rs = rs.where(self.db.object_tags.tag.in_(tags))
		rs = self.db.engine.execute(rs)
		#group (lat, long) pairs by tag; str() round-trips the DB's numeric
		#column values into plain floats
		tlocs = {}
		for r in rs:
			if tlocs.has_key(r[0]):
				tlocs[r[0]].append((float(str(r[1])), float(str(r[2]))))
			else:
				tlocs[r[0]] = [(float(str(r[1])), float(str(r[2])))]
		centroids = self.LocationCentroidsForTags(tlocs)
		#now, we can calculate the index of dispersion by dividing the 
		#mean location by the variance 
		lc = {}
		for tag in centroids.keys():
			idx = 0.0
			#we first need the variance for this centroid
			#centroids[tag][n] = (centroid coordinates, points assigned to it)
			for centroid in centroids[tag]:
				var = self._variance(array(centroid[0]), array(centroid[1]))
				centroid = centroid[0]
				idx = idx + (abs(var[0] / centroid[0]) + abs(var[1] / centroid[1])) / 2
			# NOTE(review): len(centroids[tag][0]) measures the first
			# (centroid, points) pair (always 2), not the number of centroids;
			# len(centroids[tag]) was probably intended -- confirm.
			idx = idx / len(centroids[tag][0])
			zero = (0, 0, 0)
			#express the dispersion index as a km distance from the origin
			dispersion_distance = Location(0,0)._points2distance((zero, zero), (zero, (idx, 0, 0)))
			lc[tag] = dispersion_distance
		if ret_centroids:
			return lc, centroids
		return lc

	def TimeWeighting(self, dispersion):
		"""
		TimeWeighting: converts a time dispersion value into a 0-1 weight.
		Dispersions up to 300 get full weight; larger ones decay as 300/dispersion.
		"""
		if dispersion <= 300:
			return 1
		# NOTE(review): under Python 2 this is integer division when
		# dispersion is an int > 300, yielding 0 -- confirm dispersion is float.
		return 300 / dispersion
	
	def TimeSatisfiesThreshold(self, time, tag, threshold):
		"""
		True when datetime `time` falls within `threshold` seconds (scaled by
		the tag's dispersion weight) of the nearest time centroid for `tag`.
		"""
		dispersion, centroids = self.TimeBound(tag, True)
		#collapse the dictionaries down to this single tag's entries
		dispersion = dispersion[tag]
		centroids = centroids[tag]
		nearest_time = timedelta(seconds=100000000) #arbitrarily large
		for centroid in centroids:
			#centroids are seconds-past-midnight; anchor them to today so they
			#can be compared with the datetime argument
			ctime = datetime.today() + timedelta(seconds=int(centroid))
			distance = abs(ctime - time)
			if distance < nearest_time:
				nearest_time = distance
		#tight clusters keep the full threshold; dispersed ones shrink it
		dispersion_weight = self.TimeWeighting(dispersion)
		weighted_threshold = threshold - (threshold * (1 - dispersion_weight))
		return nearest_time <= timedelta(seconds=weighted_threshold)
	
	def TimeBound(self, tags=None, ret_centroids=False):
		"""
		TimeBound: discovers if supplied tag(s) is/are time bound.
		======================
		tags: a single tag or a list of tags to be checked; if not supplied, all tags are checked
		ret_centroids: when True, also return the per-tag time centroids

		Returns a dictionary indexed by tags indicating the 'correlation' value.
		"""
		#normalize a single tag into a one-element list
		if tags and not hasattr(tags, "append"):
			tags = [tags]
		#first, we need to get all times by tag
		rs = select([self.db.object_tags.tag, self.db.object_uses.datetime],
					(self.db.object_uses.obj_id == self.db.object_tags.obj_id) & (self.db.objects.app_id == self.app_id) & 
					(self.db.objects.id == self.db.object_uses.obj_id),
					order_by=self.db.object_tags.tag)
		if tags:
			rs = rs.where(self.db.object_tags.tag.in_(tags))
		rs = self.db.engine.execute(rs)
		ttimes = {}
		for r in rs:
			#reduce each timestamp to seconds past midnight
			t = r[1].time()
			t = t.hour * 3600 + t.minute * 60 + t.second
			if ttimes.has_key(r[0]):
				ttimes[r[0]].append(t)
			else:
				ttimes[r[0]] = [t]
		#NOTE: the local name `variance` shadows the module-level numpy import
		centroids, variance = self.TimeCentroidsForTags(ttimes)
		#now calculate the index of dispersion by dividing the mean time by variance
		lt = {}
		for tag in centroids.keys():
			idx = variance[tag] / len(centroids[tag])
			lt[tag] = idx
		if ret_centroids:
			return lt, centroids
		return lt
				
	
	def TimeCentroidsForTags(self, times_by_tag={}):
		"""
		TimeCentroidsForTags: discovers the time centroids for time bound tag clusters via the k-means algorithm.

		times_by_tag: {'tag': [seconds_past_midnight, ...]}
		Returns (centroids_by_tag, distortion_by_tag).
		NOTE(review): the mutable default argument is shared across calls;
		harmless while only read, but fragile.
		"""
		tbt = times_by_tag
		results = {}
		variance = {}
		for key in tbt.keys():
			arr = array(tbt[key])
			#kmeans returns (centroids, mean distortion); the distortion is
			#stored under the name "variance" here
			centroids, variance[key] = kmeans(arr, 4)
			results[key] = centroids
		return results, variance

	def LocationCentroidsForTags(self, locations_by_tag={}):
		"""
		LocationCentroidsForTags: discovers location centroids for locational tag clusters via the k-means algorithm.
		===================================================================
		locations_by_tag: a dictionary of the format
				{'tag': [(lat1, long1), (lat2, long2), ...]}

		Returns a dictionary indexed by tag mapping to a list of
		(centroid, [member points]) pairs, one per discovered centroid
		(k=4 as per research).
		NOTE(review): the mutable default argument is shared across calls;
		harmless while only read, but fragile.
		"""
		lbt = locations_by_tag
		results = {}
		for key in lbt.keys():
			arr = array(lbt[key])
			centroids, variance = kmeans(arr, 4)
			#assign every observation to its nearest centroid
			code, distance = vq(arr, centroids)
			#we need to create a list of points by centroid to accompany the returned centroids
			cts = []
			for ci in range(len(centroids)):
				pts = []
				for index in range(len(code)):
					if code[index] == ci:
						pts.append(arr[index].tolist())
				cts.append((centroids[ci], pts))
			results[key] = cts
		return results
         
	def TrackUsage(self, obj, dt, long, lat, tags=[], relationships = None):
		"""
		TrackUsage: updates the profiler with an additional use of a specific data object.
		If this is the first use of the data object, you must supply a list of tags or provide them within the object
		as a 'tags' list-like attribute.  Additionally, you may specify a list of relationship objects for pattern
		tracking (currently unused by this method).
		"""
		# NOTE(review): self.hasher is one sha256 object updated on every call,
		# so an object's hash depends on everything tracked before it; a fresh
		# hashlib.sha256() per call was probably intended -- confirm.
		self.hasher.update(cPickle.dumps(obj))
		hash = self.hasher.hexdigest()
		if not self.db.engine.execute(select([self.db.objects], self.db.objects.hash == hash)).fetchone():
			#we need to insert this object into the master objects table
			ins = self.db.objects._table.insert().values(app_id=self.app_id, hash=hash)
			self.db.engine.execute(ins)
		obj_id = self.db.engine.execute(select([self.db.objects.id], self.db.objects.hash == hash)).fetchone()[0]
		if len(tags) == 0:
			#we need to introspect the object for tags
			# NOTE(review): tags becomes None when the object has no "tags"
			# attribute, which would raise in the insert loop below.
			tags = obj.tags if hasattr(obj, "tags") else None
		#check if the tags used exist for this object pair in the database
		tagset = self.db.engine.execute(select([self.db.object_tags.tag], self.db.object_tags.obj_id == obj_id)).fetchall()
		if not tagset:  #we need to insert all the tags in this case
			for tag in tags:
				ins = self.db.object_tags._table.insert().values(obj_id=obj_id,tag=tag)
				self.db.engine.execute(ins)
		#insert the usage into the appropriate table
		ins = self.db.object_uses._table.insert().values(obj_id=obj_id, longitude=long, latitude=lat, datetime=dt)
		self.db.engine.execute(ins)

	def __init__(self, app_id):
		#initializes the database handle for the profiler
		# NOTE(review): DSN credentials are hard-coded here
		self.db = SqlSoup('postgres://hecate:profilepass@localhost:5432/profiler-db')
		self.hasher = hashlib.sha256()
		self.app_id = app_id

if __name__ == '__main__':
    # Smoke test: record one use of a dummy object for app #1.
    # (Removed the leftover `import pdb; pdb.set_trace()` debugger breakpoint
    # and the redundant local `from datetime import datetime` -- datetime is
    # already imported at module level.)
    now = datetime.now()
    rnd_obj = (7, 3, 2)
    p = HecateProfiler(1)
    p.TrackUsage(rnd_obj, now, 3.1456, 4.5678, ('blah', 'bleh'))
