#!/usr/bin/env python
from abc import ABCMeta, abstractmethod, abstractproperty
import summary as ss
import buffer
import pylru
from operator import itemgetter
import copy
import random,sys, math
import numpy as np
import util
#import time
from heapq import merge

debug = False

class SpaceSaving  :
	"""Abstract base class for Space-Saving top-k frequency summaries.

	Subclasses maintain a ``self.summary`` structure holding at most
	``self.k`` (= int(1/eps)) counters and override :meth:`add` to ingest
	(key, weight) entries.
	"""
	# Python 2 style abstract-base-class registration.
	__metaclass__ = ABCMeta
	def __init__(self,eps):
		# eps: relative error tolerance; k: counter capacity derived from it.
		self.eps = eps
		self.k = int(1/eps)

	@abstractmethod
	def add(self,item):
		# Subclasses ingest one entry here.
		# NOTE(review): the 'yield None' body turns this into a do-nothing
		# generator if ever called directly on the base; subclasses override it.
		yield None

	def get_summary(self, type='dict'):
		# Delegate to the subclass-provided summary object.
		# NOTE(review): relies on subclasses having set self.summary.
		return self.summary.get_summary(type)

	def bulk_update(self,D2):
		# Default no-op hook (do-nothing generator); buffered subclasses override.
		yield None

	def pr_to_stderr(self,key, error=None):
		# Debug helper (Python 2 print syntax): emit key/error to stderr when enabled.
		if debug :
			print >> sys.stderr, key, error
	def print_trace(self):
		# NOTE(review): self.tracker is never assigned in this file — presumably
		# attached externally; confirm before calling.
		self.tracker.stats.print_summary()

#===============================================================================
# Space-Saving with Stream-Summary Metwally 2005
#===============================================================================

#class SpaceSavingBucket(SpaceSaving) :
#	def __init__(self,eps):
#		super(SpaceSavingBucket, self).__init__(eps)
#		self.summary = ss.StreamSummary()
#
#	def add(self,item):
#		#self.pr_to_stderr("item",item)
#
##		 Add space-saving logic
#
#		element  = (item[0], item[1], 0.0)
#
#		if self.summary.has_element(item[0]) :
#			self.summary.increase_element(element)
#		elif len(self.summary) < self.k :
#			self.summary.add_element(element) # item is [(i,j),w] where i,j is the index in the product matrix and w is the weight of the entry
#		else :
#			self.summary.replace_min(element)

#	def get_summary(self):
#		return self.summary.get_element_list()

#===============================================================================
# Space-Saving with Stream-Summary and Buffer.
# Efficient Computation of Frequent and Top-k Elements in Data Streams [Metwally 2005]
#===============================================================================
class SpaceSavingBucketBuffer(SpaceSaving):
	"""Space-Saving over a Stream-Summary, batched through an exact buffer.

	Entries accumulate exactly in a small buffer; when it fills, the buffer is
	merged into the Stream-Summary (Metwally 2005) and cleared.
	"""

	def __init__(self,eps, bufsize):
		super(SpaceSavingBucketBuffer,self).__init__(eps)
		self.summary = ss.StreamSummary()
		self.bufsize = bufsize
		self.buffer = buffer.Buffer(self.bufsize)

	def add(self,entry) :
		"""Ingest one (key, weight) entry; flush the buffer into the summary when full."""
		if self.buffer.has_element(entry[0]) :
			# Key already buffered: accumulate its weight exactly.
			self.buffer.update_element(entry[0],entry[1])
		else :
			# Buffered element format: (key, weight, error-bound).
			self.buffer.add(entry[0], (entry[0],entry[1],0.0))
		if len(self.buffer) == self.bufsize :
			# Buffer full: merge into the summary and start a fresh buffer.
			self.merge(self.buffer)
			self.buffer.clear()

	def bulk_update(self, D2):
		"""Classic per-element Space-Saving update of the summary from buffer D2."""
		for element in D2.get_buffer().values() :
			if self.summary.has_element(element[0]) :
				self.summary.increase_element(element)
			elif len(self.summary) < self.k :
				# Summary below capacity: plain insert.
				self.summary.add_element(element)
			else :
				# At capacity: evict the current minimum in favour of this element.
				self.summary.replace_min(element)

	def merge(self,D2):
		"""Set-union merge: push the summary into buffer D2, then rebuild the top-k summary.

		D2 is mutated: every summary element is added to (or accumulated into)
		it. The summary is then rebuilt from the heaviest entries of the union;
		ties at the k-th weight are all kept.
		"""
		if len(self.summary) == 0 :
			# Empty summary: adopt the buffer contents directly.
			self.summary.addAll(D2.get_buffer().values())
			return

		# Walk buckets from the minimum upward, folding each element into D2.
		_bucket = self.summary.get_min_bucket()
		while _bucket is not None :
			for key,item in _bucket.elements.items() :
				if D2.has_element(key) :
					D2.update_element(key, item[1],item[2])
				else :
					D2.add(key,item)
			_bucket = _bucket.next

		# Rebuild the summary from the union, heaviest first.
		temp = list(D2.get_buffer().values())
		temp.sort(key=itemgetter(1), reverse=True)
		self.summary.clear_summary()
		if len(temp) > self.k :
			kth = temp[self.k][1]	 # weight of the (k+1)-th entry; keep everything at least this heavy
			for item in temp :
				if item[1] >= kth :
					self.summary.add_element((item[0],item[1],item[2]))
		else :
			self.summary.addAll([(item[0],item[1],item[2]) for item in temp])
		return

	def get_summary(self,_type='dict'):
		"""Flush any buffered entries into the summary and return it.

		BUG FIX: the buffer is now cleared after the flush; previously its
		entries survived and were merged (double counted) by a later add()
		or get_summary() call. The default `_type='dict'` matches the base class.
		"""
		if len(self.buffer) > 0 :
			self.merge(self.buffer)
			self.buffer.clear()
		return self.summary.get_summary(type=_type)

#===============================================================================
# Space-Saving with Stream-Summary and LRU Cache
# A More Accurate Space Saving Algorithm for Finding the Frequent Items [Zhou, 2010]
#===============================================================================
#class SpaceSavingBucketLRU(SpaceSaving) :
#	def __init__(self,eps,cachesize):
#		super(SpaceSavingBucketLRU,self).__init__(eps)
#		self.summary = ss.StreamSummary()
#		self.lru = pylru.lrucache(cachesize)
#
#	def add (self,item):
#		element  = (item[0], item[1], 0.0)
#		if self.summary.has_element(item[0]) :
#			self.summary.increase_element(element)
#		elif item[0] in self.lru :
#			lru_item = self.lru[item[0]]
#			lru_item = (lru_item[0],element[1]+lru_item[1], lru_item[2])
#
#			if len(self.summary) < self.k :
#				self.summary.add_element(lru_item)
#			else :
#				self.summary.replace_min(lru_item)
#			del self.lru[item[0]]
#		else :
#			self.lru[item[0]] = element # add element to lru. least recently used item automatically popped if cache is full
#
#		elkeys = self.summary.get_elements().keys()
#		buckkeys = self.summary.get_bucket_el_keys()
		#self.pr_to_stderr("elkeys  : ", elkeys)
		#self.pr_to_stderr("buckkeys:", buckkeys)
		#self.pr_to_stderr("buckets", self.summary.buckets.keys())


#===================================================================
# Space-Saving with Stream-Summary and LRU Cache with limit
# This is a variant where we only add an item to Space-Saving if its weight exceeds the value of current min bucket
#===================================================================

#class SpaceSavingBucketLimitLRU(SpaceSaving):
#	def __init__(self,eps,cachesize):
#		super(SpaceSavingBucketLimitLRU,self).__init__(eps)
#		self.summary = ss.StreamSummary()
#		self.lru = pylru.lrucache(cachesize)
#	def add (self,item):
#		element  = (item[0], item[1], 0.0)
#		if self.summary.has_element(item[0]) :
#			self.summary.increase_element(element)
#		elif item[0] in self.lru :
#			lru_item = self.lru[item[0]]
#			lru_item = (lru_item[0],element[1]+lru_item[1], lru_item[2])
#			if self.summary.get_min_bucket() != None and lru_item[1] > self.summary.get_min_bucket().value :
#			   if len(self.summary) < self.k :
#				   self.summary.add_element(lru_item)
#			   else :
#				   self.summary.replace_min(lru_item)
#			else :
#			   self.lru[item[0]] = lru_item
#		else :
#			self.lru[item[0]] = element # add element to lru. least recently used item automatically popped if cache is full
#
#
##===============================================================================
## Space-Saving with Hashtable
##===============================================================================
#class SpaceSavingHash(SpaceSaving) :
#	def __init__(self,eps):
#		self.summary = {}
#	def add(self,item):
#		return
#
##===============================================================================
## Space-Saving with Hashtable and LRU Cache
##===============================================================================
#class SpaceSavingHashLRU(SpaceSaving) :
#	def __init__(self,eps, cachesize):
#		super(SpaceSavingHashBuffer, self).__init__(eps)
#		self.summary = {}
#		self.lru = pylru.lrucache(cachesize)
#	def add(self,item):
#		return

#===============================================================================
# Space-Saving with Hashtable and buffer
#===============================================================================
class SpaceSavingHashBuffer(SpaceSaving) :
	"""Space-Saving backed by a plain dict summary, batched through an exact buffer."""

	def __init__(self,eps,bufsize):
		super(SpaceSavingHashBuffer, self).__init__(eps)
		self.summary = {}		# key -> (key, weight, error-bound)
		self.bufsize = bufsize
		self.buffer = buffer.Buffer(self.bufsize)

	def add(self,entry):
		"""Ingest one (key, weight) entry; flush the buffer into the summary when full."""
		if self.buffer.has_element(entry[0]) :
			# Key already buffered: accumulate its weight exactly.
			self.buffer.update_element(entry[0], entry[1])
		else :
			# Buffered element format: (key, weight, error-bound).
			self.buffer.add(entry[0], (entry[0],entry[1],0.0))
		if len(self.buffer) == self.bufsize :
			# Buffer full: merge into the summary and start a fresh buffer.
			self.merge(self.buffer)
			self.buffer.clear()

	def bulk_update(self, D2):
		"""Merge buffer D2 into the dict summary (same contract as merge()).

		BUG FIXES vs. the original:
		- the result of sorted() was discarded, so the k-th weight was read
		  from an unsorted list;
		- D2.update_element was called with the wrong arity (cf. merge());
		- '>' kept strictly fewer entries than merge()'s '>=' (ties dropped);
		- the dict rebuild via zip(*temp) raised IndexError on an empty union;
		- iteritems() replaced with items(), consistent with merge().
		"""
		if len(self.summary) == 0 :
			# Empty summary: adopt a deep copy of the buffer contents.
			self.summary = copy.deepcopy(D2.get_buffer())
			return

		for key, item in self.summary.items() :
			if D2.has_element(key) :
				D2.update_element(key, item[1], item[2])
			else :
				D2.add(key, item)

		temp = list(D2.get_buffer().values())
		temp.sort(key=itemgetter(1), reverse=True)	# FIX: sort in place; sorted()'s result was discarded
		self.summary.clear()
		if len(temp) > self.k :
			kth = temp[self.k][1]	 # weight of the (k+1)-th entry
			for item in temp :
				if item[1] >= kth :	# FIX: keep ties at the k-th weight, as merge() does
					self.summary[item[0]] = (item[0],item[1],item[2])
		else :
			self.summary = dict((item[0], item) for item in temp)	# FIX: safe when temp is empty
		return

	def merge(self,D2):
		"""Set-union merge: push the summary into D2, then rebuild the top-k summary dict.

		Ties at the k-th weight are all kept.
		"""
		if len(self.summary) == 0 :
			# Empty summary: adopt a deep copy of the buffer contents.
			self.summary = copy.deepcopy(D2.get_buffer())
			return
		for key, item in self.summary.items() :
			if D2.has_element(key) :
				D2.update_element(key, item[1],item[2])
			else :
				D2.add(key,item)

		# Rebuild the summary from the union, heaviest first.
		temp = list(D2.get_buffer().values())
		temp.sort(key=itemgetter(1), reverse=True)
		self.summary.clear()
		if len(temp) > self.k :
			kth = temp[self.k][1]	 # weight of the (k+1)-th entry
			for item in temp :
				if item[1] >= kth :
					self.summary[item[0]] = item
		else :
			self.summary = dict(zip(map(lambda x: x[0],temp), temp))
		return

	def get_summary(self,type='dict'):
		"""Flush buffered entries and return the summary as a dict (default) or list.

		BUG FIX: the buffer is now cleared after the flush so repeated calls
		(or later adds) do not merge the same entries twice. The default
		matches the base class signature.
		"""
		if len(self.buffer) > 0 :
			self.merge(self.buffer)
			self.buffer.clear()
		if type == 'list' :
			return self.summary.values()
		else :
			return self.summary


def round_slice_and_sort(_list,max):
	"""Round each entry's weight to 3 decimals, sort by weight descending, keep the first `max`."""
	rounded = [(entry[0], round(entry[1], 3)) for entry in _list]
	rounded.sort(key=itemgetter(1), reverse=True)
	return rounded[:max]


def count_exact(a,limit):
	"""Exact weighted frequency count (ground truth for the sketches).

	Sums the weight of each key in `a` (an iterable of (key, weight, ...)
	entries) and returns the `limit` heaviest (key, weight) pairs, weights
	rounded to 3 decimals, sorted by weight descending.
	"""
	count = {}
	for item in a :
		# Accumulate total weight per key; dict.get avoids the explicit
		# zero-initialisation branch.
		count[item[0]] = count.get(item[0], 0.0) + item[1]

	# FIX: items() instead of the Python-2-only iteritems(), consistent with
	# the dict iteration style used elsewhere in this file.
	exact = [(k, round(v, 3)) for k, v in count.items()]
	exact.sort(key=itemgetter(1), reverse=True)
	return exact[:limit]

def main() :
	"""Benchmark driver: compare exact counts against the Space-Saving variants
	on zipf-distributed synthetic data, appending one result dict per round to
	../out/results.
	"""
	ss_limit = 1000		# NOTE(review): unused in the original; kept for reference
	limit = 1000
	base = 1000000
	bufinfo = 0			# recorded in the result metadata only
	filename = "results"	# NOTE(review): unused; the output path below is hard-coded
	_skew = 1.5
	for s in range(1) :
		skew = _skew + (s * 0.5)
		for i in range(1) :
			# FIX: np.random.zipf requires an integer sample count; math.pow
			# returns a float. Also avoid shadowing the builtin input().
			n_input = int(base * math.pow(2, i))
			# NOTE(review): the zipf exponent is hard-coded to 1.8 although a
			# `skew` value is computed and recorded — confirm which is intended.
			zipfdist = list(np.random.zipf(1.8, n_input))
			a = [("".join(["a", str(el)]), random.random()) for el in zipfdist]
			results = {}
			results["info"] = {"input": n_input, "buffer": bufinfo, "skew": skew, "round": i}

			with util.Timer() as t :
				res = count_exact(a, limit)
			results["exact"] = (res, round(t.interval, 5))

			results["ssb"] = {}
			results["ssbb"] = {}
			results["sshb"] = {}
			for j in range(4) :
				k = 10 * (j + 1)
				eps = 1.0 / k

				# FIX: the original timed SpaceSavingBucket here, but that class
				# is commented out above, so it raised NameError. Its "ssb" slot
				# is left empty until the class is restored.

				with util.Timer() as t :
					# FIX: local renamed from `ss`, which shadowed the
					# `import summary as ss` module alias.
					sketch = SpaceSavingBucketBuffer(eps, k)
					for item in a :
						sketch.add(item)
				results["ssbb"][j] = (round_slice_and_sort(sketch.get_summary('list'), limit), round(t.interval, 5))
				print("FINISHED SpaceSavingBucketBuffer !!!!!!!!!!: %.03f sec." % t.interval)

				with util.Timer() as t :
					sketch = SpaceSavingHashBuffer(eps, k)
					for item in a :
						sketch.add(item)
				results["sshb"][j] = (round_slice_and_sort(sketch.get_summary('list'), limit), round(t.interval, 5))
				# FIX: label previously said "SpaceSavingBucketBuffer" for the hash variant.
				print("FINISHED SpaceSavingHashBuffer !!!!!!!!!!: %.03f sec." % t.interval)

			# FIX: `with` guarantees the handle is closed even if a write fails
			# (the original open()/close() pair leaked on exceptions).
			with open("../out/results", 'a') as f :
				f.write(str(results))

#main()
