from indexer import organize
from urllib2 import *

import cherrypy, json, os, Queue, threading, urllib

#q = Queue.Queue(100)  # NOTE: SolrMp3.index() references this module-level queue; it raises NameError until this line is restored

class SolrMp3:
    """CherryPy controller exposing MP3 indexing and Solr search endpoints."""

    @cherrypy.expose
    def index(self, dir):
        """Queue a directory for background indexing; return the queue depth.

        NOTE(review): the module-level queue ``q`` is commented out at the
        top of this file, so this handler currently raises NameError —
        restore the queue (and the worker thread) before exposing this.
        """
        if dir:
            q.put(dir)
        return json.dumps({'queue' : q.qsize() })

    @cherrypy.expose
    def reindex(self):
        """Re-index everything under the ``media`` dir next to this file."""
        media_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'media')
        org = organize.Organize(None, media_dir)
        count = org.reindex()
        # Response key kept byte-for-byte (clients may depend on it).
        return json.dumps({'num of docs indexes: ' : count })

    @cherrypy.expose
    def search(self, query = '', facet = '', page = 1, debug = False):
        """Query the local Solr instance and return results/facets as JSON.

        CherryPy passes query-string parameters as strings, so ``page`` is
        coerced in __build_query and any non-empty ``debug`` value is truthy.
        """
        url = 'http://localhost:8983/solr/select'
        data = self.__build_query(query, facet, page)

        # Parse the body as JSON (wt=json) instead of eval()ing Solr's
        # "python" response format: eval on a network response executes
        # arbitrary code if the server (or a man-in-the-middle) is hostile.
        conn = urllib.urlopen(url, data)
        try:
            response = json.loads(conn.read())
        finally:
            conn.close()  # don't leak the HTTP connection

        facets = []
        if response["facet_counts"]['facet_fields']:
            facets = response["facet_counts"]['facet_fields']

        if debug:
            debug_info = url + '?' + data
        else:
            debug_info = None

        cherrypy.response.headers["Content-Type"] = "application/json"
        return json.dumps({
            'query' : query,
            'total_results_found' : response["response"]["numFound"],
            'search_results' : response["response"]["docs"],
            'facets' : facets,
            'debug_info' : debug_info
        })

    def __build_query(self, query, facet, page):
        """Build the urlencoded parameter string for Solr's /select handler.

        An empty ``query`` matches every document and shows all facet values
        (mincount 0); otherwise title/author/genre are searched and empty
        facet buckets are hidden (mincount 1).
        """
        if query == '':
            q = '*:*'  # match all documents
            facet_mincount = 0
        else:
            # NOTE(review): user input is interpolated unescaped into Solr
            # query syntax; characters like ':' or '(' will alter or break
            # the query — consider escaping before interpolation.
            q = 'title:%s OR author:%s OR genre:%s' % (query, query, query)
            facet_mincount = 1

        start = (int(page) - 1) * 100  # zero-based offset; 100 rows per page

        return urllib.urlencode([
            ('q', q),
            ('wt', 'json'),  # JSON so the response can be json.loads()ed safely
            ('facet', 'true'),
            ('facet.field', 'genre'),
            ('facet.field', 'performer'),
            ('facet.mincount', facet_mincount),
            ('fq', facet),
            ('start', start),
            ('rows', '100')
        ])


#working_dir = os.path.dirname(os.path.abspath(__file__))


# Indexer
#def worker():
#	while True:
#		item = q.get()
#		org = organize.Organize(item, working_dir + '/media')
#		org.organize()
#		q.task_done()

#t = threading.Thread(target=worker)
#t.daemon = True
#t.start()

#q.join()       # block until all tasks are done
