#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
import string
from sets import Set

from stemming.porter2 import stem

from google.appengine.ext import db

from stopWords import listStopWords
import logging


    
def indexNewRecord(entities_to_index, personKey):
    """Tokenize the searchable fields of a person record and persist one
    Index entity per unique (non-stop-word) token, parented under personKey.

    Args:
        entities_to_index: list whose first element is the person entity to
            index (only element [0] is used here).
        personKey: datastore key of the person record; used as the parent
            key (and stored as parentKey) of every Index entity created.

    Returns:
        The list of datastore keys returned by db.put for the stored
        Index entities.
    """
    from model import Index

    regex_punctuation = re.compile('[%s]' % re.escape(string.punctuation), re.UNICODE)
    person = entities_to_index[0]

    def _tokenize(text):
        # Replace every punctuation character with a space, then split on
        # whitespace to obtain the individual tokens.
        return re.sub(regex_punctuation, " ", text).split()

    tokenList = list()

    # Author name, for full text search.
    tokenList.extend(_tokenize(person.author_name))

    # Only index the alias (local part) of the author email.
    tokenList.extend(_tokenize(person.author_email.rpartition('@')[0]))

    # Author phone.
    tokenList.extend(_tokenize(person.author_phone))

    # source_name, source_date, sex, date_of_birth and age are deliberately
    # not indexed.

    # First/last name tokens are kept separately so they can be given extra
    # weight below.  They are lower-cased here so they actually match the
    # lower-cased tokenCounter keys (the previous case-sensitive membership
    # test silently missed capitalized names, so the weight never applied).
    first_name_tokens = [t.lower() for t in _tokenize(person.first_name)]
    tokenList.extend(first_name_tokens)

    last_name_tokens = [t.lower() for t in _tokenize(person.last_name)]
    tokenList.extend(last_name_tokens)

    # Address fields.
    tokenList.extend(_tokenize(person.home_street))
    tokenList.extend(_tokenize(person.home_neighborhood))
    tokenList.extend(_tokenize(person.home_city))
    tokenList.extend(_tokenize(person.home_state))
    tokenList.extend(_tokenize(person.home_postal_code))
    tokenList.extend(_tokenize(person.home_country))

    # "other": strip everything up to and including the 'description:'
    # marker so the description label itself is not indexed.
    other = person.other.partition('description:')[2]
    tokenList.extend(_tokenize(other))

    # Count occurrences of each token, case-insensitively.
    tokenCounter = dict()
    for token in tokenList:
        tokenLower = token.lower()
        tokenCounter[tokenLower] = tokenCounter.get(tokenLower, 0) + 1

    index_entities_to_put = []
    for token in tokenCounter.iterkeys():
        if token in listStopWords:
            continue

        # Store the stemmed form so queries stemmed the same way match.
        stemmedToken = stem(token).lower()
        ind = Index(parent=personKey)
        ind.token = stemmedToken
        ind.parentKey = personKey
        ind.frequency = tokenCounter[token]
        if token in first_name_tokens or token in last_name_tokens:
            # Weight added to make sure that words in the name get
            # higher precedence in ranked search results.
            ind.frequency = ind.frequency + 5

        index_entities_to_put.append(ind)

    return db.put(index_entities_to_put)
    
    
def search_full_text(query_words, results_to_fetch):
    """Full-text search over the Index entities built by indexNewRecord.

    Each query word is lower-cased, stemmed and matched first exactly and
    then by prefix against Index.token.  For multi-word queries the per-word
    result sets are intersected (AND semantics), so only records matching
    every non-stop-word survive.

    Args:
        query_words: list of query word strings.
        results_to_fetch: currently unused -- kept for interface
            compatibility with callers.  TODO(review): confirm intent.

    Returns:
        A list of person entities (parents of the matching Index entities).
    """
    from model import Index

    recordReference = list()

    solutionKeys = list()     # parent (person) keys matching all words so far
    solutionKeyIDs = list()   # sorted string forms of solutionKeys, for merging

    oneWordQuery = (len(query_words) == 1)

    firstRecord = True

    for word in query_words:

        word = stem(word.lower())  # stemming on upper case doesn't work
        if word in listStopWords:
            # Stop words are never indexed, so skip them in the query too.
            continue

        # First always try to match the exact word; if that yields fewer
        # than noOfRecords hits, top up with a prefix match.
        queryExact = db.Query(Index, keys_only=True)
        queryExact.filter("token =", word)
        queryExact.order('-frequency')

        # token > word AND token < word + u"\ufffd" selects every token
        # that starts with `word` (standard datastore prefix-query trick).
        queryPrefix = db.Query(Index, keys_only=True)
        queryPrefix.filter("token >", word)
        queryPrefix.filter("token <", word + u"\ufffd")
        queryPrefix.order('token')
        queryPrefix.order('-frequency')

        if oneWordQuery:
            noOfRecords = 100
        else:
            # Need a high number to handle AND semantics across words.
            noOfRecords = 1000

        recordList = queryExact.fetch(noOfRecords)
        if len(recordList) < noOfRecords:
            recordList.extend(queryPrefix.fetch(noOfRecords - len(recordList)))

        if oneWordQuery:
            # Single word: the matches are the answer; no intersection needed.
            recordReference = list(db.get(key.parent()) for key in recordList)
        else:
            if firstRecord:
                # Seed the solution set with the first word's matches.
                solutionKeys = list(key.parent() for key in recordList)
                solutionKeyIDs = list(key.__str__() for key in solutionKeys)
                solutionKeyIDs.sort()
                firstRecord = False

            elif len(solutionKeys) != 0:  # was `is not 0`: identity test on an
                                          # int is implementation-dependent
                parentRecordList = list(key.parent() for key in recordList)
                parentRecordListIDs = list(key.__str__() for key in parentRecordList)

                # Intersect the sorted ID lists with a linear merge,
                # keeping only records common to both.
                parentRecordListIDs.sort()

                i = 0
                j = 0
                noMoreSolution = False
                lenSortedList = len(solutionKeyIDs)
                lenParentRecordList = len(parentRecordListIDs)

                tempSolutionKeyIDs = list()
                while i < lenSortedList:
                    while j < lenParentRecordList:

                        if parentRecordListIDs[j] == solutionKeyIDs[i]:
                            tempSolutionKeyIDs.append(solutionKeyIDs[i])
                            j = j + 1
                            break
                        elif parentRecordListIDs[j] > solutionKeyIDs[i]:
                            i = i + 1
                            if i >= lenSortedList:
                                noMoreSolution = True
                                break
                        else:  # smaller
                            j = j + 1
                    if noMoreSolution or j >= lenParentRecordList:
                        break
                    i = i + 1

                solutionKeyIDs = tempSolutionKeyIDs

        # Keep only the keys whose string form survived the intersection.
        tempSolutionKeys = list()
        for key in solutionKeys:
            if key.__str__() in solutionKeyIDs:
                tempSolutionKeys.append(key)
        solutionKeys = tempSolutionKeys

        if len(solutionKeys) == 0:  # was `is 0`: same identity-test fix
            # No record matches all words seen so far; AND semantics mean
            # the final answer is empty, so stop early.
            break

    if not oneWordQuery:
        recordReference.extend(list(db.get(key) for key in solutionKeys))

    return recordReference