#coding=utf-8

import os, sys, hashlib, urllib
import textsegment as ts,vglobals,database,EncodingConvert as ec,StringIO,time
from struct import *
from logger import log
import logger
import base64



# in multiple thread env, if there are many threads attempt to use db,
# db SHOULD be re-instanced like
# index.db = new database()
# NOTE(review): module-level singleton connection shared by every function
# in this module (add_file, search_by_words, get_summary, ...).
db = database.database()

# Module docstring.  The original used `==` (a no-op comparison that just
# evaluated and discarded a boolean), so the docstring was never assigned.
__doc__ = '''
    Functions for reading/writing invert-indexing contents
    Author:Zheng QingChuan <zqc53@126.com>
    LastUpdate: 2007-4-2 4:51:41
'''

#Files to store the index data
# NOTE(review): Windows-style relative paths.  '\d', '\s', '\p', '\c' are
# not recognized escape sequences, so they remain a literal backslash plus
# letter; only the leading '\\' is an explicit escaped backslash.
FILE_DOC = os.curdir+'\\index\doc.1'
FILE_SNAP = os.curdir+'\\index\snap.2'
FILE_DICT = os.curdir+'\\index\dict.3'
FILE_POS = os.curdir+'\\index\pos.4'
FILE_CHANGEQUEUE = os.curdir+'\\index\changequeue.5'

# Sentinel returned when identical content has already been indexed
ALREADY_INDEXED = -542312
# Sentinel returned by check_file_type() for directories
IS_DIR = '$$$***@@@iamdir'

#General structure of index files
# struct format strings; the '%d' placeholder is filled with the variable
# payload length at pack/unpack time
TYPES = {
        FILE_DOC:'256s1i',#filename with full path, id of the snap to this doc
        FILE_SNAP:'1i%ds', #length of text,text
        FILE_DICT:'1i%ds1i',#length of word, word, id in positon file
        FILE_POS:'1i%ds'   #length of position string, position string
    }

#globals
# cached record counts per file (0 = not counted yet, see get_doc_count)
RECORDS = {
     FILE_DOC:0
    }

#fix string to a special length
def fill_string(textToFill,length):
    '''
    Force textToFill to exactly `length` characters: truncate when longer,
    right-pad with spaces when shorter.  A falsy length (0/None) means
    "keep the string as-is".
    '''
    if not length:
        length = len(textToFill)
    # Slice handles the too-long case; ljust handles the too-short case.
    return textToFill[:length].ljust(length)


class indexfiles:
    '''
       I/O operations within index files 

       Low-level record I/O over the flat index files (FILE_DOC, FILE_SNAP,
       FILE_DICT, FILE_POS).  Records are struct-packed per the TYPES map;
       variable-length records carry a 4-byte length prefix.
    '''
    def __init__(self):pass
    
    #Read non-aligned data block, pattern 'i%ds' for default
    def g_read(self,pid,filename):
        # Return record number `pid` (1-based) from a variable-length file.
        # Each record is a 4-byte int length prefix followed by the payload;
        # FILE_DICT records carry one extra trailing int (position-file id).
        # NOTE(review): the file handle is never closed explicitly.
        file = open(filename,'rb')
        pt = filename == FILE_DICT and '%ds1i' or '%ds'
        sk = filename == FILE_DICT and calcsize('i') or 0
        # NOTE(review): `sk` above is computed but never used.
        skip = 0
        if pid == 1:
            # First record sits at offset 0 -- unpack and return directly.
            skip = unpack('i',file.read(4))[0] # a 4-byte int
            fmt = pt % (skip,)
            size = calcsize(fmt)
            ss = file.read(size)
            rt = unpack(fmt,ss)
            return rt
        else:
          # Skip the first pid-1 records: read each length prefix, then
          # seek past that record's payload (whence=1, relative seek).
          for k in range(1,pid):
              skip = unpack('i',file.read(4))[0] # a 4-byte int
              size = calcsize(pt%(skip,))
              file.seek(size,1)
        skip = unpack('i',file.read(4))[0]# a 4-byte int
        fmt  = pt%(skip,)
        size = calcsize(fmt)
        return unpack(fmt,file.read(size))

    def appendindex(self,objlist):
        # Write one dict record and one pos record per IndexObj in objlist.
        #check if there are data already in the index file
        hasdata = True
        pindexfile = open(FILE_DICT,'r')
        k = pindexfile.read(5)
        pindexfile.close()
        if not k or len(k) < 1:hasdata = False
        
        #offset for both dict and doc file,1 means default empty file
        start_id = 1
        if not hasdata:
            #index file is empty, just write them to the file
            indexfile = open(FILE_DICT,'r+')
            posfile   = open(FILE_POS,'r+')
            for o in objlist:
                fmt = TYPES[FILE_DICT] % (len(o.word),)
                s = pack(fmt,len(o.word),o.word,start_id)
                indexfile.write(s)
                #get number of positions
                # NOTE(review): len() of the position STRING, i.e. its
                # character count, not the number of positions -- confirm.
                pos_count = len(o.pos[o.docID])
                str_pos = '%d[%d] %s' %(start_id,pos_count,o.pos[o.docID])
                fmt = TYPES[FILE_POS] %(len(str_pos),)
                posfile.write(pack(fmt,len(str_pos),str_pos))
                start_id = start_id + 1
        else:
                #already datas existed in the indexfile, merge new datas to the indexfile
                # NOTE(review): start_id stays 1 and is never advanced in
                # this branch, so every appended dict entry reuses id 1 and
                # no real merge happens -- confirm intended behavior.
                indexfile = open(FILE_DICT,'a+')
                posfile   = open(FILE_POS,'a+')
                for o in objlist:
                    fmt = TYPES[FILE_DICT] % (len(o.word),)
                    s = pack(fmt,len(o.word),o.word,start_id)
                    indexfile.write(s)
                    #get number of positions
                    pos_count = len(o.pos[o.docID])
                    str_pos = '%d[%d] %s' %(start_id,pos_count,o.pos[o.docID])
                    fmt = TYPES[FILE_POS] % (len(str_pos),)
                    posfile.write(pack(fmt,len(str_pos),str_pos))

        indexfile.flush()
        indexfile.close()
        posfile.flush()
        posfile.close()

    
    #filetype: FILE_DOC
    def append(self,filetype,s,*args):
        # Append one struct-packed record.  `s` optionally fills the '%d'
        # placeholder of TYPES[filetype]; a falsy `s` uses the fixed format.
        file = open(filetype,'a')
        a = pack(s and TYPES[filetype]%s or TYPES[filetype],*args)
        file.write(a)
        file.flush()
        file.close()
        
    #filetype: FILE_DOC
    def update(self,filetype,pid,*args):
        # Overwrite record `pid` in place.  Only valid for fixed-size record
        # formats (FILE_DOC), since the seek offset is pid * calcsize(fmt).
       #try: 
        file = open(filetype,'r+')
        size = calcsize(TYPES[filetype])
        file.seek(pid*size)
        file.write(pack(TYPES[filetype],*args))
        file.flush()
        file.close()
       #except error:
       # print 'error during updating ',filetype
        
    #filetype: FILE_DOC, etc...
    def clear(self,filetype):
        # Truncate the file to zero length.
        open(filetype,'w').close()
        
    #read a piece of data from filetype
    def read(self,filename,pid,filetype=None):
        # Random-access read of fixed-size record `pid` (0-based).
        # `filetype` may override the struct format looked up in TYPES.
        file = open(filename)
        size = filetype == None and calcsize(TYPES[filename]) or calcsize(filetype)
        file.seek(pid*size)
        s = file.read(size)
        file.close()
        return filetype == None and unpack(TYPES[filename],s) or unpack(filetype,s)
    
    #count the records of FILE_DOC
    def get_doc_count(self):
      # Cached in the module-level RECORDS map; the first call scans the
      # whole file in record-sized chunks.
      if RECORDS[FILE_DOC] == 0:
          file = open(FILE_DOC,'rb')
          count = 0
          while file.read(calcsize(TYPES[FILE_DOC])):
              count = count+1
          file.close()
          RECORDS[FILE_DOC] = count
          return RECORDS[FILE_DOC]
      else :
        return RECORDS[FILE_DOC]
         
    class resultobj:
        # One search hit: a document id plus the keyword positions found
        # in it, with a relevance score derived from keyword proximity.
        #keyword = [a,b,c,d,e]
        #pos = [(a,(pos1,pos2...)),(b,(pos1,pos2,pos3...)),(c,(...)),d...]
        def __init__(self,keywords,pos,docid):
            self.docid = docid
            self.pos = pos
            self.keywords = keywords
            # base score: 100 points per query keyword
            self.fs = 100 * len(keywords)
            if self.docid > -1:
                self.filename = db.getDoc(self.docid)

        def getfilepath(self):
            # NOTE(review): self.filepath is never assigned anywhere in this
            # class (only self.filename is), so this raises AttributeError.
            return self.filepath
                
        def getattechments(self):pass
        
        def relation(self):
            # Relevance score: start from fs, subtract 50 per keyword with
            # no matched positions, then add a proximity bonus (100/distance)
            # for each pair of consecutive matched keywords.
            c = float( self.fs - ( len(self.keywords) - len(self.pos) ) * 50 )
            l = map(lambda a:a[1],self.pos)
            for i in range(len(l) - 1):
                if self.pos[i][0] + self.pos[i+1][0] == self.keywords[i] + self.keywords[i+1]:
                   ks = self.f(l[i],l[i+1])
                   if ks == 0 : ks = 1
                   c = c + 100 / ks
            return float(c / self.fs)

        def f(self,a,b):
            # Minimum absolute distance between any position in `a` and any
            # position in `b`; 4294967296 when either sequence is empty.
            k = 4294967296
            for i in a:
                for j in b:
                  s = abs(i - j)
                  k = s > k and k or s 
            return k
            
        def __str__(self):
            return self.filename + str(self.pos)

        def __cmp__(self,other):
                # Python 2 sort hook: higher relation() sorts first.
                return self.relation() < other.relation() and 1 or -1    
            

# Scratch file for prepared index text (Windows-style path)
PREP_INDEX = os.curdir + '\\prep_index.txt'
# Placeholder filename assigned to IndexObjs parsed in read_index_info()
PREP_MOCK_FILE = 'someDirver:\\path\\to\\file\\name.txt'

#invert index obj
class IndexObj:
    '''
    One inverted-index entry: a word plus the positions at which it occurs,
    keyed by document id.

    Attributes:
        word     -- the indexed term
        filename -- source file the word was found in
        docID    -- id of the document most recently merged in
        pos      -- {docID: '6,11,24,...'} comma-separated position string
    '''
    __BAD_HASH = -1721634

    def __init__(self,word='',filename='',docID='',pos=None):
        # `pos` now defaults to None: the old `pos={}` default (and the
        # class-level `pos = {}` attribute) made every default-constructed
        # instance share ONE dictionary, leaking positions between objects.
        self.word = word
        self.filename = filename
        self.docID = docID
        self.pos = {} if pos is None else pos

    def __cal(self,word=None):
        # Derive an integer by concatenating the ordinal of every character.
        # Raises for non-convertible input; __hash__ catches that.
        if not word:word = self.word
        return int(
        ''.join(map(lambda a: str(ord(unicode(a))), word))
        )

    def __str__(self):
        return 'DocID=%d,word=%s,pos=%s' % (self.docID,self.word,self.pos)

    def __cmp__(self,other):
        # Python 2 ordering hook: order by word only, positions ignored.
        return cmp( self.word, other.word )

    def __hash__(self):
        try:
            return self.__cal()
        except Exception:
            # words that cannot be hashed all collapse into one bucket
            return self.__BAD_HASH
               

#convert index-record-text to IndexObj
def read_index_info(text,start=0,end=-1):
    '''
    Parse index-record text into a list of IndexObj.

    Each row is "word docid[freq](,docid[freq]...) pos,pos,...":

    guangzhou 1[2] 3,6
    he 2[1] 1
    i 1[2] 4,-1
    live 1[2],2[1],3[4] 2,5,2,1,2,3,4
    shanghai 2[1] 3
    tom 1[1] 1  

    Returns (IndexObjs, has_next).  `start`/`end` were reserved for the
    (currently disabled) page-flipping feature, so has_next is always False.
    '''
    has_next = False
    IndexObjs = []
    # coerce early so bad paging arguments fail fast
    start = int(start)
    end = int(end)

    for row in text.split('\n'):
        # skip blank / degenerate rows
        if len(row) < 2:
            continue

        name, freq, pos = row.split(' ')
        freqs = freq.split(',')
        poses = pos.split(',')

        o = IndexObj()
        o.word = name
        o.filename = PREP_MOCK_FILE
        o.pos = {}  # fresh dict per record -- never share!

        # Walk the docid[freq] groups, consuming `freq` positions from the
        # flat position list for each document.
        pos_start = 0
        for f in freqs:
            # f looks like: 235[12]  ->  docid 235, 12 positions
            kg = f.split('[')
            docid = int(kg[0])
            freqOfWord = int(kg[1].split(']')[0])
            pos_end = pos_start + freqOfWord
            o.pos[docid] = ','.join(poses[pos_start:pos_end])
            pos_start = pos_end
            o.docID = docid  # the last docid of the row wins

        IndexObjs.append(o)
        # configured pause between records (throttles background indexing)
        time.sleep(vglobals.POSSTR_TO_INDEXOBJ_INTERVAL)

    return IndexObjs,has_next


#call textsegment module and convert the result
def xconvert(filetoindex,fid=-1):
    '''Delegate segmentation of file `filetoindex` to the textsegment
    module and return its raw output unchanged.'''
    segmented = ts.segment_and_output(filetoindex, fid)
    return segmented
      


#call textsegment module and convert the result
def convert(filetoindex,text,fcallback=None):
    '''
    Segment `text` for `filetoindex`, parse the segmenter output into
    IndexObjs, sort them by word and merge duplicates.

    Returns (deduplicated IndexObj list, has_next flag).
    '''
    segmented = ts.segment_and_output(filetoindex, text, callback=fcallback)
    objs, has_next = read_index_info(segmented)
    objs.sort()
    return ka(objs), has_next

# Marker word for entries scheduled for removal during deduplication
SIGN_DELETE = '+__#*sign_delete*#__+'

def ka(idxobjs):
    '''
    Merge adjacent IndexObjs sharing the same word (input must be sorted by
    word, as convert() guarantees).  Positions for a docid present in both
    entries are concatenated onto the earlier entry; the later duplicate is
    dropped.  The input list's surviving objects are returned in order.

    Fixes the original backtracking scheme, whose `idxobjs[i-backcount]`
    could evaluate to idxobjs[-1] at i == 0 (Python index wraparound),
    comparing the first element against the LAST and mis-merging lists
    that contain a single repeated word.
    '''
    if len(idxobjs) <= 1:
        return idxobjs
    keep = 0  # index of the most recent surviving entry
    for i in range(1, len(idxobjs)):
        if idxobjs[i].word == idxobjs[keep].word:
            # duplicate: mark it, fold its positions into the survivor
            idxobjs[i].word = SIGN_DELETE
            for docid in idxobjs[keep].pos:
                if docid in idxobjs[i].pos:
                    idxobjs[keep].pos[docid] = (
                        idxobjs[keep].pos[docid] + ',' + idxobjs[i].pos[docid])
        else:
            keep = i
    return [o for o in idxobjs if o.word != SIGN_DELETE]

def idxobj2posstr(idxobja):
    '''
    Serialize an IndexObj's pos dict into the stored position-string form
    "docid[count](,docid[count]...) pos,pos,...", e.g. {1:'3,6', 2:'1'}
    becomes "1[2],2[1] 3,6,1".

    Replaces the original StringIO/iterkeys/itervalues version: the local
    `import StringIO` shadowed the module-level import, and the py2-only
    iterator methods are unnecessary -- both halves iterate the same dict
    in the same key order, so two joins suffice.
    '''
    pos = idxobja.pos
    # header: docid[number-of-positions] per document
    head = ','.join('%d[%d]' % (docid, len(str(pos[docid]).split(',')))
                    for docid in pos)
    # tail: the raw position strings, in the same key order as the header
    tail = ','.join(pos[docid] for docid in pos)
    return head + ' ' + tail



'''

Index persistent operations

  Op sequence:
    Index Creating:
       add_index_doc() => add_index_snap() =>add_index_word()
   
'''        

'''
Indexing methods

todo: process attachment

'''
def add_file(filename):
    '''
    Index one local file: extract its text via the format plugins, register
    the document and its snapshot, then insert every word/position entry
    into the database (single commit at the end).

    Silently returns for directories and when the identical content was
    already indexed (signalled through an exception raised during
    conversion -- see ALREADY_INDEXED).
    '''

    # Offer a docid for indexing procedure
    # attach with doc and snap
    def _callback(text,enc,needsnap,adddoc,xdocid=-3):
        # todo: process attechment   
        if adddoc == True:
           xdocid = add_index_doc(filename)
        if needsnap == True:
           if not text :return xdocid
           # digest a hash key for the text
           f = open(filename,'rb')
           dochash = hashlib.sha224(f.read()).hexdigest()
           f.close()
           # check if the same file had been already indexed
           flg = check_hash(dochash)
           if flg == True:
               print 'Content ALREADY_INDEXED'
               # would raise a exception below
               return ALREADY_INDEXED
           maxlen = vglobals.MAX_SNAPSHOT_LEN
           #if len(text) > maxlen: text = text[:maxlen]
           add_index_snap(text,xdocid,dochash,encoding=enc)
        return xdocid

    # read indexable content from local file
    ftext = check_file_type(filename)
    # a dir, ignore it
    if ftext == IS_DIR:
       return
    objlist = []
    try:
        objlist,has_next = convert(filename,text=ftext,fcallback=_callback)        
    except Exception, ext:
        # ALREADY_INDEXED would raise a exception here
        # NOTE(review): str(Exception) logs the builtin Exception class
        # itself, not the caught exception's type -- probably a bug.
        log(__name__,'ext: '+str(Exception)+str(ext),level=logger.LVL_DEBUG)
        return
    # starts to insert words and positions
    for k in objlist:
           #time.sleep(vglobals.INDEX_WORD_INTERVAL)
           add_index_word(k) # don't do commit() here, 1000+ times slower
    db.commit()
    # update statistics
    vglobals.FILE_INDEXD += 1
    vglobals.LAST_INDEX = filename

# check hash of a string
def check_hash(text):
    '''Return db.checkHash(text) -- truthy when a snapshot with digest
    `text` has already been indexed.'''
    return db.checkHash(text)
# add doc, commited
def add_index_doc(filename):
    '''Insert `filename` into the doc table and return its new doc id.'''
    return db.addDoc(filename)

# add a snap commited
def add_index_snap(text,docid,dochash,encoding='gbk'):
    '''Store the snapshot `text` for `docid`, tagged with its encoding and
    content hash (used later for duplicate detection).'''
    db.addSnap(text, docid, encoding, dochash)

#Function to add a index word to database
def add_index_word(indexobj,autocommit=False):
    '''
    Insert or merge one IndexObj into the word/pos tables.

    If the word already exists, its stored position string is parsed back
    into an IndexObj, extended with the new document's positions and
    written back; otherwise a fresh pos record and word record are created.
    Commits only when `autocommit` is True (batch callers commit once).
    '''
    #Check there is a word existed
    word = indexobj.word
    word = db.getWord(word)                    
    #Already in the dict?
    #Get pos str, update posstr
    if word:
       # word row layout appears to be (..., word_text, pos_id) -- posid at
       # index 2, text at index 1; confirm against database.getWord.
       posid = word[2]
       posstr = word[1]+' '+db.getPos(posid)
       o_obj = read_index_info(posstr)[0][0]
       o_obj.docID = indexobj.docID
       o_obj.pos[indexobj.docID] = indexobj.pos[indexobj.docID]
       posstr = idxobj2posstr(o_obj)
       db.updatePos(posid,posstr,autocommit=False)
    #Not in dict?
    else:
        posstr = idxobj2posstr(indexobj)
        posid = db.addPos(posstr,autocommit=False,needid=True)
        db.addWord(indexobj.word,posid,autocommit=False) 
    if autocommit:db.commit()   

#Check filename and decide which format parser to use
#return the indexable text
def check_file_type(filename):
    '''
    Return the indexable text of `filename`, IS_DIR for directories, or
    None when no registered plugin supports the extension.

    vglobals.SUPPORT_FORMAT maps extension collections to plugin module
    names; the plugin is imported lazily and instantiated per file.
    '''
    if os.path.isdir(filename):
        return IS_DIR
    text = None
    # reuse the shared extension() helper instead of duplicating the
    # find/rfind parsing logic inline
    ext = extension(filename)
    # note: no break -- as before, the LAST matching plugin wins
    for support_list, str_plugin in vglobals.SUPPORT_FORMAT.items():
        if ext in support_list:
            mod = my_import(str_plugin)
            text = mod(filename).get_content()
    return text

# Customize import: resolve a dotted module path to the leaf module
def my_import(name):
    '''
    Import `name` and return the LEAF object, e.g. my_import('os.path')
    returns the os.path module, not os.

    Uses the standard __import__ recipe: __import__('a.b.c') returns the
    top package 'a', so walk the remaining components with getattr.  The
    previous version called __import__ on each dotted component as if it
    were a top-level module, which fails for real subpackages and relied
    on a broad exception fallback.
    '''
    mod = __import__(name)
    for comp in name.split('.')[1:]:
        mod = getattr(mod, comp)
    return mod


'''

     Search

'''    

# Main method
def search(str_kws,start=0,size=-1):
    '''
    Search the index for `str_kws` and return (results, has_next, total).

    "keywords:ext1,ext2" restricts results to the given file extensions.
    size <= 0 (the default) returns the whole result list.

    Fixes two paging defects: the `size <= 0` branch was dead code (both
    preceding branches returned unconditionally), and the default size=-1
    made end = start-1, silently slicing off the last result and wrongly
    reporting has_next=True.
    '''
    # Optional extension filter appended after ':'
    option = str_kws.split(':')
    exts = []
    if len(option) > 1:
        str_kws = option[0]
        exts.extend(map(lambda a: a.strip(), option[1].split(',')))

    # word_segment needs at least one separator to operate on
    if ' ' not in str_kws:
        str_kws += ' '

    kws = ts.word_segment(str_kws, [], [], [], [], [])
    kws[0].extend(kws[1])
    keywords = kws[0]

    resultobjs, has_next = search_by_words(keywords, start, size)
    if not resultobjs:  # no result matches
        return [], False, 0

    if exts:
        resultobjs = [r for r in resultobjs if extension(r.filename) in exts]

    # most relevant first (resultobj.__cmp__ sorts by relation() descending)
    resultobjs.sort()

    if size <= 0:
        # no paging requested: everything in one page
        return resultobjs, False, len(resultobjs)
    end = start + size
    if end >= len(resultobjs):
        return resultobjs[start:], False, len(resultobjs)
    return resultobjs[start:end], True, len(resultobjs)

# core search method
def search_by_words(keywords,start=0,size=-1):
    '''
    Look each keyword up in the index and group the hits per document.

    Returns (results, has_next): results is a list of indexfiles.resultobj,
    or None when nothing matched.  Each result additionally gets base64
    helper fields, a relevance rating and a text summary attached.
    '''
    results = []
    docIDs = []
    has_next = False
    for k in keywords:
        #get docIDs with $k
        posstr = db.getPosByWord(k)
        if not posstr:continue
        posstr = k+' '+posstr
        end = int(size) + int(start)
        idxobjs,has_next = read_index_info(posstr,start,end)
        for o in idxobjs:
            for docid in o.pos.iterkeys():
                if docid not in docIDs:
                  # first keyword hit for this document: new result object
                  docIDs.append(docid)
                  xpos = [[k,map(lambda a:int(a),o.pos[docid].split(','))],]
                  rs = indexfiles.resultobj(keywords,xpos,docid)
                  results.append(rs)
                else:
                  # document already seen: append this keyword's positions
                  for i in range(len(results)):
                      if results[i].docid == docid:
                          results[i].pos.append((k,map(lambda a:int(a),o.pos[docid].split(','))  ))              

    if len(results) == 0: return None,False

    # Setting output data
    for r in results:
        # NOTE(review): assumes Windows-style '\\' separators in the stored
        # document paths -- confirm for other platforms.
        r.path = r.filename[:r.filename.rfind('\\')]
        r.path64 = base64.b64encode(r.path)
        r.docid64 = base64.b64encode(str(r.docid))
        r.filename64 = base64.b64encode(r.filename)
        r.rating = r.relation()
        #get summary of the search result.
        r.summary = get_summary(r)
        #for w in keywords:
            #r.filename = r.filename.replace(w,'<font color=red>'+w+'</font>')
    #r.filename = ec.zh2utf8(r.filename)[1]
    return results,has_next

#get summary for a result object
def get_summary(rsobj):
    '''
    Build a short excerpt around the first stored position of each matched
    keyword, using the document snapshot, capped at 300 characters.
    '''
    summary = StringIO.StringIO()
    s_range = 24           # characters of context on each side of a hit
    words = []
    max_value_len = 300    # hard cap on the summary length
    # get all text related to the keywords
    for wordobj in rsobj.pos:
        word = wordobj[0]
        pos =  wordobj[1]
        # NOTE(review): the snapshot is re-fetched for every keyword; it
        # could be hoisted out of the loop.
        rs = db.getSnap(rsobj.docid)
        # not indexable, no summary
        if rs == None:return 'No summary for this file.'
        alltext = rs[0]    
        ln = len(alltext)
        # clamp [pos-24, pos+24] to text bounds; only the FIRST position
        # of each keyword is excerpted
        start = pos[0] - s_range > 0 and (pos[0] - s_range) or 0
        end = pos[0] + s_range > ln and ln or (pos[0] + s_range)
        summary.write(alltext[start:end]+'...')
        words.append(word)
    # highlight the keywords
    summary = summary.getvalue()
    #summary = ec.zh2gbk(summary)[1]
    if len(summary) > max_value_len:
        summary = summary[:max_value_len]
    #for w in words:
        #summary = summary.replace(w,u'<font color=red>'+w+u'</font>')
    #summary = ec.zh2utf8(summary)[1]
    return summary

def xget_summary(rsobj):
    '''
    Variant of get_summary that locates each keyword by substring search in
    the snapshot text instead of using the stored positions.
    '''
    summary = StringIO.StringIO()
    s_range = 24
    words = []
    max_value_len = 300
    # get all text related to the keywords
    for wordobj in rsobj.pos:
        word = wordobj[0]
        pos =  wordobj[1]
        if word not in rsobj.keywords:continue
        rs = db.getSnap(rsobj.docid)
        # not indexable, no summary
        if rs == None:return 'No summary for this file.'
        alltext = rs[0]    
        ln = len(alltext)
        try:
            # overwrite the stored positions with the first text occurrence
            pos = alltext.index(word)    
        except:continue
        start = pos - s_range > 0 and (pos - s_range) or 0
        end = pos + s_range > ln and ln or (pos + s_range)
        summary.write(alltext[start:end]+'...')
        words.append(word)
    # highlight the keywords
    summary = summary.getvalue()
    enc = ''
    try:
        enc = ec.getcodec(summary)        
    except:enc = 'gbk'
    #reload(sys)
    # NOTE(review): sys.setdefaultencoding is deleted at interpreter startup
    # unless sys is reload()ed first (see commented line above); on a stock
    # Python 2 this likely raises AttributeError -- confirm.
    sys.setdefaultencoding(enc)
    for w in words:
        try:
            # NOTE(review): replace(w, w) is a no-op; the intended
            # highlighting markup appears to have been removed.
            summary = summary.replace(w,w)    
        except:continue
    if len(summary) > max_value_len:
        summary = summary[:max_value_len]
    sys.setdefaultencoding('gbk')
    return summary


# get file extension
def extension(filename):
    '''Return the text after the last '.' in `filename`, or '' when the
    name contains no dot (or ends with one).'''
    dot = filename.rfind('.')
    if dot == -1:
        return ''
    return filename[dot + 1:]
        

if __name__ == '__main__':
   if len(sys.argv) == 1:
       print '''Usage: python index.py filename  or
                python index.py search '$keywords'
       
       '''
   elif len(sys.argv) == 2:
       filename = sys.argv[1]
       add_file(filename)
   elif len(sys.argv) == 3:
       kw = sys.argv[2]
       kw = ec.zh2gbk(kw)[1]
       print 'Result for ',kw,':'
       rs = search(kw)
       if not rs or len(rs) == 0:
           print 'No matchs.'
       else:
         for r in rs:
            print r