#coding=utf-8

__doc__='''

    Module for word segment of a plain/html text-file
    
    LastUpdate: 2007-4-2 4:51:41
'''

# Wildcard import kept first so later explicit imports take precedence.
from logger import *
# Standard library
import codecs
import os
import re
import string
import StringIO
import sys
import time
# Project-local modules
import EncodingConvert as ec
import index
import vglobals

OUTPUT = False
DEBUG  = False
INFO = False
FIELD_FILENAME = -4294967296
outputFile = os.curdir+'\\sentences.txt'
outputFile2 = os.curdir+'\\words.txt'
CODECLIST   = os.curdir+'\\codec_list.txt'
currentFile = ''



# raised if current file had been indexed
class already_indexed_exception(Exception):pass

#bad way, using EncodingConvert.py already
def read_file(fname):
    codecStr = 'Unknow'
    isUnknow = False
    cs     = [codecs.BOM32_BE,codecs.BOM32_LE,codecs.BOM64_BE,\
              codecs.BOM64_LE,codecs.BOM_BE,codecs.BOM_LE,\
              codecs.BOM_UTF16_BE,codecs.BOM_UTF16_LE,codecs.BOM_UTF16,\
              codecs.BOM_UTF32_BE,codecs.BOM_UTF32_LE,codecs.BOM_UTF32,\
              codecs.BOM_UTF8,codecs.BOM]
    
    f = open(fname,'r')
    # first 2 bytes
    k = f.read(2)
    p = __in_list(k,cs)
    if p >= 0:
        print p
    # first 3 bytes
    else:
       k = f.read(3)
       p = __in_list(k,cs)
       if p >= 0:
          print p
       else:   
    # first 4 bytes
         k = f.read(2)
         p = __in_list(k,cs)
         if p >= 0:
             print p
         else:
            print p
    return p

def __in_list(str_,lst):
    for index in range(len(lst)):
        print '%s compares to %s' % (lst[index],str_)
        if lst[index] == str_:
            return index
    return -1

'''
  Whether a file is a text file
'''
def istextfile(filename, blocksize = 512):
    return istext(open(filename).read(blocksize))

def istext(s):
    text_characters = "".join(map(chr, range(32, 127)) + list("\n\r\t\b"))
    _null_trans = string.maketrans("", "")
    if "\0" in s:
        return 0
    
    if not s:  # Empty files are considered text
        return 1

    # Get the non-text characters (maps a character to itself then
    # use the 'remove' option to get rid of the text characters.)
    t = s.translate(_null_trans, text_characters)

    # If more than 30% non-text characters, then
    # this is considered a binary file
    if len(t)/len(s) > 0.30:
        return 0
    return 1

def segment(filecontent,fcallback,needsnap,adddoc,xdocid=-3,encoding=None):
    '''
      filecontent is a string to segment
      @return
          ewords : a list of English words
          cwords : a list of Chinese words
          others : other type of words
    '''
    ewords,cwords,decimal,sentence_pos,word_pos_e,word_pos_c = [],[],[],[],[],[]
    t = filecontent
    if t == None:
       # Activate the callback function
       docid = -3
       if fcallback : docid = fcallback(t,None,needsnap,adddoc)
       return ewords,cwords,decimal,word_pos_e,word_pos_c,docid
    #convert to unicode
    encc,t = ec.zh2unicode(t)
    if encc == 'unk':
        raise Exception("Unknown encoding")
    if not encoding:    
        reload(sys)
        sys.setdefaultencoding(encc)
    else:
        reload(sys)
        sys.setdefaultencoding(encoding)

    #segment to sentences
    if INFO:
        print "segment text to sentences using encoding",encc

    # Activate the callback function
    docid = fcallback(t,encc,needsnap,adddoc,xdocid)
    if docid == index.ALREADY_INDEXED:
        log(__name__,'this file had already been indexed.')
        raise already_indexed_exception() 
        
    sentences,sentence_pos = sentence_segment(t)
    #segment to words
    if INFO:
        print "segment sentences text to words..."

    for s in range(len(sentences)):
        all_words_in_this_sentence = word_segment(sentences[s],sentence_pos[s],ewords,cwords,word_pos_e,word_pos_c)
        ewords = all_words_in_this_sentence[0]
        cwords = all_words_in_this_sentence[1]
        decimal = all_words_in_this_sentence[2]
        word_pos_e = all_words_in_this_sentence[3]
        word_pos_c = all_words_in_this_sentence[4]

    return ewords,cwords,decimal,word_pos_e,word_pos_c,docid

# fix up words & word positions
def __do_with_overlapped_words(words,word_pos):
    for k in range(len(words)):
        if k >= len(words) : break
        if words[k] not in words[k+1:]:continue
        for i in range(k+1,len(words)):
            if i >= len(words):break
            if words[i] == words[k] and None != words[i]:
                # there is a same word
                word_pos[k].extend(word_pos[i])
                words[i] = None
                word_pos[i] = None
    #rebuilt the lists
    _words = []
    _pos = []
    for w in range(len(words)):
        if words[w] != None and word_pos[w] != None:
            _words.append(words[w])
            _pos.append(word_pos[w])
            
    return _words,_pos

# a object represent a word with its' position within the file
class word_obj:
    text = ''
    pos = [] #positions in this document
    count = 0
    def __init__(self,text,pos,count):
        self.text = text
        self.pos = pos
        self.count = count
    def __str__(self):
        return '%s %s %d' % (self.text,','.join(map(lambda a:str(a),pos)),self.count)

def sentence_segment(text):
    '''
     text is a whole text
    '''
    sentences = []
    rep = '|*YoY*|'
    split_str = ', . ? ; : \' ! \n'
    splits = split_str.split(" ")
    splits.append(u'，')
    splits.append(u'。')
    splits.append(u'！')
    splits.append(u'？')
    splits.append(u'“')
    splits.append(u'”')
    splits.append(u'-')
    splits.append(u'—')
    splits.append(u'……')
    splits.append(u'、')
    splits.append(u'：')
    splits.append(u'《')
    splits.append(u'》')
    splits.append(u'——')
    splits.append(u'————')
    splits.append(u'－')
    splits.append(u'＝')
    splits.append(u'＋')
    splits.append(u'～')
    splits.append(u'＃')
    splits.append(u'！')
    splits.append(u'·')
    splits.append(u'／')
    splits.append(u'）')
    splits.append(u'（')
    splits.append(u'　')
    splits.append(u'；')
    splits.append(u'’')
    splits.append(u'‘')

    start_pos = 0
    marks = []

    # remember the interpunctions' pos    
    for t in range(len(text)):
      if contains(text[t],splits):
         marks.append(t)
         
    if DEBUG:     
      print marks
    
    #array to hold stating position of every sentence
    sentence_pos = []  

    for m in marks:
        sentences.append(text[start_pos:m])
        sentence_pos.append(start_pos)
        start_pos = m+1

    #last item, don't forget it
    sentences.append(text[marks[-1]+1:])
    sentence_pos.append(marks[-1]+1)

    if DEBUG:
        for k in range(len(sentences)):
            print '[%d]%s' % (k,sentences[k])

    return sentences,sentence_pos

def contains(char,list):
    '''whether a unicode string contains the chars given by list'''
    for k in list:
      try: 
         if k == char:
            return True
      except UnicodeDecodeError:
          pass
        
    return False


def contain_alpha(sentence):
    '''
       Core method of segmentation
       It considers a mixing sentence,
       strip english words and return them
       as a list in the first of the returnning value;
       then the second returnning value is a non-spacechinese-sentence that without
       the words that stripped and returnned as list
       @return
          ewords   : english words in this sentence, list
          sentence : a non-space string without english
    '''

    #assume that sentence NOT has ' '    
    sentence = sentence.strip()

    if __do_not_have_chinese(sentence):
        return sentence.split(' '),None
    
    wstart = 0
    wlen = 0
    ewords = []
    k = 0

    wstart = 0    
    while k < len(sentence):
        NOALPHA = True
        #reach the end of sentence
        if k == len(sentence)-1 and wlen > 2:
              word = sentence[len(sentence) - wlen -1:].strip()
              ewords.append(word)
              sentence = sentence[:len(sentence) - wlen - 1]
              return ewords,sentence
            
        #if is non-chiness alphas
        if not u'\u4e00' <= sentence[k] <= u'\u9FB0':
            NOALPHA = False
            if not wlen > 0:
                wstart = k
            wlen = wlen + 1
            k = k + 1
        else:
            if wlen > 1 and NOALPHA == True:
              word = sentence[wstart:k].strip()
              ewords.append(word)
              sentence = sentence[:wstart] + sentence[k:]
              wlen = 0
              k = len(sentence[:wstart])
            elif   wlen == 1 and NOALPHA == True:
              word = sentence[wstart:k].strip()
              ewords.append(word)
              sentence = sentence[:wstart] + sentence[k:]
              wlen = 0
              k = len(sentence[:wstart])
            else:
              k = k + 1
        
    #if the sentence just remain the english word(s)
    if __do_not_have_chinese(sentence):
        ewords.extend(sentence.split(' '))
        sentences = None
    
    return ewords,sentence

def __do_not_have_chinese(text):
    #assume text NOT has ' '
    if len(text) < 2:
        return False
    t = True
    for k in text.strip():
        if u'\u4e00' <= k <= u'\u9FB0':
            t = False
    return t
    
def __is_english_word(word):
    ''' wether a word start with a english alphabet '''
    if(len(word)<=1):return False
    result = True
    for k in word:
        if not ( ( k >= 'a' and k <= 'z' ) or ( k >= 'A' and k <= 'Z') ):result = False

    if DEBUG:
      print word+" is english a word : "+ str(result)

    return result

def word_segment(text,pos_start,e_words,c_words,e_pos,c_pos):
    '''
      @parameter text  : a sentence
      @return e_words: a list of all english words
              c_words: a list of all chinese words
              decimal: a list of numbers
    '''
    #check if it is the file name
    if pos_start == [] and e_words == [] and c_words == [] and e_pos == [] and c_pos == []:
          encc,text = ec.zh2unicode(text)
          #if encc == 'unk':
             #raise Exception("Unknown encoding")
          #reload(sys)
          #sys.setdefaultencoding(encc)
    
    ewords = []
    cwords = []
    decimal = []
    skip_str = ' @ # $ ^ & * ( ) _ + = - ` { } [ ] \ \ | / ? < > " \' '
    skip = skip_str.split(" ")
    
    for m in range(len(text)):
        if skip.count(text[m]) > 0:
            text = text.replace(text[m],' ')    
    
    phrases = text.split(' ')
    cphrases = []

    #deal with the phrases    
    for phrase in phrases:
        _ewords,cphrase = contain_alpha(phrase)
        for k in _ewords:
            if not __is_english_word(k):
                try:decimal.append(int(k))
                except:pass
                _ewords.remove(k)

        ewords.extend(_ewords)
        if cphrase != None:
          cphrases.append(cphrase)
    
    #deal with chinese segment
    for ccphrase in cphrases:
      #print ccphrase
      for index in range(len(ccphrase)-1):
          cwords.append(ccphrase[index:index+2])
 
    #allocate the positions
    word_pos_e = []
    word_pos_c = []
    
    for k in ewords:word_pos_e.append(__find_multiple_index2(k,text,pos_start)) 
    for k in cwords:word_pos_c.append(__find_multiple_index2(k,text,pos_start))
        
    #for k in decimal:word_pos.append(__find_multiple_index(k))
    
    e_words.extend(ewords)
    c_words.extend(cwords)
    e_pos.extend(word_pos_e)
    c_pos.extend(word_pos_c)

    return e_words,c_words,decimal,e_pos,c_pos


def __find_multiple_index2(word,sentence,pos_start):
    iterator = re.finditer(word,sentence)
    wlist = []
    for i in iterator:
        try:
          wlist.append(i.start()+ pos_start)
        except Exception:
          pass
    return wlist

def ewords_filter(ewords):
    #some filting for english words
    return ewords

def clean_html(html_content):
    content_len = len(html_content)
    for i in range(content_len):
      try:   
        if html_content[i] == '<':
            for j in range(i,content_len):
                if html_content[j] == '>' :
                    html_content = \
                     html_content.replace(html_content[i:j],'')
      except IndexError:
        pass

    return html_content


def test_contain_alpha():
    a = u'决赛用图里面的地图名字叫Gaia'
    b,c = contain_alpha(a)
    print b,c

def test_full_english():
    a = 'fiction'
    print __do_not_have_chinese(a)


#remove duplication
#util method for list operation
def removeduplicate(blist):
    l = len(blist)
    if l <= 1 : return blist
    for idx in range(len(blist)):
        if idx >= len(blist) : break
        for k in range(len(blist)):
          try: 
            if k != idx and blist[idx] == blist[k]:
                blist.remove(blist[k])
          except IndexError:break
    return blist

#Daul version
def removeduplicate_daul(blist1,blist2):
    l = len(blist1)
    ll = len(blist2)
    if ll != l :
        print 'length not equal'
        return None,None
    if l <= 1 : return blist1,blist2
    for idx in range(len(blist1)):
        if idx >= len(blist1) : break
        for k in range(len(blist1)):
          try: 
            if k != idx and blist1[idx] == blist1[k]:
                blist1.remove(blist1[k])
                blist2.remove(blist2[k])
          except IndexError:break
    return blist1,blist2

# default content reader
# can be subclassing
class plugin_reader:
    def get_content(self):
        return self.content
    #to be inherited
    def read(self):
        try:
           self.content = open(self.filename).read()
        except Exception , inst:
           self.content = 'No such file, Perhaps file has been removed immediately.'
    def __init__(self,filename):
        self.filename = filename
        self.read()


#final output function, supposed to use within b-tree indexing
def segment_and_output(filename,text,callback=None):
    textfile = True
    if not text:
       textfile = False
    # parse filename
    # no snapshot
    # add to doc
    fewords,fcwords,fothers,fpos_e,fpos_c,fdocid = segment(filename,fcallback=callback,needsnap=False,adddoc=True)
    for i in range(len(fpos_e)):fpos_e[i]=(FIELD_FILENAME,)
    for i in range(len(fpos_c)):fpos_c[i]=(FIELD_FILENAME,)

    # parse main content
    # need snapshot
    # don't add to doc (last call did it)
    ewords,cwords,others,pos_e,pos_c,docid = segment(text,fcallback=callback,needsnap=True,adddoc=False,xdocid=fdocid)
    docid,fid = fdocid,fdocid
    out_text = StringIO.StringIO('')
    #merge filename index info
    ewords.extend(fewords)
    # to lower case
    for eid in range(len(ewords)):
        ewords[eid] = string.lower(ewords[eid])
    cwords.extend(fcwords)
    pos_e.extend(fpos_e)
    pos_c.extend(fpos_c)
    
    if INFO:
        print 'output to result text...'

    for ie in range(len(ewords)):
        time.sleep(vglobals.TEXT_SEGMENT_INTERVAL)
        counter = len(pos_e[ie])
        if counter ==0 : continue
        spc = '\n'
        if ie == 0 : spc = ''
        line = "%s%s %d[%d] %s" % (spc,ewords[ie],fid,counter,','.join(map(lambda a:str(a),pos_e[ie])))
        
        if pos_e[ie] != [-1,] and len(ewords[ie]) >=2:   
             out_text.write(line)

    for ie in range(len(cwords)):
        time.sleep(vglobals.TEXT_SEGMENT_INTERVAL)
        counter = len(pos_c[ie])
        if counter ==0 : continue
        spc = '\n'
        line = "%s%s %d[%d] %s" % (spc,cwords[ie],fid,counter,','.join(map(lambda a:str(a),pos_c[ie])))
        #drop the word that dosen't exsited
        if pos_c[ie] != [-1,] and len(cwords[ie]) >=2:   
             out_text.write(line)

    if INFO:
        print '\nwordsegment finished.'
    
    #sys.setdefaultencoding('UTF-8') 
    result = ec.zh2gbk(out_text.getvalue())[1]
    result = out_text.getvalue()
    out_text.close()
    return result

if __name__ == '__main__':
    f = open(outputFile2,'w')
    print 'file encoding:',f.encoding
    #sys.setdefaultencoding('UTF-8')
    f.write(segment_and_output("d:\\hanshu.TXT",33))
    #f.write(segment_and_output("d:\\1959年24期红旗.txt",33))
    f.flush()
    f.close()






'''
bugs

#1 数字前后跟1个中文字符 如：在1945年
#2 字符‘……’无法处理
#3 无法索引数字 算是bug吧




'''


