#!/usr/bin/python
# -*- coding: utf-8 -*-

# Explanatory blog post: http://www.cnblogs.com/kaituorensheng/p/3629729.html

import sys
import os
import re
import pdb

class Word:
    """A dictionary entry: its text, its frequency, and its length.

    NOTE: text must already be unicode before construction so that
    len() counts characters rather than UTF-8 bytes.
    """

    def __init__(self, text='', freq=0):
        self.text = text
        self.freq = freq
        self.length = len(self.text)

# Global dictionary table: maps a word (unicode) -> (character length, frequency).
wordDic = {}
# Length in characters of the longest dictionary entry; bounds the
# forward-match window used when segmenting Chinese text.
maxWordLength = 0

class LoaderDic:
    def __init__(self, dicPath):
        self.isLoad = False

        #无论用户是否传入'/',保证dicPath后面有一个'/',确保dirname(dicPath)正确性
        self.dicPath = dicPath + '/'
        print "init done"


    def isLoadDic():
        return self.isLoad

    def loadAllDic(self):
        from  os.path import join, dirname
        if self.isLoad == True:
            return None
        self.loadCharDic( join( dirname(self.dicPath), "chars.dic" ) )
        self.loadWordDic( join( dirname(dicPath), "words.dic" ) )
        self.loadWordDic( join( dirname(dicPath), "names.dic" ) )
        self.isLoad = True

    def loadCharDic(self, dicName):
        #pdb.set_trace()
        global maxWordLength
        fDic = file(dicName)
        for line in fDic.readlines():
            text, freq = line.split(' ')

            #这里保证词典文本是utf8编码,否则使用其它编码方式
            text = text.decode("utf-8")
            wordDic[text] = (len(text), int(freq) )
            if  len(text) > maxWordLength:
                maxWordLength = len(text)
        fDic.close()

    def loadWordDic(self, dicName):
        global  maxWordLength
        fDic = file(dicName)
        for line in fDic.readlines():
            text = unicode(line.strip(), "utf-8")
            wordDic[text] = ( len(text), 0)
            if  len(text) > maxWordLength:
                maxWordLength = len(text)

        fDic.close()

class Analysis:
    def __init__(self):
        pass

    def listAllSeg(self, text):
        self.recSegPrint(text, [])

    #递归分割文本:字符串在Python是不可变数据类型，所以在Python中是值传递
    def  recSegPrint(self, text, gridWords):
        if 0 == len(text):
            #print gridWords
            for word in gridWords:
                print word.encode('utf-8') + '/',
                # pass
            print
            return
        # 注意这里的bug:如果text还没有处理完毕,但是中文没有在词典中获得词汇,即getFirstTokens返回空列表
        #程序会退出循环,然后退出当前函数
        # 文本没处理完毕,一般不会取不到词,因为所有的字,在words.dic中,都是存在的
        # 再不济,也会取到单个字的词
        #pdb.set_trace()
        words = self.getFirstTokens(text)
#words = []
        for word in words:
            recText = text[ len(word): ]
            recGridWord = gridWords[:]
            recGridWord.append(word)

            #pdb.set_trace()
            self.recSegPrint(recText, recGridWord)

    def getFirstTokens(self, text):
        words = []

        if self.isChineseChar( text[0] ):
            return self.getPreChineseWords(text)

        if self.isASCIIabc(text[0]):
            words.append( getPreASCIIWord(text) )
            return words

        words.append( self.getPreSymbles(text) )
        return words

    def isChineseChar(self,charater):
        return 0x4e00 <= ord(charater) < 0x9fa6

    def isASCIIabc(self, character):
        return 'a' <= character <= 'z'
        #或者 return  ord('a') <= ord(character) <= ord('z')

    #此函数可能返回空的list
    def getPreChineseWords(self, text):
        words = []
        for i in range(1, maxWordLength + 1):
            word = wordDic.get( text[:i] )
            if word:
                words.append( text[:i] )
        return words


    def getPreASCIIWord(self, text):
        pos = 0
        while  (pos < len(text)) and isASCIIabc( text[pos] ) :
            pos += 1
        return text[0:pos]

    def getPreSymbles(self, text):
        pos = 0
        while  (pos < len(text) ) and (not self.isASCIIabc(text[pos]) ) and  (not self.isChineseChar(text[pos]) ) :
            pos += 1
        return text[0:pos]

if __name__ == "__main__":

    if len(sys.argv) < 3:
        print("Usage:python seg.py dicPath inputFile")
        # FIX: use sys.exit with a non-zero status so callers can detect the
        # usage error; the bare exit() builtin is meant for interactive use.
        sys.exit(1)
    dicPath = sys.argv[1]
    inputFile = open(sys.argv[2])

    loadDic = LoaderDic(dicPath)
    loadDic.loadAllDic()

    analysis = Analysis()
    try:
        for line in inputFile:
            # Input must be UTF-8; decode so slicing works per character.
            line = line.strip().decode('utf-8')
            analysis.listAllSeg(line)
    finally:
        # FIX: the input file was previously never closed.
        inputFile.close()
