#!/usr/bin/env python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------#
# constructDictFields.py
# ZhihuiJin <zhihuijin@gmail.com>
# Thu Feb  8 13:28:03 JST 2007
#
#----------------------------------------------------------------------------#

""" This script is used to generate dictionary fields which are going to be
indexed by using Lucene package.
"""

import os, sys, optparse, codecs
import kanjidic
from dict import edict, pinyinTable, hanziTable, strokeTable, hanChar
from tools import kana

#----------------------------------------------------------------------------#
# PUBLIC
#----------------------------------------------------------------------------#

# Shared lookup tables, loaded once at import time and used by the
# field-generation functions below.
_kanjiDic       = kanjidic.getKanjidic()                    # kanji -> readings/stroke info
_pinyinTable    = pinyinTable.getPinyinTable()              # hanzi -> pinyin readings
_strokeTable    = strokeTable.StrokeTable()                 # char -> ascii stroke sequence
_cognateMap     = hanziTable.getCognateRelation()           # kanji <-> hanzi cognate pairs
_ch2jpShapeMap  = hanziTable.getHanziToKanjiShapeMapping()  # same-shape hanzi -> kanji
_jp2chShapeMap  = hanziTable.getKanjiToHanziShapeMapping()  # same-shape kanji -> hanzi

_chCommonChar   = hanChar.getChCommonChar()  # set of common Chinese characters
_jpCommonChar   = hanChar.getJpCommonChar()  # set of common Japanese characters
# Placeholder token indexed when a character has no common counterpart
# in the other language.
_unknownChar    = '#'

#----------------------------------------------------------------------------#
def dumpDict(dictionary, isJp, mergeFlag):
    """ Emit indexing fields for every word in the given dictionary.

    'isJp' selects Japanese vs. Chinese field generation; 'mergeFlag' is
    forwarded to outputFields to collapse the indexed fields into one.
    """
    dictObj = edict.getDictionary(dictionary)

    for word in dictObj.keys():
        entry = dictObj[word]

        # Stored fields: kept verbatim alongside the index.
        fields = [
            ['1', 'word', word],
            ['1', 'dictReading', entry.reading],
            ['1', 'dictMeaning', entry.meaning],
        ]

        # Indexed fields: word length plus each character tagged with its
        # position ("0_x", "1_y", ...).
        miscField = set(['len=' + str(len(word))])

        charField = set()
        for pos, char in enumerate(word):
            charField.add(str(pos) + '_' + char)

        fields.append(['0', 'char',  charField])
        fields.append(['0', 'miscInfo', miscField])

        # Language-specific reading/stroke/mapping fields.
        if isJp:
            fields += getJpWordFields(word)
        else:
            fields += getChWordFields(word)

        outputFields(fields, mergeFlag)


#----------------------------------------------------------------------------#
def outputFields (fields, mergeFlag):
    """ output fields, merge the indexed fields into one field if 'mergeFlag'
    option is true
    """
    storeFields = [f for f in fields if f[0] == '1']
    indexFields = [f for f in fields if f[0] == '0']

    # stored fields
    for field in storeFields:
        print ':'.join(field).encode('utf-8')

    # indexed fields
    if not mergeFlag:
        for field in indexFields:
            field[2] = ' '.join (field[2])
            print ':'.join(field).encode('utf-8')

    else:
        mergeStr = ''
        for field in indexFields:
            if field[1] == 'onReading': 
                field[2] = [r + 'o' for r in field[2]]

            if field[1] == 'kunReading':
                field[2] = [r + 'k' for r in field[2]]

            field[2] = ' '.join (field[2])
            mergeStr += field[2] + ' ' 

        mergeField = ['0', 'index', mergeStr]
        print ':'.join(mergeField).encode('utf-8')

    print


#----------------------------------------------------------------------------#
def testWord():
    """ do testing on some words
    """
    chwords = [u'不学无术', u'实验室感染']
    jpwords = [u'会計検査院', ]

    for word in chwords:
        print "Testing Chinese word ", word 
        fields = getChWordFields(word)
        outputFields (fields, False)

    for word in jpwords:
        print "Testing Japanese word ", word
        fields = getJpWordFields(word)
        outputFields (fields, False)
       

#----------------------------------------------------------------------------#
def getJpWordFields(word):
    """ Create the indexed fields for a Japanese word.

    Returns a list of ['0', fieldName, valueSet] entries: stroke-sequence
    prefixes, on/kun/kana readings, pinyin readings of cognate hanzi, and
    the mapped (cognate or same-shape) hanzi.  Every value is tagged with
    the character's position in the word, e.g. '0_...'.
    """
    strokeField        = set()
    onReadingField     = set()
    kunReadingField    = set()
    pinyinReadingField = set()
    mappedHanziField   = set()

    for pos, char in enumerate(word):
        prefix = str(pos) + '_'

        charType = kana.scriptType(char)
        if charType == kana.Script.Katakana or charType == kana.Script.Hiragana:
            # NOTE(review): kana positions are recorded in the *pinyin*
            # field as a 'kana' placeholder -- looks intentional (keeps
            # positions aligned); confirm against the query side.
            pinyinReadingField.add(prefix + 'kana')

        if charType == kana.Script.Kanji:
            if char in _strokeTable:
                # Index the full stroke count plus ascii stroke-sequence
                # prefixes of length 1..6 for partial stroke matching.
                # (Inner index renamed from 'i' -- it shadowed the outer
                # loop variable in the original.)
                stroke = _strokeTable.asciiForm(char)
                strokeField.add(prefix + str(len(stroke)))
                for j in range(min(6, len(stroke))):
                    strokeField.add(prefix + stroke[0:j + 1])

            cognateSet = _cognateMap.forwardGet(char)
            shapeSet   = _jp2chShapeMap.forwardGet(char)

            # Pinyin readings of all Chinese cognates, tones stripped.
            for hanzi in cognateSet:
                readings = _pinyinTable.get(hanzi)
                if readings:
                    for r in readings:
                        pinyinReadingField.add(prefix + _pinyinTable.stripTones(r))

            # On/kun readings of the kanji itself, from kanjidic.
            result = _kanjiDic.get(char)
            if result:
                for r in result.onReadings:
                    onReadingField.add(prefix + r)
                for r in result.kunReadings:
                    kunReadingField.add(prefix + r)

            # Hanzi reachable from this kanji by cognate or shared shape.
            for hanzi in cognateSet.union(shapeSet):
                mappedHanziField.add(prefix + hanzi)

            # Mark kanji with no common Chinese counterpart.
            if char not in _chCommonChar:
                mappedHanziField.add(prefix + _unknownChar)

    # Kana reading is the union of on and kun readings.  Computed once
    # here instead of per-iteration as in the original; the result is the
    # same since the two sets only ever grow inside the loop.
    kanaReadingField = onReadingField.union(kunReadingField)

    return [
        ['0', 'stroke', strokeField],
        ['0', 'onReading', onReadingField],
        ['0', 'kunReading', kunReadingField],
        ['0', 'kanaReading', kanaReadingField],
        ['0', 'pinyinReading', pinyinReadingField],
        ['0', 'mappedHanzi', mappedHanziField],
    ]


#----------------------------------------------------------------------------#
def getChWordFields(word):
    """ Create the indexed fields for a Chinese word.

    (Docstring fixed: the original said "Japanese word" -- copy-paste
    error from getJpWordFields.)  Returns ['0', fieldName, valueSet]
    entries: stroke-sequence prefixes, on/kun/kana readings of cognate
    kanji, pinyin readings, and mapped (cognate or same-shape) kanji,
    each value tagged with the character's position in the word.
    """
    strokeField        = set()
    onReadingField     = set()
    kunReadingField    = set()
    pinyinReadingField = set()
    mappedKanjiField   = set()

    for pos, char in enumerate(word):
        prefix = str(pos) + '_'

        cognateSet = _cognateMap.reverseGet(char)
        shapeSet   = _ch2jpShapeMap.forwardGet(char)

        if char in _strokeTable:
            # Index the full stroke count plus ascii stroke-sequence
            # prefixes of length 1..6 for partial stroke matching.
            # (Inner index renamed from 'i' -- it shadowed the outer loop
            # variable in the original.)
            stroke = _strokeTable.asciiForm(char)
            strokeField.add(prefix + str(len(stroke)))
            for j in range(min(6, len(stroke))):
                strokeField.add(prefix + stroke[0:j + 1])

        # Pinyin readings of the character itself, tones stripped.
        readings = _pinyinTable.get(char)
        if readings:
            for r in readings:
                pinyinReadingField.add(prefix + _pinyinTable.stripTones(r))

        # On/kun readings of all Japanese cognates, from kanjidic.
        for kanji in cognateSet:
            result = _kanjiDic.get(kanji)
            if result:
                for r in result.onReadings:
                    onReadingField.add(prefix + r)
                for r in result.kunReadings:
                    kunReadingField.add(prefix + r)

        # Kanji reachable from this hanzi by cognate or shared shape.
        for kanji in cognateSet.union(shapeSet):
            mappedKanjiField.add(prefix + kanji)

        # Mark hanzi with no common Japanese counterpart.
        if char not in _jpCommonChar:
            mappedKanjiField.add(prefix + _unknownChar)

    # Kana reading is the union of on and kun readings.  Computed once
    # here instead of per-iteration as in the original; the result is the
    # same since the two sets only ever grow inside the loop.
    kanaReadingField = onReadingField.union(kunReadingField)

    return [
        ['0', 'stroke', strokeField],
        ['0', 'onReading', onReadingField],
        ['0', 'kunReading', kunReadingField],
        ['0', 'kanaReading', kanaReadingField],
        ['0', 'pinyinReading', pinyinReadingField],
        ['0', 'mappedKanji', mappedKanjiField],
    ]


#----------------------------------------------------------------------------#
# PRIVATE
#----------------------------------------------------------------------------#

#----------------------------------------------------------------------------#
# MODULE EPILOGUE
#----------------------------------------------------------------------------#

def _createOptionParser():
    """ Creates an option parser instance to handle command-line options.
    """
    usage = \
"""%prog [options] 

Produce fields for each word in specified dictionary.
"""

    parser = optparse.OptionParser(usage)

    parser.add_option('--debug', action='store_true', dest='debug',
            default=False, help='Enables debugging mode [False]')

    parser.add_option('-d', '--dictionary', action='store', dest='dictionary',
            help='The dictionary to use ')

    parser.add_option('-j', '--jp', action='store_true', dest='type',
            default=True,
            help='Japanese dictionary [default]')

    parser.add_option('-c', '--ch', action='store_false', dest='type', 
            help='Chinese dictionary')

    parser.add_option('-m', '--merge', action='store_true', dest='merge', 
            default=False, help='Do testing')

    parser.add_option('-t', '--test', action='store_true', dest='test', 
            default=False, help='Do testing')

    return parser

#----------------------------------------------------------------------------#

def main(argv):
    """ The main method for this module.

    Parses the command line, optionally enables psyco, then either runs
    the built-in word tests or dumps fields for the whole dictionary.
    """
    parser = _createOptionParser()
    (options, args) = parser.parse_args(argv)

    if not options.debug:
        # we don't want psyco in debugging mode, since it merges together
        # stack frames
        try:
            import psyco
            psyco.profile()
        except ImportError:
            # psyco is an optional accelerator; the bare 'except:' in the
            # original also hid unrelated errors (even KeyboardInterrupt),
            # so only the missing-module case is swallowed now.
            pass

    if options.test:
        testWord()
    else:
        dumpDict(options.dictionary, options.type, options.merge)

    return

#----------------------------------------------------------------------------#

# Script entry point: forward command-line arguments (minus program name).
if __name__ == '__main__':
    main(sys.argv[1:])

#----------------------------------------------------------------------------#
  
# vim: ts=4 sw=4 sts=4 et tw=78:
