#!/usr/bin/env python
# -*- coding: UTF-8 -*-
### Copyright (C) 2009 Kelvin Ho <kelvin.ho.ka.wai@gmail.com>
###
### This library is free software; you can redistribute it and/or
### modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation; either version 2 of the
### License, or (at your option) any later version.
###
### This library is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
### General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this library; if not, write to the Free Software
### Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
### 02111-1307, USA.

import codecs
import cPickle as cpickle
import os
import re
import sys
 
import debug as D
import decorator

# Directory holding the dictionary data files, resolved relative to this module.
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')



# Parallel syllabary tables: the katakana at index i is the counterpart of the
# hiragana at index i.  hira_to_kana()/kana_to_hira() rely on this positional
# correspondence, so the two strings must stay index-aligned.
HIRAGANA =  u"あいうえおかきくけこさしすせそたちつてとなにぬねの"+\
            u"はひふへほまみむめもらりるれろやゆよわをん"+\
            u"がぎぐげござじずぜぞだぢづでどばびぶべぼぱぴぷぺぽ"+\
            u"ゔ"+\
            u"ぁぃぅぇぉっゃゅょゎ"


KATAKANA =  u"アイウエオカキクケコサシスセソタチツテトナニヌネノ"+\
            u"ハヒフヘホマミムメモラリルレロヤユヨワヲン"+\
            u"ガギグゲゴザジズゼゾダヂヅデドバビブベボパピプペポ"+\
            u"ヴ"+\
            u"ァィゥェォッャュョヮ"

# Vowel rows ("retsu") of the kana grid, one string per vowel, all five
# index-aligned by consonant column.  Full-width spaces (U+3000) pad positions
# where a column has no kana for that vowel, keeping the indexes lined up.
A_RETSU = u"あかさたなはまらやわがざだばぱ"
I_RETSU = u"いきしちにひみり　　ぎじぢびぴ"
U_RETSU = u"うくすつぬふむるゆ　ぐずづぶぷ"
E_RETSU = u"えけせてねへめれ　　げぜでべぺ"
O_RETSU = u"おこそとのほもろよをごぞどぼぽ"
NN = u"ん"
# Full-width digits and symbols; rikai() skips substrings starting with these.
NUM = u"１２３４５６７８９０"
SPECIAL = u"！＠＃＄％＾＆＊（）＿＋ー＝｛｝｜「」＼＜＞；’：”、。？・"

# Import-time sanity checks that the easily-confused へ/べ/ぺ row made it into
# both tables.  (NOTE: assert statements are stripped under python -O.)
assert u'へ' in HIRAGANA
assert u'べ' in HIRAGANA
assert u'ぺ' in HIRAGANA

assert u'ヘ' in KATAKANA
assert u'ベ' in KATAKANA
assert u'ペ' in KATAKANA


# Global runtime registry, populated by init(): keys 'edict', 'rules', 'running'.
R = {}

def euc_jp(s):
  """Encode the unicode string s as EUC-JP bytes.

  Used so strings can be compared in EUC-JP byte order, which is the sort
  order of the dictionary files.
  """
  return s.encode('euc-jp')
   
def cmp_strings(s1, s2):
  """Three-way compare (-1/0/1) of two unicode strings by EUC-JP byte order.

  BUG FIX: the original body called `_euc_jp`, a name that does not exist
  anywhere in this module, so any call raised NameError.  Each string is now
  also encoded once instead of twice.
  """
  e1, e2 = s1.encode('euc-jp'), s2.encode('euc-jp')
  if e1 < e2: return -1
  if e1 > e2: return 1
  return 0

def hira_to_kana(s):
  """Return s with every hiragana character replaced by its katakana
  counterpart; characters not in the HIRAGANA table pass through unchanged."""
  converted = []
  for ch in s:
    if ch in HIRAGANA:
      converted.append(KATAKANA[HIRAGANA.index(ch)])
    else:
      converted.append(ch)
  return ''.join(converted)
  
def kana_to_hira(s):
  """Return s with every katakana character replaced by its hiragana
  counterpart; characters not in the KATAKANA table pass through unchanged."""
  converted = []
  for ch in s:
    if ch in KATAKANA:
      converted.append(HIRAGANA[KATAKANA.index(ch)])
    else:
      converted.append(ch)
  return ''.join(converted)

def conv_retsu(c, retsu1, retsu2):
  """Map kana c from vowel row retsu1 to the kana in the same column of retsu2.

  Characters not present in retsu1 are returned unchanged.
  BUG FIX: the retsu tables pad empty grid positions with the full-width
  space U+3000 (see I_RETSU etc.), but only the ASCII space was treated as a
  placeholder -- a full-width space would be "converted" to whatever kana sat
  at the same index in retsu2.  Both space characters now pass through.
  """
  if c in (' ', u'\u3000') or c not in retsu1:
    return c
  return retsu2[retsu1.index(c)]


class Rule(object):
  """Base class for deinflection rules.

  STUBS maps an inflected suffix to the tuple of dictionary-form endings it
  may stand for.  CAN_EXACT_MATCH lists the suffixes that are allowed to
  match the whole input string (only membership is tested, so subclasses may
  use either a list or a dict).
  """
  STUBS = {}
  CAN_EXACT_MATCH = {}

  def apply(self, s):
    """Return the distinct candidate dictionary forms obtained by rewriting
    the suffix of s according to STUBS (order is unspecified)."""
    candidates = set()
    for suffix, replacements in self.STUBS.items():
      if not s.endswith(suffix):
        continue
      # A suffix may consume the whole string only if explicitly allowed.
      if len(s) <= len(suffix) and suffix not in self.CAN_EXACT_MATCH:
        continue
      prefix = s[:-len(suffix)]
      for replacement in replacements:
        candidates.add(prefix + replacement)
    return list(candidates)

  def _apply(self, s):
    return []

class PastRule(Rule):
  """Deinflects the plain past (~た/~だ) form back to dictionary form."""
  STUBS = {
    u"た": (u"る",),
    u"いた": (u"く", u"いる"),
    u"きた": (u"くる",),
    u"した": (u"する", u"す"),
    u"った": (u"う", u"る", u"つ"),
    u"んだ": (u"ぶ", u"む", u"ぬ"),
    u"いった": (u"いく",),
    u"かった": (u"い",),
    u"だった": (u"だ",)
  }
  CAN_EXACT_MATCH = [u"した", u"いた", u"きた", u"いった", u"だった"]
     
class TeRule(Rule):
  """Deinflects the ~て/~で connective form back to dictionary form."""
  STUBS = {
    u"て": (u"る",),
    u"いて": (u"く", u"いる"),
    u"きて": (u"くる",),
    u"して": (u"する", u"す"),
    u"って": (u"う", u"る", u"つ"),
    u"んで": (u"ぶ", u"む", u"ぬ"),
    u"いって": (u"いく",),
    u"だって": (u"だ",),
    u"くて": (u"い",)
  }
  CAN_EXACT_MATCH = [u"して", u"いて", u"きて", u"いって", u"だって"]

class MasuRule(Rule):
  """Deinflects the polite present (~ます) form back to dictionary form."""
  STUBS = {
    u"ます": (u"る",),
    u"います": (u"いる", u"う"),
    u"きます": (u"くる", u"く"),
    u"します": (u"する", u"す"),
    u"ちます": (u"つ",),
    u"にます": (u"ぬ",),
    u"みます": (u"む",),
    u"ぎます": (u"ぐ",),
    u"ります": (u"る",),
    u"びます": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"います", u"きます", u"します"]

class MasenRule(Rule):
  """Deinflects the polite negative (~ません) form back to dictionary form."""
  STUBS = {
    u"ません": (u"る",),
    u"いません": (u"いる", u"う"),
    u"きません": (u"くる", u"く"),
    u"しません": (u"する", u"す"),
    u"ちません": (u"つ",),
    u"にません": (u"ぬ",),
    u"みません": (u"む",),
    u"ぎません": (u"ぐ",),
    u"りません": (u"る",),
    u"びません": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"いません", u"きません", u"しません"]

class MashitaRule(Rule):
  """Deinflects the polite past (~ました) form back to dictionary form."""
  STUBS = {
    u"ました": (u"る",),
    u"いました": (u"いる", u"う"),
    u"きました": (u"くる", u"く"),
    u"しました": (u"する", u"す"),
    u"ちました": (u"つ",),
    u"にました": (u"ぬ",),
    u"みました": (u"む",),
    u"ぎました": (u"ぐ",),
    u"りました": (u"る",),
    u"びました": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"いました", u"きました", u"しました"]

class MashouRule(Rule):
  """Deinflects the polite volitional (~ましょう) form back to dictionary form."""
  STUBS = {
    u"ましょう": (u"る",),
    u"いましょう": (u"いる", u"う"),
    u"きましょう": (u"くる", u"く"),
    u"しましょう": (u"する", u"す"),
    u"ちましょう": (u"つ",),
    u"にましょう": (u"ぬ",),
    u"みましょう": (u"む",),
    u"ぎましょう": (u"ぐ",),
    u"りましょう": (u"る",),
    u"びましょう": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"いましょう", u"きましょう",\
              u"しましょう"]


class PotentialRule(Rule):
  """Deinflects potential forms (e.g. 話せる -> 話す, できる -> する)."""
  STUBS = {
    u"できる": (u"する",),
    u"える": (u"う",),
    u"ける": (u"く",),
    u"せる": (u"す",),
    u"てる": (u"つ",),
    u"ねる": (u"ぬ",),
    u"める": (u"む",),
    u"れる": (u"る",),
    u"げる": (u"ぐ",),
    u"べる": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"できる"]

class VolitionalRule(Rule):
  """Deinflects the plain volitional (~よう/~おう) form back to dictionary form."""
  STUBS = {
    u"よう": (u"る",),
    u"しよう": (u"する",),
    u"こよう": (u"くる",),
    u"おう": (u"う",),
    u"こう": (u"く",),
    u"そう": (u"す",),
    u"とう": (u"つ",),
    u"のう": (u"ぬ",),
    u"もう": (u"む",),
    u"ろう": (u"る",),
    u"ごう": (u"ぐ",),
    u"ぼう": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"しよう", u"こよう"]

class NegativeRule(Rule):
  """Deinflects the plain negative (~ない) form back to dictionary form."""
  STUBS = {
    u"ない": (u"る",),
    u"んない": (u"る",),
    u"しない": (u"する",),
    u"こない": (u"くる",),
    u"わない": (u"う",),
    u"かない": (u"く",),
    u"さない": (u"す",),
    u"たない": (u"つ",),
    u"なない": (u"ぬ",),
    u"まない": (u"む",),
    u"らない": (u"る",),
    u"がない": (u"ぐ",),
    u"ばない": (u"ぶ",),

    # i-adjective negative: ~くない -> ~い
    u"くない": (u"い",),
  }
  CAN_EXACT_MATCH = [u"しない", u"こない"]

class ZuRule(Rule):
  """Deinflects the classical/written negative (~ず) form back to dictionary form."""
  STUBS = {
    u"ず": (u"る",),
    u"せず": (u"する",),
    u"わず": (u"う",),
    u"かず": (u"く",),
    u"さず": (u"す",),
    u"たず": (u"つ",),
    u"なず": (u"ぬ",),
    u"まず": (u"む",),
    u"らず": (u"る",),
    u"がず": (u"ぐ",),
    u"ばず": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"せず"]
     
class CausativeRule(Rule):
  """Deinflects causative forms (~せる/~させる) back to dictionary form."""
  STUBS = {
    u"させる": (u"る", u"する", u"す"),
    u"こさせる": (u"くる",),
    u"わせる": (u"う",),
    u"かせる": (u"く",),
    u"たせる": (u"つ",),
    u"なせる": (u"ぬ",),
    u"ませる": (u"む",),
    u"らせる": (u"る",),
    u"がせる": (u"ぐ",),
    u"ばせる": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"させる", u"こさせる"]

class PassiveRule(Rule):
  """Deinflects passive forms (~れる/~られる) back to dictionary form."""
  # BUG FIX: the original dict literal listed the key u"られる" twice (both
  # mapping to (u"る",)); the second occurrence silently overwrote the first.
  # The duplicate has been removed -- behavior is unchanged.
  STUBS = {
    u"られる": (u"る",),
    u"される": (u"する",u"す"),
    u"こられる": (u"くる",),
    u"われる": (u"う",),
    u"かれる": (u"く",),
    u"たれる": (u"つ",),
    u"なれる": (u"ぬ",),
    u"まれる": (u"む",),
    u"がれる": (u"ぐ",),
    u"ばれる": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"される", u"こられる"]
   
class NagaraRule(Rule):
  """Deinflects the simultaneous-action (~ながら) form back to dictionary form."""
  STUBS = {
    u"いながら": (u"いる", u"う"),
    u"きながら": (u"くる", u"く"),
    u"しながら": (u"する", u"す"),
    u"ちながら": (u"つ",),
    u"にながら": (u"ぬ",),
    u"みながら": (u"む",),
    u"ぎながら": (u"ぐ",),
    u"りながら": (u"る",),
    u"びながら": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"しながら", u"きながら", u"いながら"]

class TariRule(Rule):
  """Deinflects the representative-action (~たり) form back to dictionary form."""
  STUBS = {
    u"たり": (u"る",),
    u"いたり": (u"く", u"いる"),
    # BUG FIX: this value was written (u"いく") without the trailing comma --
    # a plain string, not a tuple -- so Rule.apply iterated it character by
    # character and produced the bogus replacements u"い" and u"く" instead
    # of the intended u"いく".
    u"いったり": (u"いく",),
    u"きたり": (u"くる",),
    u"したり": (u"する", u"す"),
    u"ったり": (u"う", u"る", u"つ"),
    u"んだり": (u"ぶ", u"む", u"ぬ"),
  }
  CAN_EXACT_MATCH = [u"したり", u"きたり", u"いたり", u"いったり"]

class NasaiRule(Rule):
  """Deinflects the polite imperative (~なさい) form back to dictionary form."""
  STUBS = {
    u"なさい": (u"る",),
    u"いなさい": (u"いる", u"う"),
    u"きなさい": (u"くる", u"く"),
    u"しなさい": (u"する", u"す"),
    u"ちなさい": (u"つ",),
    u"になさい": (u"ぬ",),
    u"みなさい": (u"む",),
    u"ぎなさい": (u"ぐ",),
    u"りなさい": (u"る",),
    u"びなさい": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"いなさい", u"きなさい", u"しなさい"]

class SouRule(Rule):
  """Deinflects the appearance/conjecture (~そう, "looks like") form."""
  STUBS = {
    u"そう": (u"い", u"る"),
    u"よさそう": (u"いい",),
    u"なさそう": (u"ない",),
    u"いそう": (u"いる", u"う"),
    u"きそう": (u"くる", u"く"),
    u"しそう": (u"する", u"す"),
    u"ちそう": (u"つ",),
    u"にそう": (u"ぬ",),
    u"みそう": (u"む",),
    u"ぎそう": (u"ぐ",),
    u"りそう": (u"る",),
    u"びそう": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"よさそう", u"なさそう", u"いそう", u"しそう", u"きそう"]

class SugiRule(Rule):
  """Deinflects the excess (~すぎ, "too much") form back to dictionary form."""
  STUBS = {
    u"すぎ": (u"い", u"る"),
    u"よさすぎ": (u"いい",),
    u"なさすぎ": (u"ない",),
    u"いすぎ": (u"いる", u"う"),
    u"きすぎ": (u"くる", u"く"),
    u"しすぎ": (u"する", u"す"),
    u"ちすぎ": (u"つ",),
    u"にすぎ": (u"ぬ",),
    u"みすぎ": (u"む",),
    u"ぎすぎ": (u"ぐ",),
    u"りすぎ": (u"る",),
    u"びすぎ": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"よさすぎ", u"なさすぎ", u"いすぎ", u"しすぎ", u"きすぎ"]

class YasuiRule(Rule):
  """Deinflects the ease-of-action (~やすい, "easy to") form."""
  STUBS = {
    u"やすい": (u"る",),
    u"いやすい": (u"いる", u"う"),
    u"きやすい": (u"くる", u"く"),
    u"しやすい": (u"する", u"す"),
    u"ちやすい": (u"つ",),
    u"にやすい": (u"ぬ",),
    u"みやすい": (u"む",),
    u"ぎやすい": (u"ぐ",),
    u"りやすい": (u"る",),
    u"びやすい": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"いやすい", u"きやすい", u"しやすい"]

class NikuiRule(Rule):
  """Deinflects the difficulty-of-action (~にくい, "hard to") form."""
  STUBS = {
    u"にくい": (u"る",),
    u"いにくい": (u"いる", u"う"),
    u"きにくい": (u"くる", u"く"),
    u"しにくい": (u"する", u"す"),
    u"ちにくい": (u"つ",),
    u"ににくい": (u"ぬ",),
    u"みにくい": (u"む",),
    u"ぎにくい": (u"ぐ",),
    u"りにくい": (u"る",),
    u"びにくい": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"いにくい", u"きにくい", u"しにくい"]

class KuRule(Rule):
  """Deinflects the adverbial ~く of an i-adjective back to ~い.
  Inherits an empty CAN_EXACT_MATCH, so a bare u"く" never matches."""
  STUBS = {
    u"く": (u"い",),
  }

class NRule(Rule):
  """Deinflects the contracted colloquial negative ~ん back to ~ない."""
  STUBS = {
    u"ん": (u"ない",),
  }
   
class NuRule(Rule):
  """Deinflects the archaic negative ~ぬ back to ~ない."""
  STUBS = {
    u"ぬ" : (u"ない",),
  }

class TaiRule(Rule):
  """Deinflects the desiderative (~たい, "want to") form back to dictionary form."""
  STUBS = {
    u"たい": (u"る",),
    u"いたい": (u"いる", u"う"),
    u"きたい": (u"くる", u"く"),
    u"したい": (u"する", u"す"),
    u"ちたい": (u"つ",),
    u"にたい": (u"ぬ",),
    u"みたい": (u"む",),
    u"ぎたい": (u"ぐ",),
    u"りたい": (u"る",),
    u"びたい": (u"ぶ",),
  }
  CAN_EXACT_MATCH = [u"いたい", u"きたい", u"したい"]

class SaRule(Rule):
  """Deinflects the ~さ nominalization of an i-adjective back to ~い."""
  STUBS = {
    u"さ": (u"い",),
  }

class CasualRule(Rule):
  """Undoes casual slurred ~ぇ endings (e.g. ねぇ for ない, てぇ for たい)."""
  STUBS = {
    u"けぇ": (u"かい",),
    u"せぇ": (u"さい",),
    u"てぇ": (u"たい",),
    u"ねぇ": (u"ない",),
    u"めぇ": (u"まい",),
    u"れぇ": (u"らい",),
    u"げぇ": (u"がい",),
    u"べぇ": (u"ばい",),
  }

class NumberConverter(object):
  """Lookup tables for converting ASCII / full-width digits to kanji numerals.

  NUM maps each (ascii, full-width) digit pair to its numeral; NUM_NAMES
  lists the power-of-ten place names from largest to smallest.
  """
  NUM = {
    (u"1", u"１"): (u"一",),
    # BUG FIX: the value for digit 2 was the katakana ニ (U+30CB); the kanji
    # numeral 二 (U+4E8C) is the intended character, matching 一/三/四/...
    (u"2", u"２"): (u"二",),
    (u"3", u"３"): (u"三",),
    (u"4", u"４"): (u"四",),
    (u"5", u"５"): (u"五",),
    (u"6", u"６"): (u"六",),
    (u"7", u"７"): (u"七",),
    (u"8", u"８"): (u"八",),
    # NOTE(review): zero maps to the katakana word ゼロ, not 〇 -- presumably
    # deliberate; confirm against callers.
    (u"9", u"９"): (u"九",),
    (u"0", u"０"): (u"ゼロ",),
  }
  NUM_NAMES = [u"兆", u"千億", u"百億", u"十億", u"億", u"百万", \
              u"十万", u"万", u"千", u"百", u"十"]


class Dictionary(object):
  """Random access over a sorted plain-text dictionary file.

  The byte offset of every line is computed once and cached in a pickle file
  next to the dictionary, so entries can be read by line index without
  loading the whole file.  Subclasses override _get_entry() to parse a line
  into an Entry.
  """
  def __init__(self, **properties):
    # 'name' (path of the dictionary file) is effectively required;
    # setdefault() yields None if it is missing and open() then fails.
    self.dict_name = properties.setdefault('name')

    # NOTE(review): this handle stays open for the object's lifetime and is
    # never explicitly closed (shutdown() just drops the reference).
    self.handle = open(self.dict_name)
    self.size = os.path.getsize(self.dict_name)

    # Reuse the cached line index only if it is newer than the dictionary.
    line_ind_fn = self.dict_name+'_ln_ind'
    get_line_indexes = os.path.exists(line_ind_fn) and \
        os.path.getmtime(line_ind_fn)>os.path.getmtime(self.dict_name)
    if get_line_indexes:
      self.line_indexes = self.load_indexes(line_ind_fn)
    else:
      self.line_indexes = self.generate_line_indexes(line_ind_fn)


  def __len__(self):
    # Number of lines (= entries) in the dictionary file.
    return len(self.line_indexes)

  def __getitem__(self, index):
    return self.get_entry(index)

  def load_indexes(self, fn):
    """Unpickle a previously cached index structure from *fn*."""
    # NOTE(review): the file handle is never closed here.
    fh = open(fn)
    indexes = cpickle.load(fh)
    return indexes

  def generate_line_indexes(self, outfn):
    """Scan the dictionary once, recording the starting byte offset of every
    line; pickle the list to *outfn* when given."""
    D.info('Generating line indexes for %s.' % (self.dict_name))
    indexes = []
    start_byte = 0
    if outfn: fh = open(outfn, 'w')
    self.handle.seek(0)
    for raw_line in self.handle:
      indexes.append(start_byte)
      start_byte += len(raw_line)
    if outfn:
      cpickle.dump(indexes, fh)
      fh.close()
    D.info('Line indexes generated.')
    return indexes


  def get_line(self, index):
    """Return line *index* decoded from UTF-8, without its trailing newline.

    NOTE(review): the -1 below drops the newline byte; this assumes every
    line -- including the last -- ends with '\\n', otherwise the final byte
    of the last line is lost.
    """
    indexes = self.line_indexes
    if index >= len(indexes) or index < 0: raise Exception('Index out of bounds.')
    start_byte = indexes[index]
    end_byte = indexes[index+1] if index < len(indexes)-1 else self.size
    self.handle.seek(start_byte)
    a = unicode(self.handle.read(end_byte-start_byte-1), 'utf-8')
    return a

  def get_entry(self, index):
    return self._get_entry(index)

  def _get_entry(self, index):
    # Subclasses override this to actually parse the line at *index*.
    return Entry()

  def find_by_kanji(self, key, start=0, end=-1, maxsameentries=5):
    """Binary-search the file for all entries whose kanji equals *key*.

    Assumes the file is sorted by kanji in EUC-JP byte order.  Around each
    probe a window of maxsameentries*2+1 consecutive lines is scanned so that
    runs of identical kanji are collected; when a hit touches the window edge
    the search shifts sideways to pick up the rest of the run.
    """
    def cmp_by_kanji(key, entry):
      e_key, e_kanji = euc_jp(key), euc_jp(entry.kanji)
      return -1 if e_key < e_kanji else \
              (1 if e_key > e_kanji else 0)
    def kanji_get_mid_index(mid): return mid
    def kanji_get_end_index(): return len(self)-1

    # NOTE: this local deliberately shadows the builtin cmp (Python 2).
    cmp = cmp_by_kanji
    get_mid = kanji_get_mid_index
    get_end = kanji_get_end_index

    def add_entries(fr, to):
      # Collect every entry in [fr, to) that matches key exactly.
      ens = []
      for i in range(fr, to):
        en = self.get_entry(i)
        if cmp(key, en) == 0:
          ens.append(en)
      return ens

    if end < 0: end = get_end()
    if end-start <= maxsameentries*2:
      # Range is small enough: scan it linearly.
      return add_entries(start, end+1)
    else:
      # Probe the midpoint window.  NOTE(review): mid-maxsameentries can go
      # negative near the start of the file, which get_line() rejects --
      # confirm this cannot happen with real data.
      mid = get_mid((start+end)/2)
      ens = add_entries(mid-maxsameentries, mid+maxsameentries+1)
      if not ens:
        mid_entry = self.get_entry(mid)
        stat = cmp(key, mid_entry)
        return self.find_by_kanji(key, start, mid) if stat < 0 else \
                self.find_by_kanji(key, mid, end)
      else:
        # If the hits touch a window edge, slide over to gather the rest.
        if ens[0].index > mid-maxsameentries and ens[-1].index < mid+maxsameentries: return ens
        elif ens[0].index <= mid-maxsameentries: return self.find_by_kanji(key, mid-2*maxsameentries, mid-maxsameentries)
        else: return self.find_by_kanji(key, mid+maxsameentries, mid+2*maxsameentries)


class EDict(Dictionary):
  """EDICT-format dictionary: each line is 'KANJI [FURIGANA] /gloss/gloss/',
  where the bracketed furigana part is optional."""
  RE = re.compile(r"""
              (?P<kanji>[^[]+)\s
              (\[(?P<furigana>[^]]+)\]\s)?
              /(?P<exps>.+)/
              """, 
              re.IGNORECASE|re.VERBOSE|re.UNICODE)
  def __init__(self, **properties):
    Dictionary.__init__(self, **properties)

    # Build (or load) a furigana -> [line index, ...] lookup table.
    # NOTE(review): cache freshness is checked against the *line-index* cache
    # file rather than the dictionary file itself -- confirm that is intended.
    line_ind_fn = self.dict_name+'_ln_ind'
    furi_ind_fn = self.dict_name+'_fu_ind'
    get_furi_indexes = os.path.exists(furi_ind_fn) and \
        os.path.getmtime(furi_ind_fn)>os.path.getmtime(line_ind_fn)
    if get_furi_indexes:
      self.furi_indexes = self.load_indexes(furi_ind_fn)
    else:
      self.furi_indexes = self.generate_furi_indexes(furi_ind_fn)

  def generate_furi_indexes(self, outfn):
    """Scan every entry and build {furigana: [line indexes]}, pickling the
    dict to *outfn* when given."""
    D.info('Generating furigana indexes for %s.' % (self.dict_name))
    indexes = {} 
    def add_index(furi, i):
      if furi not in indexes:
        indexes[furi] = []
      indexes[furi].append(i)
    for i in range(len(self)):
      e = self.get_entry(i)
      if e.furigana:
        add_index(e.furigana, i)
    if outfn:
      outfh = open(outfn, 'w')
      cpickle.dump(indexes, outfh)
      outfh.close()
    D.info('Furigana indexes generated.')
    return indexes

  def _get_entry(self, index):
    """Parse line *index* into an Entry with kanji, furigana (may be None),
    exps (glosses split on '/') and index attributes."""
    line = self.get_line(index)
    raw_entry = self.RE.match(line)
    e = Entry()
    e.kanji = raw_entry.group('kanji')
    e.furigana = raw_entry.group('furigana')
    e.exps = raw_entry.group('exps').split('/')
    e.index = index
    return e

  def find_by_furigana(self, key):
    """Return all entries whose reading is exactly *key*, or None if absent."""
    if key not in self.furi_indexes: return None
    return [self[i] for i in self.furi_indexes[key]]


class KanjiDic(Dictionary):
  """KANJIDIC-format dictionary: 'KANJI EUCJP-code readings... {meaning}...'
  per line."""
  RE = re.compile(r"""
        (?P<kanji>\S)\s
        (?P<eucjp>[0-9A-F]{4})\s
        (?P<yomi>[^{]+)
        (?P<exps>.+)
        """, re.IGNORECASE|re.VERBOSE|re.UNICODE)

  def _get_entry(self, index):
    """Parse line *index* into an Entry with kanji, exps (the {...} meaning
    groups) and yomi (reading tokens, excluding numeric/T-prefixed codes)."""
    line = self.get_line(index)
    raw_entry = self.RE.match(line)
    e = Entry()
    e.kanji = raw_entry.group('kanji')
    e.exps = re.findall(r"{([^}]+)}", raw_entry.group('exps'), re.UNICODE)
    e.yomi = re.findall(r"[^T0-9\s]+", raw_entry.group('yomi'), re.UNICODE)
    return e

class Entry(object):
  """One parsed dictionary entry.

  Attributes are assigned ad hoc by the Dictionary subclasses: typically
  kanji, furigana, exps (list of glosses) and index (line number).
  """

  def __repr__(self):
    # Render attribute values recursively; unicode values are UTF-8 encoded
    # so the repr prints cleanly on a byte-oriented (Python 2) console.
    def f(v):
      if not v: return 'None'
      elif type(v) == list:
        return '['+'; '.join(f(i) for i in v)+']'
      elif type(v) == int:
        return '%d' % v
      else:
        return v.encode('utf-8')
    return '<Entry: '+', '.join('%s->%s' % (f(k), f(v)) for k, v in self.__dict__.iteritems())+'>'

  def __eq__(self, other):
    # BUG FIX: the original did `raise notImplemented`, a NameError (no such
    # name exists).  Returning NotImplemented lets Python fall back to its
    # default comparison for non-Entry operands instead of crashing.
    if not isinstance(other, Entry): return NotImplemented
    return self.kanji == other.kanji and \
            self.furigana == other.furigana and \
            self.exps == other.exps

  def __ne__(self, other):
    # Python 2 does not derive != from ==; keep the two operators consistent.
    result = self.__eq__(other)
    return result if result is NotImplemented else not result
  
def init():
  """Load the EDICT dictionary and instantiate every grammar-rule class in
  this module, storing everything in the global registry R.  Idempotent:
  a second call is a no-op until shutdown() is run."""
  if 'running' in R:
    return
  R['edict'] = EDict(name=os.path.join(DATA_DIR, 'edict_utf8'))
  #R['kanjidic'] = KanjiDic(name='data/kanjidic_utf8')
  this_module = sys.modules[__name__]
  # Collect an instance of every class whose name matches *Rule (the base
  # class Rule itself does not match the pattern).
  rule_instances = []
  for attr_name, attr in this_module.__dict__.items():
    if isinstance(attr, type) and re.match(r'.+Rule', attr_name):
      rule_instances.append(attr())
  R['rules'] = rule_instances
  R['running'] = 'y'

def shutdown():
  """Release everything init() registered so init() can run again."""
  for key in ('edict', 'rules', 'running'):
    del R[key]

def generate_guesses(s):
  """Apply every deinflection rule repeatedly until no new form appears,
  returning all distinct candidate dictionary forms derived from s
  (s itself is excluded from the result)."""
  rules = R['rules']
  guesses = [s]

  def merge(new_forms):
    # Append unseen forms to guesses; report whether anything was added.
    added = False
    for form in new_forms:
      if form not in guesses:
        guesses.append(form)
        added = True
    return added

  expanding = True
  while expanding:
    produced = []
    for rule in rules:
      for candidate in guesses:
        produced.extend(rule.apply(candidate))
    expanding = merge(produced)
  guesses.remove(s)
  return guesses

def rikai(text, **properties):
  """Look up dictionary entries matching prefixes of *text*, longest first.

  Each prefix of *text* is deinflected via generate_guesses() and looked up
  by kanji, by furigana, and by its katakana->hiragana conversion.

  Keyword properties:
    minentrylen  -- stop once an entry longer than this has been found (4)
    maxsubtxtlen -- longest prefix of *text* to try (len(text))
    maxentries   -- truncate and stop after this many entries; -1 = unlimited
  Returns entries sorted longest match first; on ties, EDICT priority '(P)'
  entries come first.
  """
  def cmp_entries(e1, e2):
    # Longer reading/kanji sorts first; '(P)' (priority) breaks ties.
    e1_len = len(e1.furigana if e1.furigana else e1.kanji)
    e2_len = len(e2.furigana if e2.furigana else e2.kanji)
    if e1_len < e2_len: return 1
    elif e1_len > e2_len: return -1
    else:
      if '(P)' in ''.join(e1.exps): return -1
      elif '(P)' in ''.join(e2.exps): return 1
      else: return 0
  def has_min_entry_len(entries, mlen):
    # True when some entry's kanji or furigana is longer than mlen.
    return len(entries) > 0 and (\
            max(len(entry.kanji) for entry in entries) > mlen or \
            max(len(entry.furigana) if entry.furigana else 0 \
                  for entry in entries) > mlen)

  minentrylen = properties.setdefault('minentrylen', 4)
  maxsubtxtlen = properties.setdefault('maxsubtxtlen', len(text))
  maxentries = properties.setdefault('maxentries', -1)

  entries = []
  dic = R['edict']

  # Try successively longer prefixes of text.
  for i in range(1, len(text)+1):
    curr_text = text[:i]
    # Skip prefixes starting with punctuation or full-width digits.
    if not curr_text or curr_text[0] in SPECIAL or \
            curr_text[0] in NUM:
      continue
    if i > maxsubtxtlen:
      break
    guesses = generate_guesses(curr_text)+[curr_text]
    D.info(' '.join(guesses))
    for guess in guesses:
      # Kanji lookup first, then reading, then reading after kana->hira.
      curr = dic.find_by_kanji(guess)
      if not curr: curr = dic.find_by_furigana(guess)
      if not curr: curr = dic.find_by_furigana(kana_to_hira(guess))
      if curr:
        entries.extend(curr)
    if has_min_entry_len(entries, minentrylen):
      D.info('At least one entry with length > %d found.' % minentrylen)
      break
    if len(entries) > maxentries and maxentries != -1:
      D.info('There are at least %d entries.' % maxentries)
      entries = entries[:maxentries]
      break


  entries.sort(cmp_entries)
  #if len(entries) > maxentries: entries = entries[:maxentries]
  return entries

  

  
  
#================================================================# 
#================================================================# 
#================================================================# 
#================================================================# 
    

def tester(tf):
  def _tester(tf, *args, **props):
    print '------------------'
    print '%s:\n' % tf.__name__.upper()
    tf(*args, **props)
    print '------------------'
    print '//////////////////'
  return decorator.decorator(_tester, tf)

@tester
def test_rules():
  """Print the deinflections each grammar rule produces for sample words.

  Manual smoke test: output is inspected by eye, nothing is asserted.
  """
  pr = PastRule()
  tr = TeRule()
  mr = MasuRule()
  ptr = PotentialRule()
  nr = NegativeRule()
  zr = ZuRule()
  cr = CausativeRule()

  # Print "input=>candidates" for rule r applied to string s.
  def _(r, s):
    print '%s=>%s' % (s, ' '.join(r.apply(s)))

  _(pr, u"かった")
  _(pr, u"した")
  _(pr, u"まいった")
  _(pr, u"あそんだ")
  _(pr, u"たかかった")
  _(pr, u"わるかった")
  _(pr, u"た")
  print '=================='
  _(tr, u"かって")
  _(tr, u"まいって")
  _(tr, u"あそんで")
  _(tr, u"えて")
  _(tr, u"かけて")
  _(tr, u"おもしろくて")
  _(tr, u"かて")
  _(tr, u"かっで")
  _(tr, u"で")
  print '=================='
  _(mr, u"いきます")
  _(mr, u"でます")
  _(mr, u"ばます")
  _(mr, u"ます")
  print '=================='
  _(ptr, u"はなせる")
  _(ptr, u"いける")
  _(ptr, u"いけ")
  _(ptr, u"ける")
  print '=================='
  _(nr, u"いかない")
  _(nr, u"かえない")
  _(nr, u"かえられない")
  print '=================='
  _(zr, u"かえず")
  _(zr, u"いかず")
  _(zr, u"いすず")
  print '=================='
  _(cr, u"かんがえさせる")
  _(cr, u"かえらせる")

@tester
def test_generate_guesses():
  print ' '.join(generate_guesses(u"かけて"))
  print ' '.join(generate_guesses(u"掛けて"))
  
@tester
def test_find():
  print R['edict'].find_by_kanji(u'俺')
  print R['edict'].find_by_kanji(u'お前')
  print R['edict'].find_by_furigana(u'おまえ')
  print R['edict'].find_by_furigana(u'おかまいなく')
  print R['edict'].find_by_furigana(u'かぎまわる')
  print R['edict'].find_by_furigana(u'かきあげ')
  print R['edict'].find_by_furigana(u'あく')
  
  print '=================='
  print R['kanjidic'].get_entry(1)
  print R['kanjidic'].get_entry(11)
  print R['kanjidic'].get_entry(12)
  print R['kanjidic'].find_by_kanji(u'的')

@tester
def test_rikai():
  def _(s):
    print '%s' % s
    for entry in rikai(s): print entry
  _(u"お前")
  _(u"カキアゲ")
  _(u"うらさびしくて")
  _(u"いった")
  _(u"勘弁する")
  _(u"勘弁して")

if __name__ == '__main__':
  # Manual smoke-test entry point: enable info-level debug output, load the
  # dictionary and rules, then run the rule tests.  The heavier lookup tests
  # are left disabled (test_find also needs the unused kanjidic).
  D.D_LEVEL = D.D_INFO
  init()
  #test_find()
  #test_rikai()
  test_rules()




