#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Last modified: 2004年09月11日 星期六 16时47分01秒 [debian]

"""把 Unihan 网站的汉字资料转换成维基词典条目的格式
"""

__revision__ = '0.1'

import os
import sys
import urllib2
import re

# Regular expressions tried, in order, against the "basic information"
# section of a Unihan page; every named group of every pattern that matches
# is merged into a single dict (see AnalyPage).  The patterns mirror the
# site's generated HTML exactly — unquoted attributes, literal newlines
# between header and data rows — so do not "clean up" the HTML here.
# NOTE(review): the group names spelled "phoetic_*" are a typo for
# "phonetic_*", but Wikify's template refers to them by that spelling;
# rename only if both places are changed together.
RE_INFO = [
    # Glyphs
    r'<tr><th>The Unicode Standard</th><th>Your Browser</th><tr><td align=center><img align=middle src=".*"></td><td align=center><font size="7">(?P<hanzi>.*)</font></td></tr>',
    # Encoding Forms
    r"""(?m)<tr><th>Decimal</th><th>UTF-8</th><th>UTF-16</th><th>UTF-32</th></tr>
<tr><td align=center>(?P<enc_dec>.*?)</td><td align=center>(?P<enc_utf8>.*?)</td><td align=center>(?P<enc_utf16>.*?)</td><td align=center>(?P<enc_utf32>.*?)</td></tr>""",
    # IRG Sources
    r"""(?m)<tr><th>G-source</th><th>T-source</th><th>H-source</th><th>J-source</th><th>K-source</th><th>KP-source</th><th>V-source</th><th>U-source</th></tr>
<tr><td align=center>(?P<src_g>.*?)</td><td align=center>(?P<src_t>.*?)</td><td align=center>(?P<src_h>.*?)</td><td align=center>(?P<src_j>.*?)</td><td align=center>(?P<src_k>.*?)</td><td align=center>(?P<src_kp>.*?)</td><td align=center>(?P<src_v>.*?)</td><td align=center>(?P<src_u>.*?)</td></tr>""",
    # Mappings to Major Standards
    # Chinese
    r"""(?m)<tr><th>GB 2312</th><th>GB 12345</th><th>CNS 11643-1986</th><th>CNS 11643-1992</th><th>CCCII</th><th>Big Five</th><th>HK SCS</th></tr>
<tr><td align=center>(?P<zh_gb2312>.*?)</td><td align=center>(?P<zh_gb12345>.*?)</td><td align=center>(?P<zh_cns1986>.*?)</td><td align=center>(?P<zh_cns1992>.*?)</td><td align=center>(?P<zh_cccii>.*?)</td><td align=center>(?P<zh_big5>.*?)</td><td align=center>(?P<zh_hk>.*?)</td></tr>""",
    # Japanese
    r"""(?m)<tr><th>JIS X 0208</th><th>JIS X 0212</th><th>JIS X 0213</th></tr>
<tr><td align=center>(?P<jp_jisx0208>.*?)</td><td align=center>(?P<jp_jisx0212>.*?)</td><td align=center>(?P<jp_jisx0213>.*?)</td></tr>""",
    # Korean
    r"""(?m)<tr><th>KS X 1001:1992</th><th>KS X 1002:1991</th><th>KPS 9566-97</th><th>KPS 10721-2000</th></tr>
<tr><td align=center>(?P<ko_1001>.*?)</td><td align=center>(?P<ko_1002>.*?)</td><td align=center>(?P<ko_9566>.*?)</td><td align=center>(?P<ko_10721>.*?)</td></tr>""",
    # Other
    r"""(?m)<tr><th>EACC</th><th>Xerox</th><th>PRC Telegraph</th><th>ROC Telegraph</th></tr>
<tr><td align=center>(?P<srcx_eacc>.*?)</td><td align=center>(?P<x_xerox>.*?)</td><td align=center>(?P<x_prc>.*?)</td><td align=center>(?P<x_roc>.*?)</td></tr>""",
    # Dictionary Information
    # IRG Indices
    r"""(?m)<tr><th>KangXi</th><th>Morohashi</th><th>Dae Jaweon</th><th>Hanyu Da Zidian</th></tr>
<tr><td align=center>(?P<irg_kangxi>.*?)</td><td align=center>(?P<irg_morohashi>.*?)</td><td align=center>(?P<irg_daejaweon>.*?)</td><td align=center>(?P<irg_dazidian>.*?)</td></tr>""",
    # Other Indices
    r"""(?m)<tr><th>Cowles</th><th>Fenn</th><th>Karlgren</th><th>Nelson</th><th>Mathews</th><th>Meyer-Wempe</th><th>Lau</th></tr>
<tr><td align=center>(?P<indx_cowles>.*?)</td><td align=center>(?P<indx_fenn>.*?)</td><td align=center>(?P<indx_karlgren>.*?)</td><td align=center>(?P<indx_nelson>.*?)</td><td align=center>(?P<indx_mathews>.*?)</td><td align=center>(?P<indx_meyerwempe>.*?)</td><td align=center>(?P<indx_lau>.*?)</td></tr>""",
    # Radical-stroke Counts
    r"""(?m)<tr><th>Unicode</th><th>KangXi</th><th>Japanese</th><th>Korean</th><th>Morohashi</th></tr>
<tr><td align=center>(?P<stroke_unicode>.*?)</td><td align=center>(?P<stroke_kangxi>.*?)</td><td align=center>(?P<stroke_japanese>.*?)</td><td align=center>(?P<stroke_korean>.*?)</td><td align=center>(?P<stroke_morohashi>.*?)</td></tr>""",
    # Phonetic Data
    r"""(?m)<tr><th>Cantonese</th><th>Mandarin</th><th>Tang</th><th>Japanese On</th><th>Japanese Kun</th><th>Sino-Korean</th></tr>
<tr><td align=center>(?P<phoetic_cantonese>.*?)</td><td align=center>(?P<phoetic_mandarin>.*?)</td><td align=center>(?P<phoetic_tang>.*?)</td><td align=center>(?P<phoetic_japaneseon>.*?)</td><td align=center>(?P<phoetic_japanesekun>.*?)</td><td align=center>(?P<phoetic_sinokorean>.*?)</td></tr>""",
    # Other Dictionary Data
    r"""(?m)<tr><th>Definition</th><th>Total Strokes</th><th>Phonetic</th><th>Cangjie</th></tr>
<tr><td align=center>(?P<dicx_definition>.*?)</td><td align=center>(?P<dicx_strokes>.*?)</td><td align=center>(?P<dicx_phonetic>.*?)</td><td align=center>(?P<dicx_cangjie>.*?)</td></tr>""",
    # Variants: each cell is either a linked variant (url/code/img groups)
    # or plain text.  The strings below are raw (r'...') so that \( and \)
    # are unambiguous regex escapes — as plain strings they were invalid
    # string escapes (a warning on modern Python), though the value is the
    # same either way.
    r"""<tr><th>Simplified</th><th>Traditional</th><th>Semantic</th><th>Specialized Semantic</th><th>Z</th></tr>
<tr>"""+
    r'<td align=center>((<a href="(?P<var_zh_cn_url>.*?)">(?P<var_zh_cn_code>.*?)\(<img align=middle src="(?P<var_zh_cn_img>.*?)">\)</a>   </td>)|(?P<var_zh_cn>.*?))</td>'+
    r'<td align=center>((<a href="(?P<var_zh_tw_url>.*?)">(?P<var_zh_tw_code>.*?)\(<img align=middle src="(?P<var_zh_tw_img>.*?)">\)</a>   </td>)|(?P<var_zh_tw>.*?))</td>'+
    r'<td align=center>((<a href="(?P<var_sem_url>.*?)">(?P<var_sem_code>.*?)\(<img align=middle src="(?P<var_sem_img>.*?)">\)</a>   </td>)|(?P<var_sem>.*?))</td>'+
    r'<td align=center>((<a href="(?P<var_spcsem_url>.*?)">(?P<var_spcsem_code>.*?)\(<img align=middle src="(?P<var_spcsem_img>.*?)">\)</a>   </td>)|(?P<var_spcsem>.*?))</td>'+
    r'<td align=center>((<a href="(?P<var_z_url>.*?)">(?P<var_z_code>.*?)\(<img align=middle src="(?P<var_z_img>.*?)">\)</a>   </td>)|(?P<var_z>.*?))</td>',
    ]

# "Other Data" section: one (field name, value) row per property, value
# wrapped in <code>; AnalyPage collects these into a dict.  The mixed
# quoting (align="left" vs align=left) mirrors the site's HTML exactly.
RE_OTHER = r'<tr><td align="left">(?P<item>.*?)</td><td align=left><code>(?P<value>.*?)</code></td></tr>'

# Chinese Compounds table: word / pinyin / other / English gloss per row.
RE_CHINESE = r'<tr><td align="left">(?P<word>.*?)</td><td align="left">(?P<pinyin>.*?)</td><td align="left">(?P<other>.*?)</td><td align="left">(?P<english>.*?)</td></tr>'

# Japanese Compounds table: word / reading / English gloss per row (the
# site emits two spaces between these cells, matched literally here).
RE_JAPANESE = r'<tr><td align="left">(?P<word>.*?)</td>  <td align="left">(?P<jp>.*?)</td>  <td align="left">(?P<english>.*?)</td></tr>'

def GetPage(codepoint):
    """Return the Unihan page HTML for *codepoint* (e.g. '4e2d').

    The page is downloaded from unicode.org on first use and cached in
    the current directory as '<codepoint>.html'; subsequent calls read
    the cached copy.
    """
    baseurl = 'http://www.unicode.org/cgi-bin/GetUnihanData.pl?codepoint=%s'

    filename = codepoint.lower() + '.html'
    url = baseurl % codepoint

    if not os.path.isfile(filename):
        # Download once and write the cache file.  Close the HTTP
        # response and the file explicitly — the original leaked the
        # response object and relied on GC to flush/close the file.
        u = urllib2.urlopen(url)
        try:
            data = u.read()
        finally:
            u.close()
        cache = open(filename, 'w')
        try:
            cache.write(data)
        finally:
            cache.close()

    cache = open(filename)
    try:
        return cache.read()
    finally:
        cache.close()

def FormatPage(htmldata):
    """Normalize raw Unihan page HTML so the RE_* patterns can match it.

    Keeps only the text from the '<h1>Unihan data for' heading onward,
    strips leading whitespace from every line, and joins each table row
    (<tr>...</tr>) onto a single line.

    Raises ValueError if the expected page markers are missing.
    """
    RAW_BEGIN = '<!-- BEGIN CONTENTS -->'
    RAW_UNIHAN = '<h1>Unihan data for '
    html = htmldata

    # Drop the page header and navigation links before the contents marker.
    iStart = html.find(RAW_BEGIN)
    if iStart >= 0:
        html = html[iStart:]
    else:
        # was `raise "not found CONTENTS"` — string exceptions are
        # illegal (TypeError at runtime); raise a real exception instead.
        raise ValueError("not found CONTENTS")

    # Trim a little more, down to the Unihan heading itself.
    iStart = html.find(RAW_UNIHAN)
    if iStart >= 0:
        html = html[iStart:]
    else:
        raise ValueError("not found Unihan data")

    # Strip leading whitespace from every line.
    html = re.sub(r'\n[ \t]+', r'\n', html)

    def oneline(matchobj):
        return matchobj.group(0).replace('\n', '')

    # Put every <tr>...</tr> table row onto a single line.
    html = re.sub(r'(?ms)<tr>.*?</tr>', oneline, html)

    return html

def SplitPage(htmldata):
    """Split the formatted page body into its four sections.

    Returns (baseinfo, chinese, japanese, otherdata).  The Chinese and
    Japanese compound sections are optional; when absent their slice is
    empty.  Raises ValueError if the mandatory 'Other Data' section is
    missing.
    """
    RAW_CHINESE = '<p><b>Chinese Compounds</b>'
    RAW_JAPANESE = '<p><b>Japanese Compounds</b>'
    RAW_OTHERDATA = '<h2>Other Data</h2>'
    html = htmldata

    # (A leftover debug dump of the page to 'u.html' was removed here;
    # it leaked a file handle and polluted the working directory.)
    iChinese = html.find(RAW_CHINESE)
    iJapanese = html.find(RAW_JAPANESE)
    iOtherData = html.find(RAW_OTHERDATA)

    if iOtherData < 0:
        # was `raise "not found Other Data"` — string exceptions are
        # illegal (TypeError at runtime); raise a real exception instead.
        raise ValueError("not found Other Data")
    # Missing optional sections collapse to zero-width slices.
    if iJapanese < 0:
        iJapanese = iOtherData
    if iChinese < 0:
        iChinese = iJapanese

    # Basic character information
    baseinfo = html[:iChinese]
    # Chinese compounds
    chinese = html[iChinese:iJapanese]
    # Japanese compounds
    japanese = html[iJapanese:iOtherData]
    # Other data
    otherdata = html[iOtherData:]

    return baseinfo, chinese, japanese, otherdata

def removeNbspDict(aDict):
    """Normalize regex-match values in *aDict*, in place.

    Empty/unmatched values (None, '') become '', and every '&nbsp;'
    entity in the remaining values is replaced with a plain space.
    """
    # items() (a snapshot list on Python 2, a view on Python 3) replaces
    # the Python-2-only iteritems(); reassigning existing keys while
    # looping is safe in both cases.
    for k, v in aDict.items():
        if not v:
            aDict[k] = ''
        else:
            aDict[k] = v.replace('&nbsp;', ' ')

def AnalyPage(url):
    """Fetch and parse one Unihan page, extracting all useful data.

    Returns (infoDict, chineseList, japaneseList, otherDict): the merged
    basic-information fields, the Chinese compounds, the Japanese
    compounds, and the 'Other Data' key/value table.
    """
    page = FormatPage(GetPage(url))
    info, chinese, japanese, otherdata = SplitPage(page)

    # Basic information: merge the named groups of every pattern that hits.
    infoDict = {}
    for pattern in RE_INFO:
        hit = re.search(pattern, info)
        if hit:
            infoDict.update(hit.groupdict())
    removeNbspDict(infoDict)

    # 'Other Data' table: one (item, value) pair per row.
    otherDict = {}
    for row in re.finditer(RE_OTHER, otherdata):
        item, value = row.groups()
        otherDict[item] = value
    removeNbspDict(otherDict)

    # Chinese compounds: one field dict per table row.
    chineseList = []
    for row in re.finditer(RE_CHINESE, chinese):
        fields = row.groupdict()
        removeNbspDict(fields)
        chineseList.append(fields)

    # Japanese compounds: one field dict per table row.
    japaneseList = []
    for row in re.finditer(RE_JAPANESE, japanese):
        fields = row.groupdict()
        removeNbspDict(fields)
        japaneseList.append(fields)

    return infoDict, chineseList, japaneseList, otherDict

"""
维基词典条目内容：
字，图
读音，拼音，注音，威妥玛拼音，音调
简体字，繁体字，异体字，会意，反切
笔划顺序
检索信息，部首，笔划，四角号码，|仓颉
技术信息，Unicode，GBK，BIG5
词组，顺序，逆序
外部链结，Unihan
"""
def Wikify(codepoint):
    """把各种资料整理成 Wikidict 的条目格式
    """
    infoDict, chineseList, japaneseList, otherDict = AnalyPage(codepoint)

    #print 'infoDict', len(infoDict.keys())
    #print 'otherDict', len(otherDict.keys())
    #print 'chineseList', len(chineseList)
    #print 'japaneseList', len(japaneseList)

    baseWiki = """
字: %(hanzi)s

==编码==
*十进制：%(enc_dec)s
*UTF-8: %(enc_utf8)s
*UTF-16: %(enc_utf16)s
*UTF-32: %(enc_utf32)s

==读音==
*[[普通话]]: %(phoetic_mandarin)s
*[[粤语]]: %(phoetic_cantonese)s
*Tang: %(phoetic_tang)s
*Japanese On: %(phoetic_japaneseon)s
*Japanese Kun: %(phoetic_japanesekun)s
*Sino-Korean: %(phoetic_sinokorean)s
"""
    baseWiki = baseWiki % infoDict

    zhWords = "[[%(word)s]], %(pinyin)s, %(english)s"
    zhWordsWiki = []
    for line in chineseList:
        zhWordsWiki.append(zhWords % line)
    zhWordsWiki = '\n'.join(zhWordsWiki)

    wiki = baseWiki + zhWordsWiki
    return wiki

if __name__ == '__main__':
    # Example: the input 4e2d converts the character `中'
    # http://www.unicode.org/cgi-bin/GetUnihanData.pl?codepoint=4e2d

    # Take the codepoint from the command line when given, otherwise
    # prompt for it interactively.
    codepoint = sys.argv[1] if len(sys.argv) > 1 else ''

    if not codepoint:
        codepoint = raw_input("Input codepoint(for example: 4e2d for 中): ")

    print(Wikify(codepoint))
