# -*- coding: utf-8 -*-

"""
方便生成 Markdown 表格
"""

import re
from collections import OrderedDict

# Splits a line into (English part, Chinese part):
#   group 1: leading run of "English-ish" characters — letters, digits,
#            parentheses, dots, slashes, spaces, hyphens (the English term)
#   group 2: everything from the first character OUTSIDE that set to the
#            end of the line (the Chinese translation)
RE_WORD = re.compile(r'^([a-zA-Z0-9\(\)\./ \-]+)([^a-zA-Z0-9\(\)\./ \-].+)$')


def zh_len(_str):
    """
    Display width of a string: ASCII characters count as 1 column,
    everything else (CJK, full-width punctuation, ...) counts as 2.

    Accepts either a UTF-8 byte string (Python 2 style) or a text string.

    The original implementation measured width via
    ``len(s.decode('utf-8').encode('gbk'))`` — ASCII is 1 byte in GBK and
    CJK is 2 bytes, so this computes the same value, but without raising
    ``UnicodeEncodeError`` for characters GBK cannot encode and without
    depending on a Python-2-only bytes/str model.

    :param _str: text (or UTF-8 encoded bytes) to measure
    :return: display width as an int
    """
    if isinstance(_str, bytes):
        _str = _str.decode('utf-8')
    return sum(2 if ord(ch) > 127 else 1 for ch in _str)


def wordsplit(filepath, lower, auto, column=3):
    """
    Split a word-list text file and print it as a Markdown table.

    Each non-empty input line is expected to start with an English term
    followed by its Chinese translation (see RE_WORD); lines that do not
    match are collected and reported in an error banner at the end.

    :param filepath: path of the word-list text file
    :param lower: lowercase the English terms when True
    :param auto: size columns to the longest entry when True
    :param column: number of (Chinese, English) column pairs per row
    """
    with open(filepath) as _file:
        content = _file.read()
    word_lines = [i.strip() for i in content.split('\n')]
    word_lines = [i for i in word_lines if i]
    words = OrderedDict()
    error_content = []
    for word_line in word_lines:
        matched = RE_WORD.search(word_line)
        if not matched:
            error_content.append(word_line)
            continue
        item = matched.groups()
        en_word = item[0].strip()
        if lower:
            en_word = en_word.lower()
        zh_word = item[1].strip()
        # keyed by the Chinese term: a repeated translation keeps only the
        # latest English term
        words[zh_word] = en_word

    default_word_width_en = 14
    default_word_width_zh = 10
    if auto:
        default_word_width_en = max([len(i) for i in words.values()] + [14])
        default_word_width_zh = max([zh_len(i) for i in words.keys()] + [10])

    # header + separator row; single-argument print(...) form behaves
    # identically on Python 2 and Python 3
    print((' | '.join([i.ljust(default_word_width_zh + 2) for i in (['中文', '英文'] * column)])).rstrip())
    print(' | '.join(['-' * default_word_width_zh, '-' * default_word_width_en] * column))

    index = 1
    line = []
    for zh_word, en_word in words.items():
        zh_word_len = zh_len(zh_word)

        # width actually passed to ljust: CJK chars occupy more display
        # cells than len() units, so compensate for the difference
        width_zh_word_fix = len(zh_word) - zh_word_len
        word_width_zh = default_word_width_zh + width_zh_word_fix

        # if the Chinese cell overflows its column, shrink the English
        # cell so the row stays aligned
        width_zh_word_extra = max(0, (zh_word_len - default_word_width_zh))
        word_width_en = default_word_width_en - width_zh_word_extra

        line.extend([zh_word.ljust(word_width_zh), en_word.ljust(word_width_en)])
        if index == column:
            print((' | '.join(line)).rstrip())
            line = []
            index = 0
        index += 1

    # BUG FIX: flush the final partial row — previously, when the word
    # count was not a multiple of `column`, the leftover entries were
    # silently dropped
    if line:
        print((' | '.join(line)).rstrip())

    if error_content:
        print('\n'.join(['', '*' * 50, ' Error '.center(50, '*'), '*' * 50] + error_content))


def main():
    """
    Entry point: parse the command line, locate the word file, and print it
    as a Markdown table.

    Flags:
        --lower   lowercase the English terms
        --auto    size the table columns automatically
        --column  number of column pairs per row (default 3)
    """
    import os
    import argparse

    root_dir = os.path.dirname(os.path.abspath(__file__))
    default_word_file = 'words.tmp'

    parser = argparse.ArgumentParser(description='单词文本切割工具')
    parser.add_argument('filepath', nargs='?', metavar='file', type=str, help='单词文本', default=default_word_file)
    parser.add_argument('--lower', action='store_true')
    parser.add_argument('--auto', action='store_true')
    parser.add_argument('--column', type=int, default=3)
    args = parser.parse_args()

    # probe the usual locations in order; first existing path wins
    candidates = (
        os.path.abspath(args.filepath),
        os.path.abspath(os.path.join('../', args.filepath)),
        os.path.join(root_dir, args.filepath),
        os.path.abspath(os.path.join(root_dir, '..', args.filepath)),
    )
    filepath = None
    for candidate in candidates:
        if os.path.exists(candidate):
            filepath = candidate
            break
    if filepath is None:
        raise Exception('文件不存在')
    wordsplit(filepath, args.lower, args.auto, column=args.column)


# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
