from typing import Dict, List, Optional

import spacy
from spacy.tokens.token import Token

from class_gen.definition import Class, Method
# Load the small English pipeline once at import time; shared by all helpers below.
nlp = spacy.load("en_core_web_sm")


def get_conj(t: Token, res: List[Token]) -> None:
    '''Collect `t` and the chain of its coordinated ("conj") descendants into `res`.

    Follows only the first conj child at each level, so a chain like
    "A, B and C" yields [A, B, C]. A falsy/None `t` collects nothing.
    '''
    node = t
    while node:
        res.append(node)
        node = next((child for child in node.children if child.dep_ == 'conj'), None)


def get_compound_word(t: Token) -> Optional[str]:
    '''Return the lemma of `t` joined with its compound modifiers by underscores.

    If `t` is itself a compound modifier, merge it with its head
    ("data base" -> "data_base"); otherwise prefix each compound child
    to `t`'s own lemma. Returns None for a falsy/None token.

    Fix: the return annotation previously claimed `str`, but the
    function returns None on the falsy branch — now `Optional[str]`.
    '''
    if not t:
        return None
    if t.dep_ == 'compound':
        return t.lemma_ + '_' + t.head.lemma_
    name = t.lemma_
    for child in t.children:
        if child.dep_ == 'compound':
            # each compound child is prepended, preserving the original order
            name = child.lemma_ + '_' + name
    return name

# Suffix matching is used because dependency labels sharing a suffix mark
# similar modifier relations, e.g. dobj and pobj.


def trace_path(root: Token, path: List[str], i=0) -> Token:
    '''Walk up the head chain, matching each path segment against dep_ by suffix.

    Returns the token reached after consuming all of `path`, or None if
    any segment fails to match (or `root` is falsy).
    '''
    node = root
    while node:
        if i == len(path):
            return node
        if not node.dep_.endswith(path[i]):
            return None
        node = node.head
        i += 1
    return None


def find_path(root: Token, path: List[str], i=0) -> Token:
    '''Depth-first search down the children, matching dep_ by suffix per segment.

    Returns the first token reached after consuming all of `path`, or
    None when no branch matches (or `root` is falsy).
    '''
    if not root:
        return None
    if i == len(path):
        return root
    for child in root.children:
        if not child.dep_.endswith(path[i]):
            continue
        hit = find_path(child, path, i + 1)
        if hit:
            return hit
    return None


def search_path(root: Token, path: List[str], i=0) -> Token:
    '''Follow a dependency chain, matching each segment by suffix.

    A segment starting with '^' walks up to the head; any other segment
    searches down the children (first match wins). Returns the token
    reached after consuming all of `path`, or None.
    '''
    if not root:
        return None
    if i == len(path):
        return root
    segment = path[i]
    if segment.startswith('^'):
        # upward step: strip the marker and test this token's own relation
        if root.dep_.endswith(segment[1:]):
            return search_path(root.head, path, i + 1)
        return None
    # downward step: try each matching child in order
    for child in root.children:
        if child.dep_.endswith(segment):
            hit = search_path(child, path, i + 1)
            if hit:
                return hit
    return None


def update_class(res: Dict[str, Class], c: Class) -> None:
    '''Merge `c` into the registry keyed by class name.

    New names are inserted directly; an existing entry absorbs `c`
    via in-place addition (`Class.__iadd__`).
    '''
    if c.name not in res:
        res[c.name] = c
    else:
        res[c.name] += c


def search_classes(sentence: str, res: Dict[str, Class]):
    '''Parse `sentence` and collect candidate classes into `res`.

    Every noun (NN*) that is not in the exclusion list becomes a class;
    its interfaces, methods, attributes and superclass are then searched
    from the same token.
    '''
    exclude = {'lot'}
    for token in nlp(sentence):
        if token.lemma_ in exclude:
            continue
        if not token.tag_.startswith('NN'):
            continue
        if token.dep_ == 'compound':
            # compound parts are folded into their head noun; skip to
            # avoid counting the same concept twice
            continue
        clazz = Class(get_compound_word(token))
        search_interface2(clazz, token)
        search_methods(clazz, token, res)
        search_attrs(clazz, token)
        search_father_class(clazz, token)
        update_class(res, clazz)


def get_vb_prt(t: Token) -> str:
    '''Return the verb lemma, joined with its particle when it is a phrasal verb.

    E.g. "log in" -> "log_in"; a verb without a `prt` child yields its
    bare lemma.
    '''
    particle = find_path(t, ['prt'])
    return f'{t.lemma_}_{particle.lemma_}' if particle else t.lemma_


def fill_methods(c: Class, ts: List[Token], bd: bool, res: Dict[str, Class], share: bool) -> None:
    '''Turn verb tokens into methods and attach them to classes.

    c: the class currently being built.
    ts: candidate verb tokens; entries may be None and are skipped.
    bd: passive-voice flag — when True, each parameter of the verb becomes
        its own class owning the method, with `c` itself as the parameter
        (callers pass True for the `nsubjpass` path).
    res: global class registry, updated only on the passive branch.
    share: when True, all methods named after tokens in `ts` end up with
        one merged parameter set (used for coordinated predicates).
    '''
    # Linking/auxiliary verbs do not denote behaviour — skip them.
    exclude = set(['be', 'have', 'seem'])
    for t in ts:
        if not t or t.lemma_ in exclude:
            continue
        # if t.tag_.startswith('VB'):
        m = Method(get_vb_prt(t))
        # the verb also contributes parameters, interfaces and attributes
        search_paras(m, t)
        search_interface(c, t)
        search_attrs1(c, t)
        if bd:
            # Passive: the method is owned by each parameter class, and
            # `c` (the original subject) becomes the sole parameter.
            m1 = Method(t.lemma_)
            m1.addPara(c.name)
            for p in m.paras:
                clazz = Class(p)
                clazz.addMethod(m1)
                update_class(res, clazz)
        else:
            c.addMethod(m)
    if share:
        # Coordinated verbs share parameters: union the paras of every
        # method named after a token in `ts`, then assign the merged set
        # back to each of them.
        # NOTE(review): the same set object is assigned to all of these
        # methods, so later mutation of one affects the others — confirm
        # that this aliasing is intended.
        names = set(map(lambda x: x.lemma_, ts))
        paras = set()
        for k in c.methods:
            if k in names:
                paras = paras | c.methods[k].paras
        for k in c.methods:
            if k in names:
                c.methods[k].paras = paras
        pass
    pass


def search_methods(c: Class, r: Token, res: Dict[str, Class]) -> None:
    '''Search methods for class `c` starting from its noun token `r`.'''
    ts = []
    if r.dep_ == 'ROOT':
        # Root nouns are considered important: exempt them from the
        # output filter in toMermaid.
        c.filter = False
        t = search_path(r, ['prep'])
        # NOTE(review): UH is the interjection POS tag — confirm this
        # branch is intended rather than e.g. a verb-tag check.
        if t and t.tag_ == 'UH':
            ts.append(t)
        pass
    # Active voice: coordinated predicates of the subject, sharing parameters.
    conj_v=[]
    get_conj(trace_path(r, ['nsubj']), conj_v)
    fill_methods(c, conj_v, False, res, True)
    conj_v = []
    # Passive voice (nsubjpass): the verbs' parameters become the owners.
    get_conj(trace_path(r, ['nsubjpass']), conj_v)
    fill_methods(c, conj_v, True, res, True)
    # Ordinary methods reached via complements and relative clauses.
    ts.append(find_path(trace_path(r, ['nsubj']), ['acomp', 'xcomp']))
    ts.append(find_path(r, ['relcl']))
    ts.append(find_path(trace_path(r, ['dobj']), ['xcomp']))
    fill_methods(c, ts, False, res, False)
    pass


def search_interface(c: Class, r: Token):
    '''Search interfaces starting from a method token.

    An adverbial modifier (advmod) of the verb is recorded as an
    interface of the class.
    '''
    adverb = find_path(r, ['advmod'])
    if adverb:
        c.addInterface(adverb.lemma_)


def search_interface2(c: Class, r: Token):
    '''Search interfaces starting from a class token.

    Collects adjectival modifiers (amod) of the noun, plus adjectival
    complements (acomp) and object predicates (oprd) reached through the
    noun's subject relation.

    Fix: the acomp lookup's result used to be discarded — it was
    immediately overwritten by the oprd lookup — so acomp interfaces were
    never recorded. Both patterns are now collected.
    '''
    ts = []
    ts.append(find_path(r, ['amod']))
    for pattern in (['^nsubj', 'acomp'], ['^nsubj', 'oprd']):
        t = search_path(r, pattern)
        if t:
            # a predicate link makes the class important: bypass the filter
            c.filter = False
            ts.append(t)
    for t in ts:
        if not t:
            continue
        c.addInterface(t.lemma_)


def is_time(t: Token) -> bool:
    '''Return True when the token's lemma consists solely of digits (a year/number).'''
    return t.lemma_.isdigit()


def search_paras(m: Method, r: Token) -> None:
    '''Search parameters for method `m` starting from its verb token `r`.'''
    ts = []
    # Prefer the full chain dobj -> prep -> pobj ("send X to Y");
    # fall back to the bare direct object when absent.
    t = find_path(r, ['dobj', 'prep', 'pobj'])
    if t:
        get_conj(t, ts)
    else:
        get_conj(find_path(r, ['dobj']), ts)
    # Other object positions: open-clausal objects, prepositional objects.
    get_conj(find_path(r, ['xcomp', 'dobj']), ts)
    get_conj(find_path(r, ['pobj']), ts)
    get_conj(find_path(r, ['prep', 'dobj']), ts)
    for t in ts:
        if not t:
            continue
        m.addPara(get_compound_word(t))
    ts.clear()
    # A prepositional object may be a time expression ("on Monday"):
    # digit-only lemmas become the generic 'time' parameter.
    get_conj(find_path(r, ['prep', 'pobj']), ts)
    for t in ts:
        if not t:
            continue
        if is_time(t):
            m.addPara('time')
        else:
            m.addPara(get_compound_word(t))
    pass


def search_father_class(c: Class, r: Token):
    '''Find a superclass via the copula pattern "<r> is a <noun>".

    The noun must be the `attr` of the verb "be" that governs `r`'s
    subject relation.
    '''
    candidate = search_path(r, ['^nsubj', 'attr'])
    if candidate and candidate.tag_.startswith('NN') and candidate.head.lemma_ == 'be':
        c.inherit(get_compound_word(candidate))


def search_attrs(c: Class, r: Token) -> None:
    '''Search attributes for class `c` starting from its noun token `r`.'''
    ts = []
    # "X has Y, Z" — coordinated objects of "have" become attributes.
    t = trace_path(r, ['nsubj'])
    if t and t.lemma_ == 'have':
        get_conj(find_path(t, ['dobj']), ts)
    # Clausal modifiers and prepositional objects of the noun itself.
    ts.append(find_path(r, ['acl']))
    ts.append(find_path(r, ['prep', 'pobj']))
    # "... of X" / "... on X": when `r` is a prepositional object, walk up
    # through the preposition to the modified head.
    t = trace_path(r, ['pobj'])
    if t:
        if t.lemma_ == 'on' or t.lemma_ == 'of':
            ts.append(trace_path(t, ['prep']))
    for t in ts:
        if not t:
            continue
        c.setAttr(get_compound_word(t))
    pass


def search_attrs1(c: Class, r: Token):
    '''Search attributes starting from a verb token.

    A possessive on the direct object ("its X") marks the object — and
    its coordinated siblings — as attributes of the class.
    '''
    if find_path(r, ['dobj', 'poss']):
        owned = []
        get_conj(find_path(r, ['dobj']), owned)
        for token in owned:
            if token:
                c.setAttr(get_compound_word(token))


def toMermaid(d: Dict[str, Class]) -> str:
    '''Render the class registry as Mermaid classDiagram code.

    Filtering rationale (translated from the original note): a class with
    no attributes/methods and no ties to other key classes is probably
    unimportant, so classes whose rendering is empty are skipped; setting
    `Class.filter = False` bypasses this heuristic.
    '''
    parts = ['classDiagram\n']
    for clazz in d.values():
        body = str(clazz)
        # skip empty classes unless explicitly exempted from filtering
        if not body and clazz.filter:
            continue
        parts.append(body)
        for parent in clazz.superClasses:
            if parent.startswith('interface$'):
                # realization edge for interfaces
                iface = parent.split('$')[1]
                parts.append(f'{iface} <|.. {clazz.name}\n')
            else:
                # inheritance edge for plain superclasses
                parts.append(f'{parent} <|-- {clazz.name}\n')
    return ''.join(parts)


# Abbreviation/punctuation normalization table: each key is replaced by its
# value before parsing (applied by abbreviation_handler).
config = {
    'e.g.': '',
    'i.e.': '',
    'etc.': '.',
    # temporarily replace special symbols that confuse the parser
    '&': ' and ',
    ':': ',',
    '- ': ''
}


def abbreviation_handler(text: str) -> str:
    '''Normalize abbreviations and special symbols per the `config` table.'''
    for pattern, replacement in config.items():
        text = text.replace(pattern, replacement)
    return text


# Lemmas treated as time expressions; time_words_handler normalizes them to
# a literal year so the parser sees a date-like token.
time_word = set([
    'monday', 'mon.',
    'tuesday', 'tue.',
    'wednesday', 'wed.',
    'thursday', 'thu.',
    'friday', 'fri.',
    'saturday', 'sat.',
    'sunday', 'sun.',
    'year', 'month', 'day',
    'today', 'yesterday', 'tomorrow',
])


def time_words_handler(text: str) -> str:
    '''Replace known time words with a literal year so they parse as dates.

    Fix: iterating the `time_word` set directly made the result depend on
    hash ordering — replacing 'day' before 'today' mangled 'today' into
    'to 2021 '. Replacing longest words first makes the output
    deterministic and lets longer words win over their substrings.
    '''
    for word in sorted(time_word, key=len, reverse=True):
        text = text.replace(word, ' 2021 ')
    return text


def show(s: str):
    '''Print the dependency tree of sentence `s` as Mermaid flowchart code.'''
    doc = nlp(s)
    print('flowchart TD')
    for token in doc:
        # node ids combine surface text and POS tag to keep them unique
        child = f'{token.text}#{token.tag_}'
        parent = f'{token.head.text}#{token.head.tag_}'
        print(f'{parent}({parent})-->|{token.dep_}|{child}({child})')
