# -*- coding: utf-8 -*-
from utils import read_txt,cut_sent,read_hed,read_word
from Syntactic_Analysis import ParseResult

'''
被字句 细分类
Fine-grained sub-classification of Chinese "bei" (被) passive sentences.
'''

def find_key_idx(fenci_res):
    """Return the position of the first passive-marker keyword in the segmented sentence.

    Args:
        fenci_res: word-segmentation result, a list of word strings.

    Returns:
        int: index of the first word that is one of 被/让/叫/给/为, or -1 if
        no passive marker is present.
    """
    # Fix: the original re-scanned the list with fenci_res.index(word) on every
    # hit; enumerate yields the position directly. The keyword set is built once
    # and set membership is O(1) per word.
    keywords = {"被", "让", "叫", "给", "为"}
    for idx, word in enumerate(fenci_res):
        if word in keywords:
            return idx
    return -1

'''
判定条件
Decision rules: map the parse features of one sentence onto a pattern type.
'''
def judge_type(hed_info, modifi_info, fenci_res, pos_res):
    """Classify a bei-sentence (被字句) into one of the fine-grained patterns.

    Args:
        hed_info: dict with at least 'word' and 'pos' for the HED (root) of
            the dependency parse.
        modifi_info: dict holding lists of modifier components keyed by
            'mt_info', 'pob_info', 'adv_info', 'vv_info'.
        fenci_res: word-segmentation result (list of str).
        pos_res: POS tags aligned one-to-one with fenci_res (list of str).

    Returns:
        str: one of the pattern descriptions in ``type_lst`` (last entry,
        '其他句型', is the fallback for anything unmatched).
    """
    mt_info = modifi_info['mt_info']    # MT components (particles such as 所)
    pob_info = modifi_info['pob_info']  # POB components (object of preposition)
    adv_info = modifi_info['adv_info']  # ADV components (adverbials)
    vv_info = modifi_info['vv_info']    # VV components (serial-verb chains)
    adv_h = read_hed(adv_info, hed_info['word'])  # ADV components modifying HED

    key_idx = find_key_idx(fenci_res)

    type_lst = ['主||“被/给”+动+其他成分',
                '主||“被/让/叫/给”+宾+动+其他成分',
                '主||状+“被/让/叫/给”+宾+动+其他成分',
                '主||(状)+“被/让/叫/给”+宾+状+动+其他成分',
                '主||“让/叫/被”+宾+“给”+动+其他成分',
                '主||“被/为”+宾+“所”+动',
                '主||“被/让/叫/给”+宾+动1（+宾1）+动2（+宾2）',
                '其他句型']

    # Noun-like POS tags: a word tagged with one of these right after the
    # keyword is treated as the object of the passive marker.
    noun_lst = ['n', 'f', 's', 't', 'nr', 'ns', 'nt', 'nw', 'nz', 'r', 'an', 'vn', 'PER']
    verb_lst = ['v', 'vd', 'vn']
    # (Removed unused adj_lst / quan_lst locals.)

    type_id = len(type_lst) - 1     # default: 其他句型 (other pattern)
    if key_idx == -1:               # no passive-marker keyword in the sentence
        return type_lst[type_id]

    # The HED must be a verb, and the sentence must show one of the passive
    # cues: 被/为 as a preposition object, HED itself 让/叫, or adverbial 给.
    if hed_info['pos'] in verb_lst and (
            read_word(pob_info, ['被', '为'])
            or hed_info['word'] in ["让", "叫"]
            or read_word(adv_info, ['给'])):
        # Bounds check fixes an IndexError when the keyword is the last word.
        if key_idx + 1 < len(pos_res) and pos_res[key_idx + 1] in noun_lst:
            # Keyword is followed by an object.
            if vv_info:                         # serial verbs: 动1(+宾1)+动2(+宾2)
                type_id = 6
                return type_lst[type_id]
            # Bounds check fixes an IndexError when the object is the last word.
            if key_idx + 2 < len(fenci_res) and fenci_res[key_idx + 2] == '给':
                type_id = 4                     # “给” right after the object
                return type_lst[type_id]
            if read_word(mt_info, ['所']):      # 所 after the object acting as MT
                type_id = 5
                return type_lst[type_id]
            if [adv for adv in adv_info if adv['num'] > key_idx]:
                type_id = 3                     # adverbial after the keyword
                return type_lst[type_id]
            # NOTE(review): index() returns the FIRST occurrence of the adverb
            # word; if that word repeats, adv_h['num'] would be the safer
            # position — confirm the shape returned by read_hed.
            if adv_h and fenci_res.index(adv_h['word']) < key_idx:
                type_id = 2                     # adverbial before the keyword
                return type_lst[type_id]
            type_id = 1                         # plain keyword+object+verb
            return type_lst[type_id]
        type_id = 0                             # keyword directly before the verb
        return type_lst[type_id]
    return type_lst[type_id]



if __name__ == '__main__':
    # Run the classifier over every test sentence in the data file and
    # print the parse details plus the detected pattern for each one.
    data_path = './data/test_bei.txt'
    for sentence in read_txt(data_path):
        print('*' * 125)
        print(sentence)
        words, tags, deps = cut_sent(sentence)
        parse = ParseResult(words, tags, deps)
        print('分词：', words)
        print('词性：', tags)
        print('句法：', deps)
        modifiers = parse.get_modifi_info()
        head = parse.get_hed_info()
        print(judge_type(head, modifiers, words, tags))
