from stanfordcorenlp import StanfordCoreNLP
from nltk.tokenize import sent_tokenize
import math, time
import json
import pymysql
# Module-level MySQL connection shared by the whole script.
# NOTE(review): empty root password and a hard-coded localhost target —
# acceptable only for a local dev environment; move credentials to config.
db = pymysql.connect(host='localhost',
                     user='root',
                     password='',
                     database='os-ecology',
                     port=3306)
cursor = db.cursor()

# Stanford CoreNLP client rooted at a hard-coded local install path.
# NOTE(review): machine-specific absolute path — parameterize before sharing.
nlp = StanfordCoreNLP(r'/Users/yuronan/develop/code/python/stanford-corenlp-4.3.2', lang='en')


class MiningAspect:
    def __init__(self):
        """Configure the input-data and lexicon file paths used by the miner."""
        # JSON event data; presumably GitHub PR bodies (read via the 'Paddle'
        # key, each event exposing a 'body' field) — see mining_aspect.
        self.train_path = './open-source/paddle_2020_pr.json'
        # NOTE(review): the *pos* attribute points at the *neg* lexicon file and
        # vice versa — these look swapped; confirm which pairing any downstream
        # consumer actually expects before changing.
        self.candipos_path = './data/hownet_neg_new.txt'
        self.candineg_path = './data/hownet_pos_new.txt'
        # Sentiment lexicon: comma-separated lines, first column is the word,
        # third column is the polarity label ('neg' / other).
        self.sentiment_path = './data/sentic_hownet.txt'
        # Stop-word list: tab-separated, first column is the word.
        self.stop_word_path = './data/stop_word.txt'

    def mining_aspect(self, train_data, sentiment_path, stop_word_path):
        stop_word = [line.strip().split('\t')[0] for line in open(stop_word_path)]
        def check_words(sent):
            # Return the sentiment words present in `sent` (an iterable of
            # tokens), or False when there are none.  The original computed
            # the same set intersection twice; compute it once.
            common = set(sentiment_words) & set(sent)
            return list(common) if common else False
        def sentence_token_nltk(str):
            # Thin wrapper over NLTK's sentence splitter.
            # (The parameter name shadows the builtin `str`; kept unchanged
            # for interface compatibility.)
            return sent_tokenize(str)
        def preRules(aspect_and_tag, sentence_with_dependency, originIndex, sentence_with_tag):
            """Extract aspect terms for the word at 1-based dependency index
            `originIndex`, expanding compound phrases and coordinated words.

            aspect_and_tag           -- (word, POS-tag) pair for the candidate word
            sentence_with_dependency -- CoreNLP triples (relation, head, dependent)
            sentence_with_tag        -- list of (word, POS-tag) for the sentence

            Returns a list of (flag, aspect_string) tuples; flag is 1 when a
            compound "pre-rule" fired, 0 when the bare word was kept.
            """
            result = []

            def compundRules(aspect_and_tag, sentence_with_dependency, originIndex, sentence_with_tag):
                # Returns a tuple: first element marks whether a pre-rule was
                # used, second element is the (possibly multi-word) aspect.
                # Returns the empty tuple when the word is a stop word or its
                # tag is not usable.
                aspect = aspect_and_tag[0]
                # NOTE(review): this gate omits VBD/VBG/VBP while the verb
                # branch below handles them — those three tags are dead here;
                # confirm whether the omission is intentional.
                if aspect in stop_word or aspect.lower() in stop_word or aspect_and_tag[1] not in (
                        'VB', 'VBN', 'VBZ', 'NN', 'NNS', 'NNP', 'NNPS'):
                    return ()
                # Join phrasal-verb particles (e.g. "set" + "up").
                if aspect_and_tag[1] in ('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'):
                    for dependency_inner in sentence_with_dependency:
                        if dependency_inner[1] == originIndex and dependency_inner[0] == 'compound:prt':
                            aspect_comp = sentence_with_tag[dependency_inner[2] - 1][0]
                            return (1, aspect + ' ' + aspect_comp)
                if aspect_and_tag[1] in ('NN', 'NNS', 'NNP', 'NNPS'):
                    for dependency_inner in sentence_with_dependency:
                        # Adjacent numeric / particle modifier of the noun.
                        if dependency_inner[1] == originIndex and dependency_inner[0] in ('nummod', 'compound:prt') and abs(
                                originIndex - dependency_inner[2]) == 1:
                            aspect_comp = sentence_with_tag[dependency_inner[2] - 1][0]
                            return (1, aspect + ' ' + aspect_comp)
                        # Compound noun separated by one word; if the word in
                        # between is an adjectival/numeric/compound modifier,
                        # join the whole phrase.
                        # BUGFIX: '== 2' was inside abs(), i.e. abs(a - b == 2),
                        # which silently meant plain `a - b == 2`; the correct
                        # abs-form used at the 'nummod' branch above shows the
                        # intended comparison.
                        if dependency_inner[1] == originIndex and dependency_inner[0] == 'compound' and abs(
                                originIndex - dependency_inner[2]) == 2:
                            aspect_comp = sentence_with_tag[dependency_inner[2] - 1][0]
                            for dependency_inner_inner in sentence_with_dependency:
                                # BUGFIX: same misplaced paren — abs(a - b == 1).
                                if dependency_inner_inner[1] == dependency_inner[1] and dependency_inner_inner[
                                    0] in ('amod', 'nummod', 'compound') and abs(
                                        dependency_inner[1] - dependency_inner_inner[2]) == 1:
                                    aspect_comp_comp = sentence_with_tag[dependency_inner_inner[2] - 1][0]
                                    for dependency_inner_inner_inner in sentence_with_dependency:
                                        if dependency_inner_inner_inner[1] == dependency_inner[2] and \
                                                dependency_inner_inner_inner[0] in ('amod', 'compound') and abs(
                                                dependency_inner_inner_inner[2] - dependency_inner[2]) == 1:
                                            aspect_comp_comp_comp = sentence_with_tag[dependency_inner_inner_inner[2] - 1][0]
                                            return (1, aspect_comp_comp_comp + ' ' + aspect_comp + ' ' + aspect_comp_comp + ' ' + aspect)
                                    return (1, aspect_comp + ' ' + aspect_comp_comp + ' ' + aspect)
                        # Directly adjacent compound noun.
                        # BUGFIX: misplaced paren again — abs(a - b == 1).
                        if dependency_inner[1] == originIndex and dependency_inner[0] == 'compound' and abs(
                                originIndex - dependency_inner[2]) == 1:
                            aspect_comp = sentence_with_tag[dependency_inner[2] - 1][0]
                            for dependency_inner_inner in sentence_with_dependency:
                                # BUGFIX: was `[0] == ('compound', 'nummod')`,
                                # comparing a string to a tuple (always False,
                                # making this branch dead); `in` is clearly
                                # intended — cf. the parallel branch below.
                                # The abs() paren is fixed here as well.
                                if dependency_inner_inner[1] == dependency_inner[2] and dependency_inner_inner[
                                    0] in ('compound', 'nummod') and abs(
                                        dependency_inner[2] - dependency_inner_inner[2]) == 1:
                                    aspect_comp_comp = sentence_with_tag[dependency_inner_inner[2] - 1][0]
                                    return (1, aspect_comp_comp + ' ' + aspect_comp + ' ' + aspect)
                                if dependency_inner_inner[1] == dependency_inner[1] and dependency_inner_inner[
                                    0] in ('compound', 'nummod') and dependency_inner_inner[2] - dependency_inner[1] == 1:
                                    aspect_comp_comp = sentence_with_tag[dependency_inner_inner[2] - 1][0]
                                    return (1, aspect_comp + ' ' + aspect + ' ' + aspect_comp_comp)
                            return (1, aspect_comp + ' ' + aspect)
                return (0, aspect)

            if aspect_and_tag[0] in stop_word or aspect_and_tag[0].lower() in stop_word or aspect_and_tag[1] not in (
                    'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'NN', 'NNS', 'NNP', 'NNPS'):
                return []
            # Coordination via conj / appos relations headed by originIndex.
            conj_flag = False
            for dependency in sentence_with_dependency:
                if dependency[0] in ('conj', 'appos') and dependency[1] == originIndex:
                    conj_flag = True
                    aspect_and_tag_second = sentence_with_tag[dependency[2] - 1]
                    originIndex_second = dependency[2]
                    final_apsect = compundRules(aspect_and_tag_second, sentence_with_dependency, originIndex_second, sentence_with_tag)
                    if final_apsect and final_apsect[1]:
                        # NOTE(review): only 'appos' results are kept; 'conj'
                        # results are computed but discarded — confirm intended.
                        if dependency[0] == 'appos':
                            result.append(final_apsect)
            # Coordinating conjunction directly after the word ("X and Y").
            # BUGFIX: guard originIndex + 1, not just originIndex — otherwise
            # sentence_with_tag[originIndex + 1] raises IndexError when
            # 'and'/'or' is the final token of the sentence.
            if not conj_flag and originIndex + 1 < len(sentence_with_tag) and sentence_with_tag[originIndex][0] in ('and', 'or'):
                final_apsect = compundRules(sentence_with_tag[originIndex + 1], sentence_with_dependency, originIndex + 1,
                                            sentence_with_tag)
                if final_apsect and final_apsect[1]:
                    result.append(final_apsect)

            final_apsect = compundRules(aspect_and_tag, sentence_with_dependency, originIndex, sentence_with_tag)
            if final_apsect and final_apsect[1]:
                result.append(final_apsect)
            return result
        sentiment_words = [line.strip().split(',')[0] for line in open(sentiment_path)]
        sentiment_words_and_polarity = [line.strip().split(',') for line in open(sentiment_path)]
        rule_one_count = 0
        rule_two_count = 0
        rule_three_count = 0
        rule_four_count = 0
        rule_five_count = 0
        sentence_count = 0
        # 倒入数据
        with open(train_data, 'r') as jsonfile:
            json_string = json.load(jsonfile)
            event_list = json_string['Paddle']
            for event in event_list:
                print(event)
                sentence_origin = event['body']
                if sentence_origin:
                    # if sentence_count == 10:
                    #     break
                    line = sentence_origin.strip().replace('%', ' percent')
                    sent_tokenize_list = sentence_token_nltk(line)
                    sentence_and_apsect = []
                    aspect_senti_pair_index = []
                    for sentence in sent_tokenize_list:
                        # 查找情感词，需要忽略大小写，否则部分词无法匹配
                        check_result = check_words(nlp.word_tokenize(sentence.lower()))
                        if check_result:
                            sentence_with_tag = nlp.pos_tag(sentence)
                            word_list_lower = [line[0].lower() for line in sentence_with_tag]
                            word_list = [line[0] for line in sentence_with_tag]
                            # print(check_result)
                            sentence_with_dependency = nlp.dependency_parse(sentence)
                            # print(sentence_with_dependency)
                            sentiment_word_and_origin_index_arr = []
                            for index, value in enumerate(word_list_lower):
                                if value in check_result:
                                    sentiment_word_and_origin_index_arr.append((index, value))
                            for sentiment_word_lower in sentiment_word_and_origin_index_arr:
                                # 情感词索引,情感词找到原词不能是小写
                                senti_word_index = sentiment_word_lower[0]
                                aspect_senti_pair_index.append(senti_word_index)
                                sentiment_word = word_list[senti_word_index]
                                # 情感词词性
                                senti_word_tag = sentence_with_tag[senti_word_index][1]
                                # 情感词极性
                                polarity = '3'
                                for senti in sentiment_words_and_polarity:
                                    if senti[0] == sentiment_word_lower[1]:
                                        if 'neg' == senti[2]:
                                            polarity = '0'
                                        else:
                                            polarity = '1'
                                # print(polarity)
                                # 依存分析索引
                                senti_dependency_index = senti_word_index + 1
                                ''' 规则1 情感词为形容词 '''
                                if senti_word_tag in ('JJ', 'JJR', 'JJS'):
                                    ''' 同一个情感词obl优先级最高'''
                                    obl_flag = False
                                    for dependency in sentence_with_dependency:
                                        # 如果存在形容词修饰关系
                                        if senti_dependency_index in dependency:
                                            if 'nsubj' in dependency:
                                                aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                                                  sentence_with_dependency, dependency[2],
                                                                  sentence_with_tag)
                                                # print(sentiment_word + '---' + aspect)
                                                for aspect in aspect_arr:
                                                    obl_flag = True
                                                    rule_one_count += 1
                                                    sentence_and_apsect.append(
                                                        sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)

                                    if not obl_flag:
                                        ''' 同一个情感词nsubj优先级第二高'''
                                        nsubj_flag = False
                                        for dependency in sentence_with_dependency:
                                            # 如果存在形容词修饰关系
                                            if senti_dependency_index in dependency:
                                                if 'obl' in dependency:
                                                    # 修饰对象必须为名词
                                                    if sentence_with_tag[dependency[2] - 1][1] in (
                                                            'NN', 'NNS', 'NNP', 'NNPS', 'VB', 'VBD', 'VBG', 'VBN', 'VBP',
                                                            'VBZ'):
                                                        aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                                                          sentence_with_dependency, dependency[2],
                                                                          sentence_with_tag)
                                                        # print(sentiment_word + '---' + aspect)
                                                        for aspect in aspect_arr:
                                                            nsubj_flag = True
                                                            rule_one_count += 1
                                                            sentence_and_apsect.append(
                                                                sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)

                                        if not nsubj_flag:
                                            for dependency in sentence_with_dependency:
                                                # 如果存在形容词修饰关系
                                                if senti_dependency_index in dependency:
                                                    # 修饰对象必须为名词
                                                    if 'amod' in dependency and senti_dependency_index == dependency[2]:
                                                        if abs(dependency[1] - dependency[2]) == 2:
                                                            aspect_arr = preRules(sentence_with_tag[dependency[1] - 1],
                                                                              sentence_with_dependency, dependency[1],
                                                                              sentence_with_tag)
                                                            for aspect in aspect_arr:
                                                                rule_one_count += 1
                                                                sentence_and_apsect.append(sentiment_word + ',' + str(aspect[0]) + ',' + sentence_with_tag[dependency[2]-1][0] + ' ' + sentence_with_tag[dependency[1] - 1][0] + ',' + polarity)
                                                        else:

                                                            aspect_arr = preRules(sentence_with_tag[dependency[1] - 1],
                                                                              sentence_with_dependency, dependency[1],
                                                                              sentence_with_tag)
                                                            for aspect in aspect_arr:
                                                                rule_one_count += 1
                                                                sentence_and_apsect.append(
                                                                    sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                        # print(sentiment_word + '---' + aspect)
                                                    if 'nsubj' in dependency:
                                                        # 修饰对象必须为名词
                                                        if sentence_with_tag[dependency[2] - 1][1] in (
                                                                'NN', 'NNS', 'NNP', 'NNPS', 'VB', 'VBD', 'VBG', 'VBN',
                                                                'VBP',
                                                                'VBZ'):
                                                            aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                                                              sentence_with_dependency, dependency[2],
                                                                              sentence_with_tag)
                                                            # print(sentiment_word + '---' + aspect)
                                                            for aspect in aspect_arr:
                                                                rule_one_count += 1
                                                                sentence_and_apsect.append(
                                                                    sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                                break
                                                    if 'obl' in dependency:
                                                        # 修饰对象必须为名词
                                                        if sentence_with_tag[dependency[2] - 1][1] in (
                                                                'NN', 'NNS', 'NNP', 'NNPS', 'VB', 'VBD', 'VBG', 'VBN',
                                                                'VBP', 'VBZ'):
                                                            aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                                                              sentence_with_dependency, dependency[2],
                                                                              sentence_with_tag)
                                                            # print(sentiment_word + '---' + aspect)
                                                            for aspect in aspect_arr:
                                                                rule_one_count += 1
                                                                sentence_and_apsect.append(
                                                                    sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                    if 'xcomp' in dependency:
                                                        if sentence_with_tag[dependency[2] - 1][1] in (
                                                                'NN', 'NNS', 'NNP', 'NNPS', 'VB', 'VBD', 'VBG', 'VBN',
                                                                'VBP',
                                                                'VBZ'):
                                                            aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                                                              sentence_with_dependency, dependency[2],
                                                                              sentence_with_tag)
                                                            # print(sentiment_word + '---' + aspect)
                                                            for aspect in aspect_arr:
                                                                rule_one_count += 1
                                                                sentence_and_apsect.append(
                                                                    sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                        if sentence_with_tag[dependency[1] - 1][1] in (
                                                                'NN', 'NNS', 'NNP', 'NNPS', 'VB', 'VBD', 'VBG', 'VBN',
                                                                'VBP',
                                                                'VBZ'):
                                                            xcomp_flag = False
                                                            for dependency_inner in sentence_with_dependency:
                                                                if 'nsubj' in dependency_inner and dependency_inner[1] == dependency[1]:
                                                                    aspect_arr = preRules(
                                                                        sentence_with_tag[dependency_inner[2] - 1],
                                                                        sentence_with_dependency, dependency_inner[2],
                                                                        sentence_with_tag)
                                                                    # print(sentiment_word + '---' + aspect)
                                                                    for aspect in aspect_arr:
                                                                        xcomp_flag = True
                                                                        rule_one_count += 1
                                                                        sentence_and_apsect.append(
                                                                            sentiment_word + ',' + str(
                                                                                aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                            if not xcomp_flag:
                                                                aspect_arr = preRules(sentence_with_tag[dependency[1] - 1],
                                                                                      sentence_with_dependency, dependency[1],
                                                                                      sentence_with_tag)
                                                                for aspect in aspect_arr:
                                                                    rule_one_count += 1
                                                                    sentence_and_apsect.append(
                                                                        sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                    # if 'parataxis' in dependency:
                                                    #     aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                                    #                       sentence_with_dependency, dependency[2],
                                                    #                       sentence_with_tag)
                                                    #     # print(sentiment_word + '---' + aspect)
                                                    #     for aspect in aspect_arr:
                                                    #         sentence_and_apsect.append(
                                                    #             sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1])
                                                    #         print('parataxis---' + sentiment_word + ',' + str(
                                                    #             aspect[0]) + ',' + aspect[1])
                                                    #         print(sentence)
                                                    # if 'advcl' in dependency:
                                                    #     aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                                    #                           sentence_with_dependency, dependency[2],
                                                    #                           sentence_with_tag)
                                                    #     # print(sentiment_word + '---' + aspect)
                                                    #     for aspect in aspect_arr:
                                                    #         sentence_and_apsect.append(
                                                    #             sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1])
                                                    #         print('advcl---' + sentiment_word + ',' + str(
                                                    #             aspect[0]) + ',' + aspect[1])
                                                    #         print(sentence)
                                                    if 'nmod' in dependency:
                                                        vb_index = dependency[1]
                                                        for dependency_inner in sentence_with_dependency:
                                                            if dependency_inner[1] == vb_index and dependency_inner[
                                                                0] == 'nsubj' and \
                                                                    sentence_with_tag[dependency_inner[2] - 1][1] in (
                                                                    'NN', 'NNS', 'NNP', 'NNPS'):
                                                                aspect_arr = preRules(
                                                                    sentence_with_tag[dependency_inner[2] - 1],
                                                                    sentence_with_dependency, dependency_inner[2],
                                                                    sentence_with_tag)
                                                                for aspect in aspect_arr:
                                                                    rule_one_count += 1
                                                                    sentence_and_apsect.append(
                                                                        sentiment_word + ',' + str(aspect[0]) + ',' +
                                                                        aspect[1] + ',' + polarity)
                                                    if 'advmod' in dependency:
                                                        vb_index = dependency[1]
                                                        if dependency[2] == senti_dependency_index:
                                                            if sentence_with_tag[vb_index - 1][1] in (
                                                                        'NN', 'NNS', 'NNP', 'NNPS'):
                                                                aspect_arr = preRules(
                                                                sentence_with_tag[dependency[1] - 1],
                                                                sentence_with_dependency, dependency[1],
                                                                sentence_with_tag)
                                                                # for aspect in aspect_arr:
                                                                #     sentence_and_apsect.append(
                                                                #         sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1])
                                                                #     print('advmod---NN' + sentiment_word + ',' + str(
                                                                #         aspect[0]) + ',' + aspect[1])
                                                                #     print(sentence)
                                                            else:
                                                                nsubj_flag = False
                                                                for dependency_inner in sentence_with_dependency:
                                                                    if dependency_inner[1] == vb_index and dependency_inner[
                                                                        0] == 'nsubj' and \
                                                                            sentence_with_tag[dependency_inner[2] - 1][1] in (
                                                                            'NN', 'NNS', 'NNP', 'NNPS'):
                                                                        aspect_arr = preRules(
                                                                            sentence_with_tag[dependency_inner[2] - 1],
                                                                            sentence_with_dependency, dependency_inner[2],
                                                                            sentence_with_tag)
                                                                        for aspect in aspect_arr:
                                                                            nsubj_flag = True
                                                                            rule_one_count += 1
                                                                            sentence_and_apsect.append(
                                                                                sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                                        # print(sentiment_word + '---' + aspect)
                                ''' 规则2 情感词为名词 '''
                                if senti_word_tag in ('NN', 'NNS', 'NNP', 'NNPS'):
                                    for dependency in sentence_with_dependency:
                                        # 如果存在名词修饰关系
                                        if senti_dependency_index == dependency[1] and sentence_with_tag[dependency[2] - 1][
                                            1] in ('NN', 'NNS', 'NNP', 'NNPS'):
                                            if 'nmod' in dependency:
                                                aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                                                  sentence_with_dependency, dependency[2],
                                                                  sentence_with_tag)
                                                for aspect in aspect_arr:
                                                    rule_two_count += 1
                                                    sentence_and_apsect.append(
                                                        sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                # print(sentiment_word + '---' + aspect)
                                        if senti_dependency_index == dependency[1]:
                                            if 'nsubj' in dependency:
                                                aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                                                  sentence_with_dependency, dependency[2],
                                                                  sentence_with_tag)
                                                for aspect in aspect_arr:
                                                    rule_two_count += 1
                                                    sentence_and_apsect.append(
                                                        sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)

                                ''' 规则3 情感词为副词 '''
                                if senti_word_tag in ('RB', 'RBR', 'RBS'):
                                    for dependency in sentence_with_dependency:
                                        # 如果存在副词修饰关系，且副词是修饰词不是被修饰词
                                        if senti_dependency_index in dependency:
                                            word_and_tag = sentence_with_tag[dependency[1] - 1]
                                            vb_index = dependency[1]
                                            if 'advmod' in dependency and dependency[2] == senti_dependency_index:
                                                if word_and_tag[1] in ('NN', 'NNS', 'NNP', 'NNPS'):
                                                    flag = False
                                                    for dependency_inner in sentence_with_dependency:
                                                        if dependency_inner[1] == vb_index and dependency_inner[
                                                            0] in ['nsubj'] and \
                                                                sentence_with_tag[dependency_inner[2] - 1][1] in (
                                                                'NN', 'NNS', 'NNP', 'NNPS'):
                                                            aspect_arr = preRules(
                                                                sentence_with_tag[dependency_inner[2] - 1],
                                                                sentence_with_dependency, dependency_inner[2],
                                                                sentence_with_tag)
                                                            for aspect in aspect_arr:
                                                                flag = True
                                                                rule_three_count += 1
                                                                sentence_and_apsect.append(
                                                                    sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                            # print(sentiment_word + '---' + aspect)
                                                            break
                                                    if not flag:
                                                        aspect_arr = preRules(word_and_tag,
                                                                          sentence_with_dependency, dependency[1],
                                                                          sentence_with_tag)
                                                        for aspect in aspect_arr:
                                                            rule_three_count += 1
                                                            sentence_and_apsect.append(
                                                                sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                elif word_and_tag[1] in ('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'):
                                                    flag = False
                                                    for dependency_inner in sentence_with_dependency:
                                                        if dependency_inner[1] == vb_index and dependency_inner[
                                                            0] in ['obl', 'obj', 'nsubj'] and \
                                                                sentence_with_tag[dependency_inner[2] - 1][1] in (
                                                                'NN', 'NNS', 'NNP', 'NNPS'):
                                                            aspect_arr = preRules(sentence_with_tag[dependency_inner[2] - 1],
                                                                              sentence_with_dependency, dependency_inner[2],
                                                                              sentence_with_tag)
                                                            for aspect in aspect_arr:
                                                                flag = True
                                                                rule_three_count += 1
                                                                sentence_and_apsect.append(
                                                                    sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                            # print(sentiment_word + '---' + aspect)
                                                            break
                                                    if not flag:
                                                        aspect_arr = preRules(word_and_tag,
                                                                          sentence_with_dependency, dependency[1],
                                                                          sentence_with_tag)
                                                        for aspect in aspect_arr:
                                                            rule_three_count += 1
                                                            sentence_and_apsect.append(
                                                                sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                else:
                                                    for dependency_inner in sentence_with_dependency:
                                                        if dependency_inner[1] == vb_index and dependency_inner[
                                                            0] == 'nsubj' and sentence_with_tag[dependency_inner[2] - 1][
                                                            1] in (
                                                                'NN', 'NNS', 'NNP', 'NNPS'):
                                                            aspect_arr = preRules(sentence_with_tag[dependency_inner[2] - 1],
                                                                              sentence_with_dependency, dependency_inner[2],
                                                                              sentence_with_tag)
                                                            for aspect in aspect_arr:
                                                                rule_three_count += 1
                                                                sentence_and_apsect.append(
                                                                    sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                            # print(sentiment_word + '---' + aspect)
                                                            break
                                            if 'nsubj' in dependency:
                                                # 修饰对象必须为名词
                                                if sentence_with_tag[dependency[2] - 1][1] in ('NN', 'NNS', 'NNP', 'NNPS'):
                                                    aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                                                      sentence_with_dependency, dependency[2],
                                                                      sentence_with_tag)
                                                    # print(sentiment_word + '---' + aspect)
                                                    for aspect in aspect_arr:
                                                        rule_three_count += 1
                                                        sentence_and_apsect.append(
                                                            sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)

                                            # if 'obl' in dependency:
                                            #     # 修饰对象必须为名词
                                            #     if sentence_with_tag[dependency[2] - 1][1] in ('NN', 'NNS', 'NNP', 'NNPS'):
                                            #         aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                            #                           sentence_with_dependency, dependency[2],
                                            #                           sentence_with_tag)
                                            #         # print(sentiment_word + '---' + aspect)
                                            #         for aspect in aspect_arr:
                                            #             sentence_and_apsect.append(
                                            #                 sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1])
                                            #             print('obl---' + sentiment_word + ',' + str(
                                            #                 aspect[0]) + ',' + aspect[1])
                                            #             print(sentence)

                                ''' 规则4 情感词为动词 '''
                                if senti_word_tag in ('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'):
                                    ''' 同一个情感词obl优先级最高'''
                                    obl_flag = False
                                    for dependency in sentence_with_dependency:
                                        # 如果存在形容词修饰关系
                                        if senti_dependency_index in dependency:
                                            if 'nsubj' in dependency or 'nsubj:pass' in dependency:
                                                # 修饰对象必须为名词
                                                if sentence_with_tag[dependency[2] - 1][1] in (
                                                        'NN', 'NNS', 'NNP', 'NNPS', 'VB', 'VBD', 'VBG', 'VBN', 'VBP',
                                                        'VBZ'):
                                                    aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                                                      sentence_with_dependency, dependency[2],
                                                                      sentence_with_tag)
                                                    # print(sentiment_word + '---' + aspect)
                                                    for aspect in aspect_arr:
                                                        obl_flag = True
                                                        rule_four_count += 1
                                                        sentence_and_apsect.append(
                                                            sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)

                                            elif 'obl' in dependency:
                                                if sentence_with_tag[dependency[2] - 1][1] in ('NN', 'NNS', 'NNP', 'NNPS'):
                                                    aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                                                          sentence_with_dependency, dependency[2],
                                                                          sentence_with_tag)
                                                    for aspect in aspect_arr:
                                                        rule_four_count += 1
                                                        sentence_and_apsect.append(
                                                            sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                            elif 'obj' in dependency:

                                                aspect_arr = preRules(sentence_with_tag[dependency[2] - 1],
                                                                      sentence_with_dependency, dependency[2],
                                                                      sentence_with_tag)
                                                for aspect in aspect_arr:
                                                    rule_four_count += 1
                                                    sentence_and_apsect.append(
                                                        sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                            elif 'amod' in dependency:
                                                aspect_arr = preRules(sentence_with_tag[dependency[1] - 1],
                                                                      sentence_with_dependency, dependency[1],
                                                                      sentence_with_tag)
                                                for aspect in aspect_arr:
                                                    rule_four_count += 1
                                                    sentence_and_apsect.append(
                                                        sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                            elif 'acl' in dependency:
                                                aspect_arr = preRules(sentence_with_tag[dependency[1] - 1],
                                                                      sentence_with_dependency, dependency[1],
                                                                      sentence_with_tag)
                                                for aspect in aspect_arr:
                                                    rule_four_count += 1
                                                    sentence_and_apsect.append(
                                                        sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)

                                ''' 规则5 情感词为语气词 '''
                                if senti_word_tag in ('UH'):
                                    for dependency in sentence_with_dependency:
                                        # 如果存在副词修饰关系
                                        if senti_dependency_index in dependency:
                                            if 'discourse' in dependency:
                                                aspect_arr = preRules(sentence_with_tag[dependency[1] - 1],
                                                                  sentence_with_dependency, dependency[1],
                                                                  sentence_with_tag)
                                                for aspect in aspect_arr:
                                                    rule_five_count += 1
                                                    sentence_and_apsect.append(
                                                        sentiment_word + ',' + str(aspect[0]) + ',' + aspect[1] + ',' + polarity)
                                                break
                        # if check_words(nlp.word_tokenize(sentence)):
                        #     print(sentence)
                        # print(nlp.dependency_parse(sentence))
                    mini_aspect = [line.strip().split(',')[2] for line in sentence_and_apsect]
                    # print(mini_aspect)
                    sentence_count += 1
                    print(sentence_and_apsect)
                    for singleAspect in sentence_and_apsect:
                        singleAspectArr = singleAspect.split(',')
                        sql = "INSERT INTO `os-ecology`.asp_senti_comment_1(`creat_at`, `polarity`, `aspect_word`, `sentiment_word`, `repo_name`, `comment`, `actor_id`, `actor_login`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
                        try:
                            cursor.execute(sql, (event['created_at'], int(singleAspectArr[3]), singleAspectArr[2], singleAspectArr[0], 'Paddle', line, event['actor_id'], event['actor_login']))
                            # connection is not autocommit by default. So you must commit to save
                            # your changes.
                            db.commit()
                        except Exception as e:
                            # 发生错误时回滚
                            print(e)
                            db.rollback()
                        # print(line)

                    if sentence_count % 10 == 0:
                        print('---------------')
        # db.close()
        # print(sentence_count)
        # print(rule_five_count)
        # print(rule_four_count)
        # print(rule_three_count)
        # print(rule_two_count)
        # print(rule_one_count)

    def miningApsect(self):
        """Run step 1 of the aspect-mining workflow and report elapsed time.

        Invokes ``mining_aspect`` with the configured training-data path,
        sentiment lexicon and stop-word list. ``mining_aspect`` persists its
        results to MySQL as a side effect and returns ``None``, so no return
        value is captured here.

        NOTE(review): the original 4-step SO-PMI pipeline (collect cowords,
        compute sopmi, save candiwords) was disabled here; only step 1 runs.
        """
        print('step 1/4:...mining aspect ...')
        start_time = time.time()
        self.mining_aspect(self.train_path, self.sentiment_path, self.stop_word_path)
        end_time1 = time.time()
        print('step 1/4 finished:...cost {0}...'.format((end_time1 - start_time)))


def test():
    """Entry point: construct a MiningAspect and run the mining pipeline."""
    miner = MiningAspect()
    miner.miningApsect()


# Guard the entry point so importing this module does not immediately start
# the CoreNLP + MySQL pipeline; it only runs when executed as a script.
if __name__ == '__main__':
    test()
