from stanfordcorenlp import StanfordCoreNLP
from nltk.tokenize import sent_tokenize
import math, time
import threading

nlp = StanfordCoreNLP(r'/Users/yuronan/develop/code/python/stanford-corenlp-4.3.2', lang='en')


class MiningAspect:
    """Rule-based opinion-target ("aspect") mining over product reviews.

    Sentences containing a sentiment-lexicon word are POS-tagged and
    dependency-parsed with Stanford CoreNLP, and four hand-written
    dependency rules (for adjective / noun / adverb / verb sentiment
    words) extract the noun phrase the sentiment word describes.
    Results are printed as "sentiment_word,rule_flag,aspect" strings.
    """

    def __init__(self):
        # Corpus: one fastText-labelled review per line ("__label__X text").
        self.train_path = './data/train.ft.txt'
        # NOTE(review): the "pos" attribute points at the *neg* file and vice
        # versa.  Both are only consumed by the disabled SO-PMI steps, so the
        # apparent swap is left untouched — confirm intent before enabling
        # those steps.
        self.candipos_path = './data/hownet_neg_new.txt'
        self.candineg_path = './data/hownet_pos_new.txt'
        # Sentiment lexicon: comma-separated, word in the first field.
        self.sentiment_path = './data/sentic_hownet.txt'
        # Stop-word list: tab-separated, word in the first field.
        self.stop_word_path = './data/stop_word.txt'

    def mining_aspect(self, train_data, sentiment_path, stop_word_path, start, end):
        """Mine (sentiment word, aspect) pairs from corpus lines [start:end).

        Args:
            train_data: path to the fastText-format review corpus.
            sentiment_path: path to the sentiment lexicon (CSV, word first).
            stop_word_path: path to the stop-word list (TSV, word first).
            start, end: slice bounds into the first 10 000 corpus lines.

        Prints one list of "sentiment_word,rule_flag,aspect" strings per
        review; returns None.
        """
        # Close the lexicon files deterministically instead of leaking handles.
        with open(stop_word_path) as f:
            stop_word = [line.strip().split('\t')[0] for line in f]
        with open(sentiment_path) as f:
            sentiment_words = [line.strip().split(',')[0] for line in f]
        # Hoisted out of check_words: build the lookup set once, not per call.
        sentiment_set = set(sentiment_words)

        def check_words(sent):
            # Return the lexicon words occurring in `sent`, or False if none.
            hits = sentiment_set.intersection(sent)
            return list(hits) if hits else False

        def sentence_token_nltk(text):
            # Sentence-split a review (param renamed: don't shadow builtin str).
            return sent_tokenize(text)

        def preRules(aspect_and_tag, sentence_with_dependency, originIndex, sentence_with_tag):
            """Filter/expand a candidate aspect word.

            Args:
                aspect_and_tag: (word, POS) pair of the candidate aspect.
                sentence_with_dependency: CoreNLP (rel, head, dep) triples.
                originIndex: the candidate's own 1-based dependency index.
                sentence_with_tag: full (word, POS) list for the sentence.

            Returns (is_aspect, used_pre_rule, aspect_text): whether the word
            is a valid aspect, whether a compounding pre-rule fired (1/0), and
            the (possibly multi-word) aspect string.
            """
            aspect = aspect_and_tag[0]
            if aspect in stop_word:
                # Consistent 3-tuple arity (callers read [1]/[2] only when
                # [0] is truthy, so the trailing None is never consumed).
                return (False, False, None)
            # Verb aspect: re-attach its particle ("give" + "up") via compound:prt.
            if aspect_and_tag[1] in ('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'):
                for dependency_inner in sentence_with_dependency:
                    if dependency_inner[1] == originIndex and dependency_inner[0] == 'compound:prt':
                        aspect_comp = sentence_with_tag[dependency_inner[2] - 1][0]
                        return (True, 1, aspect + ' ' + aspect_comp)
            # Noun aspect: prepend compound modifiers, append numeric modifiers.
            if aspect_and_tag[1] in ('NN', 'NNS', 'NNP', 'NNPS'):
                for dependency_inner in sentence_with_dependency:
                    if dependency_inner[1] == originIndex and dependency_inner[0] == 'compound':
                        aspect_comp = sentence_with_tag[dependency_inner[2] - 1][0]
                        return (True, 1, aspect_comp + ' ' + aspect)
                    if dependency_inner[1] == originIndex and dependency_inner[0] == 'nummod':
                        aspect_comp = sentence_with_tag[dependency_inner[2] - 1][0]
                        return (True, 1, aspect + ' ' + aspect_comp)
            return (True, 0, aspect)

        count = 0
        all_count = 0
        line_list = []
        # Load at most the first 10 000 reviews, stripping the fastText label
        # prefix ("__label__X ", 11 chars) and spelling out '%' so it survives
        # CoreNLP tokenization.
        with open(train_data) as f:
            for line in f:
                count += 1
                line_list.append(line.strip()[11:].replace('%', ' percent'))
                if count == 10000:
                    break
        for line in line_list[start:end]:
            # Best effort per review: one CoreNLP failure must not abort the
            # whole slice, so log the error and continue.
            try:
                all_count += 1
                sent_tokenize_list = sentence_token_nltk(line)
                sentence_and_apsect = []
                for sentence in sent_tokenize_list:
                    # Match lexicon words case-insensitively, otherwise some
                    # words (e.g. sentence-initial) would never match.
                    check_result = check_words(nlp.word_tokenize(sentence.lower()))
                    if check_result:
                        sentence_with_tag = nlp.pos_tag(sentence)
                        word_list_lower = nlp.word_tokenize(sentence.lower())
                        word_list = [pair[0] for pair in sentence_with_tag]
                        sentence_with_dependency = nlp.dependency_parse(sentence)
                        for sentiment_word_lower in check_result:
                            # Recover the original-cased surface form from the
                            # tagged token list.
                            senti_word_index = word_list_lower.index(sentiment_word_lower)
                            sentiment_word = word_list[senti_word_index]
                            senti_word_tag = sentence_with_tag[senti_word_index][1]
                            # Dependency triples use 1-based token indices.
                            senti_dependency_index = senti_word_index + 1
                            # Rule 1: the sentiment word is an adjective.
                            if senti_word_tag in ('JJ', 'JJR', 'JJS'):
                                for dependency in sentence_with_dependency:
                                    if senti_dependency_index in dependency:
                                        # amod: adjective directly modifies a noun head.
                                        if 'amod' in dependency and senti_dependency_index == dependency[2] and \
                                                sentence_with_tag[dependency[1] - 1][1] in ('NN', 'NNS', 'NNP', 'NNPS'):
                                            aspect = preRules(sentence_with_tag[dependency[1] - 1],
                                                              sentence_with_dependency, dependency[1],
                                                              sentence_with_tag)
                                            if aspect[0]:
                                                sentence_and_apsect.append(
                                                    sentiment_word + ',' + str(aspect[1]) + ',' + aspect[2])
                                        # nsubj: adjective predicated of a noun subject.
                                        if 'nsubj' in dependency:
                                            if sentence_with_tag[dependency[2] - 1][1] in ('NN', 'NNS', 'NNP', 'NNPS'):
                                                # Bug fix: preRules needs the aspect's own
                                                # index (dependency[2]), not the predicate's.
                                                aspect = preRules(sentence_with_tag[dependency[2] - 1],
                                                                  sentence_with_dependency, dependency[2],
                                                                  sentence_with_tag)
                                                if aspect[0]:
                                                    sentence_and_apsect.append(
                                                        sentiment_word + ',' + str(aspect[1]) + ',' + aspect[2])
                                        # advmod: adjective modifies a verb; use that
                                        # verb's noun subject as the aspect.
                                        if 'advmod' in dependency:
                                            vb_index = dependency[1]
                                            if dependency[2] == senti_dependency_index:
                                                for dependency_inner in sentence_with_dependency:
                                                    if (dependency_inner[1] == vb_index
                                                            and dependency_inner[0] == 'nsubj'
                                                            and sentence_with_tag[dependency_inner[2] - 1][1]
                                                            in ('NN', 'NNS', 'NNP', 'NNPS')):
                                                        # Bug fix: pass the subject's own index.
                                                        aspect = preRules(sentence_with_tag[dependency_inner[2] - 1],
                                                                          sentence_with_dependency, dependency_inner[2],
                                                                          sentence_with_tag)
                                                        if aspect[0]:
                                                            sentence_and_apsect.append(
                                                                sentiment_word + ',' + str(aspect[1]) + ',' + aspect[2])
                                                        break
                            # Rule 2: the sentiment word is a noun.
                            if senti_word_tag in ('NN', 'NNS', 'NNP', 'NNPS'):
                                for dependency in sentence_with_dependency:
                                    # Sentiment noun is the head and the dependent is a noun.
                                    if senti_dependency_index == dependency[1] and \
                                            sentence_with_tag[dependency[2] - 1][1] in ('NN', 'NNS', 'NNP', 'NNPS'):
                                        if 'nmod' in dependency:
                                            # Bug fix: pass the modifier noun's own index.
                                            aspect = preRules(sentence_with_tag[dependency[2] - 1],
                                                              sentence_with_dependency, dependency[2],
                                                              sentence_with_tag)
                                            if aspect[0]:
                                                sentence_and_apsect.append(
                                                    sentiment_word + ',' + str(aspect[1]) + ',' + aspect[2])
                                    if senti_dependency_index == dependency[2]:
                                        if 'nsubj' in dependency:
                                            aspect = preRules(sentence_with_tag[dependency[1] - 1],
                                                              sentence_with_dependency, dependency[1],
                                                              sentence_with_tag)
                                            if aspect[0]:
                                                sentence_and_apsect.append(
                                                    sentiment_word + ',' + str(aspect[1]) + ',' + aspect[2])
                            # Rule 3: the sentiment word is an adverb.
                            if senti_word_tag in ('RB', 'RBR', 'RBS'):
                                for dependency in sentence_with_dependency:
                                    if senti_dependency_index in dependency:
                                        word_and_tag = sentence_with_tag[dependency[1] - 1]
                                        vb_index = dependency[1]
                                        # advmod where the adverb is the modifier.
                                        if 'advmod' in dependency and dependency[2] == senti_dependency_index:
                                            if word_and_tag[1] in ('NN', 'NNS', 'NNP', 'NNPS'):
                                                aspect = preRules(word_and_tag,
                                                                  sentence_with_dependency, dependency[1],
                                                                  sentence_with_tag)
                                                if aspect[0]:
                                                    sentence_and_apsect.append(
                                                        sentiment_word + ',' + str(aspect[1]) + ',' + aspect[2])
                                            else:
                                                # Modified word is not a noun (e.g. a verb):
                                                # fall back to that word's noun subject.
                                                for dependency_inner in sentence_with_dependency:
                                                    if (dependency_inner[1] == vb_index
                                                            and dependency_inner[0] == 'nsubj'
                                                            and sentence_with_tag[dependency_inner[2] - 1][1]
                                                            in ('NN', 'NNS', 'NNP', 'NNPS')):
                                                        # Bug fix: pass the subject's own index.
                                                        aspect = preRules(sentence_with_tag[dependency_inner[2] - 1],
                                                                          sentence_with_dependency, dependency_inner[2],
                                                                          sentence_with_tag)
                                                        if aspect[0]:
                                                            sentence_and_apsect.append(
                                                                sentiment_word + ',' + str(aspect[1]) + ',' + aspect[2])
                                                        break
                                        # nsubj with a noun subject.
                                        if 'nsubj' in dependency:
                                            if sentence_with_tag[dependency[2] - 1][1] in ('NN', 'NNS', 'NNP', 'NNPS'):
                                                aspect = preRules(sentence_with_tag[dependency[2] - 1],
                                                                  sentence_with_dependency, dependency[2],
                                                                  sentence_with_tag)
                                                if aspect[0]:
                                                    sentence_and_apsect.append(
                                                        sentiment_word + ',' + str(aspect[1]) + ',' + aspect[2])
                            # Rule 4: the sentiment word is a verb; take its noun object.
                            if senti_word_tag in ('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'):
                                for dependency in sentence_with_dependency:
                                    if senti_dependency_index in dependency:
                                        if 'obj' in dependency:
                                            if sentence_with_tag[dependency[2] - 1][1] in ('NN', 'NNS', 'NNP', 'NNPS'):
                                                # Bug fix: pass the object noun's own index.
                                                aspect = preRules(sentence_with_tag[dependency[2] - 1],
                                                                  sentence_with_dependency, dependency[2],
                                                                  sentence_with_tag)
                                                if aspect[0]:
                                                    sentence_and_apsect.append(
                                                        sentiment_word + ',' + str(aspect[1]) + ',' + aspect[2])
                print(sentence_and_apsect)
            except Exception as e:
                # Deliberate best-effort: report and move on to the next review.
                print(e)
        print('all_count' + str(all_count))

    def miningApsect(self, start, end):
        """Timed driver: run mining_aspect over corpus slice [start:end)."""
        print('step 1/4:...mining aspect ...')
        start_time = time.time()
        # mining_aspect currently returns None; the variable is kept for the
        # disabled SO-PMI steps 2-4 (collect_cowords / collect_candiwords /
        # save_candiwords) that used to follow here.
        seg_data = self.mining_aspect(self.train_path, self.sentiment_path, self.stop_word_path, start, end)
        end_time1 = time.time()
        print('step 1/4 finished:...cost {0}...'.format((end_time1 - start_time)))

    def startMultiThread(self):
        """Mine the 10 000-review corpus with three parallel slice threads.

        NOTE(review): all threads share the module-level CoreNLP client;
        confirm the client is safe for concurrent requests.
        """
        threads = [
            threading.Thread(target=self.miningApsect, args=(0, 3300)),
            threading.Thread(target=self.miningApsect, args=(3300, 6625)),
            threading.Thread(target=self.miningApsect, args=(6625, 10000)),
        ]
        for t in threads:
            t.daemon = True  # modern spelling; Thread.setDaemon() is deprecated
            t.start()
        # Bug fix: the original joined only the last thread, so the first two
        # daemon threads could be killed as soon as that one finished.
        for t in threads:
            t.join()
        print('ok')

def test():
    """Smoke test: run the full multithreaded aspect-mining pipeline."""
    ma = MiningAspect()
    ma.startMultiThread()


if __name__ == '__main__':
    # Guard the entry point so importing this module does not start mining.
    test()
