import pandas as pd
from ltp import LTP

import numpy as np
import time

start = time.time()  # wall-clock start; total runtime is reported at the end

# Raise numpy/pandas display limits so long results print in full, untruncated.
np.set_printoptions(threshold=20000)
# NOTE: the original also called pd.set_option('max_colwidth', 20000); that bare
# alias resolves to the same 'display.max_colwidth' option and was redundant
# (and it breaks on recent pandas versions), so it has been removed.
pd.set_option('display.max_colwidth', 20000)
pd.set_option('display.max_rows', 20000)
pd.set_option('display.max_columns', 20000)

# Full novel dataset; the 'comment' column is iterated below, presumably one
# chapter's raw text per row (the sentence-splitting loop treats it that way).
data = pd.read_csv('../../data/data-完整连城诀数据集.csv', encoding='utf-8')
# print(data['comment'])


# Chapter number (1-based) -> LTP sentence-split result for that chapter.
# Sentence splitting is required before LTP syntax/semantic analysis: LTP caps
# a single input at 512 (per a reply in the official GitHub issues), so long
# texts must first be cut into sentences.
dicts_sentence = {}

ltp = LTP()  # one shared LTP pipeline instance, reused for every chapter

for chapter_no, chapter_text in enumerate(data['comment'], start=1):
    # print(type(chapter_text))
    dicts_sentence[chapter_no] = ltp.sent_split([chapter_text])
print("------------------------分句效果-----------------------------")
# Debug dump (disabled): {1: sentences of chapter 1, 2: sentences of chapter 2, ...}
# for k, v in dicts_sentence.items():
#     print(v)
#     print("\n")

# An SDP triple has the form (idx1, idx2, relation).  To recover the actual
# substring that a relation refers to, we need each sentence's word
# segmentation, so segment every sentence of every chapter here.
dicts_seg = {}
for chapter_no, sentences in dicts_sentence.items():
    segmented = []
    for sent in sentences:
        seg, hidden = ltp.seg([sent])
        segmented.append(seg)
    dicts_seg[chapter_no] = segmented
print("------------------------dicts_seg分词效果-----------------------------")
# Debug dump (disabled):
# for k, v in dicts_seg.items():
#     print(v)
#     print("\n")





# Chapter number -> list of semantic-dependency parses, one per sentence.
dicts_sdp = {}
for chapter_no, sentences in dicts_sentence.items():
    print("k=", chapter_no)
    parses = []
    for sent in sentences:
        # Each sentence is segmented again so `hidden` feeds the SDP model.
        seg, hidden = ltp.seg([sent])
        parses.append(ltp.sdp(hidden, graph=False))
    dicts_sdp[chapter_no] = parses
print("------------------------语义解析效果-----------------------------")
# Debug dump (disabled):
# for k, v in dicts_sdp.items():
#     for sentence in v:
#         print(sentence)
#         print("\n")
#     print("------------一个chapter(语义解析效果)结束----------")





print("------------------------去除语义解析效果中不相邻的语义成分-----------------")
# Drop every dependency triple (idx1, idx2, rel) whose two word indices are
# not adjacent (|idx1 - idx2| > 1), e.g. (4, 99, 'xxx'): only adjacent pairs
# form a contiguous substring worth counting later.  The in-place slice
# assignment rebuilds each inner list with a filter, replacing the original
# pop-with-shifting-offset deletion — same final state, far less fragile.
for chapter_parses in dicts_sdp.values():        # each chapter
    for sentence_parse in chapter_parses:        # each sentence's parse
        sentence_parse[0][:] = [
            triple for triple in sentence_parse[0]
            if abs(triple[0] - triple[1]) <= 1
        ]



print("\n")
print("\n")
print("\n")

print("-------------------------------------------------------------------")
# Map the surviving (adjacent) word pairs back onto the segmented text to
# obtain the substrings that will be counted.
chapter_final_seg = []  # kept for compatibility with the original script; unused below

print("-----------------------------------------------------------------------")

chapter_list = []
for k, chapter in dicts_sdp.items():
    print("------------------章----------------------------")
    # All adjacent word-pair substrings of one chapter (segmented, SDP-parsed,
    # non-adjacent relations already removed).
    word_list = []
    for j, sentence in enumerate(chapter):
        # sentence looks like [[(1, 2, 'AGT'), (3, 2, 'dCONT'), ...]]
        words = dicts_seg[k][j][0]  # flat word list of sentence j
        for wordgroup in sentence[0]:  # e.g. (1, 2, 'AGT')
            print("输出词组=\n", wordgroup)
            print("wordgroup[0]=", wordgroup[0])
            print("wordgroup[1]=", wordgroup[1])
            print("dicts_seg[k]=", dicts_seg[k])
            # LTP word indices are 1-based while Python lists are 0-based,
            # hence the -1; the pair is always joined lower-index-first.
            lo, hi = sorted(wordgroup[:2])
            word_list.append(words[lo - 1] + words[hi - 1])
    chapter_list.append(word_list)






# Debug dump of per-chapter substrings (disabled):
# for chapter in chapter_list:
#     print(chapter)
#     print("\n")

print("----------------------------------------整本小说的词频统计----------------------------------------------")
# Flatten all chapters into one word list.  A single comprehension replaces
# the original `book = book + chapter` loop, which rebuilt the list on every
# iteration (accidentally quadratic).
book = [word for chapter in chapter_list for word in chapter]
corpus = pd.DataFrame(book, columns=['word'])
corpus['cnt'] = 1
# Group by substring and count occurrences, most frequent first.
g = corpus.groupby(['word']).agg({'cnt': 'count'}).sort_values('cnt', ascending=False)
print(g)

end = time.time()
print("总共运行耗时", end - start)

# Persist the frequency table (word + count) as space-separated CSV.
g.to_csv('./sdp_result.csv', index=True, sep=' ')