import codecs

from gensim.corpora.dictionary import Dictionary
from gensim.models import Word2Vec
import jieba.posseg as pseg
from langconv import Converter
import pandas as pd
import numpy as np
import re
import codecs
import os
import xlrd
import os
LTP_DATA_DIR = './ltp_data_v3.4.0/ltp_data_v3.4.0'  # path to the LTP model directory
par_model_path = os.path.join(LTP_DATA_DIR, 'parser.model')  # dependency-parser model path; the file is named `parser.model`

from pyltp import Parser
parser = Parser() # create the parser instance (module-global, shared by data_prepare)
parser.load(par_model_path)  # load the pretrained dependency-parsing model


# filtrate = re.compile(u'[^\u4E00-\u9FA5，]')
# filtrate1 = re.compile(u'[^\u4E00-\u9FA5:,]')

def cht_to_chs(line):
    """Convert a traditional-Chinese string to simplified Chinese.

    Args:
        line: input text (str).

    Returns:
        The simplified-Chinese version of ``line``.
    """
    # BUG FIX: the original called line.encode('utf-8') and discarded the
    # result -- str.encode returns a new bytes object and never mutates
    # the string, so that statement was a no-op and has been removed.
    return Converter('zh-hans').convert(line)


def loadData():
    """Read every Excel file under ``data/input`` and collect training data.

    Each sheet is expected to carry four unnamed columns:
      0 -> sentence text
      1 -> W-type label cell, 2 -> C-type, 3 -> E-type,
    where a label cell holds substrings separated by the full-width
    comma '，'. Empty cells are filled with 0 before processing.

    Returns:
        (words, labWs, labCs, labEs) -- parallel lists, one entry per row:
        the cleaned sentence and the three split label lists.
    """
    words = []
    labWs = []
    labCs = []
    labEs = []
    path = 'data/input'
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            # BUG FIX: the original joined the fixed `path` with the file
            # name even for files found in sub-directories; join the actual
            # dirpath reported by os.walk instead.
            child = os.path.join(dirpath, str(filename))
            # header=None: the sheets have no header row. (The original also
            # passed index=None, which is not a read_excel parameter and is
            # rejected by recent pandas; it has been dropped.)
            lib = pd.read_excel(child, header=None).fillna(0)
            for word in lib[0]:
                word = cht_to_chs(str(word).replace('\t', ''))
                words.append(word.replace(' ', ''))
            # The three label columns get identical treatment.
            for column, target in ((1, labWs), (2, labCs), (3, labEs)):
                for cell in lib[column]:
                    target.append(_split_label_cell(cell))
    return words, labWs, labCs, labEs


def _split_label_cell(cell):
    """Normalize one label cell: cast to str, drop spaces, convert to
    simplified Chinese, then split on the full-width comma '，'."""
    line = cht_to_chs(str(cell).replace(' ', ''))
    return line.split('，')


def data_prepare(words, labWs, labCs, labEs):
    """Turn sentences plus label substrings into per-character training rows.

    Args:
        words: list of sentences (str).
        labWs/labCs/labEs: per-sentence lists of labelled substrings
            (as split on '，' by loadData).

    Returns:
        dataList:   per-sentence list of characters.
        labelList:  per-character tag string for each sentence
                    (L/M/X, l/m/x, J/Q/K for multi-char entities,
                    I/r/q for single-char ones, '0' elsewhere).
        parserList: per-character dependency-relation tag.
        flagList:   per-character POS tag.

    NOTE: calls parser.release() before returning, so the module-global
    parser supports only one call of this function per load.
    """

    def _tag_spans(sent, spans, one_char, first, middle, last):
        # Replace each labelled substring inside `sent` with a same-length
        # run of tag characters: a single char becomes `one_char`, longer
        # spans become first + middle*(n-2) + last.
        for span in spans:
            if len(span) == 1:
                tag = one_char
            elif len(span) > 1:
                tag = first + (len(span) - 2) * middle + last
            else:
                continue  # empty fragment -- nothing to tag
            # BUG FIX: escape the substring before using it as a regex --
            # metacharacters in the label text would otherwise crash re.sub
            # or silently match the wrong characters.
            sent = re.sub(re.escape(span), tag, sent)
        return sent

    dataList = []
    labelList = []
    flagList = []
    parserList = []

    for i in range(len(words)):
        if i % 100 == 0:
            print(i, end=',')  # lightweight progress indicator
        # dataList: every character of the sentence becomes one element
        dataList.append(list(words[i]))

        # Segment, POS-tag and dependency-parse the sentence.
        sequence = []         # per-token character lists
        sequence_words = []   # segmented tokens
        sequence_flag = []    # POS tag per token
        sequence_parser = []  # dependency relation per token
        for word, flag in pseg.cut(words[i]):
            sequence.append(list(word))
            sequence_words.append(word)
            sequence_flag.append(flag)
        arcs = parser.parse(sequence_words, sequence_flag)  # dependency parse
        for arc in arcs:
            sequence_parser.append(arc.relation)

        # Broadcast token-level POS tags down to each character.
        data_flag = []
        for s in range(len(sequence_flag)):
            for _zi in sequence[s]:
                data_flag.append(sequence_flag[s])
        flagList.append(data_flag)

        # Broadcast token-level dependency relations down to each character.
        dataP_flag = []
        for s in range(len(sequence_parser)):
            for _zi in sequence[s]:
                dataP_flag.append(sequence_parser[s])
        parserList.append(dataP_flag)

        # Build the per-character label string.
        if len(labWs[i]) == 0 and len(labCs[i]) == 0 and len(labEs[i]) == 0:
            label = len(words[i]) * "O"
        else:
            # BUG FIX: operate on a local copy instead of mutating words[i]
            # in place -- the original overwrote the caller's sentences with
            # tag patterns, so the embedding step afterwards trained on
            # corrupted text.
            sent = str(words[i])
            sent = _tag_spans(sent, labWs[i], "I", "L", "M", "X")
            sent = _tag_spans(sent, labCs[i], "r", "l", "m", "x")
            sent = _tag_spans(sent, labEs[i], "q", "J", "Q", "K")
            # NOTE(review): non-entity chars become '0' (zero) here while
            # the empty-label branch above emits 'O' (letter) -- this looks
            # inconsistent in the original; preserved as-is pending
            # confirmation of which tag downstream consumers expect.
            label = re.sub(u'[^LXMlmxJQKqIr]', "0", sent)
        labelList.append(list(label))
    parser.release()  # release the dependency-parser model
    return dataList, labelList, parserList, flagList



def write(words, lab, parser, flag):
    """Write aligned per-character training rows to data/sample_train.txt.

    Each row is: char <TAB> POS-flag <TAB> dependency-relation <TAB> label.
    Sentences are separated by a blank line (CoNLL-style).

    Args:
        words/lab/parser/flag: parallel per-sentence, per-character
            sequences, as produced by data_prepare.
    """
    # `with` guarantees the file is closed even if writing raises -- the
    # original relied on an explicit close() that an exception would skip.
    with codecs.open('data/sample_train.txt', 'w', 'utf-8') as fw:
        for i in range(len(words)):
            for j in range(len(words[i])):
                # Original wrapped a single pre-concatenated string in
                # ''.join([...]); a plain tab join of the four fields is
                # equivalent and clearer.
                fw.write('\t'.join((words[i][j], flag[i][j],
                                    parser[i][j], lab[i][j])) + '\n')
            fw.write('\n')  # blank line terminates each sentence


# word,flag,lab = data_prepare(words,labels)

def embedding_sentences(sentences):
    """Train a Word2Vec model on ``sentences`` and build a word->vector map.

    Saves the trained model to Model/Word2vec_model.pkl as a side effect.

    Args:
        sentences: iterable of token sequences.

    Returns:
        dict mapping every vocabulary word to its 64-dim embedding vector.
    """
    model = Word2Vec(sentences, size=64, window=5, min_count=1)
    model.save('Model/Word2vec_model.pkl')
    # Register the vocabulary in a gensim Dictionary to enumerate it.
    vocab_dict = Dictionary()
    vocab_dict.doc2bow(model.wv.vocab.keys(), allow_update=True)
    # items() yields (id, token) pairs; invert to token -> id.
    index_by_word = {token: token_id for token_id, token in vocab_dict.items()}
    return {token: model[token] for token in index_by_word}


words, labWs, labCs, labEs = loadData()
# build character/label/parse/POS sequences from the loaded rows
dataList, labelList, parserList ,flagList, = data_prepare(words, labWs, labCs, labEs)
# write the aligned rows to the training txt file
write(dataList,labelList, parserList,flagList)
# build the word embeddings
embedding_sentences(words)
