import jieba
import jieba.analyse
import jieba.posseg as pos
import os
import joblib
import numpy as np
from pyltp import Segmentor
from pyltp import Postagger

# Input folders: raw x texts and their y label files (10-fold splits).
folder_source = "./train_delete_knowledge/10fold_x_delete_knowledge"
folder_source_y = "./train_delete_knowledge/10fold_y_delete_knowledge"
# Pre-trained word -> vector mapping; assumed dict-like (supports `in` and
# item access) — TODO confirm exact type of the pickled object.
word2vec = joblib.load("word2vec.pkl")
target_folder = "./target"

# makedirs(exist_ok=True) avoids the check-then-create race of
# `if not exists: mkdir` and also creates missing parent directories.
os.makedirs(target_folder, exist_ok=True)

# LTP v3.4.0 models for word segmentation (cws) and POS tagging.
LTP_DATA_DIR = './ltp_model/ltp_data_v3.4.0'
cws_model_path = os.path.join(LTP_DATA_DIR, 'cws.model')
pos_model_path = os.path.join(LTP_DATA_DIR, 'pos.model')

segmentor = Segmentor()
segmentor.load(cws_model_path)
tagger = Postagger()
tagger.load(pos_model_path)

def save_file(filename_preprocessed, folder, file_, np_or_txt):
    """Save `file_` under `target_folder/folder/filename_preprocessed`.

    If `np_or_txt` is truthy, `file_` is converted to a NumPy array and
    saved as .npy; otherwise `file_` is treated as an iterable of strings
    and written as UTF-8 text, one entry per line.
    """
    folder = os.path.join(target_folder, folder)
    # exist_ok avoids the check-then-create race and creates parents too.
    os.makedirs(folder, exist_ok=True)
    if np_or_txt:
        np.save(os.path.join(folder, filename_preprocessed), np.array(file_))
    else:
        with open(os.path.join(folder, filename_preprocessed), 'w', encoding='utf-8') as f:
            # Single buffered write instead of one write call per sentence.
            f.write("".join(sentence + '\n' for sentence in file_))


def seg_pos_vec(filename_raw):
    """Segment, POS-tag and vectorize one raw x file, saving three outputs.

    Writes space-joined segments to target/segment/, space-joined POS tags
    to target/tagger/, and per-word embedding arrays to target/vector/.
    """
    with open(os.path.join(folder_source, filename_raw), 'r', encoding='utf-8') as f:
        data = f.read().splitlines()
    # Remove any pre-existing spaces so LTP segments the raw characters.
    data = ["".join(sentence.split(" ")) for sentence in data]
    # Segment each sentence ONCE (the original segmented every sentence
    # twice) and derive both the joined form and the tag input from it.
    data_segments = [list(segmentor.segment(sentence)) for sentence in data]
    data_seg = [" ".join(segment) for segment in data_segments]
    data_pos = [" ".join(tagger.postag(segment)) for segment in data_segments]
    # Out-of-vocabulary words fall back to the embedding of '啊'.
    # NOTE: deliberately iterates the re-split joined string (as before), so
    # an empty sentence still yields one fallback vector, not an empty list.
    data_vec = [[np.array(word2vec[word]) if word in word2vec else np.array(word2vec['啊']) for word in sentence.split(" ")] for sentence in data_seg]

    # save
    save_file(filename_preprocessed=filename_raw, folder="segment", file_=data_seg, np_or_txt=False)
    save_file(filename_preprocessed=filename_raw, folder='tagger', file_=data_pos, np_or_txt=False)
    save_file(filename_preprocessed=filename_raw.split('.')[0] + ".npy", folder='vector', file_=data_vec, np_or_txt=True)

# x: preprocess every raw input file, then free the LTP model memory.
for file_name in os.listdir(folder_source + "/"):
    seg_pos_vec(file_name)

segmentor.release()
tagger.release()

# y: copy label files as raw text into target/y/ and as .npy string arrays
# into target/vector/ (same folder the x embeddings go to).
filename_all_y = os.listdir(folder_source_y + "/")
y_raw_path = os.path.join(target_folder, 'y')
os.makedirs(y_raw_path, exist_ok=True)
# Ensure the vector folder exists even if the x pass did not create it.
vector_path = os.path.join(target_folder, 'vector')
os.makedirs(vector_path, exist_ok=True)
for file_name in filename_all_y:
    with open(os.path.join(folder_source_y, file_name), 'r', encoding='utf-8') as f:
        data_y = f.read().splitlines()
    np.save(os.path.join(vector_path, file_name.split(".")[0] + ".npy"), np.array(data_y))
    with open(os.path.join(y_raw_path, file_name), 'w', encoding='utf-8') as f:
        f.write("".join(label + '\n' for label in data_y))

# # npy->txt
# file_y_all = os.listdir("./y/")
# for file_name in file_y_all:
#     npy_file = np.load(os.path.join("./y/", file_name))
#     with open("./y_raw/"+(file_name).split('.')[0]+".txt", 'w')as f:
#         for i in list(npy_file):
#             f.write(i + '\n')
#
# # txt -> npy
# folder = "./train_video/10fold_y_1337/"
# target = "./train_video/10fold_y_1337_vector/"
# file_y_all = os.listdir(folder)
# for file_name in file_y_all:
#     with open(os.path.join(folder, file_name), 'r', encoding='utf-8')as f:
#         data = [label for label in f.read().splitlines()]
#     data = np.array(data)
#     np.save(os.path.join(target, file_name.split('.')[0]+".npy"), data)
