import nltk
import pandas as pd
import numpy as np
from nltk.tokenize import word_tokenize

# Root directory holding all raw competition CSV files.
dataset_root = r'./PanJiu/data/'


### SEL log data paths (server event logs, training phase)
sn_log = dataset_root + "preliminary_sel_log_dataset.csv"
sn_log_a = dataset_root + "additional_sel_log_dataset.csv"


## Training label data paths (primary + supplemental label files)
train_label = dataset_root + "preliminary_train_label_dataset.csv"
train_label_s = dataset_root + "preliminary_train_label_dataset_s.csv"

## Test data paths (test-phase logs + submission template)
test_log = dataset_root + "preliminary_sel_log_dataset_a.csv"
test_submit = dataset_root + "preliminary_submit_dataset_a.csv"

### Build the corpus / train / test (submit) datasets
class DataProcess(object):
    """Build corpus / train / test text datasets from SEL log CSV files.

    Each output line pairs a server serial number (``sn``) with the
    concatenation of the last 10 log messages recorded for it (for the
    train/test sets, restricted to messages at or before the fault time),
    written as tab-separated fields, one record per line.
    """

    def __init__(self):
        # Human-readable tag for this processor instance.
        self.name = "data preprocess"

    def read_data_log(self, path):
        """Read a log CSV and return it sorted by (sn, time), index reset.

        :param path: CSV file with at least 'sn', 'time' and 'msg' columns.
        :return: sorted pandas DataFrame.
        """
        sel_data = pd.read_csv(path)
        sel_data.sort_values(by=['sn', 'time'], inplace=True)
        sel_data.reset_index(drop=True, inplace=True)
        return sel_data

    def build_corpus_list(self, path):
        """Build the corpus: one string per unique sn.

        Reads the log file at *path* and, for every distinct serial number,
        joins its last 10 messages (chronological order) with '.'.

        :param path: path of the log CSV to read.
        :return: list of joined-message strings, one per unique sn.
        """
        sel_data = self.read_data_log(path)
        # Single groupby pass instead of re-filtering the whole frame once
        # per sn (the frame is already sorted, so both group order and
        # within-group message order match the original per-sn filtering).
        tail_msg_list = [
            '.'.join(msgs.tail(10).to_list())
            for _, msgs in sel_data.groupby('sn', sort=False)['msg']
        ]
        return tail_msg_list

    def merge_log_dataset(self, sn_log, sn_log_a):
        """Concatenate the corpora built from two log files, in order."""
        tail_msg_list = self.build_corpus_list(sn_log)
        tail_msg_list_a = self.build_corpus_list(sn_log_a)
        merge_log_dataset_list = tail_msg_list + tail_msg_list_a
        return merge_log_dataset_list

    def write_file(self, path, data_list):
        """Write each string of *data_list* as one line of *path* (UTF-8)."""
        with open(path, 'w', encoding='utf-8') as f:
            for line in data_list:
                f.write(line)
                f.write("\n")

    def save_local_corpus(self, write_file, sn_log, sn_log_a):
        """Merge two log files into a corpus and persist it to *write_file*."""
        merge_log_dataset_list = self.merge_log_dataset(sn_log, sn_log_a)
        # BUG FIX: the format string previously had no '{}' placeholder,
        # so the record count was never printed.
        print("merge_log_dataset_list:{}".format(len(merge_log_dataset_list)))
        self.write_file(write_file, merge_log_dataset_list)

    def _build_labeled_rows(self, sel_data, labels):
        """Build one 'sn<TAB>text<TAB>label' line per labelled fault row.

        *text* is the lower-cased join ('.') of the last 10 messages for that
        sn recorded at or before the fault time.
        """
        rows = []
        for _, row in labels.iterrows():
            mask = (sel_data['sn'] == row['sn']) & (sel_data['time'] <= row['fault_time'])
            text = '.'.join(sel_data[mask].tail(10)['msg']).lower()
            # BUG FIX: the supplemental loop previously indexed labels from
            # the *primary* label DataFrame; take the label from the row
            # actually being iterated.
            rows.append(str(row['sn']) + "\t" + text + "\t" + str(row['label']))
        return rows

    def save_local_train(self, write_file, sn_log_path, train_dataset_path, train_dataset_s_path):
        """Build the labelled training dataset and persist it.

        :param write_file: output path for 'sn<TAB>text<TAB>label' lines.
        :param sn_log_path: log CSV providing the messages.
        :param train_dataset_path: primary label CSV (sn, fault_time, label).
        :param train_dataset_s_path: supplemental label CSV, same schema.
        """
        sel_data = self.read_data_log(sn_log_path)
        train_object = self.read_data_train(train_dataset_path)
        train_object_s = self.read_data_train(train_dataset_s_path)

        train_list_result = self._build_labeled_rows(sel_data, train_object)
        train_list_result_s = self._build_labeled_rows(sel_data, train_object_s)

        merge_train_dataset_list = train_list_result + train_list_result_s
        print("merge_train_dataset_list:{}".format(len(merge_train_dataset_list)))
        self.write_file(write_file, merge_train_dataset_list)

    def save_local_test(self, write_file, sn_log_test_path, train_dataset_path):
        """Build the unlabelled test dataset ('sn<TAB>text') and persist it.

        :param write_file: output path.
        :param sn_log_test_path: test-phase log CSV.
        :param train_dataset_path: submission CSV (sn, fault_time).
        """
        sn_log_test = self.read_data_log(sn_log_test_path)
        submit_list = self.read_data_train(train_dataset_path)
        test_list_result = []
        for i, row in submit_list.iterrows():
            # NOTE(review): joins with '. ' here but '.' in the train set —
            # looks unintentional, but downstream models may depend on it,
            # so it is kept as-is. Confirm before unifying.
            text = '. '.join(sn_log_test[(sn_log_test['sn'] == row['sn']) & (sn_log_test['time'] <= row['fault_time'])].tail(10)['msg']).lower()
            result = str(row['sn']) + "\t" + text
            test_list_result.append(result)
        print("test_list_result:{}".format(len(test_list_result)))
        self.write_file(write_file, test_list_result)

    def read_data_train(self, path):
        """Read a label/submission CSV sorted by (sn, fault_time), index reset."""
        train_ = pd.read_csv(path)
        train_.sort_values(by=['sn', 'fault_time'], inplace=True)
        train_.reset_index(drop=True, inplace=True)
        return train_


if __name__ == '__main__':
    # Generate the three raw text datasets: pre-training corpus,
    # labelled training set, and unlabelled test set.
    processor = DataProcess()
    processor.save_local_corpus("corpus_test.csv", sn_log, sn_log_a)
    processor.save_local_train("train_test.csv", sn_log, train_label, train_label_s)
    processor.save_local_test("test_test.csv", test_log, test_submit)
    
### This yields the three raw datasets.