import numpy as np


# Rows per series: 96 samples/day * 14 days.
# NOTE(review): 96/day suggests 15-minute sampling — confirm with the data owner.
a_data_num = 96 * 14

# Raw strings (r'...') keep the Windows backslashes literal. The original
# non-raw literals only worked through Python's invalid-escape fallback
# (e.g. '\A', '\d' left as-is), which raises SyntaxWarning on 3.12+ and is
# scheduled to become an error; the resulting path values are unchanged.
data1 = np.genfromtxt(r'F:\AIOPS\基于迁移学习的多指标异常检测\code\label-tool-cyp\data\移动_txt_400_raw\data_0_99.txt', delimiter=',')
# data2 = np.genfromtxt(r'F:\AIOPS\基于迁移学习的多指标异常检测\code\label-tool-cyp\data\移动_txt_400_raw\data_100_199.txt', delimiter=',')
data3 = np.genfromtxt(r'F:\AIOPS\基于迁移学习的多指标异常检测\code\label-tool-cyp\data\移动_txt_400_raw\data_200_299.txt', delimiter=',')
# data4 = np.genfromtxt(r'F:\AIOPS\基于迁移学习的多指标异常检测\code\label-tool-cyp\data\移动_txt_400_raw\data_300_399.txt', delimiter=',')
# data = np.vstack([data1, data2, data3, data4])
# Keep only the first 50 series (50 * a_data_num rows) from each loaded
# file, then stack them: 100 series total.
data = np.vstack([data1[:50 * a_data_num], data3[:50 * a_data_num]])

# Slice the stacked matrix back into 100 per-series files for OmniAnomaly:
#   train/<i>.txt — first half of the series (a_data_num // 2 rows)
#   test/<i>.txt  — the full series (a_data_num rows)
# rf'...' raw f-strings keep the backslashes literal (replacing the original
# mix of invalid escapes and '\\' doubling) while producing the exact same
# path values; non-raw invalid escapes are deprecated since Python 3.12.
all_data_list = []  # only used by the commented-out .npy export below
for data_index in range(100):
    # Rows belonging to series `data_index`.
    data_item = data[data_index * a_data_num:(data_index + 1) * a_data_num]
    # data_item = np.transpose(data_item)
    # all_data_list.append(data_item)
    np.savetxt(rf'F:\AIOPS\移动研究院\code\label-tool-cyp\preprocess_data\data\omnianomaly_data\train\{data_index}.txt',
               data_item[:a_data_num // 2], delimiter=',', fmt='%.2f')
    np.savetxt(rf'F:\AIOPS\移动研究院\code\label-tool-cyp\preprocess_data\data\omnianomaly_data\test\{data_index}.txt',
               data_item, delimiter=',', fmt='%.2f')

# np.save(r'F:\AIOPS\基于迁移学习的多指标异常检测\code\label-tool-cyp\preprocess_data\data\yidong_data.npy', np.array(all_data_list))

# # Export the labels as per-series txt files (commented out)
# all_label = np.load('F:\AIOPS\基于迁移学习的多指标异常检测\code\label-tool-cyp\label_yidong_v2.npy')
# for index, label in enumerate(all_label):
#     np.savetxt(f'F:\AIOPS\基于迁移学习的多指标异常检测\code\label-tool-cyp\preprocess_data\data\omnianomaly_data\label\\{index}.txt', label, fmt='%.2f')