# 0. 引入必要的包
import os
import glob
import time
import numpy as np
from tqdm import tqdm
from util import get, preprocess_image, preprocess_image2, preprocess_image3, dump
from sklearn.model_selection import train_test_split

# 1. Load settings from the project config (via util.get)
train_dir, char_styles, new_size = (
    get("train"),        # training-data path (NOTE(review): not used below — the glob path is hardcoded; verify they match)
    get("char_styles"),  # list of character styles; must be a list
    get("new_size"),     # target image size; must be a tuple containing (h, w)
)

# 2. Build X/X2/X3 (three preprocessing variants of each image) and labels y
print("# 读取训练数据并进行预处理")
X, X2, X3, y = [], [], [], []
# Map each style to its index once, instead of an O(len) list.index() per image.
# (Assumes char_styles entries are unique — TODO confirm.)
style_index = {s: i for i, s in enumerate(char_styles)}
for i, style in enumerate(char_styles):
    # NOTE(review): path is hardcoded even though train_dir is read from the
    # config above — consider building the pattern from train_dir; verify.
    image_files = glob.glob(f"../data/shufa/train/train_{style}*")
    for element in tqdm(image_files, desc=f"处理 {style} 图像", unit="it", position=i):
        # Three parallel preprocessing variants of the same image file.
        A = preprocess_image(element, new_size)
        B = preprocess_image2(element, new_size)
        C = preprocess_image3(element, new_size)
        # The label is the style name embedded in the filename: train_<style>...
        label = os.path.basename(element).split("_")[1]
        X.append(A)
        X2.append(B)
        X3.append(C)
        y.append(style_index[label])
        # Removed time.sleep(0.01): it only throttled the loop (~10 ms per
        # image) for cosmetic progress-bar pacing and slowed every run.

X, X2, X3, y = np.array(X), np.array(X2), np.array(X3), np.array(y)

# 3. Split into train/test sets (80% / 20%)
print("# 将数据按 80% 和 20% 的比例分割")
# BUG FIX: the three splits previously shuffled independently (no shared
# random_state), so row i of X_train, X2_train and X3_train came from
# DIFFERENT source images, and y_train/y2_train/y3_train disagreed.
# With the same random_state and same-length inputs, train_test_split
# applies the identical permutation to all three, keeping the preprocessing
# variants and their labels aligned.
_SPLIT_SEED = 42
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=_SPLIT_SEED)
X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y, test_size=0.2, random_state=_SPLIT_SEED)
X3_train, X3_test, y3_train, y3_test = train_test_split(X3, y, test_size=0.2, random_state=_SPLIT_SEED)

# 4. Serialize the split train/test samples to the configured output root.
# Fetch the output root once instead of calling get() three times.
Xy_root = get("Xy_root")
dump((X_train, X_test, y_train, y_test), "(X_train,X_test,y_train,y_test)", f"{Xy_root}/Xy")
dump((X2_train, X2_test, y2_train, y2_test), "(X2_train, X2_test, y2_train, y2_test)", f"{Xy_root}/Xy2")
dump((X3_train, X3_test, y3_train, y3_test), "(X3_train, X3_test, y3_train, y3_test)", f"{Xy_root}/Xy3")