# 0. Import required packages
from util import get, preprocess_image1, preprocess_image2, preprocess_image3, dump
import glob
from tqdm import tqdm
import numpy as np
from sklearn.model_selection import train_test_split

# 1. Load settings from the configuration file:
#    train_dir   - directory holding the training images
#    char_styles - list of calligraphy style names (must be a list)
#    new_size    - target image size; must be an (h, w) tuple
train_dir, char_styles, new_size = (get(key) for key in ("train", "char_styles", "new_size"))

# 2. Build the feature arrays X1/X2/X3 and the label vector y.
#    Each preprocess_image* variant produces one feature representation per file.
print("# 读取训练数据并进行预处理，")
X1 = []
X2 = []
X3 = []
y = []

# One glob result list per style: image_files[i] holds the files for char_styles[i].
image_files = [glob.glob(f"{train_dir}/train_{category}*") for category in char_styles]

# Iterate over the configured styles (was hard-coded to range(5), which broke
# whenever the configured style list had a different length).
for i, style in enumerate(char_styles):
    for path in tqdm(image_files[i], desc=f"处理 {style} 图像", unit="it"):
        X1.append(preprocess_image1(path, new_size))
        X2.append(preprocess_image2(path, new_size))
        X3.append(preprocess_image3(path, new_size))
        # The glob above already groups files by style, so the label is simply i.
        # (The old path.split('_')[1] parse mislabeled files whenever train_dir
        # itself contained an underscore.)
        y.append(i)

X1 = np.array(X1, dtype=np.float32)
X2 = np.array(X2, dtype=np.float32)
X3 = np.array(X3, dtype=np.float32)
y = np.array(y, dtype=np.int32)
# 3. Split each feature set into train/test subsets.
print("# 将数据按 80% 和 20% 的比例分割")

def _split_80_20(features):
    """Hold out 20% of the samples; the fixed seed keeps all three splits aligned."""
    return train_test_split(features, y, test_size=0.2, random_state=0)

X1_train, X1_test, y1_train, y1_test = _split_80_20(X1)
X2_train, X2_test, y2_train, y2_test = _split_80_20(X2)
X3_train, X3_test, y3_train, y3_test = _split_80_20(X3)
# 4. Serialize the split train/test samples, one bundle per preprocessing variant.
split_bundles = (
    (X1_train, X1_test, y1_train, y1_test),
    (X2_train, X2_test, y2_train, y2_test),
    (X3_train, X3_test, y3_train, y3_test),
)
for idx, bundle in enumerate(split_bundles, start=1):
    dump(bundle, f'(X{idx}_train, X{idx}_test, y{idx}_train, y{idx}_test)', f'./Xys/Xy{idx}')
