import numpy as np
import pandas as pd
import os
import logging
import pickle
from sklearn.model_selection import train_test_split
from lazypredict.Supervised import LazyClassifier
import joblib
import cv2
from tqdm import tqdm
from imutils import paths

# Configure root logging: INFO level, with timestamp - level - message format.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


# Helper: report a file's on-disk size in megabytes.
def get_size(file):
    """Return the size of *file* in MB (1 MB = 1024 * 1024 bytes)."""
    size_in_bytes = os.path.getsize(file)
    return size_in_bytes / 1024 ** 2


# Build the feature matrix X and label vector y from an image folder.
def createXY(train_folder, dest_folder, method='flat', batch_size=64):
    """Create features X and labels y from the images in *train_folder*.

    Each image is read in grayscale, resized to 32x32 and flattened into a
    1024-element pixel row. The label is taken from the filename prefix
    ("dog" -> 1, anything else -> 0), matching the cats-vs-dogs naming
    scheme "dog.123.jpg" / "cat.456.jpg".

    Results are cached as X.pkl / y.pkl in *dest_folder*; if both caches
    exist they are loaded directly and the slow image-decoding pass is
    skipped.

    Parameters
    ----------
    train_folder : str
        Directory scanned (recursively) for image files.
    dest_folder : str
        Directory where the X.pkl / y.pkl caches are written and read.
    method : str
        Feature-extraction method; only 'flat' (raw flattened pixels) is
        currently implemented.
    batch_size : int
        Number of images decoded per progress-bar step.

    Returns
    -------
    tuple[list, list]
        X as a list of flattened pixel arrays, y as a list of 0/1 ints.
    """
    x_file_path = os.path.join(dest_folder, "X.pkl")
    y_file_path = os.path.join(dest_folder, "y.pkl")

    # Fast path: reuse the cached pickles if both already exist.
    if os.path.exists(x_file_path) and os.path.exists(y_file_path):
        logging.info("X和y已经存在，直接读取")
        logging.info(f"X文件大小:{get_size(x_file_path):.2f}MB")
        logging.info(f"y文件大小:{get_size(y_file_path):.2f}MB")

        with open(x_file_path, 'rb') as f:
            X = pickle.load(f)
        with open(y_file_path, 'rb') as f:
            y = pickle.load(f)
        return X, y

    logging.info("读取所有图像，生成X和y")
    # Collect the paths of all images under the training folder.
    image_paths = list(paths.list_images(train_folder))

    X = []
    y = []

    # Ceiling division: number of batches needed to cover every image.
    num_batches = -(-len(image_paths) // batch_size)

    # Process the images in batches, keeping a progress bar for the read.
    for idx in tqdm(range(num_batches), desc="读取图像"):
        start = idx * batch_size
        end = min(start + batch_size, len(image_paths))

        batch_images = []
        batch_labels = []
        for image_path in image_paths[start:end]:
            # Decode in grayscale; cv2.imread returns None for unreadable
            # or corrupt files, which would otherwise crash cv2.resize.
            img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
            if img is None:
                logging.warning("Skipping unreadable image: %s", image_path)
                continue
            batch_images.append(cv2.resize(img, (32, 32)))

            # Filenames look like "dog.123.jpg" -> label from the prefix.
            label = os.path.basename(image_path).split('.')[0]
            batch_labels.append(1 if label == 'dog' else 0)

        # An entire batch can be unreadable; nothing to stack in that case.
        if not batch_images:
            continue

        # 'flat' method: flatten each 32x32 image into one pixel row.
        batch_pixels = np.array(batch_images).reshape(len(batch_images), -1)

        X.extend(batch_pixels)
        y.extend(batch_labels)

    logging.info(f"X.shape: {np.shape(X)}")
    logging.info(f"y.shape: {np.shape(y)}")

    # Cache to disk so the next run takes the fast path above.
    with open(x_file_path, 'wb') as f:
        pickle.dump(X, f)
    with open(y_file_path, 'wb') as f:
        pickle.dump(y, f)

    return X, y


# Build features and labels from the cats-vs-dogs data set via createXY,
# then split them 50/50 into train and test folds.
train_X, train_y = createXY(
    train_folder="data/train",
    dest_folder=".",
    method='flat'
)
feature_matrix = np.array(train_X)
label_vector = np.array(train_y)
X_train, X_test, y_train, y_test = train_test_split(
    feature_matrix, label_vector, test_size=0.5, random_state=2023
)

# Let LazyClassifier fit and score a broad set of classifiers automatically.
clf = LazyClassifier()
result, _ = clf.fit(X_train, X_test, y_train, y_test)

# Header row for the results table.
print(f"{'Model':<30}{'Accuracy':<15}{'F1 Score':<15}{'Time Taken':<15}")

# One formatted line per evaluated classifier.
for index in result.index:
    row = result.loc[index]
    print(f"{index:<30}{row['Accuracy']:<15.4f}{row['F1 Score']:<15.4f}{row['Time Taken']:<15.2f}")

# Pick the classifier that achieved the highest accuracy.
best_model_name = result['Accuracy'].idxmax()
print("\n准确率最高的模型是: ", best_model_name)

# clf.models maps each model name to its fitted estimator object.
best_model = clf.models[best_model_name]

# Persist the winning model to disk for later reuse.
with open(f"{best_model_name}_best_model.pkl", "wb") as f:
    pickle.dump(best_model, f)