import time
import numpy as np
import cv2
import os
from os.path import exists
from imutils import paths
import pickle
import logging
from lazypredict.Supervised import LazyClassifier
import joblib
from tabulate import tabulate
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
import lightgbm as lgb


def get_size(file):
    """Return the size of *file* on disk, expressed in megabytes."""
    size_in_bytes = os.path.getsize(file)
    return size_in_bytes / (1024 * 1024)


# Build the feature matrix X and label vector y from an image folder.
def createXY(train_folder, dest_folder, method='flat', batch_size=64):
    """Create features X and labels y from the images under *train_folder*.

    Each image is read as grayscale, resized to 32x32 and flattened into a
    1024-element pixel vector ('flat' is the only implemented method).
    The label is derived from the filename prefix: files named ``dog.*``
    get label 1, everything else gets 0.

    Results are cached as ``X.pkl``/``y.pkl`` in *dest_folder*; when both
    cache files exist they are loaded and returned immediately.

    Parameters:
        train_folder: directory scanned (recursively) for image files.
        dest_folder:  directory where X.pkl / y.pkl are cached.
        method:       feature extraction method; only 'flat' is implemented.
        batch_size:   number of images converted per batch.

    Returns:
        (X, y): list of 1-D numpy pixel vectors, list of int labels (0/1).
    """
    x_file_path = os.path.join(dest_folder, "X.pkl")
    y_file_path = os.path.join(dest_folder, "y.pkl")

    # Fast path: reuse previously cached features/labels.
    if os.path.exists(x_file_path) and os.path.exists(y_file_path):
        logging.info("X和y已经存在，直接读取")
        logging.info(f"X文件大小:{get_size(x_file_path):.2f}MB")
        logging.info(f"y文件大小:{get_size(y_file_path):.2f}MB")

        with open(x_file_path, 'rb') as f:
            X = pickle.load(f)
        with open(y_file_path, 'rb') as f:
            y = pickle.load(f)
        return X, y

    logging.info("读取所有图像，生成X和y")
    image_paths = list(paths.list_images(train_folder))

    X = []
    y = []

    # Ceiling division: one extra batch for the trailing partial batch.
    num_batches = len(image_paths) // batch_size + (1 if len(image_paths) % batch_size else 0)

    for idx in range(num_batches):
        batch_images = []
        batch_labels = []

        start = idx * batch_size
        end = min((idx + 1) * batch_size, len(image_paths))

        for i in range(start, end):
            image_path = image_paths[i]
            img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
            # cv2.imread returns None for unreadable/corrupt files;
            # skip them instead of crashing in cv2.resize.
            if img is None:
                logging.warning(f"Skipping unreadable image: {image_path}")
                continue
            img = cv2.resize(img, (32, 32))
            batch_images.append(img)

            # Filenames look like "dog.123.jpg" / "cat.456.jpg".
            label = os.path.basename(image_path).split('.')[0]
            batch_labels.append(1 if label == 'dog' else 0)

        # Guard: a batch may be empty if every image in it was skipped.
        if not batch_images:
            continue

        batch_images = np.array(batch_images)
        # Flatten each 32x32 image into a 1024-element row vector.
        batch_pixels = batch_images.reshape(batch_images.shape[0], -1)

        X.extend(batch_pixels)
        y.extend(batch_labels)

    logging.info(f"X.shape: {np.shape(X)}")
    logging.info(f"y.shape: {np.shape(y)}")

    # Cache the result so subsequent runs skip the image decoding.
    with open(x_file_path, 'wb') as f:
        pickle.dump(X, f)
    with open(y_file_path, 'wb') as f:
        pickle.dump(y, f)

    return X, y


# Make the module's logging.info output visible (default level is WARNING).
logging.basicConfig(level=logging.INFO)

# Build features and labels from the cat/dog dataset, then split train/test.
# BUG FIX: the original path literal "cat_dog_data\train" contained the
# escape sequence "\t" (a TAB character); os.path.join is also portable.
train_X, train_y = createXY(
    train_folder=os.path.join("cat_dog_data", "train"),
    dest_folder=".",
    method='flat'
)
# Convert the lists to numpy arrays and split 50/50 with a fixed seed.
X_train, X_test, y_train, y_test = train_test_split(
    np.array(train_X), np.array(train_y), test_size=0.5, random_state=2023
)

# Fit and evaluate a battery of classifiers automatically.
clf = LazyClassifier()

# Measure the total wall time of fitting all models.
start_time = time.time()

result, _ = clf.fit(X_train, X_test, y_train, y_test)

end_time = time.time()
total_time = end_time - start_time

# Build one table row per fitted model.
table_data = []
for model_name, metrics in result.iterrows():
    y_pred = clf.models[model_name].predict(X_test)
    f1 = f1_score(y_test, y_pred)
    # BUG FIX: report the per-model fit time from lazypredict's
    # 'Time Taken' column instead of the aggregate total_time, which
    # was (misleadingly) identical for every row.
    row = [model_name, metrics['Accuracy'], metrics['Balanced Accuracy'],
           metrics['ROC AUC'], f1, metrics['Time Taken']]
    table_data.append(row)

# Table header for the printed results.
headers = ["Model", "Accuracy", "Balanced Accuracy", "ROC AUC", "F1 Score", "Time Taken (s)"]
# Print results in tabular form.
print(tabulate(table_data, headers=headers, tablefmt="simple"))
print(f"\nTotal fitting time: {total_time:.2f}s")

# Pick the model with the highest accuracy.
best_model_name = result['Accuracy'].idxmax()
print("\nAccuracy最高的模型是: ", best_model_name)

# Retrieve the best fitted model object and persist it.
best_model = clf.models[best_model_name]
with open(f"{best_model_name}_best_model.pkl", "wb") as f:
    pickle.dump(best_model, f)

print("模型已保存成功。")