import os
import cv2
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import precision_score, recall_score, f1_score, roc_curve, auc
import time
from matplotlib import pyplot as plt
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.applications import VGG19
from tensorflow.keras.applications.vgg19 import preprocess_input
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator


# 定义定位胸部的函数
def locate_chest(image):
    """Crop a grayscale scan to the bounding box of its largest bright region.

    The image is blurred, binarized at a fixed intensity threshold (45), and
    the largest external contour is taken as the chest region.

    Args:
        image: 2-D grayscale image (uint8 numpy array).

    Returns:
        The cropped sub-image around the largest contour, or the unmodified
        input image when thresholding produces no contours (previously this
        raised ValueError from max() on an empty sequence).
    """
    blurred = cv2.GaussianBlur(image, (5, 5), 0)
    _, thresh = cv2.threshold(blurred, 45, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        # Nothing exceeded the threshold; fall back to the full image rather than crash.
        return image
    max_contour = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(max_contour)
    return image[y:y + h, x:x + w]


# 加载图像并应用胸部定位函数
def load_images_from_folder(folder):
    """Load grayscale images from per-class subfolders of *folder*.

    Each immediate subdirectory of *folder* is treated as a class; images in
    the subdirectory named 'cancer' get label 1, all others get label 0.
    Every readable image is cropped via locate_chest() and resized to 64x64.

    Returns:
        (images, labels): parallel lists of 64x64 grayscale arrays and int labels.
    """
    images, labels = [], []
    for class_name in os.listdir(folder):
        class_dir = os.path.join(folder, class_name)
        if not os.path.isdir(class_dir):
            continue  # ignore stray files sitting next to the class folders
        label = 1 if class_name == 'cancer' else 0
        for root, _, files in os.walk(class_dir):
            for fname in files:
                img = cv2.imread(os.path.join(root, fname), cv2.IMREAD_GRAYSCALE)
                if img is None:
                    continue  # unreadable / non-image file
                img = cv2.resize(locate_chest(img), (64, 64))
                images.append(img)
                labels.append(label)
    return images, labels


def augment_data(images, labels):
    """Expand the dataset with 5 random augmentations of every image.

    Applies random rotation, shifts, shear, zoom and horizontal flips via
    Keras' ImageDataGenerator. Labels are replicated alongside their images.

    Args:
        images: list of 2-D grayscale arrays.
        labels: list of labels parallel to *images*.

    Returns:
        (augmented_images, augmented_labels): 5 entries per input image.
    """
    generator = ImageDataGenerator(
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest',
    )
    out_images, out_labels = [], []
    for img, lbl in zip(images, labels):
        # flow() wants a 4-D batch: (1, height, width, channels)
        batch_input = img.reshape((1,) + img.shape + (1,))
        flow = generator.flow(batch_input, batch_size=1)
        for _ in range(5):  # 5 augmented samples per source image
            sample = next(flow)
            out_images.append(sample[0].reshape(batch_input.shape[1:3]))
            out_labels.append(lbl)
    return out_images, out_labels


def extract_features(images):
    """Extract flattened VGG19 'block5_pool' features for a list of images.

    Grayscale inputs are converted to 3-channel RGB, preprocessed with the
    VGG19-specific normalization, then passed through an ImageNet-pretrained
    VGG19 truncated at block5_pool.

    The truncated model is built once and cached on the function object:
    the original code rebuilt VGG19 (reloading its weights) on every call,
    and main() calls this three times.

    Args:
        images: list of 2-D 64x64 grayscale arrays.

    Returns:
        2-D numpy array of shape (n_images, n_features).
    """
    model = getattr(extract_features, "_model", None)
    if model is None:
        base_model = VGG19(weights='imagenet', include_top=False, input_shape=(64, 64, 3))
        model = Model(inputs=base_model.input, outputs=base_model.get_layer('block5_pool').output)
        extract_features._model = model

    # Replicate the single gray channel into RGB as VGG19 expects.
    images = [cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) for image in images]
    images = np.array(images)
    images = preprocess_input(images)

    # Forward pass, then flatten the pooled feature maps per image.
    features = model.predict(images)
    features = features.reshape(features.shape[0], -1)
    return features


def train_model(X_train, y_train, X_valid, y_valid):
    """Grid-search a random forest and report validation metrics.

    Runs a 3-fold cross-validated grid search over the forest's main
    hyperparameters, prints validation-set metrics for the best estimator,
    and returns it.

    Returns:
        The best-scoring fitted RandomForestClassifier.
    """
    # Hyperparameter search space.
    search_space = {
        'n_estimators': [100, 200, 300],
        'max_depth': [10, 20, 30],
        'min_samples_split': [2, 5, 10],
        'min_samples_leaf': [1, 2, 4],
        'bootstrap': [True, False],
    }

    searcher = GridSearchCV(
        estimator=RandomForestClassifier(random_state=42),
        param_grid=search_space,
        cv=3,
        n_jobs=-1,
        verbose=2,
    )
    searcher.fit(X_train, y_train)

    # Evaluate the winning model on the held-out validation split.
    best_model = searcher.best_estimator_
    print("Validation Metrics:")
    print_metrics(y_valid, best_model.predict(X_valid))

    return best_model


def predict(model, X_test):
    """Return *model*'s predictions for *X_test* (thin convenience wrapper)."""
    return model.predict(X_test)


def print_metrics(y_test, y_pred, y_score=None):
    """Print precision/recall/F1/AUC and plot the ROC curve.

    Args:
        y_test: ground-truth binary labels.
        y_pred: hard 0/1 predictions.
        y_score: optional continuous scores for the positive class (e.g.
            predict_proba output). When provided, the ROC curve and AUC are
            computed from these scores. When omitted (the original behavior),
            the hard labels in y_pred are used, which yields a degenerate
            3-point ROC curve and an AUC of (sensitivity + specificity) / 2.
    """
    precision = precision_score(y_test, y_pred)
    recall = recall_score(y_test, y_pred)
    f1 = f1_score(y_test, y_pred)
    # Prefer real scores for the ROC when the caller supplies them.
    fpr, tpr, _ = roc_curve(y_test, y_pred if y_score is None else y_score)
    roc_auc = auc(fpr, tpr)
    print(f'Precision: {precision}\nRecall: {recall}\nF1 Score: {f1}\nAUC: {roc_auc}')
    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic')
    plt.legend(loc="lower right")
    plt.show()  # NOTE: blocks until the figure window is closed


def main():
    """Run the full pipeline: load -> augment -> shuffle -> features -> scale -> train -> test."""
    started = time.time()

    # Load the three pre-split datasets (hard-coded local paths).
    X_train, y_train = load_images_from_folder('D:\\kaggle\\final\\data1\\train')
    X_valid, y_valid = load_images_from_folder('D:\\kaggle\\final\\data1\\valid')
    X_test, y_test = load_images_from_folder('D:\\kaggle\\final\\data1\\test')

    # Augment train and validation sets (5 variants per image).
    X_train, y_train = augment_data(X_train, y_train)
    X_valid, y_valid = augment_data(X_valid, y_valid)

    # Shuffle so augmented copies of the same image aren't adjacent.
    X_train, y_train = shuffle(X_train, y_train, random_state=42)
    X_valid, y_valid = shuffle(X_valid, y_valid, random_state=42)

    # VGG19 deep features for every split.
    X_train = extract_features(X_train)
    X_valid = extract_features(X_valid)
    X_test = extract_features(X_test)

    # Fit the scaler on train only; apply the same transform elsewhere.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_valid = scaler.transform(X_valid)
    X_test = scaler.transform(X_test)

    print(f'Training set size: {len(X_train)}, Validation set size: {len(X_valid)}, Test set size: {len(X_test)}')

    # Train with grid search, then report held-out test metrics.
    model = train_model(X_train, y_train, X_valid, y_valid)
    y_pred = predict(model, X_test)
    print_metrics(y_test, y_pred)

    finished = time.time()
    print(f'Program running time: {finished - started}')


if __name__ == "__main__":
    main()

'''
Validation Metrics:
Precision: 0.912568306010929
Recall: 0.9408450704225352
F1 Score: 0.926490984743412
AUC: 0.863755868544601
Precision: 0.8933333333333333
Recall: 0.9436619718309859
F1 Score: 0.9178082191780823
AUC: 0.8384976525821597
Program running time: 562.7631728649139
'''