import os
import time
import cv2
import numpy as np
import pandas as pd
from keras.applications.resnet import ResNet50, preprocess_input
from keras.preprocessing import image
from keras.models import Model, Sequential
from keras.layers import Dense, Flatten, BatchNormalization, Dropout, Concatenate, Input, GlobalAveragePooling2D, Multiply, Add, Reshape, Conv2D
from sklearn.metrics import classification_report, roc_curve, auc
import matplotlib.pyplot as plt
from keras.utils import to_categorical
from keras.optimizers import Adam
from keras.losses import categorical_crossentropy
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from sklearn.preprocessing import StandardScaler
import tensorflow as tf

# CBAM-style attention block (channel + spatial branches)
def attention_block(inputs):
    """Apply combined channel and spatial attention to a 4-D feature map.

    Both branches gate ``inputs`` with sigmoid weights and the two gated
    maps are fused by element-wise addition, preserving the input shape.
    """
    n_channels = inputs.shape[-1]

    # Channel attention: squeeze spatially, then excite per channel
    # through a bottleneck (reduction ratio 8).
    squeezed = GlobalAveragePooling2D()(inputs)
    squeezed = Dense(n_channels // 8, activation='relu')(squeezed)
    squeezed = Dense(n_channels, activation='sigmoid')(squeezed)
    channel_weights = Reshape((1, 1, n_channels))(squeezed)
    channel_branch = Multiply()([inputs, channel_weights])

    # Spatial attention: a 7x7 conv produces one sigmoid gate per location.
    spatial_weights = Conv2D(1, (7, 7), padding='same', activation='sigmoid')(inputs)
    spatial_branch = Multiply()([inputs, spatial_weights])

    # Fuse the two attention branches.
    return Add()([channel_branch, spatial_branch])

# Model architecture: ResNet50 image branch + clinical-marker branch
def create_model():
    """Build and compile the two-input (image, 5 markers) binary classifier.

    Returns a compiled ``Model`` taking ``[image, text]`` inputs and
    producing a 2-way softmax.
    """
    # Image branch: ImageNet-pretrained ResNet50 backbone with attention.
    backbone = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    feat = attention_block(backbone.output)  # attention over backbone features
    feat = Flatten()(feat)
    feat = BatchNormalization()(feat)
    feat = Dense(512, activation='relu')(feat)
    feat = Dropout(0.5)(feat)
    image_branch = Dense(256, activation='relu')(feat)  # image feature embedding

    # Fine-tune only the last 10 backbone layers; freeze the rest.
    for frozen_layer in backbone.layers[:-10]:
        frozen_layer.trainable = False

    # Text branch: the five tumor-marker values -> dense embedding.
    text_input = Input(shape=(5,))
    marker = Dense(128, activation='relu')(text_input)
    marker = Dropout(0.5)(marker)
    text_branch = Dense(256, activation='relu')(marker)

    # Fusion head over the concatenated embeddings.
    merged = Concatenate()([image_branch, text_branch])
    merged = Dense(512, activation='relu')(merged)
    merged = Dropout(0.5)(merged)
    merged = Dense(256, activation='relu')(merged)
    merged = Dropout(0.5)(merged)
    class_probs = Dense(2, activation='softmax')(merged)

    # Assemble and compile the final two-input model.
    model = Model(inputs=[backbone.input, text_input], outputs=class_probs)
    model.compile(optimizer=Adam(learning_rate=0.0001),
                  loss=categorical_crossentropy,
                  metrics=['accuracy'])
    return model

# Detect a body region with a Haar cascade and crop to it.
def detect_and_crop_chest(image):
    """Return the first cascade-detected region of *image*, or *image* itself.

    NOTE(review): this uses the *full-body* Haar cascade on what appear to
    be chest images — it may rarely (or never) fire on radiographs; confirm
    this is the intended detector.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_fullbody.xml')
    detections = cascade.detectMultiScale(gray, 1.1, 4)
    if len(detections) > 0:
        x, y, w, h = detections[0]  # keep only the first detection
        return image[y:y + h, x:x + w]
    # Fall back to the uncropped image when nothing is detected.
    return image

# Read, crop and resize every image in a directory.
def load_images_from_folder(folder):
    """Load all readable images in *folder* as 224x224 BGR arrays.

    Files that ``cv2.imread`` cannot decode are silently skipped; each
    loaded image is first cropped via ``detect_and_crop_chest``.
    """
    loaded = []
    for name in os.listdir(folder):
        frame = cv2.imread(os.path.join(folder, name))
        if frame is None:
            continue  # not an image / unreadable file
        cropped = detect_and_crop_chest(frame)
        loaded.append(cv2.resize(cropped, (224, 224)))
    return loaded

# Read the tumor-marker features and labels from a CSV file.
def load_text_data(csv_file):
    """Load the five tumor-marker columns and the binary label column.

    Parameters
    ----------
    csv_file : str or file-like
        CSV with columns SCC, CEA, CK19, NSE, ProGRP and Lung_cancer.

    Returns
    -------
    tuple of ndarray
        ``(features, labels)`` with shapes ``(n, 5)`` and ``(n,)``.
    """
    frame = pd.read_csv(csv_file)
    marker_columns = ['SCC', 'CEA', 'CK19', 'NSE', 'ProGRP']
    features = frame[marker_columns].values
    labels = frame['Lung_cancer'].values
    return features, labels

# Load training images and text data.
# NOTE(review): images are collected in os.listdir() order while labels come
# from data.csv row order — the script assumes the two correspond one-to-one
# per class directory; verify this alignment upstream.
train_dir = 'D:\\kaggle\\final\\data1\\train'
cancer_train_dir = os.path.join(train_dir, 'cancer')
normal_train_dir = os.path.join(train_dir, 'normal')

cancer_train_images = load_images_from_folder(cancer_train_dir)
normal_train_images = load_images_from_folder(normal_train_dir)

cancer_train_text, cancer_train_labels = load_text_data(os.path.join(cancer_train_dir, 'data.csv'))
normal_train_text, normal_train_labels = load_text_data(os.path.join(normal_train_dir, 'data.csv'))

# Load validation images and text data (same per-class directory layout).
valid_dir = 'D:\\kaggle\\final\\data1\\valid'
cancer_valid_dir = os.path.join(valid_dir, 'cancer')
normal_valid_dir = os.path.join(valid_dir, 'normal')

cancer_valid_images = load_images_from_folder(cancer_valid_dir)
normal_valid_images = load_images_from_folder(normal_valid_dir)

cancer_valid_text, cancer_valid_labels = load_text_data(os.path.join(cancer_valid_dir, 'data.csv'))
normal_valid_text, normal_valid_labels = load_text_data(os.path.join(normal_valid_dir, 'data.csv'))

# Load testing images and text data (same per-class directory layout).
test_dir = 'D:\\kaggle\\final\\data1\\test'
cancer_test_dir = os.path.join(test_dir, 'cancer')
normal_test_dir = os.path.join(test_dir, 'normal')

cancer_test_images = load_images_from_folder(cancer_test_dir)
normal_test_images = load_images_from_folder(normal_test_dir)

cancer_test_text, cancer_test_labels = load_text_data(os.path.join(cancer_test_dir, 'data.csv'))
normal_test_text, normal_test_labels = load_text_data(os.path.join(normal_test_dir, 'data.csv'))

# Combine data: cancer samples first, then normal — the same order for
# images, text features and labels so the three arrays stay aligned.
X_train_images = np.array(cancer_train_images + normal_train_images)
X_train_text = np.vstack((cancer_train_text, normal_train_text))
y_train = np.array(cancer_train_labels.tolist() + normal_train_labels.tolist())

X_valid_images = np.array(cancer_valid_images + normal_valid_images)
X_valid_text = np.vstack((cancer_valid_text, normal_valid_text))
y_valid = np.array(cancer_valid_labels.tolist() + normal_valid_labels.tolist())

X_test_images = np.array(cancer_test_images + normal_test_images)
X_test_text = np.vstack((cancer_test_text, normal_test_text))
y_test = np.array(cancer_test_labels.tolist() + normal_test_labels.tolist())

# Preprocess images with the ResNet50-specific input normalization.
X_train_images = preprocess_input(X_train_images)
X_valid_images = preprocess_input(X_valid_images)
X_test_images = preprocess_input(X_test_images)

# Standardize text data: fit the scaler on the training split only, then
# apply the same transform to valid/test to avoid data leakage.
scaler = StandardScaler()
X_train_text = scaler.fit_transform(X_train_text)
X_valid_text = scaler.transform(X_valid_text)
X_test_text = scaler.transform(X_test_text)

# Apply importance weights to text data.
# NOTE(review): these weights appear precomputed (e.g. from a separate
# feature-importance analysis); their provenance is not shown here —
# confirm before reusing on different data.
importance_weights = np.array([0.09210638, 0.14757179, 0.22065559, 0.43121099, 0.10845525])
X_train_text = X_train_text * importance_weights
X_valid_text = X_valid_text * importance_weights
X_test_text = X_test_text * importance_weights

# Convert integer labels to one-hot vectors for the categorical loss.
y_train = to_categorical(y_train, num_classes=2)
y_valid = to_categorical(y_valid, num_classes=2)
y_test = to_categorical(y_test, num_classes=2)

# Define and train an ensemble of independently initialized models.
num_models = 3
models = []
# Fix: ModelCheckpoint does not create missing parent directories, so the
# first save would fail if ./checkpoints does not exist yet.
os.makedirs('./checkpoints', exist_ok=True)
for i in range(num_models):
    model = create_model()
    checkpoint = ModelCheckpoint(filepath=f'./checkpoints/model_{i}.h5', monitor='val_loss', mode='auto',
                                 save_best_only=True)
    # EarlyStopping monitors val_loss by default; with patience=5 the
    # nominal epochs=5000 is an upper bound, not the expected run length.
    early_stopping = EarlyStopping(verbose=1, patience=5)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.00001)

    history = model.fit([X_train_images, X_train_text], y_train, epochs=5000,
                        validation_data=([X_valid_images, X_valid_text], y_valid),
                        callbacks=[checkpoint, early_stopping, reduce_lr])
    models.append(model)

# Predict on the test set with each ensemble member.
y_preds = []
for model in models:
    y_pred = model.predict([X_test_images, X_test_text])
    y_preds.append(y_pred)

# Soft voting: average the softmax outputs of all models.
y_pred_avg = np.mean(y_preds, axis=0)

# Convert averaged probabilities / one-hot targets back to class labels.
y_pred_labels = np.argmax(y_pred_avg, axis=1)
y_test_labels = np.argmax(y_test, axis=1)

# Print per-class precision/recall/F1 on the test set.
print(classification_report(y_test_labels, y_pred_labels, digits=4))

# Calculate and plot ROC curve and AUC.
# Bug fix: roc_curve expects continuous scores, not hard 0/1 labels.
# Feeding it argmax'd labels collapses the curve to a single operating
# point and misstates the AUC, so use the ensemble-averaged probability
# of the positive class (column 1) instead.
fpr, tpr, _ = roc_curve(y_test_labels, y_pred_avg[:, 1])
roc_auc = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()

# Experiment log (no-op module-level string, kept as an inline record):
# held-out test-set classification reports at different epoch budgets.
'''
epoch=50
              precision    recall  f1-score   support

           0     0.8636    0.6333    0.7308        30
           1     0.8608    0.9577    0.9067        71

    accuracy                         0.8614       101
   macro avg     0.8622    0.7955    0.8187       101
weighted avg     0.8616    0.8614    0.8544       101


epoch=100
              precision    recall  f1-score   support

           0     0.8889    0.8000    0.8421        30
           1     0.9189    0.9577    0.9379        71

    accuracy                         0.9109       101
   macro avg     0.9039    0.8789    0.8900       101
weighted avg     0.9100    0.9109    0.9095       101

epoch=200
              precision    recall  f1-score   support

           0     0.8966    0.8667    0.8814        30
           1     0.9444    0.9577    0.9510        71

    accuracy                         0.9307       101
   macro avg     0.9205    0.9122    0.9162       101
weighted avg     0.9302    0.9307    0.9303       101

epoch=300
              precision    recall  f1-score   support

           0     0.9000    0.9000    0.9000        30
           1     0.9577    0.9577    0.9577        71

    accuracy                         0.9406       101
   macro avg     0.9289    0.9289    0.9289       101
weighted avg     0.9406    0.9406    0.9406       101

epoch=350
              precision    recall  f1-score   support

           0     0.9000    0.9000    0.9000        30
           1     0.9577    0.9577    0.9577        71

    accuracy                         0.9406       101
   macro avg     0.9289    0.9289    0.9289       101
weighted avg     0.9406    0.9406    0.9406       101

epoch=400
              precision    recall  f1-score   support

           0     0.9032    0.9333    0.9180        30
           1     0.9714    0.9577    0.9645        71

    accuracy                         0.9505       101
   macro avg     0.9373    0.9455    0.9413       101
weighted avg     0.9512    0.9505    0.9507       101

epoch=450
              precision    recall  f1-score   support

           0     0.8929    0.8333    0.8621        30
           1     0.9315    0.9577    0.9444        71

    accuracy                         0.9208       101
   macro avg     0.9122    0.8955    0.9033       101
weighted avg     0.9200    0.9208    0.9200       101

epoch=500
              precision    recall  f1-score   support

           0     0.9259    0.8333    0.8772        30
           1     0.9324    0.9718    0.9517        71

    accuracy                         0.9307       101
   macro avg     0.9292    0.9026    0.9145       101
weighted avg     0.9305    0.9307    0.9296       101

epoch=600
              precision    recall  f1-score   support

           0     0.8966    0.8667    0.8814        30
           1     0.9444    0.9577    0.9510        71

    accuracy                         0.9307       101
   macro avg     0.9205    0.9122    0.9162       101
weighted avg     0.9302    0.9307    0.9303       101

epoch=800
              precision    recall  f1-score   support

           0     0.9310    0.9000    0.9153        30
           1     0.9583    0.9718    0.9650        71

    accuracy                         0.9505       101
   macro avg     0.9447    0.9359    0.9401       101
weighted avg     0.9502    0.9505    0.9502       101
epoch=1000
              precision    recall  f1-score   support

           0     0.9286    0.8667    0.8966        30
           1     0.9452    0.9718    0.9583        71

    accuracy                         0.9406       101
   macro avg     0.9369    0.9192    0.9274       101
weighted avg     0.9403    0.9406    0.9400       101

epoch=2000
              precision    recall  f1-score   support

           0     0.8889    0.8000    0.8421        30
           1     0.9189    0.9577    0.9379        71

    accuracy                         0.9109       101
   macro avg     0.9039    0.8789    0.8900       101
weighted avg     0.9100    0.9109    0.9095       101

'''