import os
import time
import cv2
import numpy as np
import pandas as pd
from keras.applications.resnet import ResNet50, preprocess_input
from keras.preprocessing import image
from keras.models import Model, Sequential
from keras.layers import Dense, Flatten, BatchNormalization, Dropout, Concatenate, Input, GlobalAveragePooling2D, Multiply, Add, Reshape, Conv2D
from sklearn.metrics import classification_report, roc_curve, auc
import matplotlib.pyplot as plt
from keras.utils import to_categorical
from keras.optimizers import Adam
from keras.losses import categorical_crossentropy
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from sklearn.preprocessing import StandardScaler
from keras.preprocessing.image import ImageDataGenerator

# 定义注意力块
def attention_block(inputs):
    # 通道注意力机制
    channel = GlobalAveragePooling2D()(inputs)
    channel = Dense(inputs.shape[-1] // 8, activation='relu')(channel)
    channel = Dense(inputs.shape[-1], activation='sigmoid')(channel)
    channel = Reshape((1, 1, inputs.shape[-1]))(channel)
    channel_attention = Multiply()([inputs, channel])

    # 空间注意力机制
    spatial = Conv2D(1, (7, 7), padding='same', activation='sigmoid')(inputs)
    spatial_attention = Multiply()([inputs, spatial])

    # 合并注意力机制
    attention = Add()([channel_attention, spatial_attention])
    return attention

# Load ResNet50 model
# ImageNet-pretrained backbone without its classifier head; for a 224x224
# RGB input the final feature map is (7, 7, 2048).
base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
x = base_model.output
x = attention_block(x)  # apply channel + spatial attention to the backbone features
x = Flatten()(x)
x = BatchNormalization()(x)
x = Dense(1024, activation='relu')(x)  # widened dense layer
x = Dropout(0.5)(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
image_output = Dense(256, activation='relu')(x)  # new output layer for image features

# Freeze the layers except the last few layers
# Only the last 10 backbone layers stay trainable (fine-tuning); the
# attention/dense head above is always trainable.
for layer in base_model.layers[:-10]:
    layer.trainable = False

# Function to detect and crop chest area
def detect_and_crop_chest(image):
    """Crop the first body region found by a Haar cascade.

    NOTE: OpenCV's full-body cascade is used as a proxy for the chest area.
    Returns the crop of the first detection; when nothing is detected the
    image is returned unmodified.
    """
    cascade_path = cv2.data.haarcascades + 'haarcascade_fullbody.xml'
    detector = cv2.CascadeClassifier(cascade_path)
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    detections = detector.detectMultiScale(grayscale, 1.1, 4)
    if len(detections) > 0:
        x0, y0, width, height = detections[0]
        return image[y0:y0 + height, x0:x0 + width]
    return image  # fall back to the original image when no body is detected

# Load and preprocess images
def load_images_from_folder(folder):
    """Load, crop, and resize every readable image in *folder*.

    Files are processed in sorted filename order so the resulting list is
    deterministic — ``os.listdir`` order is arbitrary and OS-dependent,
    which previously made the image/CSV-row pairing non-reproducible.
    NOTE(review): images are later paired positionally with CSV rows;
    sorted-filename order is assumed to match the CSV row order — confirm.

    Unreadable / non-image files are skipped silently.
    """
    images = []
    for filename in sorted(os.listdir(folder)):  # deterministic order
        img = cv2.imread(os.path.join(folder, filename))
        if img is not None:  # cv2.imread returns None for unreadable files
            img = detect_and_crop_chest(img)
            img = cv2.resize(img, (224, 224))  # ResNet50 input size
            images.append(img)
    return images

# Load and preprocess text data
def load_text_data(csv_file):
    """Read tumor-marker features and labels from *csv_file*.

    Returns a tuple ``(text_data, labels)``: ``text_data`` is an (n, 5)
    array holding the SCC, CEA, CK19, NSE and ProGRP columns in that fixed
    order, and ``labels`` is the Lung_cancer column.
    """
    marker_columns = ['SCC', 'CEA', 'CK19', 'NSE', 'ProGRP']
    frame = pd.read_csv(csv_file)
    features = frame[marker_columns].values
    labels = frame['Lung_cancer'].values
    return features, labels

# Load training images and text data
# Directory layout: <split>/{cancer,normal}/ holds that class's images plus
# a data.csv with the tumor-marker values and labels.
# NOTE(review): images and CSV rows are later paired by position (see the
# "Combine data" section); verify the file order matches the CSV row order.
train_dir = 'D:\\kaggle\\final\\data1\\train'
cancer_train_dir = os.path.join(train_dir, 'cancer')
normal_train_dir = os.path.join(train_dir, 'normal')

cancer_train_images = load_images_from_folder(cancer_train_dir)
normal_train_images = load_images_from_folder(normal_train_dir)

cancer_train_text, cancer_train_labels = load_text_data(os.path.join(cancer_train_dir, 'data.csv'))
normal_train_text, normal_train_labels = load_text_data(os.path.join(normal_train_dir, 'data.csv'))

# Load validation images and text data
valid_dir = 'D:\\kaggle\\final\\data1\\valid'
cancer_valid_dir = os.path.join(valid_dir, 'cancer')
normal_valid_dir = os.path.join(valid_dir, 'normal')

cancer_valid_images = load_images_from_folder(cancer_valid_dir)
normal_valid_images = load_images_from_folder(normal_valid_dir)

cancer_valid_text, cancer_valid_labels = load_text_data(os.path.join(cancer_valid_dir, 'data.csv'))
normal_valid_text, normal_valid_labels = load_text_data(os.path.join(normal_valid_dir, 'data.csv'))

# Load testing images and text data
test_dir = 'D:\\kaggle\\final\\data1\\test'
cancer_test_dir = os.path.join(test_dir, 'cancer')
normal_test_dir = os.path.join(test_dir, 'normal')

cancer_test_images = load_images_from_folder(cancer_test_dir)
normal_test_images = load_images_from_folder(normal_test_dir)

cancer_test_text, cancer_test_labels = load_text_data(os.path.join(cancer_test_dir, 'data.csv'))
normal_test_text, normal_test_labels = load_text_data(os.path.join(normal_test_dir, 'data.csv'))

# Combine data
# Cancer samples first, then normal; images and text rows must keep the same
# positional order so each image stays aligned with its CSV row and label.
X_train_images = np.array(cancer_train_images + normal_train_images)
X_train_text = np.vstack((cancer_train_text, normal_train_text))
y_train = np.array(cancer_train_labels.tolist() + normal_train_labels.tolist())

X_valid_images = np.array(cancer_valid_images + normal_valid_images)
X_valid_text = np.vstack((cancer_valid_text, normal_valid_text))
y_valid = np.array(cancer_valid_labels.tolist() + normal_valid_labels.tolist())

X_test_images = np.array(cancer_test_images + normal_test_images)
X_test_text = np.vstack((cancer_test_text, normal_test_text))
y_test = np.array(cancer_test_labels.tolist() + normal_test_labels.tolist())

# Preprocess images
# ResNet50's preprocess_input (ImageNet channel normalization).
X_train_images = preprocess_input(X_train_images)
X_valid_images = preprocess_input(X_valid_images)
X_test_images = preprocess_input(X_test_images)

# Standardize text data
# Scaler is fitted on the training split only; valid/test reuse its statistics.
scaler = StandardScaler()
X_train_text = scaler.fit_transform(X_train_text)
X_valid_text = scaler.transform(X_valid_text)
X_test_text = scaler.transform(X_test_text)

# Apply importance weights to text data
# Per-marker weights in (SCC, CEA, CK19, NSE, ProGRP) order; presumably from
# a prior feature-importance analysis — TODO confirm their source.
importance_weights = np.array([0.09210638 ,0.14757179, 0.22065559, 0.43121099, 0.10845525])
X_train_text = X_train_text * importance_weights
X_valid_text = X_valid_text * importance_weights
X_test_text = X_test_text * importance_weights

# Convert labels to categorical
# One-hot encode 0/1 labels for the 2-way softmax output.
y_train = to_categorical(y_train, num_classes=2)
y_valid = to_categorical(y_valid, num_classes=2)
y_test = to_categorical(y_test, num_classes=2)

# Define text input model
# Tabular branch over the 5 weighted tumor-marker features.
text_input = Input(shape=(5,))
text_x = Dense(128, activation='relu')(text_input)
text_x = Dropout(0.5)(text_x)
text_x = Dense(128, activation='relu')(text_x)  # extra hidden layer
text_x = Dropout(0.5)(text_x)
text_output = Dense(256, activation='relu')(text_x)

# Combine image and text models
# Concatenate the 256-d image embedding with the 256-d text embedding.
combined = Concatenate()([image_output, text_output])
combined = Dense(1024, activation='relu')(combined)  # widened dense layer
combined = Dropout(0.5)(combined)
combined = Dense(512, activation='relu')(combined)
combined = Dropout(0.5)(combined)
final_output = Dense(2, activation='softmax')(combined)

# Create the final model
# Two inputs (image tensor, 5-feature text vector) -> 2-class softmax.
model = Model(inputs=[base_model.input, text_input], outputs=final_output)

# Compile the model
model.compile(optimizer=Adam(learning_rate=0.00001), loss=categorical_crossentropy, metrics=['accuracy'])  # lowered learning rate for fine-tuning

# Print the model summary
model.summary()

# Data augmentation
# NOTE(review): flow() below is fed [images, text]; this relies on Keras's
# multi-input flow contract where only the first (image) array is augmented
# and the extra array is passed through unmodified — confirm against the
# installed Keras version's ImageDataGenerator.flow documentation.
datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)

# Callbacks
checkpoint_dir = './checkpoints'
os.makedirs(checkpoint_dir, exist_ok=True)

# Keep only the best weights (lowest validation loss) on disk.
checkpoint = ModelCheckpoint(filepath=os.path.join(checkpoint_dir, 'chest_CT_SCAN-ResNet50.h5'),
                             monitor='val_loss',
                             mode='auto',
                             save_best_only=True)
early_stopping = EarlyStopping(verbose=1, patience=10)  # increased patience
# NOTE(review): min_lr equals the initial learning rate (1e-5 at compile
# time), so this callback can never actually reduce the rate — likely
# unintended; consider min_lr=1e-7 or a higher initial rate.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.00001)

# Train model
start_time = time.time()
history = model.fit(datagen.flow([X_train_images, X_train_text], y_train, batch_size=32),
                    epochs=2000,  # large epoch budget; EarlyStopping ends training sooner
                    validation_data=([X_valid_images, X_valid_text], y_valid),
                    callbacks=[checkpoint, early_stopping, reduce_lr])
end_time = time.time()

# Predict on test set
y_pred = model.predict([X_test_images, X_test_text])  # (n, 2) softmax probabilities

# Convert predictions to labels
y_pred_labels = np.argmax(y_pred, axis=1)
y_test_labels = np.argmax(y_test, axis=1)

# Print classification report
print(classification_report(y_test_labels, y_pred_labels, digits=4))

# Calculate and plot ROC curve and AUC.
# FIX: roc_curve must be given the positive-class *probability*, not the
# hard argmax labels — scoring with 0/1 labels collapses the curve to a
# single operating point and misstates the AUC.
fpr, tpr, _ = roc_curve(y_test_labels, y_pred[:, 1])
roc_auc = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()

# Print program running time
print(f'Program running time: {end_time - start_time}')

'''
epoch=200
              precision    recall  f1-score   support

           0     0.8462    0.7333    0.7857        30
           1     0.8933    0.9437    0.9178        71

    accuracy                         0.8812       101
   macro avg     0.8697    0.8385    0.8518       101
weighted avg     0.8793    0.8812    0.8786       101

epoch=2000

'''