import os
import shutil
import sys

import cv2
# from keras.preprocessing.image import img_to_array
import dlib
import imutils
import numpy as np
from keras.models import load_model
from sklearn.metrics import accuracy_score, classification_report

# import tensorflow as tf

# gpus = tf.config.experimental.list_physical_devices('GPU')
# if gpus:
#     try:
#         # Set memory growth to true
#         for gpu in gpus:
#             tf.config.experimental.set_memory_growth(gpu, True)
#         logical_gpus = tf.config.experimental.list_logical_devices('GPU')
#         print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
#     except RuntimeError as e:
#         print(f"Error: {e}")


# Paths to the pre-trained models (relative to the working directory).
detection_model_path = './model/haarcascade_frontalface_default.xml'
emotion_model_path = './model/_mini_XCEPTION.102-0.66.hdf5'

# Haar-cascade face detector and mini-XCEPTION emotion classifier,
# loaded once at import time. compile=False: the model is used for
# inference only, so no optimizer/loss state is needed.
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
# emotion_classifier = tf.keras.models.load_model(emotion_model_path, compile=False)
# Class labels in the model's output order.
# NOTE(review): the commented line above uses "scared"/"surprised"; the
# active list renames them to "afraid"/"surprise" — presumably to match
# the KDEF filename labels used by accuracy_test(); confirm against the
# dataset naming convention.
# EMOTIONS = ["angry" ,"disgust","scared", "happy", "sad", "surprised", "neutral"]
EMOTIONS = ["angry" ,"disgust","afraid", "happy", "sad", "surprise", "neutral"]
# Sentinel results returned when no frame / no face is available.
noEmotion = [0,0,0,0,0,0,0]
emptyLabel = "no face"
emptyfaces = (0,0,0,0)



def recognize_frame(frame):
    """Detect the largest face in a BGR frame and classify its emotion.

    Parameters
    ----------
    frame : numpy.ndarray or None
        BGR image (as produced by ``cv2.imread`` / ``cv2.VideoCapture``).

    Returns
    -------
    (probs, label) : (list, str)
        Per-class probabilities in ``EMOTIONS`` order and the predicted
        label, or ``(noEmotion, emptyLabel)`` when ``frame`` is None or
        no face is detected.
    """
    if frame is None:
        return noEmotion, emptyLabel

    frame = imutils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = face_detection.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=3, minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE)

    if len(faces) == 0:
        return noEmotion, emptyLabel

    # detectMultiScale returns (x, y, w, h) boxes, so the face area is
    # w * h.  The original key, (x[2]-x[0])*(x[3]-x[1]), treated the box
    # as (x1, y1, x2, y2) corners and could select the wrong face.
    (fX, fY, fW, fH) = max(faces, key=lambda box: box[2] * box[3])

    # Crop the face ROI, resize to the 64x64 input of mini-XCEPTION,
    # scale to [0, 1], and add a batch dimension.
    # NOTE(review): img_to_array was removed upstream, so the batch is
    # shaped (1, 64, 64) with no trailing channel axis — confirm the
    # loaded model accepts this shape.
    roi = gray[fY:fY + fH, fX:fX + fW]
    roi = cv2.resize(roi, (64, 64))
    roi = roi.astype("float") / 255.0
    roi = np.expand_dims(roi, axis=0)

    preds = emotion_classifier.predict(roi)[0]
    label = EMOTIONS[preds.argmax()]
    print(f"label: {label}")

    return list(preds), label
        
def accuracy_test():
    """Evaluate ``recognize_frame`` on a KDEF subset and write a report.

    Reads ``.JPG`` files whose base name encodes the true emotion label
    (e.g. ``happy.12.JPG`` -> ``happy``), runs the classifier on each,
    prints accuracy / classification report, copies misclassified images
    into a fresh directory, and writes the summary to
    ``./accuracy/xception.txt``.
    """
    head = 'S'
    dataset_path = f'../../../model_test/dataset/KDEF/KDEF_rename/{head}'
    wrong_image_dir = '../../../model_test/dataset/KDEF/KDEF_wrong_xception'

    # Recreate the misclassified-images directory from scratch.
    # shutil.rmtree replaces the original `os.system('rm -r ...')`
    # shell-out (portable, no shell injection via the path).
    if os.path.exists(wrong_image_dir):
        shutil.rmtree(wrong_image_dir)
    os.makedirs(wrong_image_dir, exist_ok=True)

    true_labels = []
    predicted_labels = []
    wrong_image_path = []

    filenames = os.listdir(dataset_path)
    test_num = min(100000, len(filenames))
    # Shuffle with a fixed seed so every run evaluates the same subset
    # in the same order (reproducible results).
    np.random.seed(0)
    np.random.shuffle(filenames)
    filenames = filenames[:test_num]

    for i, filename in enumerate(filenames):
        if i % 10 == 0:
            print(f"========== Processing {i}/{test_num} ==========")
        if not filename.endswith('.JPG'):
            continue
        image_path = os.path.join(dataset_path, filename)
        image = cv2.imread(image_path)  # BGR
        result, label = recognize_frame(image)
        true_label = filename.split('.')[0]  # label is encoded in the file name
        if label != true_label:
            wrong_image_path.append(image_path)
            print(f"Wrong image path: {image_path} - Predicted: {label} - True: {true_label}")
        true_labels.append(true_label)
        predicted_labels.append(label)

    accuracy = accuracy_score(true_labels, predicted_labels)
    report = classification_report(true_labels, predicted_labels)
    print(f"Accuracy: {accuracy}")
    print(f"Report: {report}")
    print(f"Wrong images: {wrong_image_path}")
    print("Accuracy test finished.")

    # Copy every misclassified image for later inspection.
    for image_path in wrong_image_path:
        image = cv2.imread(image_path)
        cv2.imwrite(os.path.join(wrong_image_dir, os.path.basename(image_path)), image)
    print("Wrong images saved.")

    result_path = "./accuracy/xception.txt"
    # Ensure the output directory exists; open() alone would raise
    # FileNotFoundError if ./accuracy/ is missing.
    os.makedirs(os.path.dirname(result_path), exist_ok=True)
    with open(result_path, 'w') as f:
        f.write(f"Head: {head}\n")
        f.write(f'Accuracy: {accuracy:.2f}\n')
        # Reuse the report computed above.  The original recomputed it
        # with hard-coded target_names=['angry','disgust','fear',...],
        # which neither matched the labels this pipeline produces
        # ('afraid', 'surprise', per EMOTIONS) nor was guaranteed to
        # follow sklearn's sorted label order — mislabeling report rows.
        f.write(report)


if __name__ == '__main__':
    # Smoke test: classify one sample image and print the per-emotion
    # probability distribution alongside the predicted label.
    sample_path = './image/1.jpg'
    sample = cv2.imread(sample_path)
    result, label = recognize_frame(sample)

    for emotion, proba in zip(EMOTIONS, result):
        print(f"{emotion}: {round(float(proba), 8)}")
    print(f"Predicted: {label}")

    # accuracy_test()
