import cv2
import mediapipe as mp
from mediapipe.tasks.python import vision, BaseOptions
import matplotlib.pyplot as plt
import numpy as np

# %%
def visualize_blendshapes(result):
    """Render a horizontal bar chart of blendshape scores for the first face.

    Prints a message and returns early when the detection result contains
    no face blendshapes.
    """
    if not result.face_blendshapes:
        print("No face blendshapes detected.")
        return

    face = result.face_blendshapes[0]
    labels = []
    values = []
    # Suffix each category name with its index so duplicate names stay distinct.
    for idx, shape in enumerate(face):
        labels.append(f"{shape.category_name}_{idx}")
        values.append(shape.score)

    plt.figure(figsize=(12, 20))
    positions = range(len(labels))
    plt.barh(positions, values)
    plt.yticks(positions, labels)
    plt.xlabel('Score')
    plt.title('Face BlendShapes Visualization')
    plt.tight_layout()

def visualize_landmarks(image, result):
    """Plot the first face's landmarks as red dots over the input image.

    Args:
        image: BGR image as a numpy array of shape (H, W, 3); converted to
            RGB for matplotlib display.
        result: FaceLandmarker detection result; reads result.face_landmarks.
    """
    # Guard against "no face detected" — consistent with the other two
    # visualizers; the original raised IndexError on an empty list here.
    if not result.face_landmarks:
        print("No face landmarks detected.")
        return

    landmarks = result.face_landmarks[0]
    fig, ax = plt.subplots()
    ax.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

    # Landmark coordinates are normalized to [0, 1]; scale to pixel space.
    for landmark in landmarks:
        ax.plot(landmark.x * image.shape[1], landmark.y * image.shape[0], 'ro', markersize=1)

    plt.title('Face Landmarks')
    plt.axis('off')

def visualize_transformation_matrix(result):
    """Visualize the first face's facial transformation matrix in 3D.

    Draws the original (world) coordinate frame at the origin, the rotated
    frame offset by the (scaled) translation, and the translation vector
    itself on a 3D matplotlib axis. Returns early (with a message) when the
    result contains no transformation matrix.
    """
    if not result.facial_transformation_matrixes:
        print("No facial transformation matrix detected.")
        return
    # NOTE(review): assumes a 4x4 numpy array — rotation in the upper-left
    # 3x3 block, translation in the last column. TODO confirm against the
    # MediaPipe FaceLandmarker result type.
    matrix = result.facial_transformation_matrixes[0]
    fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot(111, projection='3d')
    # Plot original coordinate system: unit basis vectors shrunk by `scale`
    # so the arrows stay readable next to the translation vector.
    scale = 0.1
    origin = [0, 0, 0]
    X, Y, Z = np.array([[1,0,0], [0,1,0], [0,0,1]]) * scale
    ax.quiver(*origin, *X, color='r', label='Original X')
    ax.quiver(*origin, *Y, color='g', label='Original Y')
    ax.quiver(*origin, *Z, color='b', label='Original Z')
    # Extract rotation and translation from the matrix; translation is
    # scaled by 1e-3 (presumably millimeters -> meters — TODO confirm units).
    rotation = matrix[:3, :3]
    translation = matrix[:3, 3] * 1e-3
    translation[2] *= 0  # ignore the z axis (zero out depth translation)
    # Plot transformed coordinate system: the rotated basis vectors, drawn
    # dashed and anchored at the translated position.
    transformed_X = rotation[:, 0] * scale
    transformed_Y = rotation[:, 1] * scale
    transformed_Z = rotation[:, 2] * scale
    ax.quiver(*translation, *transformed_X, color='r', linestyle='dashed', label='Transformed X')
    ax.quiver(*translation, *transformed_Y, color='g', linestyle='dashed', label='Transformed Y')
    ax.quiver(*translation, *transformed_Z, color='b', linestyle='dashed', label='Transformed Z')
    # Plot translation vector from the origin to the transformed frame.
    ax.quiver(*origin, *translation, color='m', linewidth=2, label='Translation')
    # Set symmetric plot limits around the largest extent.
    # NOTE(review): uses maxima only — if every translation component is
    # negative this underestimates the needed range; harmless for display.
    max_range = np.array([X.max(), Y.max(), Z.max(), translation[0], translation[1], translation[2]]).max()
    ax.set_xlim([-max_range, max_range])
    ax.set_ylim([-max_range, max_range])
    ax.set_zlim([-max_range, max_range])
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    ax.set_title('Facial Transformation Matrix')
    # Add a legend
    ax.legend()

# %%
# Initialize MediaPipe Face Mesh
# Build a FaceLandmarker that also outputs blendshapes and the facial
# transformation matrix, limited to a single face per image.
base_options = BaseOptions(model_asset_path="./data_utils/blendshape_capture/face_landmarker.task")
options = vision.FaceLandmarkerOptions(base_options=base_options,
                                       output_face_blendshapes=True,
                                       output_facial_transformation_matrixes=True,
                                       num_faces=1)
detector = vision.FaceLandmarker.create_from_options(options)


# Usage
# image_path = 'data/zara/ori_imgs/0.jpg' 
image_path = 'data/Simplilearn/ori_imgs/510.jpg' 
# Read the image
image = mp.Image.create_from_file(image_path)
# Keep a numpy view for the landmark overlay (mp.Image -> ndarray, no copy).
image_np = image.numpy_view()

# Process the image
result = detector.detect(image)
# World-to-camera transform of the detected face.
# NOTE(review): raises IndexError if no face was detected in the image.
w2c = result.facial_transformation_matrixes[0]

# %%
# Render all three visualizations, then block until the windows are closed.
visualize_blendshapes(result)
visualize_landmarks(image_np, result)
visualize_transformation_matrix(result)
plt.show(block=True)