import cv2
import numpy as np
import os
import matplotlib.pyplot as plt
from pathlib import Path
import time
from matplotlib.patches import Polygon
import threading
from concurrent.futures import ThreadPoolExecutor

class MergedPosePositionVisualizer:
    """Merged pose/position visualizer: pose panel on the left, position panel on the right."""

    def __init__(self, rect_size=(0.8, 2.5), scale_factor=100):
        """
        Initialize the merged visualizer.

        Args:
            rect_size: Rectangle size (width, length) in meters. Defaults to (0.8, 2.5).
            scale_factor: Meters-to-pixels conversion factor. Defaults to 100
                (i.e. 1 meter = 100 pixels).
        """
        # Parameters for the position panel.
        self.rect_size = rect_size
        self.scale_factor = scale_factor
        self.rect_width, self.rect_height = rect_size
        self.max_vec_length = max(self.rect_width, self.rect_height) * self.scale_factor
        self.position_canvas_width = int(self.max_vec_length + 200)
        self.position_canvas_height = int(self.max_vec_length + 200)

        # Keypoint colors for the pose panel (OpenCV BGR order).
        self.pose_colors = {
            'left_top': (0, 255, 0),        # green
            'left_0': (255, 0, 255),        # magenta
            'left_bottom': (255, 0, 0),     # blue
            'bottom_0': (0, 128, 128),      # dark cyan
            'right_bottom': (0, 255, 255),  # yellow
            'right_0': (255, 255, 0),       # cyan
            'right_top': (0, 0, 255),       # red
            'top_0': (128, 0, 128),         # dark purple
        }

        # Order in which keypoints are connected (closed loop; the first
        # point is repeated at the end).
        self.connect_order = [
            'left_top', 'left_0', 'left_bottom',
            'bottom_0', 'right_bottom', 'right_0',
            'right_top', 'top_0', 'left_top'
        ]

        # Font settings for keypoint labels.
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.font_scale = 0.4
        self.font_thickness = 1

        # Output directory for the merged images.
        self.output_dir = "results_merged_visualization"
        os.makedirs(self.output_dir, exist_ok=True)

        # Non-interactive backend so rendering works in headless environments.
        plt.switch_backend('Agg')

        # Lock kept for backward compatibility with threaded callers.
        self._lock = threading.Lock()

        print(f"合并可视化器初始化完成，输出目录: {self.output_dir}")

    def _compute_rectangle_points(self, vector):
        """Compute the four corner points (in pixels) of the rectangle described by *vector*.

        Args:
            vector: Dict with 'center' (x, y in meters) and 'left_vec' /
                'bottom_vec' direction vectors (only the first two components
                are used), or None.

        Returns:
            A (4, 2) ndarray of corners ordered bottom-left, bottom-right,
            top-right, top-left, or None when *vector* is None.
        """
        if vector is None:
            return None

        center_x, center_y = vector['center'][0], vector['center'][1]
        left_vec = vector['left_vec'][:2]
        bottom_vec = vector['bottom_vec'][:2]

        len_left = np.linalg.norm(left_vec)
        len_bottom = np.linalg.norm(bottom_vec)

        # Scale each direction to the configured physical edge length in
        # pixels; degenerate (near-zero) vectors fall back to axis-aligned
        # defaults.
        if len_left > 1e-5:
            left_vec_scaled = np.array(left_vec) / len_left * self.rect_height * self.scale_factor
        else:
            left_vec_scaled = np.array([0, self.rect_height * self.scale_factor])

        if len_bottom > 1e-5:
            bottom_vec_scaled = np.array(bottom_vec) / len_bottom * self.rect_width * self.scale_factor
        else:
            bottom_vec_scaled = np.array([self.rect_width * self.scale_factor, 0])

        center = np.array([center_x * self.scale_factor, center_y * self.scale_factor])

        # Force the bottom edge to be perpendicular to the left edge, picking
        # whichever perpendicular direction is closest to the provided bottom
        # vector (largest dot product).
        if len_left > 1e-5:
            perp1 = np.array([left_vec_scaled[1], -left_vec_scaled[0]]) / np.linalg.norm(left_vec_scaled)
            perp2 = -perp1

            if np.dot(bottom_vec_scaled, perp1) > np.dot(bottom_vec_scaled, perp2):
                adjusted_bottom_vec = perp1 * np.linalg.norm(bottom_vec_scaled)
            else:
                adjusted_bottom_vec = perp2 * np.linalg.norm(bottom_vec_scaled)
        else:
            adjusted_bottom_vec = bottom_vec_scaled

        bottom_left = center
        bottom_right = center + adjusted_bottom_vec
        top_left = center + left_vec_scaled
        top_right = top_left + adjusted_bottom_vec

        return np.array([bottom_left, bottom_right, top_right, top_left])

    def _draw_rectangle(self, vector, ax, label_prefix="Standard", color=(0, 1, 0), thickness=2):
        """Draw the rectangle described by *vector* on matplotlib axes *ax*.

        No-op when *vector* is None or yields no corner points.
        """
        if vector is None:
            return

        points = self._compute_rectangle_points(vector)
        if points is None:
            return

        polygon = Polygon(points, closed=True, edgecolor=color,
                          linewidth=thickness, fill=False, label=f"{label_prefix}Rect")
        ax.add_patch(polygon)

    def _create_pose_image(self, pose_dict, st_pose_dict, image_input):
        """Create the pose visualization image (left panel).

        Args:
            pose_dict: Current pose keypoints (name -> (x, y)), drawn in red.
            st_pose_dict: Reference pose keypoints, drawn in green.
            image_input: Image path (str/PathLike) or an image array (ndarray).

        Returns:
            The annotated BGR image, resized to 720 px height.
        """
        # Accept either a path or an already-loaded array; fall back to a
        # plain white canvas when the input is unreadable or unsupported.
        if isinstance(image_input, (str, os.PathLike)):
            image = cv2.imread(str(image_input))
            if image is None:
                image = np.ones((400, 600, 3), dtype=np.uint8) * 255
        elif isinstance(image_input, np.ndarray):
            # Copy so annotations don't mutate the caller's array.
            image = image_input.copy()
        else:
            image = np.ones((400, 600, 3), dtype=np.uint8) * 255

        # Resize to a fixed display height, preserving aspect ratio.
        target_height = 720
        h, w = image.shape[:2]
        target_width = int(w * target_height / h)
        image = cv2.resize(image, (target_width, target_height))

        # Reference pose (green) first so the current pose is drawn on top.
        if st_pose_dict is not None:
            self._draw_pose_on_image(image, st_pose_dict, (0, 255, 0), is_reference=True)

        # Current pose (red).
        if pose_dict is not None:
            self._draw_pose_on_image(image, pose_dict, (0, 0, 255), is_reference=False)

        return image

    def _draw_pose_on_image(self, image, pose_dict, base_color, is_reference=False):
        """Draw a pose (closed polyline + keypoints) on *image* in place.

        Args:
            image: BGR image to draw on (modified in place).
            pose_dict: Mapping of keypoint name -> (x, y); falsy entries are skipped.
            base_color: BGR color for the polyline (and points when not a reference).
            is_reference: When True, draw small gray dots and no labels.
        """
        if pose_dict is None:
            return

        # Collect the keypoints that are present, in connection order.
        valid_points = []
        point_data = []

        for name in self.connect_order:
            if name in pose_dict and pose_dict[name]:
                x, y = pose_dict[name]
                point = (int(x), int(y))
                valid_points.append(point)

                if not is_reference:  # only the current pose gets colored labels
                    color = self.pose_colors.get(name, base_color)
                    point_data.append((point, name, color))

        # Connecting polyline.
        if len(valid_points) >= 2:
            cv2.polylines(image, [np.array(valid_points)],
                          isClosed=True, color=base_color, thickness=2)

        # Keypoints.
        for point in valid_points:
            if is_reference:
                cv2.circle(image, point, 3, (192, 192, 192), -1)  # gray reference dots
            else:
                cv2.circle(image, point, 5, base_color, -1)

        # Labels (non-reference pose only).
        if not is_reference:
            for point, name, color in point_data:
                cv2.putText(image, name, (point[0] + 10, point[1]),
                            self.font, self.font_scale, color, self.font_thickness)

    def _create_position_plot(self, standard_vector, test_vector, filename):
        """Create the position visualization chart (right panel) as a BGR image.

        Args:
            standard_vector: Reference rectangle description (green), or None.
            test_vector: Measured rectangle description (blue), or None.
            filename: Label embedded in the plot title.

        Returns:
            The rendered chart as a BGR ndarray.
        """
        fig, ax = plt.subplots(figsize=(6, 6))
        ax.set_xlim(-50, self.position_canvas_width)
        ax.set_ylim(-50, self.position_canvas_height)
        ax.set_aspect('equal')
        # Fix: embed the filename argument in the title (was a hard-coded
        # "(unknown)" placeholder and the parameter was unused).
        ax.set_title(f"Position: Standard vs Test ({filename})", fontsize=10)

        self._draw_rectangle(standard_vector, ax,
                             label_prefix="Standard", color=(0, 1, 0), thickness=2)
        self._draw_rectangle(test_vector, ax,
                             label_prefix="Test", color=(0, 0, 1), thickness=2)

        ax.legend()

        # Render the figure and grab the pixel buffer. buffer_rgba() replaces
        # tostring_rgb(), which was deprecated in Matplotlib 3.8 and removed
        # in later releases.
        fig.canvas.draw()
        buf = np.asarray(fig.canvas.buffer_rgba())

        # Convert RGBA -> BGR for OpenCV.
        position_image = cv2.cvtColor(buf, cv2.COLOR_RGBA2BGR)

        plt.close(fig)  # release figure memory
        return position_image

    def visualize_merged(self, pose_dict, st_pose_dict, standard_vector, test_vector, image_input, timestamp=None):
        """
        Create the merged visualization: pose on the left, position on the right.

        Args:
            pose_dict: Current pose dict.
            st_pose_dict: Reference pose dict.
            standard_vector: Reference position vector.
            test_vector: Measured position vector.
            image_input: Image path (str) or image array (numpy.ndarray).
            timestamp: Optional timestamp used as the output file name.

        Returns:
            Path of the saved merged image, or None on failure.
        """
        try:
            # Derive the output file name: explicit timestamp > input file
            # name > current wall-clock time.
            if timestamp is not None:
                filename = f"{timestamp}.png"
            elif isinstance(image_input, (str, os.PathLike)):
                filename = os.path.basename(str(image_input))
            else:
                import datetime
                current_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:-3]
                filename = f"merged_{current_time}.png"

            # Left panel: pose overlay.
            pose_image = self._create_pose_image(pose_dict, st_pose_dict, image_input)

            # Right panel: position chart.
            position_image = self._create_position_plot(standard_vector, test_vector, filename)

            # Bring both panels to the same height before concatenating.
            pose_height = pose_image.shape[0]
            position_height = position_image.shape[0]
            target_height = max(pose_height, position_height)

            if pose_height != target_height:
                pose_width = int(pose_image.shape[1] * target_height / pose_height)
                pose_image = cv2.resize(pose_image, (pose_width, target_height))

            if position_height != target_height:
                position_width = int(position_image.shape[1] * target_height / position_height)
                position_image = cv2.resize(position_image, (position_width, target_height))

            # Concatenate horizontally, with a gray divider at the seam.
            merged_image = np.hstack([pose_image, position_image])
            split_x = pose_image.shape[1]
            cv2.line(merged_image, (split_x, 0), (split_x, target_height), (128, 128, 128), 2)

            # Panel titles.
            cv2.putText(merged_image, "POSE", (20, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
            cv2.putText(merged_image, "POSITION", (split_x + 20, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)

            # Save the merged image. Fix: embed the actual file name in the
            # output path (was the literal placeholder "merged_(unknown)").
            # The quality flag only takes effect for JPEG outputs and is
            # ignored by OpenCV for PNG.
            output_path = os.path.join(self.output_dir, f"merged_{filename}")
            cv2.imwrite(output_path, merged_image, [cv2.IMWRITE_JPEG_QUALITY, 90])

            print(f"合并结果已保存至: {output_path}")
            return output_path

        except Exception as e:
            import traceback
            print(f"合并可视化过程中发生错误: {e}")
            print(traceback.format_exc())
            return None

    def batch_visualize(self, data_list):
        """
        Visualize a batch of entries sequentially.

        Args:
            data_list: List of dicts, each with keys:
                {
                    'pose_dict': current pose dict,
                    'st_pose_dict': reference pose dict,
                    'standard_vector': reference position vector,
                    'test_vector': measured position vector,
                    'image_input': image path or image array,
                    'timestamp': optional timestamp
                }

        Returns:
            List of output paths for the entries that succeeded.
        """
        results = []
        total = len(data_list)

        print(f"开始批量处理 {total} 个图像...")

        for i, data in enumerate(data_list, 1):
            print(f"处理进度: {i}/{total}")

            result = self.visualize_merged(
                data.get('pose_dict'),
                data.get('st_pose_dict'),
                data.get('standard_vector'),
                data.get('test_vector'),
                data.get('image_input'),
                data.get('timestamp')
            )

            if result:
                results.append(result)

        print(f"批量处理完成，成功处理 {len(results)} 个图像")
        return results

# Compatibility classes that preserve the original interfaces
class OptimizedPositionVisualizer(MergedPosePositionVisualizer):
    """Backward-compatible PositionVisualizer facade."""

    def visualize_position(self, standard_vector, test_vector, image_input, timestamp=None):
        # Delegate to the merged renderer with both pose inputs omitted.
        return self.visualize_merged(
            None, None, standard_vector, test_vector, image_input, timestamp
        )

class OptimizedPoseVisualizer(MergedPosePositionVisualizer):
    """Backward-compatible PoseVisualizer facade."""

    def visualize_pose(self, pose_dict, st_pose_dict, image_input, output_dir=None, timestamp=None):
        # Honor a caller-supplied output directory before delegating.
        if output_dir and output_dir != self.output_dir:
            self.output_dir = output_dir
            os.makedirs(self.output_dir, exist_ok=True)
        # Delegate to the merged renderer with both position vectors omitted.
        return self.visualize_merged(
            pose_dict, st_pose_dict, None, None, image_input, timestamp
        )

class PositionVisualizer(OptimizedPositionVisualizer):
    """Backward-compatible alias for OptimizedPositionVisualizer."""
    pass

class PoseVisualizer(OptimizedPoseVisualizer):
    """Backward-compatible alias for OptimizedPoseVisualizer."""
    pass

# Performance test and example usage
def test_merged_visualizer():
    """Smoke-test the merged visualizer with synthetic pose/position data.

    Exercises single-image visualization (both path and ndarray inputs) and
    the batch API, printing rough timing numbers for each.

    Returns:
        The MergedPosePositionVisualizer instance used for the test.
    """
    import tempfile

    # Synthetic position vectors (center in meters, edge direction vectors).
    test_vector = {
        'center': [1.0, 1.5],
        'left_vec': [0, 2.5, 0],
        'bottom_vec': [0.8, 0, 0]
    }

    standard_vector = {
        'center': [0.5, 1.0],
        'left_vec': [0, 2.5, 0],
        'bottom_vec': [0.8, 0, 0]
    }

    # Synthetic pose keypoints (pixel coordinates).
    pose_dict = {
        'left_top': (100, 100),
        'left_0': (100, 200),
        'left_bottom': (100, 300),
        'bottom_0': (200, 300),
        'right_bottom': (300, 300),
        'right_0': (300, 200),
        'right_top': (300, 100),
        'top_0': (200, 100)
    }

    st_pose_dict = {
        'left_top': (120, 120),
        'left_0': (120, 220),
        'left_bottom': (120, 320),
        'bottom_0': (220, 320),
        'right_bottom': (320, 320),
        'right_0': (320, 220),
        'right_top': (320, 120),
        'top_0': (220, 120)
    }

    # Write a throwaway test image. Fix: use the platform temp directory
    # instead of a hard-coded "/tmp" path so this also works on Windows.
    test_image_path = os.path.join(tempfile.gettempdir(), "test_image.png")
    test_image = np.ones((640, 480, 3), dtype=np.uint8) * 255
    cv2.imwrite(test_image_path, test_image)

    # Build the merged visualizer under test.
    visualizer = MergedPosePositionVisualizer()

    # Single merged visualization from an image path.
    print("测试单个合并可视化...")
    start_time = time.time()
    result = visualizer.visualize_merged(
        pose_dict, st_pose_dict,
        standard_vector, test_vector,
        test_image_path,
        timestamp="test_sample"
    )
    single_time = time.time() - start_time
    print(f"单个合并可视化耗时: {single_time*1000:.2f}ms")

    # Single merged visualization from an ndarray input.
    print("\n测试直接传入numpy数组...")
    start_time = time.time()
    result2 = visualizer.visualize_merged(
        pose_dict, st_pose_dict,
        standard_vector, test_vector,
        test_image,  # pass the ndarray directly
        timestamp="numpy_array_test"
    )
    array_time = time.time() - start_time
    print(f"numpy数组输入耗时: {array_time*1000:.2f}ms")

    # Batch processing of five identical entries.
    print("\n测试批量处理...")
    batch_data = [
        {
            'pose_dict': pose_dict,
            'st_pose_dict': st_pose_dict,
            'standard_vector': standard_vector,
            'test_vector': test_vector,
            'image_input': test_image,  # ndarray input
            'timestamp': f"batch_test_{i+1:03d}"
        }
        for i in range(5)
    ]

    start_time = time.time()
    results = visualizer.batch_visualize(batch_data)
    batch_time = time.time() - start_time
    print(f"批量处理5个图像总耗时: {batch_time*1000:.2f}ms")
    print(f"平均每个图像耗时: {batch_time/5*1000:.2f}ms")

    return visualizer

# Run the demo/benchmark when executed as a script.
if __name__ == "__main__":
    test_merged_visualizer()