import cv2
import os
import sys
package_path = "/system/lib"
if package_path not in sys.path:
    sys.path.insert(0,package_path)

import numpy as np

def calculate_bias(original_image):
    """Estimate lateral bias of the road midline relative to the image center.

    The input is binarized and morphologically cleaned, straight lines are
    fitted to the left/right road boundaries (black regions), and the bias is
    how far the fitted midline sits from the horizontal image center at
    mid-height, normalized to [-100, 100].

    Args:
        original_image: single-channel grayscale image — assumed; a BGR frame
            would change how cv2.threshold behaves here (TODO confirm callers
            always pass grayscale/binary).

    Returns:
        (bias, annotated): bias is a float in [-100, 100] (sign flipped below:
        negative when the midline is right of center — confirm this matches the
        steering convention), or None when no boundary points were found;
        annotated is always a 3-channel BGR visualization image.
    """
    # Apply fixed thresholding for binarization
    _, binary = cv2.threshold(original_image, 115, 255, cv2.THRESH_BINARY)
    
    # Apply morphological operations to clean up the image
    kernel = np.ones((5, 5), np.uint8)
    cleaned_image = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
    cleaned_image = cv2.morphologyEx(cleaned_image, cv2.MORPH_OPEN, kernel)
    
    height = cleaned_image.shape[0]
    width = cleaned_image.shape[1]
    
    # Calculate image midpoint
    mid_x = width // 2
    mid_y = height // 2
    
    # Find road boundaries (black regions)
    road_region = cleaned_image == 0
    
    # Sample points along y-axis
    y_samples = np.linspace(0, height-1, 20).astype(int)
    left_points = []
    right_points = []
    
    for y in y_samples:
        # Find black pixels in this row
        black_pixels = np.where(road_region[y, :])[0]
        if len(black_pixels) > 0:
            # Get leftmost and rightmost black pixels
            left_points.append([black_pixels[0], y])
            right_points.append([black_pixels[-1], y])
    
    if len(left_points) > 0 and len(right_points) > 0:
        left_points = np.array(left_points)
        right_points = np.array(right_points)
        
        # Fit lines to boundary points: x as a linear function of y
        # (degree-1 polyfit), so near-vertical boundaries are well-conditioned.
        left_fit = np.polyfit(left_points[:, 1], left_points[:, 0], 1)
        right_fit = np.polyfit(right_points[:, 1], right_points[:, 0], 1)
        
        # Convert to BGR for colored visualization.
        # NOTE: the parameter name is reused here — from this point on,
        # `original_image` is the cleaned BGR visualization, not the input.
        original_image = cv2.cvtColor(cleaned_image, cv2.COLOR_GRAY2BGR)
        
        # Generate points for drawing lines. y2 == height is one past the last
        # row index, but it is only used for extrapolation/drawing, so it is safe.
        y1, y2 = 0, height
        left_x1 = int(left_fit[0] * y1 + left_fit[1])
        left_x2 = int(left_fit[0] * y2 + left_fit[1])
        right_x1 = int(right_fit[0] * y1 + right_fit[1])
        right_x2 = int(right_fit[0] * y2 + right_fit[1])
        
        # Draw boundary lines
        cv2.line(original_image, (left_x1, y1), (left_x2, y2), (255, 0, 0), 2)  # Blue
        cv2.line(original_image, (right_x1, y1), (right_x2, y2), (0, 255, 0), 2)  # Green
        
        # Calculate and draw midline
        mid_x1 = (left_x1 + right_x1) // 2
        mid_x2 = (left_x2 + right_x2) // 2
        cv2.line(original_image, (mid_x1, y1), (mid_x2, y2), (0, 0, 255), 2)  # Red
        
        # Draw center point
        cv2.drawMarker(original_image, (mid_x, mid_y), (255, 255, 0), cv2.MARKER_TILTED_CROSS, 20, 2)
        
        # Calculate intersection of midline with mid_y: linear interpolation of
        # x along the midline segment, which spans y = 0 .. height.
        try:
            mid_x_at_height = int(((mid_x2 - mid_x1) * mid_y / height) + mid_x1)
            mid_x_at_height = np.clip(mid_x_at_height, 0, width-1)
            
            # Draw line between center point and midline (horizontal segment at mid_y)
            cv2.line(original_image, (mid_x, mid_y), (mid_x_at_height, mid_y), (255, 255, 0), 2)
            
            # Calculate normalized bias (-100 to 100). The leading minus flips
            # the sign: midline right of center => negative bias.
            bias = -100 * (mid_x_at_height - mid_x) / (width/2)
            bias = np.clip(bias, -100, 100)
            
            return bias, original_image
            
        except Exception as e:
            # Defensive: the arithmetic above is unlikely to raise; kept as a
            # best-effort guard so the caller still gets the visualization.
            print(f"Error calculating bias: {e}")
            return None, original_image
    
    # No boundary points found in any sampled row.
    return None, cv2.cvtColor(cleaned_image, cv2.COLOR_GRAY2BGR)

def analyze_horizontal_line(cleaned_image, original_image):
    """Classify a horizontal marker line in a cleaned binary image.

    A row counts as part of a marker when at least 60% of its pixels are
    zero (black). With two or more such rows, the distribution of black
    pixels above the first marker row versus below the last one decides
    the classification.

    Args:
        cleaned_image: 2-D binary (grayscale) image, black == 0.
        original_image: unused; kept for interface compatibility.

    Returns:
        0 for a start line, 1 for an end line, 2 when no clear marker rows.
    """
    height, width = cleaned_image.shape

    # Rows whose black-pixel count reaches 60% of the width.
    dark_per_row = np.sum(cleaned_image == 0, axis=1)
    marker_rows = np.flatnonzero(dark_per_row >= 0.6 * width)

    # Fewer than two marker rows: nothing to classify.
    if marker_rows.size < 2:
        print("Detected: Straight Line (no clear boundary rows)")
        return 2

    top_row = marker_rows[0]
    bottom_row = marker_rows[-1]

    # Black pixels strictly above the first marker row, and from the last
    # marker row down to the bottom edge.
    zeros_above = np.count_nonzero(cleaned_image[:top_row] == 0)
    zeros_below = np.count_nonzero(cleaned_image[bottom_row:] == 0)

    if zeros_above > zeros_below:
        print("Detected: Start Line (more zeros above)")
        return 0

    print("Detected: End Line (more zeros below)")
    return 1

def infer_from_image(image_path):
    """Run the full line-following analysis on one image file and display it.

    Loads the image, classifies any horizontal marker line (start/end/
    straight), computes the midline bias, and shows a 2x2 diagnostic grid
    (original / thresholded / cleaned / annotated) in an OpenCV window,
    blocking until a key is pressed.

    Args:
        image_path: filesystem path readable by cv2.imread.

    Returns:
        None. Results are printed and displayed only.
    """
    # Read the image
    frame = cv2.imread(image_path)
    if frame is None:
        print(f"Error: Could not read image from {image_path}")
        return

    try:
        # Convert to grayscale
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        
        # Apply fixed thresholding (same 115/255 parameters that
        # calculate_bias applies internally)
        _, binary = cv2.threshold(gray_frame, 115, 255, cv2.THRESH_BINARY)
        
        # Apply morphological operations to clean up the image
        kernel = np.ones((5, 5), np.uint8)
        cleaned_image = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
        cleaned_image = cv2.morphologyEx(cleaned_image, cv2.MORPH_OPEN, kernel)
        
        # Get pred_idx from horizontal line analysis
        # (second argument is unused by analyze_horizontal_line)
        pred_idx = analyze_horizontal_line(cleaned_image, frame.copy())
        print(f"pred_idx: {pred_idx}")
        
        # Calculate bias. Note: calculate_bias re-thresholds and re-cleans
        # its input, so passing the already-binarized image is redundant but
        # harmless (thresholding a binary image is idempotent here).
        bias, annotated_frame = calculate_bias(binary)

        # Create a 2x2 visualization grid (white background)
        height, width = frame.shape[:2]
        canvas = np.ones((height * 2, width * 2, 3), dtype=np.uint8) * 255
        
        # Convert binary frame to 3 channels for display
        binary_3ch = cv2.cvtColor(binary, cv2.COLOR_GRAY2BGR)
        cleaned_image_3ch = cv2.cvtColor(cleaned_image, cv2.COLOR_GRAY2BGR)
        
        # Place images in 2x2 grid
        canvas[:height, :width] = frame  # Top-left: Original
        canvas[:height, width:] = binary_3ch  # Top-right: Preprocessed
        canvas[height:, :width] = cleaned_image_3ch  # Bottom-left: Cleaned
        canvas[height:, width:] = annotated_frame  # Bottom-right: Annotated - already in BGR

        # Add labels
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(canvas, 'Original', (10, 30), font, 1, (0, 0, 0), 2)
        cv2.putText(canvas, 'Preprocessed', (width + 10, 30), font, 1, (0, 0, 0), 2)
        cv2.putText(canvas, 'Cleaned', (10, height + 30), font, 1, (0, 0, 0), 2)
        cv2.putText(canvas, 'Annotated', (width + 10, height + 30), font, 1, (0, 0, 0), 2)
        
        # TS_class/TS_pos are only printed here — presumably telemetry values
        # consumed by the tracking-car controller in the full project; verify
        # against the deployed pipeline.
        TS_pos = 50
        TS_class = 1
        if bias is not None:
            # Add text with results.
            # Mapping: 0 = start line, 1 = end line, 2/3 = steer left/right
            # with TS_pos holding the bias magnitude.
            if pred_idx == 0:
                TS_class = 0
                class_text = "Start"
            elif pred_idx == 1:
                TS_class = 1
                class_text = "End"
            elif pred_idx == 2 and bias < 0:
                TS_class = 2
                class_text = "Left"
                TS_pos = int(abs(bias))
            elif pred_idx == 2 and bias >= 0:
                TS_class = 3
                class_text = "Right"
                TS_pos = int(abs(bias))
            else:
                # Defensive fallback; unreachable for pred_idx in {0, 1, 2}
                # with a numeric bias.
                TS_class = 1
                class_text = "End"
            print(f"TS_class: {TS_class}, TS_pos: {TS_pos}")
            
            text_y = height * 2 - 30
            cv2.putText(canvas, f'Class: {pred_idx}', (10, text_y), font, 0.7, (0, 0, 0), 2)
            cv2.putText(canvas, f'Bias: {bias:.2f}', (width + 10, text_y), font, 0.7, (0, 0, 0), 2)

        # Display the canvas and wait for a keypress
        cv2.imshow('Analysis Results', canvas)
        cv2.waitKey(0)

    finally:
        # Always tear down OpenCV windows, even if the pipeline raised.
        cv2.destroyAllWindows()

if __name__ == '__main__':
    # Allow the image path to be supplied on the command line:
    #   python script.py path/to/image.png
    # Falls back to the original hard-coded path for backward compatibility.
    default_path = 'C:\\Users\\jxk33\\Desktop\\code\\tracking_car\\dataimages_160.png'
    image_path = sys.argv[1] if len(sys.argv) > 1 else default_path
    infer_from_image(image_path)