import cv2
import numpy as np
import time
import gc
import sys
import os
import argparse
import subprocess
import signal
import datetime
import select
import struct
import threading

# Auto-detect screen resolution
def get_screen_resolution():
    """Best-effort detection of the display resolution.

    Tries, in order:
      1. ``fbset -s`` (framebuffer geometry; common on embedded Linux),
      2. ``xrandr`` (when an X11 server is available),
      3. a 640x480 fallback.

    Returns:
        (width, height) as ints; never raises.
    """
    # Method 1: fbset.  Kept in its own try-block so a missing fbset binary
    # still lets the xrandr fallback run -- previously both methods shared
    # one try-block, making Method 2 unreachable whenever fbset raised.
    try:
        output = subprocess.check_output(['fbset', '-s']).decode('utf-8')
        for line in output.split('\n'):
            if 'geometry' in line:
                parts = line.split()
                if len(parts) >= 3:
                    return int(parts[1]), int(parts[2])
    except Exception:
        pass

    # Method 2: X11 via xrandr; the shell pipeline extracts "WIDTHxHEIGHT"
    # from the line marked with "*" (the active mode).
    try:
        output = subprocess.check_output('xrandr | grep "\*" | cut -d" " -f4', shell=True).decode('utf-8')
        if 'x' in output:
            width, height = map(int, output.strip().split('x'))
            return width, height
    except Exception:
        pass

    # Method 3: safe default.
    return 640, 480

# Safe exit signal handler
def signal_handler(sig, frame):
    """SIGINT handler: ask the main loop to shut down cleanly."""
    # Mutating the shared one-element list is visible to the main loop;
    # nothing is rebound, so a ``global`` declaration is unnecessary.
    exit_flag[0] = True

# ---- Command-line interface and derived configuration ----------------------
# Parse command line arguments
parser = argparse.ArgumentParser(description='OpenCV Face Detection')
parser.add_argument('--width', type=int, default=None, help='Video width, auto-detect by default')
parser.add_argument('--height', type=int, default=None, help='Video height, auto-detect by default')
parser.add_argument('--scale', type=float, default=0.5, help='Processing scale factor')
# NOTE(review): camera index defaults to 1, not 0 -- presumably the external
# USB camera on the target board; confirm for your hardware.
parser.add_argument('--camera', type=int, default=1, help='Camera index')
parser.add_argument('--timeout', type=int, default=0, help='Program runtime in seconds, 0 for never timeout')
parser.add_argument('--fullscreen', action='store_true', help='Fullscreen display mode')
parser.add_argument('--save-path', type=str, default='./captured_images', help='Path to save captured images')
parser.add_argument('--auto-save', action='store_true', help='Automatically save frames with faces')
parser.add_argument('--save-interval', type=int, default=3, help='Minimum interval in seconds between auto-saves')
args = parser.parse_args()

# Create save directory if it doesn't exist; on failure fall back to the
# current directory so saving still works.
if not os.path.exists(args.save_path):
    try:
        os.makedirs(args.save_path)
    except Exception as e:
        args.save_path = './'

# Register SIGINT signal handler (Ctrl+C) for a clean shutdown.
signal.signal(signal.SIGINT, signal_handler)

# Auto-detect screen resolution
screen_width, screen_height = get_screen_resolution()

# Use command line parameters or auto-detected resolution, limit max values
# to 640x480 to avoid requesting modes the camera cannot deliver.
if args.width is not None:
    VIDEO_WIDTH = args.width
else:
    VIDEO_WIDTH = min(screen_width, 640)

if args.height is not None:
    VIDEO_HEIGHT = args.height
else:
    VIDEO_HEIGHT = min(screen_height, 480)

# Detection runs on a frame downscaled by this factor (0.5 = half size).
PROCESS_SCALE = args.scale

# Try different paths to load the cascade classifier: current working
# directory first, then the usual OpenCV system install locations
# (opencv4 and legacy opencv paths).
cascade_paths = [
    "haarcascade_frontalface_default.xml",
    "haarcascade_frontalface_alt2.xml",
    "lbpcascade_frontalface.xml",
    "/usr/local/share/opencv4/haarcascades/haarcascade_frontalface_default.xml",
    "/usr/local/share/opencv4/haarcascades/haarcascade_frontalface_alt2.xml",
    "/usr/local/share/opencv4/haarcascades/lbpcascade_frontalface.xml",
    "/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml",
    "/usr/share/opencv/haarcascades/haarcascade_frontalface_alt2.xml",
]

# Load face detector: the first path that exists AND yields a non-empty
# classifier wins.
face_cascade = None
for cascade_path in cascade_paths:
    if os.path.exists(cascade_path):
        face_cascade = cv2.CascadeClassifier(cascade_path)
        if not face_cascade.empty():
            break

# If unable to load any cascade classifier, detection is impossible;
# exit with status 1 (silently -- no console output by design).
if face_cascade is None or face_cascade.empty():
    sys.exit(1)

# Open the camera and request the configured resolution and 30 FPS.
# The camera may silently ignore these requests; actual frame size is
# whatever read() delivers.
video_capture = None
try:
    video_capture = cv2.VideoCapture(args.camera)
    video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, VIDEO_WIDTH)
    video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, VIDEO_HEIGHT)
    video_capture.set(cv2.CAP_PROP_FPS, 30)
    
    if not video_capture.isOpened():
        sys.exit(1)
except Exception as e:
    sys.exit(1)

# ---- Shared mutable state used by the main loop and the callbacks ----------
face_detections = []            # last detection result (small-frame coords)
process_this_frame = True       # gate: run detection only on some frames
last_process_time = time.time() # when detection last ran (throttle timer)
last_save_time = time.time()    # when auto-save last fired
frame_count = 0
start_time = time.time()
# One-element list so the signal handler and callbacks can flag an exit by
# mutation, without rebinding a module-level name.
exit_flag = [False]
display_save_message = False    # show the "IMAGE SAVED!" banner
save_message_time = 0           # when the banner was armed
frame_to_save = False           # set by Save button; consumed by main loop

# Initial placeholder geometry; the main loop overwrites x/y/w/h every frame
# to match where the buttons are actually drawn.
save_button = {'x': 10, 'y': 50, 'w': 100, 'h': 50, 'text': 'Save', 'color': (0, 120, 255)}
exit_button = {'x': 120, 'y': 50, 'w': 100, 'h': 50, 'text': 'Exit', 'color': (0, 0, 255)}
button_hover = None
button_clicked = None
last_touch_event = None

# Pressed-state visuals persist for button_press_duration seconds.
save_button_pressed = False
exit_button_pressed = False
button_press_duration = 0.5
save_button_press_time = 0
exit_button_press_time = 0

def check_button_press(x, y):
    """Hit-test (x, y) against the on-screen Save/Exit buttons.

    Triggers the matching action (schedule a save, or request exit) and
    returns True when a button was hit, False otherwise.
    """
    global frame_to_save, exit_flag, save_button, exit_button, save_button_pressed, exit_button_pressed, save_button_press_time, exit_button_press_time

    def _inside(btn):
        # Point-in-rectangle test against a button geometry dict.
        return (btn['x'] <= x <= btn['x'] + btn['w']
                and btn['y'] <= y <= btn['y'] + btn['h'])

    if _inside(save_button):
        # Schedule a snapshot and arm the pressed-state animation timer.
        frame_to_save = True
        save_button_pressed = True
        save_button_press_time = time.time()
        return True

    if _inside(exit_button):
        # Ask the main loop to terminate.
        exit_flag[0] = True
        exit_button_pressed = True
        exit_button_press_time = time.time()
        return True

    return False

class LinuxTouchInput:
    """Reads multi-touch events directly from /dev/input/event* devices.

    A background thread select()s on every readable event device and decodes
    Linux ``input_event`` records.  When a tracked contact is lifted
    (ABS_MT_TRACKING_ID reported as -1) the registered callback is invoked
    with the last known (x, y), debounced to at most one tap per 0.3 s.

    NOTE(review): coordinates are raw device units; whether they match screen
    pixels depends on the touch controller -- confirm on target hardware.
    """

    # Linux input event codes used below (see <linux/input-event-codes.h>).
    EV_ABS = 3
    ABS_MT_POSITION_X = 53
    ABS_MT_POSITION_Y = 54
    ABS_MT_TRACKING_ID = 57

    # struct input_event = struct timeval (two C longs) + __u16 type +
    # __u16 code + value.  Its size is platform dependent (16 bytes on
    # 32-bit, 24 on 64-bit), so it must be computed: the previous
    # hard-coded 16 made struct.unpack() raise (and be silently swallowed)
    # on 64-bit systems, disabling touch input entirely.
    EVENT_FORMAT = 'llHHI'
    EVENT_SIZE = struct.calcsize(EVENT_FORMAT)

    def __init__(self):
        self.touch_devices = []           # paths of readable event devices
        self.touch_pos = [0, 0]           # kept for API compatibility
        self.last_touch_time = 0          # debounce timestamp of last tap
        self.running = False
        self.thread = None
        self.touch_callback = None
        self.x = 0                        # latest ABS_MT_POSITION_X value
        self.y = 0                        # latest ABS_MT_POSITION_Y value
        self.touch_id = -1                # current tracking id (-1 = none)
        self.last_touch_coordinates = {}  # tracking id -> last [x, y]
        self.find_touch_devices()

    def find_touch_devices(self):
        """Collect every /dev/input/event* node this process can open."""
        import glob
        devices = []
        for dev_path in glob.glob('/dev/input/event*'):
            try:
                # Opening (and immediately closing) proves read permission.
                with open(dev_path, 'rb'):
                    devices.append(dev_path)
            except Exception:
                pass
        self.touch_devices = devices
        return devices

    def start(self, callback):
        """Launch the reader thread; returns False if already running."""
        if self.thread and self.thread.is_alive():
            return False

        self.touch_callback = callback
        self.running = True
        self.thread = threading.Thread(target=self._input_thread)
        self.thread.daemon = True  # never block interpreter shutdown
        self.thread.start()
        return True

    def stop(self):
        """Ask the reader thread to stop and wait briefly for it."""
        self.running = False
        if self.thread:
            self.thread.join(1.0)

    def _input_thread(self):
        """Background loop: decode input events and fire the tap callback."""
        if not self.touch_devices:
            return

        fds = []
        for dev_path in self.touch_devices:
            try:
                fds.append(open(dev_path, 'rb'))
            except Exception:
                pass

        if not fds:
            return

        # Map descriptor number back to its file object for select().
        by_fileno = {fd.fileno(): fd for fd in fds}
        fd_list = list(by_fileno)

        device_info = {}  # diagnostic record of (type, code) pairs seen per fd

        try:
            while self.running:
                # Short timeout so ``self.running`` is re-checked regularly.
                readable, _, _ = select.select(fd_list, [], [], 0.1)

                for fdnum in readable:
                    fd = by_fileno[fdnum]
                    try:
                        event_data = fd.read(self.EVENT_SIZE)
                        if not event_data or len(event_data) < self.EVENT_SIZE:
                            continue

                        (_sec, _usec, ev_type, ev_code,
                         ev_value) = struct.unpack(self.EVENT_FORMAT, event_data)

                        info = device_info.setdefault(fdnum, {'events': set()})
                        info['events'].add((ev_type, ev_code))

                        if ev_type != self.EV_ABS:
                            continue

                        if ev_code == self.ABS_MT_POSITION_X:
                            self.x = ev_value
                        elif ev_code == self.ABS_MT_POSITION_Y:
                            self.y = ev_value
                        elif ev_code == self.ABS_MT_TRACKING_ID:
                            old_id = self.touch_id
                            self.touch_id = ev_value

                            # The kernel's -1 arrives as unsigned 0xFFFFFFFF.
                            if self.touch_id != 4294967295 and self.touch_id >= 0:
                                # New/continuing contact: remember its position.
                                self.last_touch_coordinates[self.touch_id] = [self.x, self.y]

                            elif self.touch_id == 4294967295 and old_id >= 0:
                                # Contact lifted: treat as a tap, debounced
                                # to one callback per 0.3 s.
                                current_time = time.time()
                                if (current_time - self.last_touch_time) > 0.3:
                                    self.last_touch_time = current_time
                                    if self.touch_callback:
                                        try:
                                            self.touch_callback(self.x, self.y)
                                        except Exception:
                                            # Never let a callback error
                                            # kill the reader thread.
                                            pass
                    except Exception:
                        pass
        finally:
            for fd in fds:
                try:
                    fd.close()
                except Exception:
                    pass

def save_image(frame, has_faces=False):
    """Write ``frame`` as a timestamped JPEG into ``args.save_path``.

    Args:
        frame: BGR image as produced by ``VideoCapture.read``.
        has_faces: when True the filename is tagged with ``_with_face``.

    Returns:
        (True, filename) on success, (False, None) on any failure.
    """
    try:
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        face_text = "_with_face" if has_faces else ""
        filename = os.path.join(args.save_path, f"captured_{timestamp}{face_text}.jpg")
        # cv2.imwrite signals failure by returning False rather than raising,
        # so the result must be checked explicitly -- otherwise an unwritable
        # path would still report success to the UI.
        if not cv2.imwrite(filename, frame):
            return False, None
        return True, filename
    except Exception:
        return False, None

def on_mouse_click(event, x, y, flags, param):
    """OpenCV mouse callback: forward left-button presses to the button hit-test."""
    # The global declaration in the original was a no-op: this function only
    # calls check_button_press(), which does the state mutation itself.
    if event == cv2.EVENT_LBUTTONDOWN:
        check_button_press(x, y)

def handle_touch_event(x, y):
    """Touch-screen callback: map a touch coordinate onto the UI buttons.

    The previous implementation hard-coded hit rectangles (50-150 x 450-500
    for Save, 500-600 x 450-500 for Exit) that did not match the buttons
    actually drawn each frame (Save at x=20, y=VIDEO_HEIGHT-70, 120x60;
    Exit at x=VIDEO_WIDTH-140).  Delegate to check_button_press(), which
    reads the live ``save_button``/``exit_button`` geometry that the main
    loop keeps up to date.

    NOTE(review): assumes touch coordinates are in display pixels; if the
    controller reports raw device units, scaling is needed here -- confirm
    on target hardware.
    """
    try:
        check_button_press(x, y)
    except Exception:
        # Runs on the touch-input thread: never let an error propagate
        # and kill it.
        pass

def draw_button(frame, button, is_hover=False):
    """Render a filled, outlined, labelled button onto ``frame``.

    ``button`` is a dict with keys x, y, w, h, text and color (BGR tuple).
    When ``is_hover`` is True the fill colour is lightened by 50 per channel.
    """
    b, g, r = button['color']
    fill = (b + 50, g + 50, r + 50) if is_hover else (b, g, r)
    top_left = (button['x'], button['y'])
    bottom_right = (button['x'] + button['w'], button['y'] + button['h'])

    # Filled body first, then a 1px white outline on top.
    cv2.rectangle(frame, top_left, bottom_right, fill, -1)
    cv2.rectangle(frame, top_left, bottom_right, (255, 255, 255), 1)

    # Centre the caption inside the rectangle.
    text_w, text_h = cv2.getTextSize(button['text'], cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
    origin = (button['x'] + (button['w'] - text_w) // 2,
              button['y'] + (button['h'] + text_h) // 2)
    cv2.putText(frame, button['text'], origin,
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

def check_keyboard(frame, faces_detected):
    """Poll the keyboard; return True when the user asked to quit.

    'q' or ESC requests exit; 's' saves the current frame (tagging it when
    faces are present) and arms the on-screen "saved" banner.
    """
    global display_save_message, save_message_time

    key = cv2.waitKey(1) & 0xFF
    if key in (ord('q'), 27):
        return True
    if key == ord('s'):
        saved, _ = save_image(frame, len(faces_detected) > 0)
        if saved:
            display_save_message = True
            save_message_time = time.time()
    return False

# Free whatever the startup phase left behind before entering the loop.
gc.collect()

# Create the display window and wire up the mouse callback for the buttons.
window_name = 'Face Detection (OpenCV)'
cv2.namedWindow(window_name)
cv2.setMouseCallback(window_name, on_mouse_click)

# Optionally switch the same window into fullscreen mode.
if args.fullscreen:
    cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

# Start the raw /dev/input touch reader on its own daemon thread.
touch_input = LinuxTouchInput()
touch_input.start(handle_touch_event)

# ---------------------------------------------------------------------------
# Main loop: grab frames, run (throttled) face detection, draw the HUD and
# touch buttons, and honour save/exit requests from keyboard, mouse and
# touch-screen.  The try/finally guarantees camera/window cleanup.
# ---------------------------------------------------------------------------
try:
    mouse_x, mouse_y = 0, 0
    mouse_pressed = False
    last_press_time = 0
    
    while not exit_flag[0]:
        try:
            # Optional wall-clock limit (--timeout seconds; 0 = run forever).
            if args.timeout > 0 and (time.time() - start_time) > args.timeout:
                break
                
            ret, frame = video_capture.read()
            
            # Camera hiccup: back off briefly instead of busy-spinning.
            if not ret or frame is None:
                time.sleep(0.5)
                continue
                
            frame_count += 1
            current_time = time.time()
            
            # A save was requested by the Save button (mouse or touch):
            # write the current frame, then clear the request.
            if frame_to_save:
                success, filename = save_image(frame, len(face_detections) > 0)
                if success:
                    display_save_message = True
                    save_message_time = time.time()
                frame_to_save = False
                button_clicked = None
            
            # Detection is throttled twice: only every 4th frame sets
            # process_this_frame, and at most once every 0.3 s.
            if process_this_frame and (current_time - last_process_time) > 0.3:
                # Detection pipeline: downscale -> grayscale -> histogram
                # equalization -> slight blur -> Haar/LBP cascade.
                small_frame = cv2.resize(frame, (0, 0), fx=PROCESS_SCALE, fy=PROCESS_SCALE)
                
                gray_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2GRAY)
                
                gray_frame = cv2.equalizeHist(gray_frame)
                
                gray_frame = cv2.GaussianBlur(gray_frame, (5, 5), 0)
                
                faces = face_cascade.detectMultiScale(
                    gray_frame,
                    scaleFactor=1.1,
                    minNeighbors=3,
                    minSize=(15, 15),
                    flags=cv2.CASCADE_SCALE_IMAGE
                )
                
                # Results are in DOWN-SCALED coordinates; the draw loop
                # below rescales them back to frame coordinates.
                face_detections = faces
                
                # Auto-save at most once per --save-interval seconds when
                # faces are present.
                if args.auto_save and len(face_detections) > 0 and (current_time - last_save_time) > args.save_interval:
                    success, _ = save_image(frame, True)
                    if success:
                        last_save_time = current_time
                        display_save_message = True
                        save_message_time = current_time
                
                # Periodic GC to keep memory bounded on small boards.
                if frame_count % 100 == 0:
                    gc.collect()
                    
                last_process_time = current_time
            
            process_this_frame = frame_count % 4 == 0
            
            # Draw the last known detections, rescaled to full resolution.
            for (x, y, w, h) in face_detections:
                scale_factor = 1.0 / PROCESS_SCALE
                x = int(x * scale_factor)
                y = int(y * scale_factor)
                w = int(w * scale_factor)
                h = int(h * scale_factor)
                
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
                
                # Label bar inside the bottom of the box, sized to display.
                label_h = 25 if VIDEO_HEIGHT >= 240 else 15
                cv2.rectangle(frame, (x, y+h-label_h), (x+w, y+h), (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                
                font_scale = 0.6 if VIDEO_WIDTH < 640 else 0.8
                cv2.putText(frame, "Face", (x+6, y+h-6), font, font_scale, (255, 255, 255), 1)
            
            # HUD: face count and a per-frame processing-time FPS estimate
            # (current_time was taken right after read(); +0.001 guards
            # against division by zero).
            face_count_text = f"Faces: {len(face_detections)}"
            cv2.putText(frame, face_count_text, (5, 35), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            
            fps = 1.0 / (time.time() - current_time + 0.001)
            fps_text = f"FPS: {fps:.1f}"
            cv2.putText(frame, fps_text, (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            
            # Let the pressed-state visuals decay after button_press_duration.
            current_time = time.time()
            if save_button_pressed and (current_time - save_button_press_time) > button_press_duration:
                save_button_pressed = False
            
            if exit_button_pressed and (current_time - exit_button_press_time) > button_press_duration:
                exit_button_pressed = False
            
            # ---- Save button: drawn bottom-left; its geometry is copied
            # into the save_button dict so mouse/touch hit-testing matches
            # exactly what is on screen.
            save_button_color = (0, 100, 255) if save_button_pressed else (0, 200, 255)
            save_button_x = 20
            save_button_y = VIDEO_HEIGHT - 70
            save_button_w = 120
            save_button_h = 60
            
            save_button['x'] = save_button_x
            save_button['y'] = save_button_y
            save_button['w'] = save_button_w
            save_button['h'] = save_button_h
            
            cv2.rectangle(frame, 
                        (save_button_x, save_button_y), 
                        (save_button_x + save_button_w, save_button_y + save_button_h), 
                        save_button_color, -1)
            
            # Thicker border and nudged label give pressed feedback.
            border_thickness = 5 if save_button_pressed else 3
            cv2.rectangle(frame, 
                        (save_button_x, save_button_y), 
                        (save_button_x + save_button_w, save_button_y + save_button_h), 
                        (255, 255, 255), border_thickness)
            
            text_offset = 2 if save_button_pressed else 0
            cv2.putText(frame, "SAVE", 
                    (save_button_x + 20, save_button_y + 40 + text_offset), 
                    cv2.FONT_HERSHEY_SIMPLEX, 1.1, (255, 255, 255), 2)
            
            # ---- Exit button: bottom-right, same geometry-sync pattern.
            exit_button_color = (0, 0, 200) if exit_button_pressed else (0, 0, 255)
            exit_button_x = VIDEO_WIDTH - 140
            exit_button_y = VIDEO_HEIGHT - 70
            exit_button_w = 120
            exit_button_h = 60
            
            exit_button['x'] = exit_button_x
            exit_button['y'] = exit_button_y
            exit_button['w'] = exit_button_w
            exit_button['h'] = exit_button_h
            
            cv2.rectangle(frame, 
                        (exit_button_x, exit_button_y), 
                        (exit_button_x + exit_button_w, exit_button_y + exit_button_h), 
                        exit_button_color, -1)
            
            border_thickness = 5 if exit_button_pressed else 3
            cv2.rectangle(frame, 
                        (exit_button_x, exit_button_y), 
                        (exit_button_x + exit_button_w, exit_button_y + exit_button_h), 
                        (255, 255, 255), border_thickness)
            
            text_offset = 2 if exit_button_pressed else 0
            cv2.putText(frame, "EXIT", 
                    (exit_button_x + 30, exit_button_y + 40 + text_offset), 
                    cv2.FONT_HERSHEY_SIMPLEX, 1.1, (255, 255, 255), 2)
            
            # "IMAGE SAVED!" banner, centred, shown for 2 seconds.
            if display_save_message and time.time() - save_message_time < 2.0:
                msg = "IMAGE SAVED!"
                text_size = cv2.getTextSize(msg, cv2.FONT_HERSHEY_SIMPLEX, 1.2, 2)[0]
                text_x = (VIDEO_WIDTH - text_size[0]) // 2
                
                cv2.rectangle(frame, 
                            (text_x - 10, 30 - 10), 
                            (text_x + text_size[0] + 10, 30 + text_size[1] + 10), 
                            (0, 0, 0), -1)
                cv2.putText(frame, msg, (text_x, 30 + text_size[1] // 2), 
                            cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 255), 2)
            else:
                display_save_message = False
            
            cv2.imshow(window_name, frame)
            
            # waitKey also pumps the GUI event loop; mask to 8 bits for
            # portable key codes.
            key = cv2.waitKey(1) & 0xFF
            
            if key == 27 or key == ord('q'):
                exit_flag[0] = True
                
        except Exception as e:
            # Best-effort loop: swallow per-frame errors and keep running
            # after a short pause (e.g. transient camera glitches).
            time.sleep(0.1)
        
finally:
    # Always release the touch thread, camera and GUI resources, even on
    # SIGINT or an unexpected error.
    if touch_input:
        touch_input.stop()
    
    if video_capture is not None:
        video_capture.release()
    cv2.destroyAllWindows()
    
    gc.collect()