import logging
import queue
import re
import subprocess
import threading
import time
from typing import Any, Generator

from hdc import Hdc, HdcError

class StressLogger:
    """Base class for device stress-log collectors.

    Holds a property bag (hdc handle, device id, bundle name, pid) that
    subclasses read when streaming and parsing logs.  ``start()`` and
    ``process()`` are no-op hooks that subclasses override.
    """

    def __init__(self):
        # Default property values; subclasses read these via self.properties.
        self.properties = {
            "hdc": None,        # Hdc handle used to reach the device
            "device_id": "",    # empty string selects the default device
            "bundle_name": "",
            "pid": -1,          # -1 means "no target process chosen yet"
        }

    def set_property(self, key: str, value: Any) -> None:
        """Set (or overwrite) a single property used by the logger.

        Note: the original annotated ``value`` with the builtin ``any``
        function; ``typing.Any`` is the intended type.
        """
        self.properties[key] = value

    def start(self) -> None:
        """Hook: begin collecting logs.  Overridden by subclasses."""
        pass

    def process(self) -> None:
        """Hook: process collected logs.  Overridden by subclasses."""
        pass
    
class StressLogManager:
    """Keeps a collection of StressLogger instances and drives them as a group."""

    def __init__(self):
        self.stress_logs: list[StressLogger] = []

    def add_stress_logger(self, stress_log: StressLogger) -> None:
        """Register one logger with the manager."""
        self.stress_logs.append(stress_log)

    def clear_stress_logger(self) -> None:
        """Drop every registered logger."""
        self.stress_logs.clear()

    def start_stress_loggers(self) -> None:
        """Invoke start() on each registered logger, in registration order."""
        for registered in self.stress_logs:
            registered.start()

    def process_stress_loggers(self) -> None:
        """Invoke process() on each registered logger, in registration order."""
        for registered in self.stress_logs:
            registered.process()
    
# -------------------------- stresslog1 --------------------------#
LOG_FILTER_REGEX = r"\[gc\]"  # Regex to filter hilog output for GC messages
# Patterns for the per-epoch GC messages the runtime emits.  group(1) is
# always the epoch number; the remaining groups are the metric values.
GC_TIME_START = re.compile(r"Epoch #(\d+): Started. Time since last GC (\d+) microseconds")
GC_LIVE_BYTES = re.compile(r"Epoch #(\d+): live bytes after one gc: (\d+) bytes")
GC_MARK = re.compile(r"Epoch #(\d+): Mark: (\d+) objects")
# "Sweep extra objects" and plain "Sweep" are distinct messages; the two
# patterns below cannot match each other's lines.
GC_SWEEP_EXTRA_OBJ = re.compile(r"Epoch #(\d+): Sweep extra objects: swept (\d+) objects, kept (\d+) objects")
GC_SWEEP = re.compile(r"Epoch #(\d+): Sweep: swept (\d+) objects, kept (\d+) objects")
GC_HEAP_MEMORY = re.compile(r"Epoch #(\d+): Heap memory usage: before (\d+) bytes, after (\d+) bytes")
# Pause metrics come in two phases (#1 and #2) per epoch.
GC_TIME_TO_PAUSE_1 = re.compile(r"Epoch #(\d+): Time to pause #1: (\d+) microseconds")
GC_MUTATORS_PAUSE_TIME_1 = re.compile(r"Epoch #(\d+): Mutators pause time #1: (\d+) microseconds")
GC_TIME_TO_PAUSE_2 = re.compile(r"Epoch #(\d+): Time to pause #2: (\d+) microseconds")
GC_MUTATORS_PAUSE_TIME_2 = re.compile(r"Epoch #(\d+): Mutators pause time #2: (\d+) microseconds")
GC_TIME_FINISH = re.compile(r"Epoch #(\d+): Finished. Total GC epoch time is (\d+) microseconds")

class MyStressLogger(StressLogger):
    """Streams and summarizes GC ("[gc]") hilog output for a single process.

    ``start()`` launches a daemon thread that runs ``hilog`` on the device and
    queues every line containing "Epoch #".  ``process()`` drains the queue on
    the caller's thread, parses the known GC epoch messages, and keeps the
    peak values observed so far in ``self.peak_data``.
    """

    def __init__(self):
        super().__init__()
        self.log_queue = queue.Queue()       # "Epoch #" lines handed over by the reader thread
        self.stop_event = threading.Event()  # set to ask the reader thread to stop
        self.log_data = []                   # metrics parsed from the current line (cleared per line)
        # Running maxima over every GC epoch observed so far.
        self.peak_data = {
            "live_bytes_after_one_gc": 0,
            "heap_memory_before_gc": 0,
            "heap_memory_after_gc": 0,
            "total_gc_epoch_time": 0,
        }
        self.log_thread = None               # reader thread, created by start()

    def __stream_logs(self, log_filter_regex: str) -> Generator[str, None, None]:
        """
        Stream log lines from the device, filtered by PID and a regex pattern.

        The target PID, hdc handle and device id are read from
        ``self.properties``.  Runs ``hilog`` as a persistent child process;
        the process is terminated when this generator is closed early
        (e.g. when the consuming loop breaks on the stop event).

        Args:
            log_filter_regex (str): Regex pattern passed to ``hilog -e``.

        Yields:
            str: Individual stripped log lines matching the filter.

        Raises:
            HdcError: If a required property is missing or the hilog
                command fails to start.
        """
        hdc: Hdc = self.properties.get("hdc", None)
        pid = self.properties.get("pid", -1)
        device_id = self.properties.get("device_id", None)
        if hdc is None or pid < 0 or device_id is None:
            raise HdcError("[StressLog] hdc or pid or device_id is None")

        # -P filters by PID; -e applies the regex (single-quoted for the shell).
        hilog_command = f"hilog -P {pid} -e '{log_filter_regex}'"
        # BUGFIX: the original inverted this condition (it inserted the EMPTY
        # device id into the command line) and referenced the non-existent
        # attribute `self.hdc`, raising AttributeError for any real device id.
        # NOTE(review): assumes Hdc takes the device id as a bare positional
        # argument before "shell" — confirm against other Hdc call sites.
        if device_id == "":
            full_command = [hdc.path, "shell", hilog_command]
        else:
            full_command = [hdc.path, device_id, "shell", hilog_command]
        logging.info(f"[StressLog] Starting log stream with command: {' '.join(full_command)}")
        process = None
        try:
            # Use Popen for continuous streaming.
            # NOTE(review): stderr is piped but never read — a very chatty
            # stderr could block the child; confirm hilog stays quiet there.
            process = subprocess.Popen(
                full_command,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                bufsize=1,  # Line buffered
            )
            # Yield lines from stdout until the stream closes.
            if process.stdout:
                for line in iter(process.stdout.readline, ""):
                    yield line.strip()
            else:
                logging.warning("[StressLog] Log stream process has no stdout.")

            # After the loop (process ended or stdout closed), check final status
            process.wait()  # Ensure process resource is cleaned up
            if process.returncode != 0:
                logging.warning(
                    f"[StressLog] Log stream process exited with code {process.returncode}"
                )
            else:
                logging.info("[StressLog] Log stream finished.")
        except FileNotFoundError:
            logging.error(f"HDC executable not found at '{hdc.path}'")
            raise HdcError(f"HDC executable not found at '{hdc.path}'")
        except Exception as e:
            logging.error(f"Error starting or reading log stream: {e}")
            raise HdcError(f"Error during log streaming: {e}") from e
        finally:
            # BUGFIX: when the consumer abandons this generator (GeneratorExit
            # after breaking on stop_event), the hilog child used to be leaked.
            if process is not None and process.poll() is None:
                process.terminate()

    def __log_thread_func(self):
        """
        Monitors logs in a separate thread and puts relevant lines into a queue.

        Exits when the stream ends or when ``self.stop_event`` is set; always
        pushes a ``None`` sentinel so the consumer can detect completion.
        """
        pid = self.properties.get("pid", -1)
        if pid == -1:
            logging.error("[StressLog] PID not found in properties, cannot start log monitoring.")
            return

        logging.info(f"[StressLog] Starting log monitoring for PID {pid}.")
        try:
            for log_line in self.__stream_logs(LOG_FILTER_REGEX):
                if self.stop_event.is_set():
                    logging.info("[StressLog] Stop event received, exiting.")
                    break
                if log_line:
                    # Only enqueue lines potentially containing GC epoch info.
                    if "Epoch #" in log_line:
                        self.log_queue.put(log_line)
        except HdcError as e:
            logging.error(f"[StressLog] Error streaming logs: {e}")
        except Exception as e:
            logging.error(f"[StressLog] Unexpected error: {e}", exc_info=True)
        finally:
            logging.info("[StressLog] Log monitoring stopped.")
            # Signal the consumer that logging ended.
            self.log_queue.put(None)  # Sentinel value

    def __match_logs(self, log_line):
        """Parse one GC log line, update peaks, and log the extracted values.

        Each known pattern is tried in turn; matched values are appended to
        ``self.log_data`` for the duration of this call only (cleared at the
        end), while maxima are accumulated in ``self.peak_data``.
        """
        match = GC_TIME_START.search(log_line)
        if match:
            epoch = int(match.group(1))
            time_start = int(match.group(2))
            logging.info(f"--- GC Epoch {epoch}: time start {time_start} microseconds ---")
            self.log_data.append(epoch)
            self.log_data.append(time_start)
        match = GC_LIVE_BYTES.search(log_line)
        if match:
            epoch = int(match.group(1))
            live_bytes = int(match.group(2))
            if live_bytes > self.peak_data["live_bytes_after_one_gc"]:
                self.peak_data["live_bytes_after_one_gc"] = live_bytes
            logging.info(f"--- GC Epoch {epoch}: live bytes after one gc {live_bytes / (1024*1024):.2f} MB ---")
            self.log_data.append(live_bytes/(1024*1024))

        match = GC_MARK.search(log_line)
        if match:
            epoch = int(match.group(1))
            mark_obj = int(match.group(2))
            logging.info(f"--- GC Epoch {epoch}: mark {mark_obj} objects ---")
            self.log_data.append(mark_obj)

        match = GC_SWEEP_EXTRA_OBJ.search(log_line)
        if match:
            epoch = int(match.group(1))
            swept_extra_obj = int(match.group(2))
            kept_extra_obj = int(match.group(3))
            logging.info(f"--- GC Epoch {epoch}: swept extra {swept_extra_obj} objects, kept {kept_extra_obj} objects ---")
            self.log_data.append(swept_extra_obj)
            self.log_data.append(kept_extra_obj)

        match = GC_SWEEP.search(log_line)
        if match:
            epoch = int(match.group(1))
            swept_obj = int(match.group(2))
            kept_obj = int(match.group(3))
            # BUGFIX: message previously said "swept extra" (copy-paste from
            # the Sweep-extra branch) for the plain Sweep metric.
            logging.info(f"--- GC Epoch {epoch}: swept {swept_obj} objects, kept {kept_obj} objects ---")
            self.log_data.append(swept_obj)
            self.log_data.append(kept_obj)

        match = GC_HEAP_MEMORY.search(log_line)
        if match:
            epoch = int(match.group(1))
            mem_before = int(match.group(2))
            mem_after = int(match.group(3))
            # Peak "after" is tracked from the epoch with the peak "before"
            # so the two values describe the same GC epoch.
            if mem_before > self.peak_data["heap_memory_before_gc"]:
                self.peak_data["heap_memory_before_gc"] = mem_before
                self.peak_data["heap_memory_after_gc"] = mem_after
            logging.info(f"--- GC Epoch {epoch}: Memory Before={mem_before / (1024*1024):.2f} MB, After={mem_after / (1024*1024):.2f} MB ---")
            self.log_data.append(mem_before/(1024*1024))
            self.log_data.append(mem_after/(1024*1024))

        match = GC_TIME_TO_PAUSE_1.search(log_line)
        if match:
            epoch = int(match.group(1))
            time_pause1 = int(match.group(2))
            logging.info(f"--- GC Epoch {epoch}: time to pause #1 {time_pause1} microseconds ---")
            self.log_data.append(time_pause1)

        match = GC_MUTATORS_PAUSE_TIME_1.search(log_line)
        if match:
            epoch = int(match.group(1))
            mutators_time_pause1 = int(match.group(2))
            logging.info(f"--- GC Epoch {epoch}: time to mutators pause #1 {mutators_time_pause1} microseconds ---")
            self.log_data.append(mutators_time_pause1)

        match = GC_TIME_TO_PAUSE_2.search(log_line)
        if match:
            epoch = int(match.group(1))
            time_pause2 = int(match.group(2))
            logging.info(f"--- GC Epoch {epoch}: time to pause #2 {time_pause2} microseconds ---")
            self.log_data.append(time_pause2)

        match = GC_MUTATORS_PAUSE_TIME_2.search(log_line)
        if match:
            epoch = int(match.group(1))
            mutators_time_pause2 = int(match.group(2))
            logging.info(f"--- GC Epoch {epoch}: time to mutators pause #2 {mutators_time_pause2} microseconds ---")
            self.log_data.append(mutators_time_pause2)

        match = GC_TIME_FINISH.search(log_line)
        if match:
            epoch = int(match.group(1))
            time_end = int(match.group(2))
            if time_end > self.peak_data["total_gc_epoch_time"]:
                self.peak_data["total_gc_epoch_time"] = time_end
            logging.info(f"--- GC Epoch {epoch}: total gc epoch time is {time_end} microseconds ---")
            self.log_data.append(time_end)

        logging.info(f"single gc log data: {self.log_data}")
        self.log_data.clear()

    def start(self):
        """Start the background hilog reader thread (daemon, one per logger)."""
        self.log_thread = threading.Thread(target=self.__log_thread_func, daemon=True)
        self.log_thread.start()
        logging.info("[StressLog] Log stream started, collecting logs...")

    def process(self):
        """Processes log lines from the queue and extracts memory info.

        Drains everything currently queued; returns early on the ``None``
        sentinel that marks the end of the reader thread.
        """
        try:
            while not self.log_queue.empty():
                log_line = self.log_queue.get_nowait()
                if log_line is None:  # Check for sentinel value
                    # BUGFIX: the sentinel item was dequeued without
                    # task_done(), which would hang a future queue.join().
                    self.log_queue.task_done()
                    logging.info("[StressLog] Log monitor thread signaled completion.")
                    return  # Stop processing if monitor thread ended
                self.__match_logs(log_line)
                self.log_queue.task_done()  # Mark task as done for the queue
            logging.info(f"{self.peak_data}")
        except queue.Empty:
            pass  # No logs to process right now