"""
Local Dumbo benchmark implementation
"""

import os
import time
import subprocess
from typing import Dict, Any
from benchmark.utils import BenchError, Print, run_command, get_timestamp, ensure_directory


class LocalBench:
    """Local benchmark runner for Dumbo.

    Builds the Dumbo binary, generates per-node YAML configs, launches the
    nodes in detached tmux sessions for a fixed duration, then parses the
    node logs into an aggregate CSV summary.
    """

    def __init__(self, ctx):
        self.ctx = ctx
        # All paths are derived from the current working directory, so the
        # benchmark must be launched from the repository root.
        self.root_dir = os.getcwd()
        self.data_dir = os.path.join(self.root_dir, "data")
        ensure_directory(self.data_dir)

    def run(self, bench_params: Dict[str, Any], node_params: Dict[str, Any]) -> None:
        """Run a local benchmark end to end.

        Args:
            bench_params: Benchmark parameters. 'nodes' is a list of node
                counts (only the first entry is used for a local run),
                'duration' is the run length in seconds, and 'batch_size'
                is optional (default 100).
            node_params: Per-node configuration; values are read from the
                'dumbo' sub-dict.

        Raises:
            BenchError: If any phase (build, config gen, start, parse) fails.
        """
        Print.subheading("Starting local Dumbo benchmark")

        # Extract parameters; only the first node count applies locally.
        nodes = bench_params['nodes'][0]
        duration = bench_params['duration']
        batch_size = bench_params.get('batch_size', 100)

        # Create a timestamped run directory with logs/ and results/ subdirs.
        timestamp = get_timestamp()
        run_dir = os.path.join(self.data_dir, timestamp)
        ensure_directory(run_dir)
        ensure_directory(os.path.join(run_dir, "logs"))
        ensure_directory(os.path.join(run_dir, "results"))

        Print.info(f"Run directory: {run_dir}")
        Print.info(f"Nodes: {nodes}, Duration: {duration}s, Batch size: {batch_size}")

        try:
            self._build_binary()
            self._generate_configs(nodes, run_dir, node_params)
            self._start_nodes(nodes, run_dir)

            Print.info(f"Running benchmark for {duration} seconds...")
            time.sleep(duration)

            self._stop_nodes()
            self._parse_results(run_dir, nodes, duration)

            Print.success(f"Local benchmark completed. Results in: {run_dir}")

        except BenchError:
            # Already a benchmark error: clean up and propagate as-is so the
            # original message is not wrapped a second time.
            self._stop_nodes()
            raise
        except Exception as e:
            self._stop_nodes()  # Ensure tmux sessions are torn down
            # Chain the cause so the original traceback is preserved.
            raise BenchError(f"Local benchmark failed: {e}") from e

    def _build_binary(self) -> None:
        """Build the Dumbo binary in the repository root.

        Raises:
            BenchError: If `go mod tidy` or `go build` fails.
        """
        Print.info("Building Dumbo binary...")

        returncode, stdout, stderr = run_command("go mod tidy", self.root_dir)
        if returncode != 0:
            raise BenchError(f"go mod tidy failed: {stderr}")

        returncode, stdout, stderr = run_command("go build -o dumbo .", self.root_dir)
        if returncode != 0:
            raise BenchError(f"go build failed: {stderr}")

        Print.success("Dumbo binary built successfully")

    def _generate_configs(self, nodes: int, run_dir: str, node_params: Dict[str, Any]) -> None:
        """Generate per-node YAML configurations into run_dir.

        Writes a config template, runs the config_gen Go tool on it, then
        lower-cases the threshold-signature key names (the Go tool emits
        CamelCase keys while the nodes read lowercase ones) and copies the
        generated files into run_dir.

        Raises:
            BenchError: If the Go config generator or the post-processing
                shell commands fail.
        """
        Print.info("Generating node configurations...")

        # Hoist the repeated sub-dict lookup.
        dumbo_params = node_params['dumbo']
        config_template = {
            'id_name': {i: f'node{i}' for i in range(nodes)},
            # Fixed: was f'127.0.0.1' — an f-string with no placeholders.
            'id_ip': {i: '127.0.0.1' for i in range(nodes)},
            'id_p2p_port': {i: 9000 + i for i in range(nodes)},
            'log_level': dumbo_params['log_level'],
            'max_pool': 10,
            'timeout': 1000,
            'mock_latency': dumbo_params['mock_latency'],
            'ddos': True,
            'max_payload_size': dumbo_params['max_payload_size'],
            'tx_size': dumbo_params['tx_size'],
            'wait_time': dumbo_params['wait_time'],
        }

        # Local import keeps PyYAML optional for code paths that never run
        # a local benchmark.
        import yaml
        template_path = os.path.join(run_dir, 'config_temp.yaml')
        with open(template_path, 'w') as f:
            yaml.dump(config_template, f, default_flow_style=False)

        # Generate individual node configurations with the Go tool.
        # Paths are quoted so run_dir with spaces does not split the command.
        config_gen_dir = os.path.join(self.root_dir, "config_gen")
        returncode, stdout, stderr = run_command(
            f'cp "{template_path}" ./config_temp.yaml && go run main.go',
            config_gen_dir
        )
        if returncode != 0:
            raise BenchError(f"Config generation failed: {stderr}")

        # Fix key casing and copy the generated configs into the run directory.
        returncode, stdout, stderr = run_command(
            "sed -i 's/TSPubKey:/tspubkey:/g; s/TSShare:/tsshare:/g' *.yaml "
            f'&& cp *.yaml "{run_dir}"',
            config_gen_dir
        )
        if returncode != 0:
            raise BenchError(f"Config processing failed: {stderr}")

        Print.success("Node configurations generated")

    def _start_nodes(self, nodes: int, run_dir: str) -> None:
        """Start `nodes` Dumbo processes, one detached tmux session each.

        Each node runs from run_dir (where its config lives) and redirects
        stdout/stderr into run_dir/logs/node-<i>.log.

        Raises:
            BenchError: If any tmux session fails to start.
        """
        Print.info(f"Starting {nodes} Dumbo nodes...")

        # Kill any stale sessions left over from a previous run; '|| true'
        # ignores the error when no tmux server is running.
        run_command("tmux kill-server || true")

        for i in range(nodes):
            session = f"dumbo_{i}"
            cmd = f"cd {run_dir} && {self.root_dir}/dumbo -config node{i}"
            tmux_cmd = f'tmux new -d -s "{session}" "{cmd} > {run_dir}/logs/node-{i}.log 2>&1"'

            returncode, stdout, stderr = run_command(tmux_cmd)
            if returncode != 0:
                raise BenchError(f"Failed to start node {i}: {stderr}")

            Print.info(f"Started node {i}")

        # Give the nodes a moment to initialize and open their sockets.
        time.sleep(3)
        Print.success("All nodes started")

    def _stop_nodes(self) -> None:
        """Stop all Dumbo nodes by killing the tmux server (best effort)."""
        Print.info("Stopping nodes...")
        run_command("tmux kill-server || true")
        Print.success("All nodes stopped")

    def _parse_results(self, run_dir: str, nodes: int, duration: int) -> None:
        """Aggregate per-node log statistics and write a CSV summary.

        Scans run_dir/logs/*.log, totals the per-file counters, derives
        per-second rates, writes results/result.csv, and prints a summary.
        Returns early (with a warning) if no logs were produced.
        """
        Print.info("Parsing benchmark results...")

        logs_dir = os.path.join(run_dir, "logs")
        results_dir = os.path.join(run_dir, "results")

        if not os.path.exists(logs_dir):
            Print.warning("No logs directory found")
            return

        log_files = [f for f in os.listdir(logs_dir) if f.endswith('.log')]
        if not log_files:
            Print.warning("No log files found")
            return

        # Aggregate counters across all node logs.
        stats: Dict[str, Any] = {
            'total_rbc_outputs': 0,
            'total_aba_decisions': 0,
            'total_mvba_outputs': 0,
            'total_acs_completions': 0,
            'total_consensus_rounds': 0,
            'total_errors': 0,
            'per_file_results': {}
        }

        for log_file in log_files:
            log_path = os.path.join(logs_dir, log_file)
            file_stats = self._parse_log_file(log_path)
            stats['per_file_results'][log_file] = file_stats

            for key in ['rbc_outputs', 'aba_decisions', 'mvba_outputs',
                        'acs_completions', 'consensus_rounds', 'errors']:
                stats[f'total_{key}'] += file_stats.get(key, 0)

        # Derive throughput rates; guard against a zero-duration run.
        if duration > 0:
            stats['rbc_outputs_per_sec'] = stats['total_rbc_outputs'] / duration
            stats['aba_decisions_per_sec'] = stats['total_aba_decisions'] / duration
            stats['mvba_outputs_per_sec'] = stats['total_mvba_outputs'] / duration
            stats['consensus_rounds_per_sec'] = stats['total_consensus_rounds'] / duration

        # Save results to CSV (header row + one data row).
        import csv
        result_file = os.path.join(results_dir, "result.csv")
        with open(result_file, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow([
                'timestamp', 'N', 'duration_s', 'total_rbc_outputs', 'total_aba_decisions',
                'total_mvba_outputs', 'total_acs_completions', 'total_consensus_rounds',
                'total_errors', 'rbc_outputs_per_sec', 'aba_decisions_per_sec',
                'mvba_outputs_per_sec', 'consensus_rounds_per_sec', 'per_file_results'
            ])
            writer.writerow([
                get_timestamp(), nodes, duration, stats['total_rbc_outputs'], stats['total_aba_decisions'],
                stats['total_mvba_outputs'], stats['total_acs_completions'], stats['total_consensus_rounds'],
                stats['total_errors'], stats.get('rbc_outputs_per_sec', 0), stats.get('aba_decisions_per_sec', 0),
                stats.get('mvba_outputs_per_sec', 0), stats.get('consensus_rounds_per_sec', 0),
                str(stats['per_file_results'])
            ])

        Print.subheading("Benchmark Results Summary")
        Print.info(f"Total RBC Outputs: {stats['total_rbc_outputs']}")
        Print.info(f"Total ABA Decisions: {stats['total_aba_decisions']}")
        Print.info(f"Total MVBA Outputs: {stats['total_mvba_outputs']}")
        Print.info(f"Total ACS Completions: {stats['total_acs_completions']}")
        Print.info(f"Total Consensus Rounds: {stats['total_consensus_rounds']}")
        Print.info(f"Total Errors: {stats['total_errors']}")
        Print.info(f"Consensus Rounds/sec: {stats.get('consensus_rounds_per_sec', 0):.2f}")
        Print.info(f"Results saved to: {result_file}")

    def _parse_log_file(self, log_path: str) -> Dict[str, Any]:
        """Count protocol events in a single node log.

        Returns a dict of counters (lines, rbc_outputs, aba_decisions,
        mvba_outputs, acs_completions, consensus_rounds, errors) plus the
        file name; on read failure a 'parse_error' key holds the message.
        (Return type fixed from Dict[str, int]: 'file'/'parse_error' are str.)
        """
        stats: Dict[str, Any] = {
            'file': os.path.basename(log_path),
            'lines': 0,
            'rbc_outputs': 0,
            'aba_decisions': 0,
            'mvba_outputs': 0,
            'acs_completions': 0,
            'consensus_rounds': 0,
            'errors': 0
        }

        try:
            with open(log_path, 'r', errors='ignore') as f:
                for line in f:
                    stats['lines'] += 1
                    # elif chain: each line is counted under at most one bucket,
                    # in this priority order.
                    if 'RBC delivered' in line:
                        stats['rbc_outputs'] += 1
                    elif 'ABA decided' in line:
                        stats['aba_decisions'] += 1
                    elif 'MVBA delivered' in line:
                        stats['mvba_outputs'] += 1
                    elif 'ACS completed' in line:
                        stats['acs_completions'] += 1
                    elif 'Consensus round completed' in line:
                        stats['consensus_rounds'] += 1
                    elif 'error' in line.lower() or 'panic' in line.lower():
                        stats['errors'] += 1
        except Exception as e:
            # Record but do not raise: one unreadable log should not abort
            # result aggregation for the whole run.
            stats['parse_error'] = str(e)

        return stats

