import torch_npu
import torch
import tilelang
import tilelang.language as T
from backends.backend_registry import register_backend, Backend
from utils.correctness import execute_template
from utils.performance import time_execution_event_template
from config import project_root_path, ascendc_device
import os
import pickle
import random, string
import shutil
import sys
import io
import contextlib
import subprocess
import tempfile
import threading
import os

class OutputCapture:
    """Context manager that captures ``sys.stdout``/``sys.stderr`` into StringIO buffers.

    Fix vs. the previous version: the streams to restore are snapshotted in
    ``__enter__`` rather than ``__init__``, so restoration is correct even if
    ``sys.stdout``/``sys.stderr`` were replaced between construction and entry
    (the fd-redirection code in this file does exactly that).
    """

    def __init__(self):
        self.stdout = io.StringIO()
        self.stderr = io.StringIO()
        # Re-snapshotted on __enter__; set here only so the attributes always exist.
        self.original_stdout = sys.stdout
        self.original_stderr = sys.stderr
        # Kept for backward compatibility with callers that read these; this
        # class itself never populates them.
        self.captured_output = []
        self.captured_error = []

    def __enter__(self):
        # Snapshot at entry time so __exit__ restores the streams that were
        # actually active when capture began.
        self.original_stdout = sys.stdout
        self.original_stderr = sys.stderr
        sys.stdout = self.stdout
        sys.stderr = self.stderr
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout = self.original_stdout
        sys.stderr = self.original_stderr
        # Returning None/False: never suppress exceptions from the with-body.
        return False

    def get_output(self):
        """Return ``(stdout_text, stderr_text)`` captured so far."""
        return self.stdout.getvalue(), self.stderr.getvalue()

@register_backend('tilelang')
class TileLangBackend(Backend):
    """Backend that compiles and benchmarks generated TileLang kernels on an Ascend NPU.

    Generated code and the reference source are exec'd into a shared
    ``self.context`` dict so the later correctness/timing phases can reuse the
    symbols they define (``ModelNew``, ``get_inputs``, ``get_init_inputs``).
    """

    def __init__(self):
        # Shared exec() namespace for generated + reference code.
        self.context = {}
        self.device = self.get_device()
        # Populated by set_cache_dir(); may still be None at cleanup() time.
        self.cache_dir = None

    def get_device(self):
        """Return the torch device all tensors are moved to."""
        return torch.device('npu:0')

    def set_cache_dir(self, cache_dir):
        """Point both this backend and tilelang's kernel cache at ``cache_dir``."""
        self.cache_dir = cache_dir
        tilelang.cache.set_cache_dir(cache_dir)

    def get_cache_dir(self):
        return self.cache_dir

    def get_hardware_name(self):
        # Read from static config: torch_npu.npu.get_device_name(device)
        # causes a crash on this platform.
        return ascendc_device

    def compile(self, generated_code, op, ref_src=""):
        """Compile ``generated_code`` for ``op`` and run a single forward pass.

        Everything written to file descriptors 1/2 while the code runs
        (including native compiler output that bypasses ``sys.stdout``) is
        captured into temp files; on failure that captured text becomes the
        returned error message.

        Returns:
            ``(True, None)`` on success, ``(False, error_message)`` on failure.
        """
        # Temp files that receive everything written to fds 1 and 2.
        with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix='.out') as stdout_file:
            with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix='.err') as stderr_file:
                stdout_path = stdout_file.name
                stderr_path = stderr_file.name

        # Duplicates of the real stdout/stderr fds so they can be restored;
        # closed unconditionally in the outer finally.
        original_stdout_fd = os.dup(1)
        original_stderr_fd = os.dup(2)

        try:
            # Write the generated code to a real file so tracebacks and
            # inspect-based source lookups succeed.
            os.makedirs(os.path.join(project_root_path, 'generated'), exist_ok=True)
            tag = ''.join(random.choices(string.ascii_letters + string.digits, k=10))
            code_filename = os.path.join(project_root_path, 'generated', f'{op}_tilelang_{tag}.py')
            with open(code_filename, 'w') as f:
                f.write(generated_code)
            compiled_obj = compile(generated_code, code_filename, "exec")

            self._exec_with_capture(compiled_obj, ref_src, stdout_path, stderr_path,
                                    original_stdout_fd, original_stderr_fd)

            # Success: the generated file is no longer needed.  (On failure it
            # is deliberately left in place for debugging.)
            os.remove(code_filename)
            return True, None
        except Exception as e:
            # Generated code may have chdir'd; return to a known location.
            os.chdir(project_root_path)

            # Recover whatever the failed run printed.
            stdout_content = ""
            stderr_content = ""
            try:
                with open(stdout_path, 'r') as f:
                    stdout_content = f.read()
                with open(stderr_path, 'r') as f:
                    stderr_content = f.read()
            except OSError:
                pass

            error_details = []
            if stdout_content.strip():
                error_details.append(f"STDOUT:\n{stdout_content}")
            if stderr_content.strip():
                error_details.append(f"{stderr_content}")

            full_error_message = "\n\n".join(error_details) if error_details else str(e)
            return False, full_error_message
        finally:
            # Best-effort teardown of the saved fds and temp files in all cases.
            for fd in (original_stdout_fd, original_stderr_fd):
                try:
                    os.close(fd)
                except OSError:
                    pass
            for path in (stdout_path, stderr_path):
                try:
                    os.unlink(path)
                except OSError:
                    pass

    def _exec_with_capture(self, compiled_obj, ref_src, stdout_path, stderr_path,
                           original_stdout_fd, original_stderr_fd):
        """Run the compiled code with fds 1/2 and ``sys.std*`` redirected to files.

        All redirection happens inside the try so the finally always restores
        it; previously the redirection was performed before the try, leaving
        the process permanently redirected if any setup step failed.
        """
        stdout_fd = os.open(stdout_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
        try:
            stderr_fd = os.open(stderr_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
        except OSError:
            os.close(stdout_fd)  # don't leak the first fd if the second open fails
            raise

        original_stdout = sys.stdout
        original_stderr = sys.stderr
        try:
            # Redirect at the fd level (native code) and at the Python level
            # (anything that writes through sys.stdout/sys.stderr).
            os.dup2(stdout_fd, 1)
            os.dup2(stderr_fd, 2)
            sys.stdout = open(stdout_path, 'w')
            sys.stderr = open(stderr_path, 'w')

            self._run_generated_model(compiled_obj, ref_src)
        finally:
            # Restore Python-level streams first, then the raw descriptors.
            if sys.stdout is not original_stdout:
                sys.stdout.close()
            if sys.stderr is not original_stderr:
                sys.stderr.close()
            sys.stdout = original_stdout
            sys.stderr = original_stderr

            os.close(stdout_fd)
            os.close(stderr_fd)
            os.dup2(original_stdout_fd, 1)
            os.dup2(original_stderr_fd, 2)

    def _run_generated_model(self, compiled_obj, ref_src):
        """Exec generated + reference code, then run one forward pass on the NPU."""
        exec(compiled_obj, self.context)
        exec(ref_src, self.context)
        get_inputs = self.context['get_inputs']
        get_init_inputs = self.context['get_init_inputs']
        ModelNew = self.context['ModelNew']

        init_inputs = get_init_inputs()
        init_inputs = [
            x.to(device=self.device) if isinstance(x, torch.Tensor) else x
            for x in init_inputs
        ]
        with torch.no_grad():
            custom_model = ModelNew(*init_inputs).to(self.device)
            inputs = get_inputs()
            inputs = [
                x.to(device=self.device) if isinstance(x, torch.Tensor) else x
                for x in inputs
            ]
            # Smoke test: one forward pass triggers kernel compilation/launch.
            custom_model(*inputs)

    def correctness_execution(self, ref_src):
        """Exec the reference model source and compare it against ModelNew."""
        synchronize = torch_npu.npu.synchronize
        try:
            exec(ref_src, self.context)
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError(f"Failed to compile reference model: {str(e)}") from e
        return execute_template(synchronize, self.device, self.context)

    def time_execution(self, eval_target='ModelNew'):
        """Time ``eval_target`` with NPU events for device-accurate measurement."""
        event_class = torch_npu.npu.Event
        synchronize = torch_npu.npu.synchronize
        return time_execution_event_template(self.context, self.device, synchronize, event_class, eval_target)

    def cleanup(self):
        """Release the exec namespace, device caches, and the kernel cache dir."""
        del self.context
        tilelang.cache.clear_cache()
        torch_npu.npu.empty_cache()
        torch_npu.npu.synchronize(device=self.device)
        # Guard: cache_dir stays None unless set_cache_dir() was called, and
        # the directory may already be gone — don't let teardown crash.
        if self.cache_dir is not None:
            shutil.rmtree(self.cache_dir, ignore_errors=True)
