from kernel import Kernel
from concurrent.futures import ProcessPoolExecutor
import time
import subprocess
import json
import os
import multiprocessing

# Set multiprocessing start method to 'spawn' for NPU compatibility.
# NOTE(review): this runs at import time and, with force=True, overrides any
# start method the importing application may already have configured.
multiprocessing.set_start_method('spawn', force=True)

def _worker_process(kernel_data, info_data):
    """Recreate and execute a Kernel inside an isolated worker subprocess.

    Parameters
    ----------
    kernel_data : dict
        Kernel constructor arguments: 'op', 'category', 'language',
        'ref_src_path', and 'index'.
    info_data : dict
        Context/info payload produced by the compile phase; passed to
        set_context, execute, and cleanup.

    Returns
    -------
    The result dict returned by ``Kernel.execute``.
    """
    # Rebuild the kernel in this subprocess so operator registration stays
    # isolated from the parent process and from other candidates.
    kernel = Kernel(kernel_data['op'], kernel_data['category'],
                  kernel_data['language'], kernel_data['ref_src_path'],
                  index=kernel_data['index'])
    kernel.set_context(info_data)
    try:
        return kernel.execute(info_data)
    finally:
        # Always release device/process resources — the original skipped
        # cleanup entirely when execute() raised.
        kernel.cleanup(context=kernel.context, info=info_data)

class Env:
    """Environment that compiles and tests generated kernel code.

    Compilation is fanned out across a process pool; testing then runs
    serially, each kernel in its own subprocess, to avoid operator
    registration conflicts on the device.
    """

    def __init__(self, op, category, language, ref_src_path=None, max_workers=10):
        # Operator name, category, and target language for the kernels under test.
        self.op = op
        self.category = category
        self.language = language
        # Optional path to a reference implementation consumed by Kernel.
        self.ref_src_path = ref_src_path
        # Upper bound on concurrent compile worker processes.
        self.max_workers = max_workers

    def reset(self):
        """No per-episode state to reset; kept for RL-style env API compatibility."""
        pass

    def step(self, codes):
        """Compile all candidate codes in parallel, then test them serially.

        Parameters
        ----------
        codes : list[str]
            Candidate kernel source strings, one per kernel.

        Returns
        -------
        list[dict]
            One result dict per candidate, with the process-local 'info'
            payload removed.
        """
        kernels = [Kernel(self.op, self.category, self.language,
                          self.ref_src_path, index=i) for i in range(len(codes))]

        # Phase 1: parallel compilation of all kernels.
        print("Phase 1: Parallel compilation...")
        futures = []
        with ProcessPoolExecutor(max_workers=self.max_workers) as executor:
            for kernel, code in zip(kernels, codes):
                futures.append(executor.submit(kernel.compile, code))
                # Stagger submissions so compile processes do not contend
                # for device resources all at once.
                time.sleep(2)

        # Exiting the `with` block waits for shutdown, so every future is done.
        _results = [future.result() for future in futures]
        compile_success = [result['compiled'] for result in _results]
        print(f"Compile success: {compile_success.count(True)}/{len(kernels)}")

        # Phase 2: serial testing of compiled kernels with process isolation.
        print("Phase 2: Serial testing with process isolation...")
        results = []
        for i, (kernel, result) in enumerate(zip(kernels, _results)):
            if result['compiled']:
                print(f"Testing kernel {i+1}/{len(kernels)}")
                # Run in a separate process to avoid operator registration
                # conflicts with previously tested kernels.
                execute_result = self._execute_kernel_in_process(kernel, result['info'])
                result.update(execute_result)
            # Cleanup is required whether or not compilation succeeded
            # (the original duplicated this call in both branches).
            kernel.cleanup(context=kernel.context, info=result['info'])

            # 'info' carries process-local handles; drop it from the output.
            result.pop('info', None)
            results.append(result)

            # Delay between kernels so device resources are fully released.
            if i < len(kernels) - 1:
                time.sleep(3)

        return results

    def _execute_kernel_in_process(self, kernel, info):
        """Execute *kernel* in a fresh subprocess to avoid operator
        registration conflicts; return the worker's result dict."""
        # Plain-dict constructor args survive pickling to the 'spawn' child.
        kernel_data = {
            'op': kernel.op,
            'category': kernel.category,
            'language': kernel.language,
            'ref_src_path': kernel.ref_src_path,
            'index': kernel.index,
        }
        # A single-worker pool gives exactly one isolated child process.
        with ProcessPoolExecutor(max_workers=1) as executor:
            future = executor.submit(_worker_process, kernel_data, info)
            return future.result()

if __name__ == "__main__":
    # Smoke test: compile and test three copies of the same MSE-loss prompt.
    env = Env(op="mse_loss", category="loss", language="ascendc")
    code_path = "/home/ma-user/work/MultiKernelBench/prompts/ascendc_new_model_mse_loss.py"
    # Read the prompt once and close the file promptly — the original opened
    # the same file three times and never closed the handles.
    with open(code_path, 'r') as f:
        code = f.read()
    results = env.step([code] * 3)
    print(results)