import argparse
import datetime
import json
import multiprocessing as mp
import os
import queue
import re
import time
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process, Queue

from openai import OpenAI

from utils.evaluation_utils import eval_single

# Root directory of the reference PyTorch problem files,
# laid out as <BENCHMARK_PATH>/<category>/<op>.py (see Env.__init__).
BENCHMARK_PATH = 'reference'

def get_code(path):
    """Read and return the entire text content of the file at ``path``.

    Opens the file with an explicit UTF-8 encoding for consistency with
    the UTF-8 writes elsewhere in this module (previously the read relied
    on the platform default encoding).
    """
    with open(path, 'r', encoding='utf-8') as f:
        return f.read()


# TODO: wrap the PyTorch functionality, the problem description, and the
# ncu-based evaluation loop.
class Env:
    """Evaluation environment for a single benchmark operator.

    Loads the reference PyTorch architecture for ``op`` under ``category``
    from BENCHMARK_PATH and evaluates candidate kernel implementations
    against it via ``eval_single``.
    """

    def __init__(self, op, category):
        self.op = op
        self.category = category
        # Reference implementation lives at <BENCHMARK_PATH>/<category>/<op>.py.
        self.problem_path = os.path.join(BENCHMARK_PATH, category, f'{op}.py')
        self.arch = get_code(self.problem_path)

    def reset(self):
        """Return the reference architecture source (the initial state)."""
        return self.arch

    def step(self, action):
        """Evaluate each candidate code string in ``action``.

        Args:
            action: iterable of candidate source-code strings. Duplicate
                strings collapse to a single entry (results are keyed by code).

        Returns:
            (state, best_result, done, info): ``best_result`` is the
            (code, eval_result) pair with the lowest mean runtime among
            candidates that compiled and were correct, or the first
            candidate's pair if none succeeded; ``done`` is always False.

        Raises:
            ValueError: if ``action`` yields no candidates.
        """
        results = {}
        for code in action:
            eval_result = eval_single(
                code, self.op, 'tilelang', category=self.category
            )
            results[code] = dict(eval_result)

        info = {
            'category': self.category,
            'op': self.op,
            'eval_result': results
        }

        results_list = list(results.items())
        if not results_list:
            # Guard: an empty action list previously crashed with an opaque
            # IndexError on results_list[0].
            raise ValueError('step() requires at least one candidate code string')

        valid_results = [
            (code, result) for code, result in results_list
            if result['compiled'] and result['correctness']
        ]

        # Best = lowest mean runtime among valid candidates; otherwise fall
        # back to the first candidate so callers still get diagnostics.
        best_result = (
            min(valid_results, key=lambda x: x[1]['performance']['mean'])
            if valid_results else results_list[0]
        )

        return self.arch, best_result, False, info

    
class Agent:
    """LLM-driven code-generation agent with reflection history.

    Builds prompts from the target architecture (plus the most recent
    previous attempt), samples ``n`` candidate implementations from the
    model in parallel, extracts the code from each response, and logs
    everything under ``logdir``.
    """

    def __init__(self, env: Env, api_key, api_url, model, temperature=0.7, max_tokens=8192, logdir='logdir/', n=1):
        self.env = env
        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens
        # Workaround: some environments set SSL_CERT_FILE/SSL_CERT_DIR to unreadable paths
        # causing httpx/OpenAI client initialization to fail with PermissionError.
        for _ssl_env_var in ("SSL_CERT_FILE", "SSL_CERT_DIR"):
            # pop() with a default is a no-op when the variable is unset, so
            # the previous membership check was redundant.
            os.environ.pop(_ssl_env_var, None)

        self.client = OpenAI(
            api_key=api_key,
            base_url=api_url,
        )
        self.n = n  # number of samples drawn per generation round
        self.hist_codes = []    # previously generated code strings
        self.hist_results = []  # matching (code, eval_result) feedback pairs

        self.logdir = logdir
        os.makedirs(self.logdir, exist_ok=True)

    def _parser_result(self, result):
        """Render a (code, eval_result) pair as a short feedback line for the prompt."""
        result = result[1]
        if result['compiled']:
            if result['correctness']:
                return f"compile success, correctness: {result['correctness']}, runtime: {result['runtime']} ms"
            else:
                return 'compile success, but wrong result: ' + str(result)
        else:
            return 'compile error: ' + str(result)

    def get_prompt(self):
        """Assemble the system/user message pair for the next generation round."""
        system_prompt = "You are an expert in TileLang Ascendc programming and performance optimization. Please write custom TileLang Ascendc kernels to replace the pytorch operators in the given architecture with fixed hyperparameters to get speedups. You are only limited by your imagination.\n"

        system_prompt += f"""
        Here's an example to show you the syntax of inline embedding custom TileLang Ascendc operators in torch: The example given architecture is: \n
        ``` \n
        {get_code('./reference/math/add.py')}
        ``` \n
        The example new arch with custom CUDA kernels looks like this: 
        <code>
        {get_code('./prompts/tilelang_new_model_add.py')}
        </code>\n
        ``` \n
        {get_code('./reference/matmul/gemm.py')}
        ``` \n
        The example new arch with custom CUDA kernels looks like this: 
        <code>
        {get_code('./prompts/tilelang_new_model_gemm.py')}
        </code>\n
        """

        user_prompt = f"""You are given the following architecture: \n
        ```
        {self.env.arch}
        ```
        """
        if self.hist_codes:
            # Only the most recent attempt is fed back ([-1:]) to keep the
            # prompt short.
            user_prompt += "Here are some of your previous attempts:\n"
            for i, (code, result) in enumerate(zip(self.hist_codes[-1:], self.hist_results[-1:])):
                user_prompt += f"Attempt {i+1}:\n"
                user_prompt += f"Code:\n<code>{code}</code>\n"
                user_prompt += f"Result: {self._parser_result(result)}\n"

        user_prompt += "Generate the better code named ModelNew with custom TileLang Ascendc operators! Name your generated output code ModelNew. Output the new code in codeblocks <code> </code>. Please generate real code, NOT pseudocode, make sure the code compiles and is fully functional on the given GPU device. Just output the new model code and NO testing code!"
        user_prompt += "Here are some hints you should know:\n"
        user_prompt += """
1. When a tensor is initialized directly with PyTorch, e.g., `a = torch.randn(M, K)`, PyTorch sets the tensor’s dtype to float by default, not float16. It will only be float16 if you explicitly set half, e.g., `a = torch.randn(M, K).half()`. Please keep this in mind.        
"""

        messages = [
            {'role': 'system', 'content': system_prompt},
            {'role': 'user', 'content': user_prompt}
        ]

        return messages

    def get_response(self, messages):
        """Sample ``self.n`` completions concurrently; returns their text contents.

        Uses a near-greedy temperature (0.1) when only one sample is drawn,
        and ``self.temperature`` when sampling several for diversity.
        """
        futures = []
        with ThreadPoolExecutor(max_workers=self.n) as executor:
            for _ in range(self.n):
                futures.append(executor.submit(self.client.chat.completions.create, model=self.model, messages=messages, temperature=self.temperature if self.n > 1 else 0.1, max_tokens=self.max_tokens, n=1))
        responses = [future.result().choices[0].message.content for future in futures]
        return responses

    def parse_response(self, response):
        """Extract the last code block from a model response.

        Tries <code>...</code> first, then ```python fences, then plain
        ``` fences.

        Raises:
            ValueError: if no code block is found (previously this crashed
                with a bare IndexError on ``code[-1]``).
        """
        code = re.findall('<code>(.+?)</code>', response, flags=re.DOTALL)
        if not code:
            code = re.findall('```python(.+?)```', response, flags=re.DOTALL)
        if not code:
            code = re.findall('```(.+?)```', response, flags=re.DOTALL)
        if not code:
            raise ValueError('No code block found in model response')
        return code[-1]

    def save_generation(self, messages, response, code, iteration=1, sample_id=1):
        """Persist the prompt, raw response, and extracted code for one sample."""
        iteration_dir = os.path.join(self.logdir, f'iter_{iteration}')
        os.makedirs(iteration_dir, exist_ok=True)

        with open(os.path.join(iteration_dir, f'messages.json'), 'w', encoding='utf-8') as f:
            json.dump(messages, f, indent=4, ensure_ascii=False)

        with open(os.path.join(iteration_dir, f'response_{sample_id}.txt'), 'w', encoding='utf-8') as f:
            f.write(response)

        with open(os.path.join(iteration_dir, f'code_{sample_id}.py'), 'w', encoding='utf-8') as f:
            f.write(code)

    def save_result(self, result, iteration=1):
        """Persist the evaluation info dict for one iteration as JSON."""
        iteration_dir = os.path.join(self.logdir, f'iter_{iteration}')
        os.makedirs(iteration_dir, exist_ok=True)
        with open(os.path.join(iteration_dir, f'result.json'), 'w', encoding='utf-8') as f:
            json.dump(result, f, indent=4, ensure_ascii=False)

    def run(self, iteration=0, result=None):
        """Run one generation round; returns the list of extracted code strings.

        Args:
            iteration: current reflection iteration (used for log paths).
            result: previous (code, eval_result) feedback pair, or None on
                the first round; appended to the history for prompting.
        """
        if result is not None:
            self.hist_results.append(result)
            self.hist_codes.append(result[0])

        messages = self.get_prompt()
        responses = self.get_response(messages)
        codes = []
        for i, response in enumerate(responses):
            code = self.parse_response(response)
            self.save_generation(messages, response, code, iteration=iteration, sample_id=i)
            codes.append(code)

        return codes

def run_one_problem(category, op, args, base_logdir):
    """Drive the generate-evaluate-reflect loop for one operator.

    Stops early as soon as a correct candidate is produced, saving it to
    the generation pool. Returns the list of per-iteration summaries.
    """
    environment = Env(op=op, category=category)
    problem_logdir = os.path.join(base_logdir, category, f'{op}')
    agent = Agent(
        environment,
        api_key=args.api_key,
        api_url=args.api_url,
        model=args.model,
        n=args.n,
        logdir=problem_logdir,
    )

    environment.reset()
    feedback = None
    history = []

    for step_idx in range(args.reflection_num):
        candidate_codes = agent.run(result=feedback, iteration=step_idx)
        _, feedback, _, info = environment.step(candidate_codes)
        agent.save_result(info, iteration=step_idx)

        # Tag the best result with its provenance before recording it.
        summary = feedback[1]
        summary['op'] = op
        summary['category'] = category
        summary['iteration'] = step_idx
        history.append(summary)
        print(summary)

        if summary['correctness']:
            # Correct solution found: persist it and stop reflecting.
            generation_path = os.path.join('generation_pool', category)
            os.makedirs(generation_path, exist_ok=True)
            with open(os.path.join(generation_path, f'{op}.py'), 'w', encoding='utf-8') as f:
                f.write(feedback[0])
            print(f"Results saved to generation_pool/{category}/{op}.py")
            break

    with open(os.path.join(problem_logdir, f'final_result.json'), 'w', encoding='utf-8') as f:
        json.dump(history, f, indent=4, ensure_ascii=False)

    return history


def run_one_problem_worker(category, op, args, base_logdir, result_queue):
    """Child-process entry point: run ``run_one_problem`` in isolation.

    Pushes a ``(success_flag, op, payload)`` tuple onto ``result_queue``:
    the results on success, or a "[FAIL] ..." message if any exception
    escaped the run.
    """
    try:
        outcome = run_one_problem(category, op, args, base_logdir)
        result_queue.put((True, op, outcome))
    except Exception as exc:
        result_queue.put((False, op, f"[FAIL] Process error: {str(exc)}"))


def _shutdown_process(process, grace=10):
    """Terminate ``process`` if still alive, escalating to kill after ``grace`` seconds."""
    if process.is_alive():
        process.terminate()
        process.join(timeout=grace)  # give it a chance to exit cleanly
        if process.is_alive():
            process.kill()  # force-kill as a last resort


def run_one_problem_in_process(category, op, args, base_logdir, timeout=180):
    """Run ``run_one_problem`` in a dedicated child process with a timeout.

    Isolating each run in its own process protects the parent from hangs
    and crashes inside evaluation.

    Args:
        category: problem category.
        op: operator name.
        args: parsed CLI arguments forwarded to the worker.
        base_logdir: base log directory.
        timeout: wall-clock limit in seconds (default 180, i.e. 3 minutes).

    Returns:
        tuple: (success, op, result) where ``result`` is either the list of
        per-iteration summaries or a "[FAIL] ..." message.
    """
    # Queue for passing the worker's result back to this process.
    result_queue = Queue()

    process = Process(target=run_one_problem_worker,
                      args=(category, op, args, base_logdir, result_queue))
    process.start()
    try:
        # join(timeout) replaces the previous 0.1s busy-wait polling loop.
        process.join(timeout=timeout)
        if process.is_alive():
            print(f"[WARNING] Process for {op} timeout after {timeout} seconds, terminating...")
            _shutdown_process(process)
            return False, op, f"[FAIL] Process timeout after {timeout} seconds"

        if process.exitcode != 0:
            return False, op, f"[FAIL] Process exited with code {process.exitcode}"

        # Use a blocking get with a short timeout instead of Queue.empty():
        # empty() can report True before the child's feeder thread has
        # flushed its data through the pipe, spuriously losing valid results.
        try:
            success, op_name, result = result_queue.get(timeout=5)
            return success, op_name, result
        except queue.Empty:
            return False, op, "[FAIL] No result from process"

    except Exception as e:
        print(f"[ERROR] Error managing process for {op}: {str(e)}")
        _shutdown_process(process)
        return False, op, f"[FAIL] Process management error: {str(e)}"
    finally:
        # Last-resort cleanup if any path above left the child running.
        _shutdown_process(process)


def run_problems_parallel(category, problem_set, args, base_logdir, max_workers=1):
    """Run multiple problems in parallel, each inside its own child process.

    Args:
        category: problem category.
        problem_set: list of operator names to run.
        args: parsed CLI arguments forwarded to each worker.
        base_logdir: base log directory.
        max_workers: maximum number of problems running concurrently.

    Returns:
        list: one entry per problem, in submission order; ``None`` marks a
        failed problem, otherwise the problem's result list.
    """
    # Guard: ThreadPoolExecutor raises ValueError for max_workers == 0,
    # which min(max_workers, len(problem_set)) produced for an empty set.
    if not problem_set:
        return []

    max_workers = min(max_workers, len(problem_set))
    print(f"=================Running {len(problem_set)} problems with {max_workers} parallel processes=================")

    results = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Per-problem timeout scales with samples per round (args.n) and
        # the number of reflection iterations.
        future_to_op = {
            executor.submit(
                run_one_problem_in_process, category, op, args, base_logdir,
                timeout=((120 + 30 * args.n) * args.reflection_num),
            ): op
            for op in problem_set
        }

        # Iterate in insertion (= submission) order so `results` stays
        # aligned with `problem_set`.
        for future in future_to_op:
            try:
                success, _op_name, result = future.result()
                results.append(result if success else None)
            except Exception:
                # A failure managing one problem must not abort the batch.
                results.append(None)

    print(f"=================All {len(problem_set)} problems completed=================")
    return results


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--op', type=str, default=None)
    parser.add_argument('--categories', type=str, default='math')
    parser.add_argument('--n', type=int, default=1)
    parser.add_argument('--reflection_num', type=int, default=1)
    parser.add_argument('--api_key', type=str, default=None)
    parser.add_argument('--api_url', type=str, default=None)
    parser.add_argument('--model', type=str, default=None)
    parser.add_argument('--max_workers', type=int, default=1, help='Maximum number of parallel processes')
    parser.add_argument('--debug', action='store_true', help='Debug mode')
    parser.add_argument('--filter_generation', action='store_true', help='Filter generation pool')
    args = parser.parse_args()

    # Timestamped run directory so successive runs never overwrite each other.
    _logdir = os.path.join('logdir/', f'{datetime.datetime.now().strftime("%Y%m%d_%H%M%S")}')
    os.makedirs(_logdir, exist_ok=True)

    categories = args.categories.split(',')
    for category in categories:
        if args.op is None:
            # Only pick up real problem files: os.listdir may also return
            # directories such as __pycache__ or unrelated files, and
            # split('.')[0] mangled any name containing extra dots.
            entries = os.listdir(os.path.join(BENCHMARK_PATH, category))
            problem_set = [
                os.path.splitext(entry)[0]
                for entry in entries if entry.endswith('.py')
            ]
            if args.filter_generation:
                # Skip operators that already have a saved solution.
                problem_set = [
                    op for op in problem_set
                    if not os.path.exists(os.path.join('generation_pool', category, op + '.py'))
                ]
        else:
            problem_set = [args.op]

        print(f"=================Category {category}=================")
        print(f"================={len(problem_set)} Problems=================")

        # Each problem runs in its own child process; in non-debug mode
        # several problems run in parallel via a thread pool of managers.
        if args.debug:
            results = run_one_problem_in_process(category, problem_set[0], args, _logdir)
        else:
            results = run_problems_parallel(category, problem_set, args, _logdir, args.max_workers)