# -*- coding: utf-8 -*-
# @Time    : 2025/2/10 10:49
# @Author  : 
# @File    : eval.py
# @Software: PyCharm 
# @Comment :

import os
import re
import csv
import json
import torch
import tempfile
import transformers
import subprocess
import argparse

from tqdm import tqdm
from peft import PeftModel
from pathlib import Path
from pyverilog.vparser.parser import parse
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer, BitsAndBytesConfig

from config import Config

# os.environ["CUDA_VISIBLE_DEVICES"] = "1"

def load_model(lora=False):
    """
    Load the base causal LM and its tokenizer; optionally merge a LoRA adapter.

    @param lora: when True, load the LoRA adapter from Config.lora_model_path
                 and merge its weights into the base model.
    @return: (model, tokenizer) tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        Config.pretrained_model_path,
        use_fast=False,
        trust_remote_code=True,
        padding_side='left',  # left padding keeps generated tokens aligned at the batch end
    )

    model = AutoModelForCausalLM.from_pretrained(
        pretrained_model_name_or_path=Config.pretrained_model_path,
        # To run quantized, pass quantization_config=BitsAndBytesConfig(load_in_8bit=True)
        # together with low_cpu_mem_usage=True here.
        trust_remote_code=True,
        torch_dtype=Config.torch_dtype,
        use_cache=True,
        device_map="auto",
    )

    if lora:
        # Attach the adapter and fold its weights into the base model so
        # inference no longer needs the PEFT hooks.
        model = PeftModel.from_pretrained(model, Config.lora_model_path)
        model.merge_and_unload()

    return model, tokenizer

def gen_verilog(prompts, model, tokenizer):
    """
    Generate Verilog completions for a batch of prompts.

    @param prompts: list[str] of prompt texts, generated as one batch.
    @param model: causal LM supporting .generate().
    @param tokenizer: matching tokenizer (padding_side='left' expected).
    @return: list[str] of decoded completions with the prompt stripped.
    """
    # Many causal LMs ship without a pad token; reuse EOS for batching.
    tokenizer.pad_token = tokenizer.eos_token

    inputs = tokenizer(
        prompts,
        return_tensors="pt",
        padding=True,
        add_special_tokens=True,
        return_attention_mask=True,
    )
    # Follow the model's own device instead of hard-coding 'cuda' so this
    # also works on CPU-only hosts and with device_map="auto" sharding.
    input_ids = inputs["input_ids"].to(model.device)
    attention_masks = inputs["attention_mask"].to(model.device)

    generation_config = transformers.GenerationConfig(
        do_sample=True,
        max_new_tokens=500,
        temperature=0.95,
        use_cache=True,
    )

    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            attention_mask=attention_masks,
            generation_config=generation_config,
            pad_token_id=tokenizer.eos_token_id,
        )

    output_texts = []
    for i in range(len(input_ids)):
        # Slice off the full (left-padded) prompt. The previous `- 1` offset
        # leaked the last prompt token into every completion.
        new_tokens = generation_output[i][len(input_ids[i]):]
        output_texts.append(
            tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
        )

    return output_texts

def syntactic_analyse(verilog_code):
    """
    Syntax check: return True iff pyverilog can parse the given Verilog source.

    @param verilog_code: Verilog source text to check.
    """
    # pyverilog's parser only accepts file paths, so dump the code to a
    # temp file first (delete=False: the path must outlive the `with`).
    with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix='.v') as temp_file:
        temp_file.write(verilog_code)
        temp_file_name = temp_file.name

    try:
        parse([temp_file_name])
        return True
    except Exception:
        return False
    finally:
        # Remove the temp file ourselves — the original leaked one file per call.
        os.remove(temp_file_name)

def func_analyse(task, verilog_code):
    """
    Functional test: prepend the generated module to the task's testbench,
    compile with iverilog, simulate with vvp, and return True iff the
    simulation output contains "Your Design Passed".

    @param task: pathlib.Path of the task directory (contains testbench.v).
    @param verilog_code: generated Verilog module source.
    """
    testbench_path = task / 'testbench.v'

    try:
        with open(testbench_path, 'r') as file:
            testbench_code = file.read()

        # Write DUT + testbench as one combined file for compilation.
        with open('testbench.v', 'w') as file:
            file.write(verilog_code + "\n" + testbench_code)

        try:
            # Compile the combined testbench, then run the simulation.
            # Arg lists (shell=False) avoid unnecessary shell interpretation.
            subprocess.run(["iverilog", "-o", "simulation", "testbench.v"], check=True)
            result = subprocess.run(
                ["vvp", "simulation", "-lxt2"],
                check=True,
                stdout=subprocess.PIPE,
                text=True,
                timeout=20,  # guard against simulations that never terminate
            )
        except (subprocess.CalledProcessError, subprocess.TimeoutExpired, OSError):
            # Compile error, failing/hanging simulation, or missing toolchain.
            return False

        return "Your Design Passed" in result.stdout
    finally:
        # Clean up artifacts. Guard both removes: the run may fail before
        # either file exists (the original raised FileNotFoundError then).
        if os.path.exists('testbench.v'):
            os.remove('testbench.v')
        if os.path.exists('simulation'):
            os.remove('simulation')
    
def fetch_task_prompt(task):
    """Return the design-description text stored under the task directory."""
    description_path = task / 'design_description.txt'
    with open(description_path, 'r') as desc_file:
        return desc_file.read()

def load_jsonl(file_path):
    """Parse a JSON-Lines file into a list of objects (one per line)."""
    records = []
    with open(file_path, 'r') as jsonl_file:
        for raw_line in jsonl_file:
            records.append(json.loads(raw_line))
    return records

def auto_rtllm(model_name, model, tokenizer):
    """
    RTLLM benchmark driver — currently a no-op: the full implementation
    below is commented out and kept as a reference.

    The disabled code walks every task folder under ./eval/rtllm,
    batch-generates Verilog (batches of 8), extracts the module...endmodule
    span, runs the syntax and functional checks, and writes per-task rows
    plus overall pass rates to <model_name>.csv.

    NOTE(review): main() currently calls this without model_name — confirm
    the call site before re-enabling this body.
    """
    # # Iterate over every task subfolder under base_path
    # base_path = Path('./eval/rtllm')
    # csv_file_path = f"{model_name}.csv"

    # with open(csv_file_path, mode='w', newline='') as file:
    #     writer = csv.writer(file)
    #     writer.writerow(['Task', 'Prompt', 'Verilog Code', 'Syntax Check', 'Functionality Check'])

    #     cnt, syntax_pass_cnt, function_pass_cnt = 0, 0, 0

    #     tasks, prompts = [], []
    #     for task in base_path.iterdir():
    #         if task.is_dir():
    #             prompt = fetch_task_prompt(task)
    #             tasks.append(task)
    #             prompts.append(prompt)
        
    #     verilog_codes = []
    #     for idx in tqdm(range(0, len(prompts) // 8 + 1)):
    #         if idx * 8 + 7 < len(prompts):
    #             prompts_batch = prompts[idx*8:idx*8+8]
    #         else:
    #             prompts_batch = prompts[idx*8:]
    #         verilog_codes_batch = gen_verilog(prompts_batch, model, tokenizer)
    #         print(verilog_codes_batch)
    #         verilog_codes.extend(verilog_codes_batch)

    #     for i, verilog_code in enumerate(verilog_codes):
    #         task = tasks[i]
    #         prompt = prompts[i]
    #         verilog_code = verilog_code.replace("Module", "module")

    #         phrase = "Give me the complete code."

    #         # Find "Give me the complete code." in verilog_code and drop everything before it
    #         index = verilog_code.find(phrase)
    #         if index != -1:
    #             verilog_code = verilog_code[index + len(phrase):]

    #         pattern = re.compile(r'module\s+.*?endmodule', re.DOTALL)  # re.DOTALL lets . match any character, newlines included
    #         mat = pattern.search(verilog_code)

    #         if mat:
    #             verilog_code = mat.group()
    #             # print(f"Task: {task}, Verilog code: {verilog_code}")       
    #             syntactic_flag = syntactic_analyse(verilog_code)
    #             if syntactic_flag:
    #                 functionality_flag = func_analyse(task, verilog_code)
    #             else:
    #                 functionality_flag = False
    #         else:
    #             syntactic_flag = False
    #             functionality_flag = False

    #         cnt += 1
    #         if syntactic_flag:
    #             syntax_pass_cnt += 1
    #         if functionality_flag:
    #             function_pass_cnt += 1

    #         print(f"Task: {task}, Syntax: {syntactic_flag}, Function: {functionality_flag}\n")
    #         writer.writerow([task.name, prompt, verilog_code, syntactic_flag, functionality_flag])

    #     print(f"Syntax Pass Rate: {syntax_pass_cnt/cnt*100}%, Function Pass Rate: {function_pass_cnt/cnt*100}%")
    pass

def auto_verilog_eval(model, tokenizer, mode):
    """
    Run the verilog-eval benchmark: generate completions for every task,
    dump them to a JSONL result file, then score that file with the
    benchmark's evaluate_functional_correctness tool.

    @param model: loaded causal LM.
    @param tokenizer: matching tokenizer.
    @param mode: 'Human' or 'Machine' — selects the description/data set.
    """
    # Fixed: the "_lora" expression previously reused single quotes inside a
    # single-quoted f-string (a SyntaxError before Python 3.12).
    result_file = f'./eval-results/{Config.model_name}_{"_lora" if Config.eval_lora else ""}_verilog-eval_{mode}.jsonl'
    log_file = f'./eval-results/{Config.model_name}_{"_lora" if Config.eval_lora else ""}_verilog-eval_{mode}.log'
    # Make sure the output directory exists (exist_ok avoids failure on re-runs).
    directory = os.path.dirname(result_file)
    os.makedirs(directory, exist_ok=True)

    descriptions = load_jsonl(f'./eval/verilog-eval/descriptions/VerilogDescription_{mode}.jsonl')
    # Fixed: the data set is a .jsonl file — the original tried to read a
    # non-existent .log (the scorer below also uses data/VerilogEval_{mode}.jsonl).
    data_list = load_jsonl(f'./eval/verilog-eval/data/VerilogEval_{mode}.jsonl')

    # Dict[task_id, module_top]: maps each task to its required module header.
    module_tops = {data.get('task_id'): data.get('prompt') for data in data_list}

    tasks, prompts = [], []
    for desc in descriptions:
        task_id = desc.get('task_id')
        detail = desc.get('detail_description')
        if task_id in module_tops:
            tasks.append(task_id)
            module_top = module_tops[task_id]
            # NOTE(review): the continuation line embeds leading spaces in the
            # prompt text; kept byte-identical to preserve the exact prompt.
            prompts.append(f"{detail}\nThe name and interface of the module must be as follows:\n\
                             {module_top}请仅请为我生成完整代码，不要有多余输出:\n```verilog\n")
        else:
            print(f"[INF]Task:{task_id} not in module_tops")

    batch_size = Config.eval_batch
    module_pattern = re.compile(r'module\s+.*?endmodule', re.DOTALL)
    module_header_pattern = re.compile(r'module\s+.*?;', re.DOTALL)
    with open(result_file, 'w') as f:
        for idx in tqdm(range(0, len(tasks), batch_size)):
            prompts_batch = prompts[idx:idx + batch_size]
            verilog_codes_batch = gen_verilog(prompts_batch, model, tokenizer)

            for i, verilog_code in enumerate(verilog_codes_batch):
                task_id = tasks[idx + i]
                mat = module_pattern.search(verilog_code)
                if mat:
                    # Strip the module header (from `module` to the first `;`):
                    # the benchmark prepends its own header to each completion.
                    verilog_code = module_header_pattern.sub("", mat.group()).strip()
                else:
                    verilog_code = "[ERO]gen_verilog() error!"
                new_data = {
                    "task_id": task_id,
                    "completion": verilog_code
                }
                f.write(json.dumps(new_data) + '\n')

    try:
        # Score the generated completions with the benchmark's own tool.
        result = subprocess.run(
            f"cd ./eval/verilog-eval && evaluate_functional_correctness ../../{result_file} --problem_file data/VerilogEval_{mode}.jsonl",
            shell=True,
            check=True,
            stdout=subprocess.PIPE,
            text=True
        )
        with open(log_file, 'w') as f:
            f.write(result.stdout)
    except subprocess.CalledProcessError as e:
        print(f"An error occurred while evaluating functional correctness: {e}")
        
def main(benchmark, mode=None):
    """
    Load the model and dispatch to the selected benchmark.

    Benchmark:
        'rtllm-v2.0.0': https://github.com/hkust-zhiyao/RTLLM (as of 2025/02/18
            not yet released, but published on the main branch)
        'verilog-eval-v1.0.0': https://github.com/NVlabs/verilog-eval/blob/release/1.0.0

    @param benchmark: 'rtllm' or 'verilog-eval'.
    @param mode: 'Human' or 'Machine' (verilog-eval only).
    """
    # Honor the config flag instead of hard-coding LoRA on — the result-file
    # names in auto_verilog_eval already key off Config.eval_lora.
    model, tokenizer = load_model(Config.eval_lora)

    if benchmark == 'verilog-eval':
        auto_verilog_eval(model, tokenizer, mode)
    elif benchmark == 'rtllm':
        # Fixed: auto_rtllm takes (model_name, model, tokenizer); the original
        # call omitted model_name and raised TypeError.
        auto_rtllm(Config.model_name, model, tokenizer)
    else:
        # Fixed: the message previously advertised 'verilogeval', which is not
        # an accepted value ('verilog-eval' is).
        print("Invalid benchmark type. Please choose from 'rtllm' or 'verilog-eval'.")


if __name__ == "__main__":
    # CLI entry point: choose a benchmark and, for verilog-eval, a data-set mode.
    arg_parser = argparse.ArgumentParser(description="Run model benchmark tests.")
    arg_parser.add_argument('--benchmark', type=str, default="verilog-eval",
                            choices=['rtllm', 'verilog-eval'],
                            help='The benchmark type to run.')
    arg_parser.add_argument('--mode', type=str, choices=['Human', 'Machine'],
                            help='The mode for verilog-eval benchmark.')
    cli_args = arg_parser.parse_args()

    # argparse cannot express "required only for verilog-eval", so enforce it here.
    if cli_args.benchmark == 'verilog-eval' and not cli_args.mode:
        arg_parser.error("--mode is required when benchmark is 'verilog-eval'")

    main(cli_args.benchmark, cli_args.mode)

    
