from utils.api_connector import ModelConnector
from utils.common import extract_code_block
from utils.doc_tree_indexer import DocTreeIndexer

def _generate_prompt(sample_case, doc_splitter: DocTreeIndexer):
    """
    Build the LLM prompt for a test case using retrieval-augmented context.

    For each step in the test case, the step description is used to retrieve
    relevant documentation chunks from *doc_splitter*; the chunks are appended
    as supplementary information. The prompt then specifies the required
    output format: a single fenced code block of CLI commands.

    Args:
        sample_case: Test case dict; must contain a "steps" list where each
            step is a dict with a "description" key.
        doc_splitter: Semantic document indexer; retrieve() is assumed to
            return dicts with "score" (float) and "text" (str) keys.

    Returns:
        The assembled prompt string.
    """
    # Collect pieces and join once instead of repeated string concatenation.
    parts = [
        "Based on the following test steps and supplementary information, please determine:\n"
        "1. Whether each step has implicit requirements for additional prerequisite or post-requisite steps.\n"
        "2. How each step can be implemented using the command-line interface (CLI).\n\n"
    ]
    for step in sample_case["steps"]:
        parts.append(f"---\nStep Description: {step['description']}\n\nSupplementary Information:\n")

        # Retrieval-augmented context: pull doc chunks relevant to this step.
        relevant_chunks = doc_splitter.retrieve(step["description"])
        for j, chunk in enumerate(relevant_chunks):
            # Labels kept in English so the prompt language is consistent
            # (the original mixed Chinese labels into an English prompt).
            parts.append(f"  Snippet {j + 1} (similarity: {chunk['score']:.4f}): {chunk['text']}\n")
        parts.append("\n\n")

    parts.append("---\n")

    parts.append("## Output Format\n\n")
    parts.append(
        "Please provide the CLI commands for the test steps described above in the following format:\n\n"
        "```\n"
        "# Step 1: Description\n"
        "# Necessary explanation\n"
        "CLI Command 1\n"
        "CLI Command 2\n\n"
        "# Step 2: Description\n"
        "# Necessary explanation\n"
        "CLI Command 3\n"
        "(and so on...)\n"
        "```"
    )
    parts.append("\n\nAdditional Requirements:\n")
    parts.append("1. Ensure the output contains exactly one code block enclosed within triple backticks (```). All content outside the code block will be ignored.\n")
    parts.append("2. Each CLI command must include the prompt indicating the current view, ensuring that transitions between views are explicitly stated.\n")
    parts.append("3. Assume that multiple commands within the same code block will be executed sequentially. Consider dependencies between commands, including prerequisite or post-requisite steps, and transitions between views.\n")
    parts.append("4. If the generated commands include any uncertain parameters or variables, use the <var-name> format to explicitly represent configurable parameters.\n")

    return "".join(parts)
    
def main(llm: ModelConnector, doc_splitter: DocTreeIndexer, sample_case: dict, verbose: bool = False) -> str:
    """
    Generate CLI commands for a test case by prompting the LLM.

    Builds a retrieval-augmented prompt from the test case's steps, sends it
    to the model, and extracts the single fenced code block from the reply.

    Args:
        llm: Connector used to send the prompt to the language model.
        doc_splitter: Semantic document indexer used to retrieve supporting
            documentation chunks for each step.
        sample_case: Test case dict; must contain a "steps" list.
        verbose: When True, print the generated prompt, the raw model reply,
            and the extracted CLI code to stdout.

    Returns:
        The CLI code extracted from the model reply. May be falsy (e.g. None
        or an empty string) when the reply contains no code block — callers
        should check before use.
    """
    # Build the prompt from the test case and retrieved documentation.
    prompt = _generate_prompt(sample_case, doc_splitter)

    if verbose:
        print("Generated Prompt:")
        print(prompt)

    reply = llm.send_request(prompt, verbose=verbose)

    cli_code = extract_code_block(reply)
    if verbose:
        print("Generated CLI Command:")
        print(reply)
        print("Extracted CLI Code:")
        print(cli_code if cli_code else "No CLI code extracted.")

    # TODO: validate the generated code:
    # 1. Check that the interfaces it references actually exist in the CLI code.
    # 2. Check embedded test data (port names, etc.); uncertain values should
    #    be flagged as <var-name> placeholders and passed in as parameters.

    return cli_code
