import { v4 as uuidv4 } from "uuid";
import {
  BaseMessage,
  isToolMessage,
  ToolMessage,
} from "@langchain/core/messages";
import { createDiagnoseErrorToolFields } from "@openswe/shared/open-swe/tools";

import { z } from "zod";
import { ModelTokenData, GraphConfig } from "@openswe/shared/open-swe/types";
import { createLogger, LogLevel } from "../../utils/logger.js";
import { getAllLastFailedActions } from "../../utils/tool-message-error.js";
import { getMessageString } from "../../utils/message/content.js";
import {
  loadModel,
  supportsParallelToolCallsParam,
} from "../../utils/llms/index.js";
import { LLMTask } from "@openswe/shared/open-swe/llm-task";
import { trackCachePerformance } from "../../utils/caching.js";
import { getModelManager } from "../../utils/llms/model-manager.js";

// Scoped logger for this node; the shared prefix keeps log lines greppable.
const logger = createLogger(LogLevel.INFO, "SharedDiagnoseError");

// System prompt template. Placeholders in {BRACES} are filled in by
// formatSystemPrompt before the model call.
// NOTE(review): verify {REPO_DIRECTORY} is substituted before the prompt is
// sent — it is not replaced in this file and may leak as a raw placeholder.
const systemPrompt = `You are operating as a terminal-based agentic coding assistant built by LangChain. It wraps LLM models to enable natural language interaction with a local codebase. You are expected to be precise, safe, and helpful.

The last few commands you tried to execute failed with an error. Please carefully diagnose the error, and provide a helpful explanation of exactly what the issue is, and how you can fix it.

Following these rules when diagnosing the error:
  - You should provide a clear, concise, and helpful explanation of exactly what the issue is, and how you can fix it.
  - You do not want to be overly verbose in your diagnosis. You should only include information which is directly relevant to diagnosing and fixing the error.
  - NEVER make up reasons, or make a guess as to what the issue is. Your reasoning must ALWAYS be grounded in the information provided to you.
    - Making up reasons, or making a guess can lead to more problems, so it's best to say you don't know rather than make up a reason.
  - Reference specific lines of code, or context from the conversation history to support your diagnosis.
  
Here are the last actions you attempted which resulted in errors:
{FAILED_ACTIONS_OUTPUTS}

Below is an up to date tree of the codebase (going 3 levels deep). This is up to date, and is updated after every action you take. Always assume this is the most up to date context about the codebase.
It was generated by using the \`tree\` command, passing in the gitignore file to ignore files and directories you should not have access to (\`git ls-files | tree --fromfile -L 3\`). It is always executed inside the repo directory: {REPO_DIRECTORY}
{CODEBASE_TREE}

Please carefully go over all of this information, and provide a helpful explanation of exactly what the issue is, and how you can fix it. When you are ready to provide your diagnosis, call the \`diagnose_error\` tool.
`;

// User prompt template; {CONVERSATION_HISTORY} is filled by formatUserPrompt.
const userPrompt = `Here is the full conversation history from the steps taken to complete the current task, along with the user's initial request:

{CONVERSATION_HISTORY}

Please carefully go over all of this information, and provide a helpful explanation of exactly what the issue is, and how you can fix it. When you are ready to provide your diagnosis, call the \`diagnose_error\` tool.`;

// Schema-backed tool definition the model is forced to call with its diagnosis.
const diagnoseErrorTool = createDiagnoseErrorToolFields();

/**
 * Builds the system prompt for the error-diagnosis model call by filling the
 * {FAILED_ACTIONS_OUTPUTS} and {CODEBASE_TREE} template slots — and, when a
 * repo directory is supplied, the {REPO_DIRECTORY} slot as well.
 *
 * Fix: the template references {REPO_DIRECTORY} (see the `tree` command line
 * in `systemPrompt`), but this function previously never substituted it, so
 * the raw placeholder leaked into the prompt. The new optional parameter is
 * backward-compatible: when omitted, output is identical to before.
 *
 * @param messages - Conversation history, mined for the last failed actions.
 * @param codebaseTree - `tree` command output for the repo; may be empty.
 * @param repoDirectory - Optional repo path to substitute for
 *   {REPO_DIRECTORY}; when omitted the placeholder is left untouched.
 * @returns The fully formatted system prompt string.
 */
const formatSystemPrompt = (
  messages: BaseMessage[],
  codebaseTree: string,
  repoDirectory?: string,
): string => {
  const lastFailedActions = getAllLastFailedActions(messages);

  let prompt = systemPrompt
    .replace(
      "{FAILED_ACTIONS_OUTPUTS}",
      `<failed-action-output>${lastFailedActions}</failed-action-output>`,
    )
    .replace(
      "{CODEBASE_TREE}",
      `<codebase-tree>\n${codebaseTree || "No codebase tree generated yet."}\n</codebase-tree>`,
    );

  if (repoDirectory) {
    prompt = prompt.replace("{REPO_DIRECTORY}", repoDirectory);
  }

  return prompt;
};

/**
 * Builds the user prompt by splicing the stringified conversation history
 * into the template's {CONVERSATION_HISTORY} slot.
 *
 * @param messages - Full conversation history for the current task.
 * @returns The formatted user prompt string.
 */
const formatUserPrompt = (messages: BaseMessage[]): string => {
  const history = messages.map(getMessageString).join("\n");
  return userPrompt.replace("{CONVERSATION_HISTORY}", history);
};

/**
 * Subset of graph state consumed and produced by the diagnose-error node.
 */
interface DiagnoseErrorInputs {
  // Full conversation history, including the failed ToolMessage(s) that
  // triggered this node.
  messages: BaseMessage[];
  // `tree` command output for the repo; may be empty before first generation.
  codebaseTree: string;
  // Token usage data produced by trackCachePerformance for the model call.
  tokenData?: ModelTokenData[];
}

// State delta returned by the node; only the keys it changed are set.
type DiagnoseErrorUpdate = Partial<DiagnoseErrorInputs>;

export async function diagnoseError(
  state: DiagnoseErrorInputs,
  config: GraphConfig,
): Promise<DiagnoseErrorUpdate> {
  const lastFailedAction = state.messages.findLast(
    (m) => isToolMessage(m) && m.status === "error",
  );
  if (!lastFailedAction?.content) {
    throw new Error("No failed action found in messages");
  }

  logger.info("The last few tool calls resulted in errors. Diagnosing error.");

  const model = await loadModel(config, LLMTask.SUMMARIZER);
  const modelManager = getModelManager();
  const modelName = modelManager.getModelNameForTask(
    config,
    LLMTask.SUMMARIZER,
  );
  const modelSupportsParallelToolCallsParam = supportsParallelToolCallsParam(
    config,
    LLMTask.SUMMARIZER,
  );
  const modelWithTools = model.bindTools([diagnoseErrorTool], {
    tool_choice: diagnoseErrorTool.name,
    ...(modelSupportsParallelToolCallsParam
      ? {
          parallel_tool_calls: false,
        }
      : {}),
  });

  const response = await modelWithTools.invoke([
    {
      role: "system",
      content: formatSystemPrompt(state.messages, state.codebaseTree),
    },
    {
      role: "user",
      content: formatUserPrompt(state.messages),
    },
  ]);

  const toolCall = response.tool_calls?.[0];
  if (!toolCall) {
    throw new Error("Failed to generate a tool call when diagnosing error.");
  }

  logger.info("Diagnosed error successfully.", {
    diagnosis: (toolCall.args as z.infer<typeof diagnoseErrorTool.schema>)
      .diagnosis,
  });

  const toolMessage = new ToolMessage({
    id: uuidv4(),
    tool_call_id: toolCall.id ?? "",
    content: `Successfully diagnosed error. Please use the diagnosis to continue with the next action.`,
    name: toolCall.name,
    status: "success",
    additional_kwargs: {
      is_diagnosis: true,
    },
  });

  return {
    messages: [response, toolMessage],
    tokenData: trackCachePerformance(response, modelName),
  };
}
