import { Injectable } from '@nestjs/common';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { Annotation, MemorySaver, StateGraph } from '@langchain/langgraph';
import { ToolNode } from '@langchain/langgraph/prebuilt';
import { ChatOpenAI } from '@langchain/openai';
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts';

import { WeaviateService } from 'src/core/weaviate/weaviate.service';
import { CreateAgentDto } from './dto/create-agent.dto';
import { createSearchTool, createSceneTool } from './tools';

/**
 * Shared graph state: a single `messages` channel whose reducer appends
 * newly produced messages onto the accumulated conversation history.
 */
const GraphState = Annotation.Root({
  messages: Annotation<BaseMessage[]>({
    reducer: (existing, incoming) => [...existing, ...incoming],
  }),
});

@Injectable()
export class AgentService {
  /**
   * Compiled LangGraph runnable (agent ⇄ tools loop) shared by all requests.
   * Per-conversation state is isolated via the `thread_id` passed at
   * invocation time, persisted by the in-memory checkpointer.
   */
  app: ReturnType<StateGraph<any>['compile']>;

  constructor(readonly weaviateService: WeaviateService) {
    const tools = [createSearchTool(weaviateService), createSceneTool()];

    const toolNode = new ToolNode<typeof GraphState.State>(tools);

    // Streaming chat model with tool-calling enabled; model name, API key and
    // base URL all come from the environment.
    const llm = new ChatOpenAI({
      model: process.env.OPENAI_MODEL,
      apiKey: process.env.OPENAI_API_KEY,
      configuration: { baseURL: process.env.OPENAI_API_BASE },
      streaming: true,
      streamUsage: false,
    }).bindTools(tools);

    // Route back to the tool node while the last AI message still requests
    // tool calls; otherwise terminate the graph.
    function shouldContinue(state: typeof GraphState.State) {
      const messages = state.messages;
      const lastMessage = messages[messages.length - 1] as AIMessage;

      return lastMessage.tool_calls?.length ? 'tools' : '__end__';
    }

    // Format the system prompt plus accumulated messages and invoke the LLM.
    // The returned message is appended to state via the GraphState reducer.
    async function callModel(state: typeof GraphState.State) {
      const prompt = ChatPromptTemplate.fromMessages([
        [
          'system',
          `You are a helpful AI assistant, collaborating with other assistants. Use the provided tools to progress towards answering the question. 
          If you are unable to fully answer, that's OK, another assistant with different tools will help where you left off. Execute what you can to make progress.
           If you or any of the other assistants have the final answer or deliverable, prefix your response with FINAL ANSWER so the team knows to stop. 
           You have access to the following tools: {tool_names}.\n{system_message}\nCurrent time: {time}.`,
        ],
        new MessagesPlaceholder('messages'),
      ]);

      const formattedPrompt = await prompt.formatMessages({
        system_message: 'You are helpful XMagital HMI 2D and 3D Chatbot Agent.',
        time: new Date().toISOString(),
        tool_names: tools.map((tool) => tool.name).join(', '),
        messages: state.messages,
      });

      const result = await llm.invoke(formattedPrompt);

      return { messages: [result] };
    }

    // Graph shape: __start__ -> agent -> (tools -> agent)* -> __end__
    const workflow = new StateGraph(GraphState)
      .addNode('agent', callModel)
      .addNode('tools', toolNode)
      .addEdge('__start__', 'agent')
      .addConditionalEdges('agent', shouldContinue)
      .addEdge('tools', 'agent');

    // NOTE(review): MemorySaver keeps conversation history in-process only —
    // confirm a persistent checkpointer isn't needed for multi-instance
    // deployments.
    const checkpointer = new MemorySaver();

    this.app = workflow.compile({ checkpointer }) as ReturnType<StateGraph<any>['compile']>;
  }

  /**
   * Runs the agent graph to completion for one user query and returns the
   * content of the final message.
   *
   * @param createAgentDto carries the user `query` text
   * @param id conversation thread id used to resume checkpointed state
   */
  async getChatResponse(createAgentDto: CreateAgentDto, id: string) {
    const { query } = createAgentDto;

    const finalState = await this.app.invoke(
      { messages: [new HumanMessage(query)] },
      { recursionLimit: 15, configurable: { thread_id: id } },
    );

    // Fix: removed a leftover console.log that dumped user-facing chat
    // content to stdout, and compute the last-message lookup once.
    const lastMessage = finalState.messages[finalState.messages.length - 1];
    return lastMessage.content;
  }

  /**
   * Streams structured lifecycle events for one user query: tool start/end,
   * chat-model start/end, incremental model tokens, and the outer
   * LangGraph chain start/end.
   *
   * @param createAgentDto carries the user `query` text
   * @param id conversation thread id used to resume checkpointed state
   */
  async *getChatResponseStream(createAgentDto: CreateAgentDto, id: string) {
    const { query } = createAgentDto;

    const stream = this.app.streamEvents(
      { messages: [new HumanMessage(query)] },
      { recursionLimit: 15, configurable: { thread_id: id }, version: 'v2' },
    );

    for await (const event of stream) {
      switch (event.event) {
        case 'on_tool_start':
          yield { event: 'TOOL_START', name: event.name };
          break;
        case 'on_tool_end':
          yield { event: 'TOOL_END', name: event.name };
          break;
        case 'on_chat_model_start':
          yield { event: 'CHAT_MODEL_START', name: event.name };
          break;
        case 'on_chat_model_end':
          yield { event: 'CHAT_MODEL_END', name: event.name };
          break;
        case 'on_chat_model_stream':
          // Only forward chunks that actually carry token content.
          if (event.data.chunk?.content) {
            yield {
              event: 'CHAT_MODEL_STREAM',
              name: event.name,
              // Fix: the `?? ''` fallback was unreachable after the
              // truthiness guard above, so it has been dropped.
              content: event.data.chunk.content as string,
            };
          }
          break;
        // Only surface the outermost graph's chain events, not every
        // sub-runnable's.
        case 'on_chain_start':
          if (event.name === 'LangGraph') {
            yield { event: 'CHAIN_START', name: event.name };
          }
          break;
        case 'on_chain_end':
          if (event.name === 'LangGraph') {
            yield { event: 'CHAIN_END', name: event.name };
          }
          break;
      }
    }
  }
}
