import { ToolCallLLM } from "./base";
import type {
  ChatResponse,
  ChatResponseChunk,
  CompletionResponse,
  LLMChatParamsNonStreaming,
  LLMChatParamsStreaming,
  LLMCompletionParamsNonStreaming,
  LLMCompletionParamsStreaming,
  LLMMetadata,
  ToolCall,
  ToolCallLLMMessageOptions,
} from "./type";

/**
 * A fake LLM for tests: replays a canned response one character at a time
 * (with a configurable delay between characters) and can optionally simulate
 * tool-call responses when the caller passes `tools`.
 */
export class MockLLM extends ToolCallLLM {
  metadata: LLMMetadata;
  options: {
    timeBetweenToken: number;
    responseMessage: string;
    mockToolCallResponse?: {
      toolCalls: ToolCall[];
      responseMessage?: string;
    };
  };
  supportToolCall: boolean = true;

  /**
   * @param options.timeBetweenToken delay (ms) between streamed characters; defaults to 20
   * @param options.responseMessage canned text to return; defaults to "This is a mock response"
   * @param options.metadata LLM metadata to report; a stub default is used when omitted
   * @param options.mockToolCallResponse when set, chat calls that include `tools` return/stream these tool calls
   */
  constructor(options?: {
    timeBetweenToken?: number;
    responseMessage?: string;
    metadata?: LLMMetadata;
    mockToolCallResponse?: {
      toolCalls: ToolCall[];
      responseMessage?: string;
    };
  }) {
    super();
    const toolCallResponse = options?.mockToolCallResponse;
    this.options = {
      timeBetweenToken: options?.timeBetweenToken ?? 20,
      responseMessage: options?.responseMessage ?? "This is a mock response",
      // Only materialize the key when a mock tool-call response was supplied.
      ...(toolCallResponse ? { mockToolCallResponse: toolCallResponse } : {}),
    };
    this.metadata = options?.metadata ?? {
      model: "MockLLM",
      temperature: 0.5,
      topP: 0.5,
      contextWindow: 1024,
      tokenizer: undefined,
      structuredOutput: true,
    };
  }

  chat(
    params: LLMChatParamsStreaming<object, object>,
  ): Promise<AsyncIterable<ChatResponseChunk>>;
  chat(
    params: LLMChatParamsNonStreaming<object, object>,
  ): Promise<ChatResponse<object>>;
  async chat(
    params:
      | LLMChatParamsStreaming<object, object>
      | LLMChatParamsNonStreaming<object, object>,
  ): Promise<AsyncIterable<ChatResponseChunk> | ChatResponse<object>> {
    const { responseMessage, timeBetweenToken, mockToolCallResponse } =
      this.options;

    if (params.stream) {
      // Tool calls are simulated only when the caller provides tools AND a
      // mock tool-call response was configured.
      if (params.tools && mockToolCallResponse) {
        return (async function* () {
          // Single chunk carrying the configured tool calls, no text delta.
          yield {
            delta: "",
            raw: {},
            options: {
              toolCall: mockToolCallResponse.toolCalls,
            } as ToolCallLLMMessageOptions,
          };
        })();
      }
      // Plain text: stream one character per chunk with a delay between them.
      return (async function* () {
        for (const token of responseMessage) {
          yield { delta: token, raw: {} };
          await new Promise((resolve) => setTimeout(resolve, timeBetweenToken));
        }
      })();
    }

    if (params.tools && mockToolCallResponse) {
      return {
        message: {
          content: mockToolCallResponse.responseMessage || "",
          role: "assistant",
          options: {
            toolCall: mockToolCallResponse.toolCalls,
          } as ToolCallLLMMessageOptions,
        },
        raw: {},
      };
    }

    return {
      message: { content: responseMessage, role: "assistant" },
      raw: {},
    };
  }

  async complete(
    params: LLMCompletionParamsStreaming,
  ): Promise<AsyncIterable<CompletionResponse>>;
  async complete(
    params: LLMCompletionParamsNonStreaming,
  ): Promise<CompletionResponse>;
  async complete(
    params: LLMCompletionParamsStreaming | LLMCompletionParamsNonStreaming,
  ): Promise<AsyncIterable<CompletionResponse> | CompletionResponse> {
    const { responseMessage, timeBetweenToken } = this.options;
    // Small promise-based sleep used between streamed characters.
    const pause = (ms: number) =>
      new Promise<void>((resolve) => setTimeout(resolve, ms));

    if (params.stream) {
      return (async function* () {
        for (const token of responseMessage) {
          yield { delta: token, text: token, raw: {} };
          await pause(timeBetweenToken);
        }
      })();
    }

    return { text: responseMessage, raw: {} };
  }
}
