import { useFetchSSE } from "./useFetchSSE";

/**
 * Starts a streaming chat/completion request against a local Ollama server.
 *
 * Routing (based on the model name prefix):
 *   - "deepseek*"          -> POST {ollamaUrl}/api/chat, optionally with an
 *                             extra system prompt when `llmModel.deepseek` is set
 *   - "llama*" / "llava*"  -> POST {ollamaUrl}/api/generate with a raw prompt
 *                             and optional base64 images
 *   - anything else        -> POST {ollamaUrl}/api/chat with a single user message
 *
 * @param {{content: string, images?: string[]}} userChatItem - The user's message;
 *   `images`, when present, are data-URLs (PNG base64) attached to the message.
 * @param {{model: string, deepseek?: boolean}} llmModel - Selected model config.
 * @param {object} responseOptions - Passed through to `useFetchSSE` to handle
 *   the streamed response.
 * @returns {{controller: AbortController}} Controller whose `abort()` cancels
 *   the in-flight request.
 */
export function useChatByOllama(userChatItem, llmModel, responseOptions) {
  const { content: message } = userChatItem;
  const ollamaUrl = import.meta.env.VITE_APP_OLLAMA_API_URL;
  const { model } = llmModel;

  const userMessage = { role: "user", content: message };

  let url;
  let body;
  if (model.startsWith("deepseek")) {
    url = `${ollamaUrl}/api/chat`;
    const messages = [userMessage];
    // When the deepseek flag is enabled, append a system prompt asking the
    // model to explain its reasoning step by step (kept after the user
    // message to match the original request shape).
    if (llmModel.deepseek) {
      messages.push({
        role: "system",
        content: "你是一个喜欢逐步解释思考过程的助手",
      });
    }
    body = { messages, model, stream: true };
  } else if (model.startsWith("llama") || model.startsWith("llava")) {
    // Completion/vision models use /api/generate with a raw prompt. Images
    // are sent as bare base64, so strip the data-URL prefix. Guard with
    // `?? []`: text-only messages may have no `images` array at all.
    url = `${ollamaUrl}/api/generate`;
    const images = (userChatItem.images ?? []).map((dataUrl) =>
      dataUrl.replace("data:image/png;base64,", "")
    );
    body = { model, prompt: message, images };
  } else {
    url = `${ollamaUrl}/api/chat`;
    body = { messages: [userMessage], model, stream: true };
  }

  const controller = new AbortController();
  const { signal } = controller;
  useFetchSSE(
    url,
    {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify(body),
      signal,
    },
    responseOptions
  );
  return { controller };
}
