defmodule SockWorkstation.GrpcLLMClient do
  @moduledoc """
  gRPC client GenServer that streams chat completions from an LLM service.

  One process is started per session (registered in
  `SockWorkstation.SessionRegistry` under `{session_id, "grpc_llm"}`); it holds
  the open gRPC channel, the queued request messages, the answer currently
  being streamed, and the accumulated conversation history.
  """
  use GenServer
  alias Llm.ChatService.Stub
  alias Llm.ChatRequest
  alias Llm.ChatMessage
  alias SockWorkstation.CoopMgrGen

  # Address of the gRPC LLM server.
  @server_address "localhost:50051"
  @name __MODULE__

  # NOTE(review): this struct is not used for the server state — `init/1`
  # returns a plain map with different keys (`current_answer`, `session_id`,
  # `self_pid`). Kept only for backward compatibility with any external code
  # that builds `%__MODULE__{}` values; confirm before removing.
  defstruct role: "",
            channel: nil,
            history: [],
            messages: [],
            content: ""

  ## Client API

  @doc """
  Starts a client for `session_id`, registered under `{session_id, "grpc_llm"}`
  in the session registry.
  """
  def start_link(session_id) do
    GenServer.start_link(@name, session_id, name: via_tuple(session_id, "grpc_llm"))
  end

  @doc "Returns the full server state map (synchronous)."
  def get_state(session_id) do
    GenServer.call(via_tuple(session_id, "grpc_llm"), :get_state)
  end

  @doc "Flushes the finished exchange into `history` and resets the buffers."
  def update_history(session_id) do
    GenServer.cast(via_tuple(session_id, "grpc_llm"), :update_history)
  end

  @doc "Appends a streamed chunk to the answer currently being assembled."
  def current_answer(session_id, answer) do
    GenServer.cast(via_tuple(session_id, "grpc_llm"), {:current_answer, answer})
  end

  @doc "Asynchronously starts one request/stream cycle with the queued messages."
  def make_chat_loop(session_id) do
    GenServer.cast(via_tuple(session_id, "grpc_llm"), :chat_loop)
  end

  @doc "Queues a new user message and triggers a chat round."
  def send_new_msg(session_id, message) do
    GenServer.cast(
      via_tuple(session_id, "grpc_llm"),
      {:new_message, %ChatMessage{role: "user", content: message}}
    )
  end

  ## GenServer callbacks

  @impl true
  def init(session_id) do
    # Connect to the gRPC server; crash (and let the supervisor retry) on failure.
    {:ok, channel} = GRPC.Stub.connect(@server_address)

    self_pid = self()
    # Tell the session coordinator which pid serves this session's LLM.
    CoopMgrGen.fetch_llm_pid(session_id, self_pid)

    # Seed the conversation with a system prompt plus an opening user greeting.
    messages = [
      %ChatMessage{
        role: "system",
        content: "今天是#{Date.utc_today()}，你是一个乐于解答各种问题的助手，你的任务是为用户提供专业、准确、有见地的建议。"
      },
      %ChatMessage{role: "user", content: "你好"}
    ]

    # Kick off the first request asynchronously so init/1 returns promptly.
    # The registry name is already registered at this point, so the self-cast
    # is safe and will be processed right after init/1 returns.
    make_chat_loop(session_id)

    {:ok,
     %{
       channel: channel,
       messages: messages,
       current_answer: "",
       history: [],
       session_id: session_id,
       self_pid: self_pid
     }}
  end

  @impl true
  def handle_call(:get_state, _from, state) do
    {:reply, state, state}
  end

  @impl true
  def handle_cast(:chat_loop, state) do
    # NOTE(review): the response stream is consumed synchronously here, so the
    # server blocks (queuing further casts) until the whole answer arrives.
    chat_loop(state.session_id, state.channel, state.messages)
    {:noreply, state}
  end

  def handle_cast({:new_message, new_message}, state) do
    msgs = state.messages ++ [new_message]

    make_chat_loop(state.session_id)
    {:noreply, %{state | messages: msgs}}
  end

  # Archive the finished exchange: move the request messages plus the
  # assembled assistant answer into `history`, then clear the buffers.
  def handle_cast(:update_history, state) do
    msgs =
      Enum.map(state.messages, fn message ->
        %{role: message.role, content: message.content}
      end)

    updated_history =
      state.history ++
        msgs ++
        [
          %{role: "assistant", content: state.current_answer}
        ]

    # NOTE(review): clearing `messages` means the next request carries only the
    # newly queued user message — the model receives no prior context (the
    # `history` list is never sent). Confirm the server is stateful, or carry
    # the assistant reply forward in `messages`, before relying on this.
    new_state = %{state | history: updated_history, messages: [], current_answer: ""}
    {:noreply, new_state}
  end

  def handle_cast({:current_answer, answer}, state) do
    {:noreply, %{state | current_answer: state.current_answer <> answer}}
  end

  ## Internal helpers

  # Sends one streaming chat request and consumes the reply stream, echoing
  # each delta to stdout and accumulating it into the server state.
  defp chat_loop(session_id, channel, messages) do
    request = %ChatRequest{model: "glm-4-0520", messages: messages}

    IO.puts("服务器响应：")

    case Stub.chat_stream(channel, request) do
      {:ok, stream} ->
        Enum.each(stream, fn
          {:ok, response} ->
            IO.write(response.delta)
            current_answer(session_id, response.delta)

          {:error, reason} ->
            # Previously unmatched: a mid-stream error element crashed the
            # process with a FunctionClauseError. Report it instead.
            IO.puts("请求失败: #{inspect(reason)}")
        end)

        # Newline plus ANSI "erase line" to tidy the terminal after streaming.
        IO.write("\n\e[2K")

        update_history(session_id)

      {:error, reason} ->
        IO.puts("请求失败: #{inspect(reason)}")
    end
  end

  # Registry-based via-tuple naming this session's process of the given type.
  defp via_tuple(session_id, type) do
    {:via, Registry, {SockWorkstation.SessionRegistry, {session_id, type}}}
  end
end

# 继续对话
# next_question = IO.gets("\n您还有其他问题吗？\n\n请输入（或输入 'exit' 退出）：\n\n") |> String.trim()

# if next_question == "exit" do
#   IO.puts("对话结束。")
# else
#   # 将新问题添加到消息列表
#   GenServer.cast(
#     @name,
#     {:new_message, ChatMessage.new(role: "user", content: next_question)}
#   )

# defmodule GrpcLLMClient do
#   @moduledoc """
#   gRPC 客户端模块，用于与服务器交互。
#   """

#   alias Chat.ChatService.Stub
#   alias Chat.ChatRequest
#   alias Chat.ChatMessage

#   @server_address "localhost:50051"

#   # GenServer 回调函数
#   use GenServer

#   # 初始化 GenServer
#   def start_link(question) do
#     GenServer.start_link(@name, question, name: @name)
#   end

#   # 初始化状态
#   def init(question) do
#     # 连接到 gRPC 服务器
#     {:ok, channel} = GRPC.Stub.connect(@server_address)

#     # 初始化消息列表
#     messages = [
#       ChatMessage.new(role: "system", content: "你是一个乐于解答各种问题的助手，你的任务是为用户提供专业、准确、有见地的建议。"),
#       ChatMessage.new(role: "user", content: question)
#     ]

#     # 初始化历史记录
#     history = []

#     # 开始对话循环
#     GenServer.cast(@name, :chat_loop)

#     {:ok, %{channel: channel, messages: messages, history: history}}
#   end

#   # 处理消息
#   def handle_cast(:chat_loop, state) do
#     chat_loop(state.channel, state.messages, state.history)
#     {:noreply, state}
#   end

#   def handle_cast({:new_message, new_message}, state) do
#     new_state = %{state | messages: state.messages ++ [new_message]}
#     GenServer.cast(@name, :chat_loop)
#     {:noreply, new_state}
#   end

#   # 外部调用函数：发送新消息
#   def send_message(message) do
#     GenServer.cast(@name, {:new_message, ChatMessage.new(role: "user", content: message)})
#   end

#   # 内部函数：处理对话循环
#   defp chat_loop(channel, messages, history) do
#     # 构建请求
#     request = ChatRequest.new(model: "glm-4-0520", messages: messages)

#     # 调用 gRPC 流式方法
#     IO.puts("服务器响应：")
#     case Stub.chat_stream(channel, request) do
#       {:ok, stream} ->
#         stream
#         |> Enum.each(fn {:ok, response} ->
#           # 将响应内容保存到历史记录
#           updated_history = [response.delta | history]
#           # 输出响应内容
#           IO.write(response.delta)
#           # 更新状态
#           GenServer.update(@name, fn state ->
#             %{state | history: updated_history}
#           end)
#         end)
#         IO.puts("")

#       {:error, reason} ->
#         IO.puts("请求失败: #{inspect(reason)}")
#     end

#     # 继续对话
#     next_question = IO.gets("\n您还有其他问题吗？请输入（或输入 'exit' 退出）：") |> String.trim()

#     if next_question == "exit" do
#       IO.puts("对话结束。")
#     else
#       # 将新问题添加到消息列表
#       GenServer.cast(@name, {:new_message, ChatMessage.new(role: "user", content: next_question)})
#     end
#   end

#   # 更新状态
#   def update(pid, update_fn) do
#     GenServer.update(pid, update_fn)
#   end
# end

# defmodule GrpcLLMClient do
#   @moduledoc """
#   gRPC 客户端模块，用于与服务器交互。
#   """

#   alias Chat.ChatService.Stub
#   alias Chat.ChatRequest
#   alias Chat.ChatMessage

#   @server_address "localhost:50051"

#   # GenServer 回调函数
#   use GenServer

#   # 初始化 GenServer
#   def start_link(question) do
#     GenServer.start_link(@name, question, name: @name)
#   end

#   # 初始化状态
#   def init(question) do
#     # 连接到 gRPC 服务器
#     {:ok, channel} = GRPC.Stub.connect(@server_address)

#     # 初始化消息列表
#     messages = [
#       ChatMessage.new(role: "system", content: "你是一个乐于解答各种问题的助手，你的任务是为用户提供专业、准确、有见地的建议。"),
#       ChatMessage.new(role: "user", content: question)
#     ]
#     # 初始化历史记录
#     history = []

#     # 开始对话循环
#     GenServer.cast(@name, :chat_loop)

#     {:ok, %{channel: channel, messages: messages, history: history}}
#   end

#   # 处理消息
#   def handle_cast(:chat_loop, state) do
#     chat_loop(state.channel, state.messages, state.history)
#     {:noreply, state}
#   end

#   def handle_cast({:new_message, new_message}, state) do
#     new_state = %{state | messages: state.messages ++ [new_message]}
#     GenServer.cast(@name, :chat_loop)
#     {:noreply, new_state}
#   end

#   # 外部调用函数：发送新消息
#   def send_message(message) do
#     GenServer.cast(@name, {:new_message, ChatMessage.new(role: "user", content: message)})
#   end

#   # 内部函数：处理对话循环
#   defp chat_loop(channel, messages, history) do
#     # 构建请求
#     request = ChatRequest.new(model: "glm-4-0520", messages: messages)

#     # 调用 gRPC 流式方法
#     IO.puts("服务器响应：")
#     case Stub.chat_stream(channel, request) do
#       {:ok, stream} ->
#         stream
#         |> Enum.each(fn {:ok, response} ->
#           IO.write(response.delta)
#         end)
#         IO.puts("")

#       {:error, reason} ->
#         IO.puts("请求失败: #{inspect(reason)}")
#     end

# 继续对话
# next_question = IO.gets("\n您还有其他问题吗？请输入（或输入 'exit' 退出）：") |> String.trim()

# if next_question == "exit" do
# IO.puts("对话结束。")
# else
# 将新问题添加到消息列表
# GenServer.cast(@name, {:new_message, ChatMessage.new(role: "user", content: next_question)})
# end
#   end
# end

# defmodule GrpcLLMClient do
#   @moduledoc """
#   gRPC 客户端模块，用于与服务器交互。
#   """

#   alias Chat.ChatService.Stub
#   alias Chat.ChatRequest
#   alias Chat.ChatMessage

#   @server_address "localhost:50051"

#   # GenServer 回调函数
#   use GenServer

#   # 初始化 GenServer
#   def start_link(question) do
#     GenServer.start_link(@name, question, name: @name)
#   end

#   # 初始化状态
#   def init(question) do
#     # 连接到 gRPC 服务器
#     {:ok, channel} = GRPC.Stub.connect(@server_address)

#     # 初始化消息列表
#     messages = [
#       ChatMessage.new(role: "system", content: "你是一个乐于解答各种问题的助手，你的任务是为用户提供专业、准确、有见地的建议。"),
#       ChatMessage.new(role: "user", content: question)
#     ]

#     # 开始对话循环
#     chat_loop(channel, messages)

#     {:ok, %{channel: channel, messages: messages}}
#   end

#   # 处理消息
#   def handle_call(:chat_loop, _from, state) do
#     chat_loop(state.channel, state.messages)
#     {:reply, :ok, state}
#   end

#   def handle_cast({:new_message, new_message}, state) do
#     new_state = %{state | messages: state.messages ++ [new_message]}
#     chat_loop(new_state.channel, new_state.messages)
#     {:noreply, new_state}
#   end

#   defp chat_loop(channel, messages) do
#     # 构建请求
#     request = ChatRequest.new(model: "glm-4-0520", messages: messages)

#     # 调用 gRPC 流式方法
#     IO.puts("服务器响应：")
#     case Stub.chat_stream(channel,request) do
#       {:ok, stream} ->
#         stream
#         |> Enum.each(fn {:ok, response} ->
#           IO.write(response.delta)
#         end)
#         IO.puts("")

#       {:error, reason} ->
#         IO.puts("请求失败: #{inspect(reason)}")
#     end
#   end
# end

# defmodule GrpcLLMClient do
#   @moduledoc """
#   gRPC 客户端模块，用于与服务器交互。
#   """

#   alias Chat.ChatService.Stub
#   alias Chat.ChatRequest
#   alias Chat.ChatMessage

#   @server_address "localhost:50051"

#   def run(question) do
#     # 连接到 gRPC 服务器
#     {:ok, channel} = GRPC.Stub.connect(@server_address)

#     # 初始化消息列表
#     messages = [
#       ChatMessage.new(role: "system", content: "你是一个乐于解答各种问题的助手，你的任务是为用户提供专业、准确、有见地的建议。"),
#       ChatMessage.new(role: "user", content: question)
#     ]

#     # 开始对话循环
#     chat_loop(channel, messages)
#   end

#   defp chat_loop(channel, messages) do
#     # 构建请求
#     request = ChatRequest.new(model: "glm-4-0520", messages: messages)

#     # 调用 gRPC 流式方法
#     IO.puts("服务器响应：")
#     case Stub.chat_stream(channel, request) do
#       {:ok, stream} ->
#         stream
#         |> Enum.each(fn {:ok, response} ->
#           IO.write(response.delta)
#         end)
#         IO.puts("")

#       {:error, reason} ->
#         IO.puts("请求失败: #{inspect(reason)}")
#     end
#     # case Stub.chat_stream(channel, request) do
#     #   {:ok, stream} ->
#     #     stream
#     #     |> Enum.each(fn response ->
#     #       # 流式输出响应内容
#     #       IO.write(response.delta)
#     #     end)

#     #     # 换行
#     #     IO.puts("")

#     #   {:error, reason} ->
#     #     IO.puts("请求失败: #{inspect(reason)}")
#     # end

#     # 继续对话
#     next_question = IO.gets("\n您还有其他问题吗？请输入（或输入 'exit' 退出）：") |> String.trim()

#     if next_question == "exit" do
#       IO.puts("对话结束。")
#     else
#       # 将新问题添加到消息列表
#       new_message = ChatMessage.new(role: "user", content: next_question)
#       chat_loop(channel, messages ++ [new_message])
#     end
#   end

#   # defp chat_loop(channel, messages) do
#   #   # 构建请求
#   #   request = ChatRequest.new(model: "glm-4-0520", messages: messages)

#   #   # 调用 gRPC 流式方法
#   #   IO.puts("服务器响应：")
#   #   channel
#   #   |> Stub.chat_stream(request)
#   #   |> Enum.each(fn response ->
#   #     IO.write(response.delta)  # 流式输出响应内容
#   #   end)
#   #   IO.puts("")  # 换行

#   #   # 继续对话
#   #   next_question = IO.gets("\n您还有其他问题吗？请输入（或输入 'exit' 退出）：") |> String.trim()
#   #   if next_question == "exit" do
#   #     IO.puts("对话结束。")
#   #   else
#   #     # 将新问题添加到消息列表
#   #     new_message = ChatMessage.new(role: "user", content: next_question)
#   #     chat_loop(channel, messages ++ [new_message])
#   #   end
#   # end
# end
