"use client";
import React, { useState, useRef, useEffect } from "react";
import { Input, Button, Switch, ConfigProvider } from "antd";
import { chatWithAiModel1 } from "@/ai/flows/chat-with-ai-model-1";
import { chatWithAiModel2 } from "@/ai/flows/chat-with-ai-model-2";
import { ScrollArea } from "@/components/ui/scroll-area";
import "./page.css";
import { SendOutlined } from "@ant-design/icons";
import dotenv from "dotenv";

// NOTE(review): this file is a "use client" component, so dotenv.config()
// executes in the browser, where there is no filesystem or .env file to read.
// Next.js only inlines NEXT_PUBLIC_-prefixed variables into client bundles,
// so the process.env.FRIEND_CLOUD_TITLE lookup further down likely resolves
// to undefined at runtime — confirm, and consider moving env handling
// server-side and dropping dotenv here.
dotenv.config();
// Props for AIChatDisplay: the chat transcript entries plus a flag that
// shows the "AI is typing..." indicator while a response is streaming.
interface AIChatDisplayProps {
  messages: { content: string; isAI: boolean }[];
  isStreaming: boolean;
}

/**
 * Scrollable chat transcript. AI and user messages get different bubble
 * styles, an optional "typing" indicator is appended while streaming, and
 * the view auto-scrolls to the newest entry.
 */
const AIChatDisplay: React.FC<AIChatDisplayProps> = ({
  messages,
  isStreaming,
}) => {
  const endOfChatRef = useRef<HTMLDivElement>(null);

  // Keep the latest message in view whenever the transcript changes.
  useEffect(() => {
    endOfChatRef.current?.scrollIntoView({ behavior: "smooth" });
  }, [messages]);

  // Bubble styling depends only on who authored the message.
  const bubbleClass = (fromAI: boolean) =>
    `message mb-2 p-2 rounded-md ${
      fromAI
        ? "bg-secondary text-secondary-foreground"
        : "bg-primary text-primary-foreground"
    }`;

  return (
    <ScrollArea
      className="AIChatDisplay flex-1 p-4 overflow-y-auto"
      style={{ maxHeight: "500px" }}
    >
      {messages.map((msg, i) => (
        <div
          key={i}
          className={bubbleClass(msg.isAI)}
          style={{ whiteSpace: "pre-wrap", wordBreak: "break-word" }}
        >
          {msg.content}
        </div>
      ))}
      {isStreaming && (
        <div className="ai-typing mb-2 p-2 rounded-md bg-muted text-muted-foreground">
          AI is typing...
        </div>
      )}
      <div ref={endOfChatRef} />
    </ScrollArea>
  );
};

/**
 * Side-by-side model comparison page: sends one prompt to two AI flows and
 * streams both responses concurrently, tracking an approximate token count
 * per model.
 */
const Page = () => {
  const [input, setInput] = useState("");
  const [ai1Messages, setAi1Messages] = useState<
    { content: string; isAI: boolean }[]
  >([]);
  const [ai2Messages, setAi2Messages] = useState<
    { content: string; isAI: boolean }[]
  >([]);
  const [isAI1Streaming, setIsAI1Streaming] = useState(false);
  const [isAI2Streaming, setIsAI2Streaming] = useState(false);
  const [traditionalTokens, setTraditionalTokens] = useState(0);
  const [reasoningTokens, setReasoningTokens] = useState(0);
  const [isReasoningOn, setIsReasoningOn] = useState(true);

  /**
   * Drains one model's stream chunk by chunk, appending text to the trailing
   * AI message and updating the approximate token counter.
   *
   * Returns only after the stream is fully consumed (or errors), so callers
   * awaiting this promise see true completion — the previous implementation
   * recursed via setTimeout, which resolved the awaited promise after the
   * first chunk was merely scheduled.
   */
  const processModelStream = async (
    stream: ReadableStream<{ response: string }>,
    setMessages: React.Dispatch<
      React.SetStateAction<{ content: string; isAI: boolean }[]>
    >,
    setIsStreaming: React.Dispatch<React.SetStateAction<boolean>>,
    setTokens: React.Dispatch<React.SetStateAction<number>>
  ) => {
    const reader = stream.getReader();
    let aiResponse = "";
    let totalTokens = 0;

    try {
      for (;;) {
        const { done, value } = await reader.read();
        if (done) break;

        // value.response is already a string; the old TextEncoder/TextDecoder
        // round-trip was a no-op and has been removed.
        const chunkText = value.response;
        aiResponse += chunkText;

        // Replace the trailing AI placeholder (or append one) with the
        // accumulated response so the UI updates incrementally.
        setMessages((prev) => {
          const lastMessage = prev[prev.length - 1];
          if (lastMessage && lastMessage.isAI) {
            return [
              ...prev.slice(0, prev.length - 1),
              { content: aiResponse, isAI: true },
            ];
          }
          return [...prev, { content: aiResponse, isAI: true }];
        });

        // Rough heuristic: 1 token ≈ 4 characters.
        totalTokens += Math.ceil(chunkText.length / 4);
        setTokens(totalTokens);
      }
    } catch (error) {
      console.error("Error processing model stream:", error);
      setMessages((prev) => [
        ...prev,
        { content: "Error fetching response.", isAI: true },
      ]);
    } finally {
      // Always clear the typing indicator and release the reader — the old
      // code left the flag stuck at true when the stream errored.
      setIsStreaming(false);
      reader.releaseLock();
    }
  };

  /** Sends the current prompt to both models and streams their replies. */
  const handleSend = async () => {
    if (!input.trim()) return;

    // Clear previous output and token counts before a new round.
    setAi1Messages([]);
    setAi2Messages([]);
    setTraditionalTokens(0);
    setReasoningTokens(0);

    // Seed each pane with an empty AI placeholder that the stream fills in.
    setAi1Messages((prev) => [...prev, { content: "", isAI: true }]);
    setAi2Messages((prev) => [...prev, { content: "", isAI: true }]);

    // Capture the prompt before clearing the input field.
    const prompt = input;
    setInput("");
    setIsAI1Streaming(true);
    setIsAI2Streaming(true);

    try {
      const [stream1, stream2] = await Promise.all([
        chatWithAiModel1({ prompt }),
        chatWithAiModel2({ prompt }, isReasoningOn),
      ]);

      // Drain both model streams concurrently.
      await Promise.all([
        processModelStream(
          stream1,
          setAi1Messages,
          setIsAI1Streaming,
          setTraditionalTokens
        ),
        processModelStream(
          stream2,
          setAi2Messages,
          setIsAI2Streaming,
          setReasoningTokens
        ),
      ]);
    } catch (error) {
      console.error("Error processing streams:", error);
      // Reset both flags so the UI does not show "typing" forever when a
      // flow call rejects before its stream is handed off.
      setIsAI1Streaming(false);
      setIsAI2Streaming(false);
    }
  };

  return (
    <ConfigProvider
      theme={{
        token: {
          // Seed token — wide-reaching brand color.
          colorPrimary: "#eb5929",
          //   borderRadius: 2,
          // Derived tokens — narrower effect.
          //   colorBgContainer: "#f6ffed",
        },
      }}
    >
      <div className="comparison-page">
        <div className="input-section">
          <Input.TextArea
            rows={4}
            placeholder="Enter your text here..."
            value={input}
            onChange={(e) => setInput(e.target.value)}
            className="input-textarea"
          />
          <Button
            type="primary"
            onClick={handleSend}
            className="send-button"
            variant="outlined"
            shape="round"
            icon={<SendOutlined />}
          >
            Send
          </Button>
        </div>

        {/* Model headings and live token counts */}
        <div className="info-section">
          <div className="info-box">
            {/* NOTE(review): non-NEXT_PUBLIC_ env vars are not inlined into
                client bundles by Next.js, so this likely always falls back to
                the default — confirm. */}
            <h2>{process.env.FRIEND_CLOUD_TITLE || "友商云 LLM API"}</h2>
            <div
              style={{
                display: "flex",
                alignItems: "center",
                height: 40,
                justifyContent: "center",
              }}
            >
              Tokens: <span className="token-count">{traditionalTokens}</span>
            </div>
          </div>
          <div className="info-box">
            <h2>H200 vLLM DeepSeek-R1</h2>
            <div
              style={{
                display: "flex",
                alignItems: "center",
                height: 40,
                justifyContent: "center",
              }}
            >
              Tokens: <span className="token-count">{reasoningTokens}</span>
            </div>
          </div>
        </div>

        {/* Output panes */}
        <div className="models-section">
          <div className="model-box traditional">
            <div className="content-area">
              <AIChatDisplay
                messages={ai1Messages}
                isStreaming={isAI1Streaming}
              />
            </div>
          </div>
          <div className="model-box reasoning">
            {/* <div
              style={{
                display: "flex",
                position: "absolute",
                left: 20,
                top: 20,
                alignItems: "center",
                fontSize: 14,
                gap: 15,
                color: "#eb5929",
              }}
            >
              <Switch
                checked={isReasoningOn}
                onChange={(checked) => setIsReasoningOn(checked)}
                className="reasoning-switch"
              />
              <span className="reasoning-label">Reasoning On</span>
            </div> */}
            <div className="content-area">
              <AIChatDisplay
                messages={ai2Messages}
                isStreaming={isAI2Streaming}
              />
            </div>
          </div>
        </div>
      </div>
    </ConfigProvider>
  );
};

export default Page;
