'use server';
/**
 * @fileOverview This file defines a Genkit flow for interacting with the second AI model.
 *
 * - chatWithAiModel2 - A function that sends a prompt to the second AI model and returns the response.
 * - ChatWithAiModel2Input - The input type for the chatWithAiModel2 function.
 * - ChatWithAiModel2Output - The return type for the chatWithAiModel2 function.
 */

import {ai, openApiLocal, sendVolcengine} from '@/ai/ai-instance';
import {z} from 'genkit';
import {ChatWithAiModel1Input} from "@/ai/flows/chat-with-ai-model-1";

// Zod schema validating the input to the model-2 chat flow: a single prompt string.
const ChatWithAiModel2InputSchema = z.object({
    prompt: z.string().describe('The prompt to send to the AI model.'),
});
// Input type derived from the schema so the two can never drift apart.
export type ChatWithAiModel2Input = z.infer<typeof ChatWithAiModel2InputSchema>;

// Zod schema validating the flow's output: the model's response text.
const ChatWithAiModel2OutputSchema = z.object({
    response: z.string().describe('The response from the AI model.'),
});
// Output type derived from the schema so the two can never drift apart.
export type ChatWithAiModel2Output = z.infer<typeof ChatWithAiModel2OutputSchema>;

// export async function chatWithAiModel2(input: ChatWithAiModel2Input): Promise<ChatWithAiModel2Output> {
//     return chatWithAiModel2Flow(input);
// }

const chatWithAiModel2Prompt = ai.definePrompt({
    name: 'chatWithAiModel2Prompt',
    input: {
        schema: z.object({
            prompt: z.string().describe('The prompt to send to the AI model.'),
        }),
    },
    output: {
        schema: z.object({
            response: z.string().describe('The response from the AI model.'),
        }),
    },
    prompt: `You are a helpful AI assistant. Please respond to the following prompt: {{{prompt}}}`,
});

/**
 * Genkit flow wrapping chatWithAiModel2Prompt.
 *
 * The prompt's structured output may be absent at runtime (the model can fail
 * to produce schema-conformant output); failing loudly here is safer than the
 * previous non-null assertion, which would have returned `undefined` and
 * violated the declared output schema.
 */
const chatWithAiModel2Flow = ai.defineFlow<
    typeof ChatWithAiModel2InputSchema,
    typeof ChatWithAiModel2OutputSchema
>(
    {
        name: 'chatWithAiModel2Flow',
        inputSchema: ChatWithAiModel2InputSchema,
        outputSchema: ChatWithAiModel2OutputSchema,
    },
    async input => {
        const {output} = await chatWithAiModel2Prompt(input);
        if (!output) {
            throw new Error('chatWithAiModel2Prompt produced no structured output');
        }
        return output;
    }
);
// Streaming variant: emits the model's response incrementally as stream chunks.
export async function chatWithAiModel2(input: ChatWithAiModel1Input, reasoningOn: boolean): Promise<ReadableStream<ChatWithAiModel2Output>> {
    const stream = openApiLocal(input.prompt, reasoningOn);

    // 创建一个 ReadableStream
    return new ReadableStream<ChatWithAiModel2Output>({
        async start(controller) {
            let fullResponse = '';
            for await (const chunk of stream) {
                fullResponse += chunk;
                controller.enqueue({response: chunk}); // 逐步返回响应内容
            }
            controller.close();
        },
    });
}
