import { BaseLLM } from "./baseLLM";
import { ChatMessage } from "../index";
import { streamSse } from "./stream";

/**
 * GLM (Zhipu AI / 智谱AI) model adapter.
 * Supports the GLM-4 series via a streaming, SSE-based chat-completions
 * endpoint that returns OpenAI-style chunks.
 */
export class GLM extends BaseLLM {
    /**
     * @param apiKey    Bearer token for the GLM API.
     * @param modelName Model identifier (e.g. "glm-4").
     * @param url       Full URL of the chat-completions endpoint.
     */
    constructor(
        public apiKey: string,
        public modelName: string,
        public url: string,
    ) {
        super();
    }

    /** JSON content type plus Bearer authorization for every request. */
    protected _getHeaders() {
        return {
            "Content-Type": "application/json",
            "Authorization": `Bearer ${this.apiKey}`,
        };
    }

    /** Builds the base request payload (messages + model name). */
    protected _convertArgs(messages: ChatMessage[]) {
        return {
            messages,
            model: this.modelName,
        };
    }

    /**
     * Streams raw assistant text deltas from the GLM endpoint.
     *
     * @param _messages Conversation history to send.
     * @param signal    Abort signal forwarded to the fetch call.
     * @throws Error when the endpoint responds with a non-2xx status.
     */
    public async *_legacystreamComplete(
        _messages: ChatMessage[],
        signal: AbortSignal,
    ): AsyncGenerator<string> {
        const args = this._convertArgs(_messages);

        const response = await this.fetch(new URL(this.url), {
            method: "POST",
            headers: this._getHeaders(),
            body: JSON.stringify({
                ...args,
                stream: true,
            }),
            signal,
        });

        // Fail fast with a readable message rather than letting the SSE
        // parser choke on an HTML/JSON error body.
        if (!response.ok) {
            throw new Error(
                `GLM request failed: ${response.status} ${response.statusText}`,
            );
        }

        for await (const value of streamSse(response)) {
            // GLM returns OpenAI-style chunks: choices[0].delta.content
            const text = value.choices?.[0]?.delta?.content;
            if (text) {
                yield text;
            }
        }
    }

    /**
     * Streams assistant ChatMessages by wrapping each text delta from
     * _legacystreamComplete in an assistant-role message.
     */
    public async *_streamChat(
        messages: ChatMessage[],
        signal: AbortSignal,
    ): AsyncGenerator<ChatMessage> {
        for await (const content of this._legacystreamComplete(
            messages,
            signal,
        )) {
            yield {
                role: "assistant",
                content,
            };
        }
    }
}

