import { BaseLLM, BaseLLMParams } from"@langchain/core/language_models/llms";
import { CallbackManagerForLLMRun } from"@langchain/core/callbacks/manager";
import { GenerationChunk } from"@langchain/core/outputs";
import { LLMResult, Generation } from"@langchain/core/outputs";

/**
 * Construction parameters for {@link CustomLLM}, extending the standard
 * LangChain `BaseLLMParams` with the credentials and routing info needed
 * to reach an OpenAI-compatible chat-completions endpoint.
 */
interface CustomLLMParams extends BaseLLMParams {
  /** Bearer token sent in the `Authorization` header of every request. */
  apiKey: string;
  /** Model identifier forwarded as the `model` field of the request body. */
  modelName: string;
  /** Full URL of the chat-completions endpoint to POST to. */
  apiEndpoint: string;
}

export class CustomLLM extends BaseLLM {
  apiKey: string;
  modelName: string;
  apiEndpoint: string;

constructor(params: CustomLLMParams) {
    super(params);
    this.apiKey = params.apiKey;
    this.modelName = params.modelName;
    this.apiEndpoint = params.apiEndpoint;
  }

  _llmType(): string {
    return"tongyi";
  }


async *_streamResponseChunks(
    prompt: string,
    options: this["ParsedCallOptions"],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<GenerationChunk> {
    const response = await fetch(this.apiEndpoint, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${this.apiKey}`
      },
      body: JSON.stringify({
        model: this.modelName,
        messages: [
          {
            role: "user",
            content: prompt
          }
        ],
        stream: true
      })
    });

    if (!response.ok || !response.body) {
      const errorResponse = await response.json();
      thrownewError(JSON.stringify(errorResponse));
    }

    const reader = response.body.getReader();
    const decoder = new TextDecoder();

    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      const chunk = decoder.decode(value);
      const lines = chunk.split("\n");

      for (const line of lines) {
        if (line.trim() === "") continue;
        try {

          // 检查是否是结束标记
                    if (line === 'data: [DONE]') {
                        console.log('流式响应结束');
                        continue;
                    }
                    
                    // 确保数据以 "data: " 开头
                    if (!line.startsWith('data: ')) {
                        console.log('跳过非数据行:', line);
                        continue;
                    }
                    
                    const jsonStr = line.replace(/^data: /, '');
                    
                    const data = JSON.parse(jsonStr);
                    if (data.choices?.[0]?.delta?.content) {
                        const text = data.choices[0].delta.content;
                        const generationChunk = new GenerationChunk({
                            text: text,
                            generationInfo: {}
                        });
                        yield generationChunk;
                        await runManager?.handleLLMNewToken(text);
                    }
                } catch (e) {
                    console.log('解析错误:', e);
                    console.log('错误数据:', line);
                }
            }
        }
    }

    async _generate(
        prompts: string[],
        options: this["ParsedCallOptions"],
        runManager?: CallbackManagerForLLMRun
    ): Promise<LLMResult> {
        const prompt = prompts[0];
        const chunks: Generation[] = [];
        
        forawait (const chunk of this._streamResponseChunks(prompt, options, runManager)) {
            const text = chunk.text;
            // 实时输出到控制台
            process.stdout.write(text);
            chunks.push({ text });
        }
        // 输出换行
        process.stdout.write('\n');
        
        return {
            generations: [chunks]
        };
    }

    async streamResponse(
        prompt: string,
        options: this["ParsedCallOptions"],
        onToken: (token: string) =>void
    ): Promise<void> {
        forawait (const chunk of this._streamResponseChunks(prompt, options)) {
            onToken(chunk.text);
        }
    }
} 