import { Injectable } from '@nestjs/common';
import { Observable } from 'rxjs';
import { ChatOpenAI } from "@langchain/openai";
import { CallbackManager } from '@langchain/core/callbacks/manager';
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatDto } from '../bot/dto/chat.dto';
import { Response } from "express";
import { info } from 'console';
import { LLMResult } from '@langchain/core/outputs';
import { run } from 'node:test';

@Injectable()
export class LlmService {

    /**
     * Creates a ChatOpenAI model pointed at the proxy endpoint.
     *
     * @param model           model name forwarded to the API (e.g. 'qwen-max')
     * @param streaming       whether tokens are streamed via callbacks
     * @param callbackManager LangChain callback manager receiving LLM lifecycle events
     */
    getChatModel(model: string, streaming = false, callbackManager: CallbackManager) {
        return new ChatOpenAI({
            // SECURITY: read the key from the environment; the inline literal is kept
            // only as a backward-compatible fallback and MUST be rotated and removed —
            // a secret committed to source control is already considered leaked.
            openAIApiKey: process.env.OPENAI_API_KEY ?? 'sk-bOfEm5HGdsmwFXtz051e65967c0741238dE04b47478749Bd',
            modelName: model,
            temperature: 0.8,
            streaming,
            callbackManager,
        }, {
            // Proxy base URL; overridable via environment for deploy flexibility.
            baseURL: process.env.OPENAI_BASE_URL ?? 'https://oneapi.starringshop.com/v1',
        });
    }

    /**
     * Builds the shared system+user prompt chained to a model — factored out of
     * chatCompletion/chatCompletionSse, which previously duplicated this setup.
     */
    private buildChain(model: string, streaming: boolean, callbackManager: CallbackManager) {
        const prompt = ChatPromptTemplate.fromMessages([
            ["system", "You are a good ai."],
            ["user", "{input}"],
        ]);
        return prompt.pipe(this.getChatModel(model, streaming, callbackManager));
    }

    /**
     * One-shot (non-streaming) completion.
     *
     * @param chat request DTO; only `chat.query` is read here
     * @returns the message content of the model's reply
     */
    async chatCompletion(chat: ChatDto): Promise<any> {
        const chain = this.buildChain('qwen-max', false, CallbackManager.fromHandlers({
            handleLLMEnd: async (output: LLMResult, runId: string) => {
                console.info(runId, output)
            }
        }))

        const ret = await chain.invoke({
            input: chat.query,
        });

        info(ret)
        return ret.content;
    }

    /**
     * Streaming completion exposed as an SSE-friendly Observable: each generated
     * token is emitted as a MessageEvent; the stream completes on handleLLMEnd
     * and errors on handleLLMError or on an invoke-time failure.
     *
     * @param chat request DTO; only `chat.query` is read here
     */
    async chatCompletionSse(chat: ChatDto): Promise<Observable<MessageEvent>> {
        return new Observable<MessageEvent>(subscriber => {
            const chain = this.buildChain('qwen-max', true, CallbackManager.fromHandlers({
                handleLLMNewToken: async (token) => {
                    const data = token;
                    subscriber.next({ data } as MessageEvent);
                },
                handleLLMEnd: async (output: LLMResult, runId: string) => {
                    console.info(runId, output)
                    subscriber.complete();
                },
                handleLLMError: async (e) => {
                    console.error('handleLLMError', e);
                    subscriber.error(e);
                },
            }))

            // BUG FIX: the invoke promise was fire-and-forget. A rejection before the
            // LLM callbacks fire (connection/auth failure, bad request) used to become
            // an unhandled promise rejection while the SSE stream hung forever.
            // Route it to the subscriber; a second subscriber.error from
            // handleLLMError is a no-op on an already-stopped subscriber.
            chain.invoke({
                input: chat.query,
            }).catch((err: unknown) => {
                subscriber.error(err);
            });
        })

    }
}
