# -*- coding: utf-8 -*-
import requests
import json
import logging
from typing import Any, Dict, List, Text, Optional
import sys
from llama_index.llms.ollama import Ollama
from llama_index.core import Settings
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.embeddings.fastembed import FastEmbedEmbedding
import chromadb
from llama_index.core.llms import ChatMessage

logger = logging.getLogger(__name__)

class QwenAPI:
    """Wrapper around a locally Ollama-served Qwen chat model.

    On construction it wires up:
      * an Ollama LLM client (``codeqwen:7b-chat``),
      * a FastEmbed embedding model (``BAAI/bge-small-zh-v1.5``),
      * llama_index global ``Settings`` pointing at both,
      * a ``VectorStoreIndex`` backed by a persistent Chroma collection
        named ``"docs"`` on local disk.
    """

    def __init__(self):
        # Very long request timeout: local 7B generation can be slow.
        self.llm = Ollama(model="codeqwen:7b-chat", request_timeout=60000.0)
        embed_model = FastEmbedEmbedding(model_name="BAAI/bge-small-zh-v1.5")
        # Register globally so other llama_index components pick these up
        # by default.
        Settings.llm = self.llm
        Settings.embed_model = embed_model
        # Persistent on-disk Chroma store; the collection is created if it
        # does not exist yet.
        db = chromadb.PersistentClient(path="/home/softrobot/kb/chromadb")
        chroma_collection = db.get_or_create_collection("docs")
        vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
        self.index = VectorStoreIndex.from_vector_store(
            vector_store,
            embed_model=embed_model,
        )

    def predict(self, prompt):
        """Send *prompt* to the chat model and return the ChatResponse.

        Parameters
        ----------
        prompt : str
            The user message.

        Returns
        -------
        The llama_index ``ChatResponse`` object (not a plain string).
        """
        messages = [
            ChatMessage(role="system", content="You are an NLP expert"),
            ChatMessage(role="user", content=prompt),
        ]
        return self.llm.chat(messages)

    def predict_req(self, msg, timeout=60.0):
        """Call the local Ollama HTTP chat API directly; return the reply text.

        Parameters
        ----------
        msg : str
            User message for the ``qwen:0.5b-chat`` model.
        timeout : float, optional
            Seconds to wait for the HTTP response (default 60). New
            keyword-with-default, so existing callers are unaffected.

        Returns
        -------
        str
            The assistant's reply (``message.content`` from the API JSON).

        Raises
        ------
        requests.RequestException
            On connection failure, timeout, or HTTP error status.
        """
        # Lazy %-formatting: no string build when INFO is disabled, and it
        # works even if msg is not a str.
        logger.info('#### predict=%s', msg)

        params = {
            "model": "qwen:0.5b-chat",
            "messages": [{"role": "user", "content": msg}],
            "stream": False,
        }
        # json= serializes the payload and sets the Content-Type header.
        # BUG FIX: the original call had no timeout and could hang forever
        # if the Ollama server was unresponsive.
        ret = requests.post(
            url='http://localhost:11434/api/chat',
            json=params,
            timeout=timeout,
        )
        # Surface HTTP errors explicitly instead of failing later with an
        # opaque KeyError on the error body.
        ret.raise_for_status()
        return ret.json()['message']['content']


if __name__ == "__main__":
    # Manual smoke test. Guarded so that importing this module does not
    # load the model or hit the local Ollama server as a side effect.
    qwen = QwenAPI()
    ret = qwen.predict('你好')
    print(ret)


