# # image vector db
# from openai import OpenAI
# client = OpenAI(api_key="Empty", base_url="http://127.0.0.1:11434/v1")
# response = client.chat.completions.create(
#             model="qwen2.5:7b-instruct-q4_0",
#             messages=[
#                 {'role': 'system','content': "You are a helpful assistant!"},
#                 {'role': 'user','content': "Hi"}],
#             max_tokens=2048,
#             temperature=0.2
#         )
# print(response.choices[0].message.content)
#
#
# # from llama_index.llms.ollama import Ollama
# #
# # llm = Ollama(model="qwen2.5:7b-instruct-q4_0", request_timeout=120.0)
# # resp = llm.complete("Hi!")
# # print(resp)
#
# from llama_index.llms.openllm import OpenLLM
#
# llm = OpenLLM(
#     model="qwen2.5:7b-instruct-q4_0", api_base="http://127.0.0.1:11434/v1", api_key="Empty"
# )
# resp = llm.complete("Hi!")
# print(resp)

# embedding model (served by a local Ollama instance via its OpenAI-compatible API)
from openai import OpenAI

def main() -> None:
    """Request an embedding for a sample string and print the resulting vector.

    Talks to a local Ollama server through its OpenAI-compatible endpoint
    (http://127.0.0.1:11434/v1) using the ``bge-m3`` embedding model.
    Raises ``openai.APIConnectionError`` if no server is listening there.
    """
    # Ollama ignores the API key but the OpenAI client requires a non-empty one.
    client = OpenAI(base_url="http://127.0.0.1:11434/v1", api_key="Empty")

    response = client.embeddings.create(
        input="Your text string goes here",
        model="bge-m3",
    )
    # One entry per input string; a single string was sent, so index 0 is the vector.
    print(response.data[0].embedding)


if __name__ == "__main__":
    # Guard so importing this module no longer triggers network I/O as a side effect.
    main()
