
import os

from langchain_community.llms import Xinference
from langchain_core.prompts import PromptTemplate

# Connect to a running Xinference server and run a simple one-variable prompt
# chain. The endpoint and model are overridable via environment variables so
# the script isn't tied to one deployment; the defaults preserve the original
# hard-coded values.
server_url = os.environ.get("XINFERENCE_SERVER_URL", "http://192.168.21.240:9997")
# The model UID is the identifier returned when the model was launched on the server.
model_uid = os.environ.get("XINFERENCE_MODEL_UID", "my-llm")

llm = Xinference(
    server_url=server_url,
    model_uid=model_uid,
)

# Direct (chain-free) invocation example — uncomment to try:
# print(llm(
#     prompt="Q: where can we visit in the capital of France? A:",
#     generate_config={"max_tokens": 1024, "stream": False},
# ))

# Build a prompt -> LLM chain with the LCEL pipe operator and invoke it once.
template = 'What is the largest {kind} on the earth?'
prompt = PromptTemplate(template=template, input_variables=['kind'])
llm_chain = prompt | llm
generated = llm_chain.invoke({"kind": "plant"})
print(generated)

# Alternative: query the same model via the Xinference Python client
# from xinference.client import RESTfulClient
# client = RESTfulClient("http://192.168.21.240:9997")
# model = client.get_model("my-llm")
# print(model.chat(
#     messages=[
#         {
#             "role": "user",
#             "content": "What is the largest animal?"
#         }]
# ))

# Alternative: query the same model via the RESTful API (curl)
# curl -X 'POST' \
#   'http://192.168.21.240:9997/v1/chat/completions' \
#   -H 'accept: application/json' \
#   -H 'Content-Type: application/json' \
#   -d '{
#     "model": "my-llm",
#     "messages": [
#         {
#             "role": "system",
#             "content": "You are a helpful assistant."
#         },
#         {
#             "role": "user",
#             "content": "What is the largest animal?"
#         }
#     ]
#   }'