import gradio as gr
import requests
import g4f
from g4f import Provider, models
from langchain.llms.base import LLM
from langchain_g4f import G4FLLM

g4f.debug.logging = True  # Enable logging
g4f.check_version = False  # Disable automatic version checking
# print(g4f.version)  # Check version
# print(g4f.Provider.Ails.params)  # Supported args

url = "https://app.embedchain.ai/api/v1/pipelines/024a60fa-cfc3-41a2-a27b-2f6a04c1a6fe/context/"


def greet(name):
    # Retrieve relevant context for the query from the Embedchain pipeline
    payload = {
        "query": name,
        "count": 25,
    }
    headers = {
        "Authorization": "Token ec-fBwP02l3yodIa40BHkSEdhqVQmelK8pNsbrUew2J",
    }
    response = requests.post(url, headers=headers, json=payload)
    print(response.text)
    print(name)
    context = response.text

    # Answer the query with GPT-3.5 Turbo (16k) through g4f, grounded in the retrieved context
    llm: LLM = G4FLLM(model=models.gpt_35_turbo_16k)
    res = llm(f"""
    Use the following pieces of context to answer the query at the end.
    If you don't know the answer, just say that you don't know, don't try to make up an answer.

    {context}

    Query: {name}
    Helpful Answer:
    """)
    print(res)
    return res


iface = gr.Interface(
    fn=greet,
    inputs="text",
    outputs=gr.Textbox(label="Answer"),
    title="bot",
    description="Chatbot-law-code-pénal",
)
iface.launch()