# Gradio chat app: streams responses from Tamil-Mistral-7B-Instruct (GGUF, CPU).
import gradio as gr
from ctransformers import AutoModelForCausalLM
from huggingface_hub import hf_hub_download
model_name = "Hemanth-thunder/Tamil-Mistral-7B-Instruct-v0.1"
model_file = "tamil-mistral-7b-instruct-v0.1.Q4_K_M.gguf"
# Download the quantized GGUF weights once and load the model from the local
# cached path.  The original passed the repo id + file name to from_pretrained
# again, which re-resolved the file it had just fetched and left `model_path`
# unused.
model_path = hf_hub_download(model_name, filename=model_file)
llm = AutoModelForCausalLM.from_pretrained(
    model_path,            # local .gguf file from the hub cache
    model_type="mistral",
    gpu_layers=0,          # CPU-only inference
)
def alternatingly_agree(message, history, max_new_tokens=50, temperature=0.7):
    """Stream a model response for ``gr.ChatInterface``.

    Parameters
    ----------
    message : str
        The user's latest chat message; substituted into the instruction
        prompt template below.
    history : list
        Prior chat turns supplied by Gradio; unused — the prompt is built
        from the current message only.
    max_new_tokens : int, optional
        Generation length cap (default 50, matching the original hard-coded
        value).
    temperature : float, optional
        Sampling temperature (default 0.7, matching the original hard-coded
        value).

    Yields
    ------
    str
        The response accumulated so far; Gradio re-renders the chat bubble
        on every yield, producing a streaming effect.
    """
    # Tamil system preamble followed by the Instruction/Response template
    # the model was fine-tuned on.
    prompt = """<s> சரியான பதிலுடன் வேலையை வெற்றிகரமாக முடிக்க. தேவையான தகவலை உள்ளிடவும்.
### Instruction:
{}
### Response:
""".format(message)
    outputs = []
    # stream=True makes llm() return a token iterator instead of a string.
    for token in llm(prompt, max_new_tokens=max_new_tokens,
                     temperature=temperature, stream=True):
        outputs.append(token)
        yield "".join(outputs)  # cumulative text so far
# Wire the streaming generator into a chat UI and start the web server
# (blocks until the server is stopped).
gr.ChatInterface(alternatingly_agree).launch()