import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
from aksharamukha import transliterate
import torch
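# This Space chats in "Singlish" (romanized Sinhala). Each user message is
# converted to Sinhala script, translated to English, answered by a small chat
# LLM, then translated back to Sinhala and romanized for the reply.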
# Set up device
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load translation models and tokenizers
trans_model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M").to(device)
eng_trans_tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
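# NLLB expects FLORES-200 language codes: eng_Latn (English), sin_Sinh (Sinhala).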
translator = pipeline('translation', model=trans_model, tokenizer=eng_trans_tokenizer, src_lang="eng_Latn", tgt_lang='sin_Sinh', max_length=400, device=device)
sin_trans_model = AutoModelForSeq2SeqLM.from_pretrained("thilina/mt5-sinhalese-english").to(device)
si_trans_tokenizer = AutoTokenizer.from_pretrained("thilina/mt5-sinhalese-english", use_fast=False) # Use slow tokenizer
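# Presumably normalizes free-form "Singlish" spellings into a romanization that
# the Velthuis transliterator below can render as Sinhala script.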
singlish_pipe = pipeline("text2text-generation", model="Dhahlan2000/Simple_Translation-model-for-GPT-v14")
# Translation functions
def translate_Singlish_to_sinhala(text):
    # The fine-tuned text2text model expects a task prefix in the prompt.
    translated_text = singlish_pipe(f"translate Singlish to Sinhala: {text}", clean_up_tokenization_spaces=False)[0]['generated_text']
    return translated_text
def translate_english_to_sinhala(text):
    # Translate line by line so newlines in the response are preserved.
    parts = text.split("\n")
    translated_parts = [translator(part, clean_up_tokenization_spaces=False)[0]['translation_text'] for part in parts]
    # Strip a recurring spurious token from the NLLB output.
    return "\n".join(translated_parts).replace("ප් රභූවරුන්", "")
def translate_sinhala_to_english(text):
    # Translate each line separately with the mt5 Sinhala-to-English model.
    parts = text.split("\n")
    translated_parts = []
    for part in parts:
        inputs = si_trans_tokenizer(part.strip(), return_tensors="pt", padding=True, truncation=True, max_length=512).to(device)
        outputs = sin_trans_model.generate(**inputs)
        translated_part = si_trans_tokenizer.decode(outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
        translated_parts.append(translated_part)
    return "\n".join(translated_parts)
def transliterate_from_sinhala(text):
    # Romanize Sinhala script via the Velthuis scheme, then strip scheme markers.
    latin_text = transliterate.process('Sinhala', 'Velthuis', text).replace('.', '').replace('*', '').replace('"', '').lower()
    return latin_text
def transliterate_to_sinhala(text):
    return transliterate.process('Velthuis', 'Sinhala', text)
# Load conversation model. Earlier revisions used the gated
# microsoft/Phi-3-mini-4k-instruct model and an InferenceClient for
# google/gemma-2b-it; both were replaced by the ungated TinyLlama chat model.
pipe1 = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")
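# Note: the text-generation pipeline returns the prompt plus the completion by
# default; ai_predicted() below splits on "</s>" to keep only the final segment.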
def conversation_predict(text):
    # Generate a reply with the TinyLlama pipeline loaded above.
    outputs = pipe1(text, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
    return outputs[0]["generated_text"]
def ai_predicted(user_input):
    if user_input.lower() == 'exit':
        return "Goodbye!"
    # Singlish -> romanized Sinhala -> Sinhala script -> English.
    user_input = translate_Singlish_to_sinhala(user_input)
    user_input = transliterate_to_sinhala(user_input)
    user_input = translate_sinhala_to_english(user_input)
    ai_response = conversation_predict(user_input)
    # Keep only the text after the last end-of-sequence token.
    ai_response_lines = ai_response.split("</s>")
    # English -> Sinhala script -> romanized Sinhala for the reply.
    response = translate_english_to_sinhala(ai_response_lines[-1])
    response = transliterate_from_sinhala(response)
    return response
# Gradio Interface
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Note: the history, system message, and sampling settings are collected by
    # the ChatInterface but are not used by the translation pipeline below.
    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})
    response = ai_predicted(message)
    yield response
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
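# share=True also exposes the app through a temporary public *.gradio.live URL.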
if __name__ == "__main__":
    demo.launch(share=True)