# -*- coding: utf-8 -*-
"""gradio-bigtranslate.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1Rtw0lupjDrxW3bRiuFmxFlxKO40X6AuU
"""
# ! pip install gradio transformers optimum auto-gptq accelerate

from huggingface_hub import notebook_login

# Log in so gated/private models can be downloaded (a no-op for public repos)
notebook_login()

from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

# Load the 4-bit GPTQ-quantized BigTranslate model and its tokenizer
model_name = "TheBloke/BigTranslate-13B-GPTQ"

# Configure GPTQ to disable Exllama and fall back to the CUDA backend
quantization_config = GPTQConfig(bits=4, disable_exllama=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=quantization_config,
    device_map="auto",  # place the weights on the GPU (requires accelerate)
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
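
# Optional smoke test before wiring up the UI (hypothetical prompt; output
# will vary). Uncomment to check that generation works on your hardware:
# ids = tokenizer("Hello", return_tensors="pt").to(model.device)
# print(tokenizer.decode(model.generate(**ids, max_new_tokens=20)[0], skip_special_tokens=True))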
import gradio as gr
supported_languages = {
    "English": "en",
    "French": "fr",
    "Spanish": "es",
    "German": "de",
    # Add more languages and their codes as needed
}
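
# Each dropdown label maps to the short code used in the ">>xx<<" prompt
# prefix below, e.g. selecting "French" produces the prefix ">>fr<<".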
def translate_text(input_text, output_language):
    # Map the human-readable dropdown choice to its language code
    lang_code = supported_languages[output_language]
    # Prefix the input text with the target language code
    prefixed_input_text = f">>{lang_code}<< {input_text}"
    # Tokenize the input text and move the tensors to the model's device
    inputs = tokenizer(prefixed_input_text, return_tensors="pt").to(model.device)
    # Generate the translation (max_new_tokens caps the output, not the prompt)
    outputs = model.generate(
        **inputs, max_new_tokens=40, num_beams=4, early_stopping=True
    )
    # Decode only the newly generated tokens, skipping the echoed prompt
    translated_text = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )
    return translated_text
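
# Example call, assuming the model loaded successfully (exact output varies):
# translate_text("Good morning", "French")  # -> e.g. "Bonjour"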
# Create the Gradio interface
iface = gr.Interface(
    fn=translate_text,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter text here..."),
        gr.Dropdown(choices=list(supported_languages.keys()), label="Select output language"),
    ],
    outputs="text",
)
# Launch the interface
iface.launch()
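
# Note: when running inside Colab, iface.launch(share=True) would expose a
# temporary public URL instead of only the local one.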