# -*- coding: utf-8 -*-
"""translation practice.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1KrnodZGBZrUFdaJ9FIn8IhtWtCL7peoE

A small Gradio app for Japanese -> English translation practice: the
student's attempt is compared against a machine translation (Hugging
Face Inference API) and GPT produces learner-oriented feedback.
"""
import os

import requests
import gradio as gr
import spacy
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables (HF_TOKEN / OPENAI_API_KEY) from a .env file.
load_dotenv()

HF_TOKEN = os.getenv('HUGGING_FACE_TOKEN')

# OpenAI client setup.
client = OpenAI(
    api_key=os.getenv('OPENAI_API_KEY')
)

# Hugging Face Inference API setup; the model name is appended per request.
# (Was an f-string with no placeholders — plain string is correct.)
API_URL = "https://api-inference.huggingface.co/models/"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}

# Global variable to control debug printing.
DEBUG_MODE = True

# Lazily-loaded spaCy pipeline; loading the model is expensive, so it is
# cached here instead of being reloaded on every sentence-split call.
_NLP = None


def debug_print(*args, **kwargs):
    """print() passthrough that is silenced when DEBUG_MODE is False."""
    if DEBUG_MODE:
        print(*args, **kwargs)


def split_sentences_ginza(input_text):
    """Split Japanese text into sentences with the ja_core_news_sm model.

    The pipeline is loaded once and cached in the module-level _NLP
    (the original reloaded the model on every call).
    """
    global _NLP
    if _NLP is None:
        _NLP = spacy.load("ja_core_news_sm")
    doc = _NLP(input_text)
    return [sent.text for sent in doc.sents]


def query_hf(payload, model_name):
    """POST `payload` to the HF Inference API for `model_name`; return the JSON body."""
    response = requests.post(API_URL + model_name, headers=headers, json=payload)
    return response.json()


def translate_hf(input_text):
    """Translate Japanese `input_text` to English, one sentence at a time.

    Returns the translated sentences joined with spaces.
    Raises RuntimeError if the Inference API returns an error payload
    (a dict) instead of the expected list of translations.
    """
    debug_print("Translating... ", input_text)
    sentences = split_sentences_ginza(input_text)  # split into sentences
    debug_print("Split sentences... ", sentences)

    translated_sentences = []
    for sentence in sentences:
        if not sentence.strip():  # skip empty fragments
            continue
        # API request for each sentence; wait_for_model avoids cold-start 503s.
        response = query_hf(
            {"inputs": sentence.strip(), "options": {"wait_for_model": True}},
            "Helsinki-NLP/opus-mt-ja-en",
        )
        debug_print("response: ", response)
        # On success the API returns a list of dicts; on failure a dict
        # like {"error": ...} — fail loudly instead of a raw KeyError.
        if not isinstance(response, list):
            raise RuntimeError(f"Hugging Face API error: {response}")
        translated_sentences.append(response[0]["translation_text"])

    # Join the translated sentences.
    return ' '.join(translated_sentences)


def translate_openai(input_text):
    """Translate `input_text` with GPT (temperature=0 for determinism).

    NOTE(review): the prompt requests *Japanese* output although the rest
    of the app translates Japanese -> English, and this helper is not
    wired into the Gradio interface. Behavior kept as-is — confirm the
    intended direction before using it.
    """
    prompt = "Translate the following text into Japanese language: " + input_text
    response = client.chat.completions.create(  # get translation from GPT
        messages=[
            {
                "role": "user",
                "content": prompt,
            }
        ],
        model="gpt-3.5-turbo",
        temperature=0  # should be the same translation every time
    )
    translation = response.choices[0].message.content
    debug_print("GPT translation:", translation)
    return translation


def assess(original_japanese, student_translation):
    """Return (machine translation, GPT feedback) for a student's attempt.

    On failure either step returns a short error marker plus the exception
    text, so the Gradio UI always receives two displayable outputs.
    """
    try:
        # Get the reference English translation.
        generated_translation = translate_hf(original_japanese)
        debug_print("Generated translation:", generated_translation)
    except Exception as e:
        return "Error in processing translation.", str(e)

    try:
        prompt = (
            "Evaluate the student's English translation of Japanese for accuracy and naturalness. "
            f"Original: {original_japanese}, "
            f"Reference Translation: {generated_translation}, "
            f"Student Translation: {student_translation}. "
            "Highlight errors, suggest improvements, and note any nuances. "
            "Provide concise and very simple feedback for an English language learner "
            "aimed at improving their translation skills. Where possible, give concrete examples."
        )
        debug_print(prompt)
        # Evaluate the student's translation attempt.
        response = client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            model="gpt-3.5-turbo",
        )
        debug_print("Full GPT response:", response)
        debug_print("Generated translation:", generated_translation)
        evaluation_feedback = response.choices[0].message.content
        return generated_translation, evaluation_feedback
    except Exception as e:
        return "Error in processing evaluation.", str(e)


# Gradio UI: Japanese input + student attempt -> machine translation + feedback.
assessor = gr.Interface(
    fn=assess,
    inputs=[
        gr.Textbox(label="Japanese Sentence Input",
                   placeholder="Input text to be translated",
                   lines=1, value="これは例です"),
        gr.Textbox(label="Student's Translation Attempt",
                   placeholder="Input your English translation",
                   lines=1, value="This is an example"),
    ],
    outputs=[
        gr.Textbox(label="Machine Generated Translation"),
        gr.Textbox(label="Evaluation Feedback"),
    ],
    title="Translation Practice",
    description="Enter a Japanese sentence and your English translation attempt to receive evaluation feedback.",
)

# Guard the launch so importing this module does not start a server.
if __name__ == "__main__":
    assessor.launch(debug=True, share=True)