# -*- coding: utf-8 -*-
"""translation practice.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1KrnodZGBZrUFdaJ9FIn8IhtWtCL7peoE
"""
import requests
import gradio as gr
from dotenv import load_dotenv
import os
from openai import OpenAI
import spacy

# Load environment variables from .env file
load_dotenv()

# Access the environment variables
HF_TOKEN = os.getenv('HUGGING_FACE_TOKEN')

# OpenAI client setup
client = OpenAI(
    api_key=os.getenv('OPENAI_API_KEY')
)
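
# The .env file is expected to provide both keys read above, e.g.:
#   HUGGING_FACE_TOKEN=hf_...
#   OPENAI_API_KEY=sk-...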

# Hugging Face Inference API setup
#model_name = "mmnga/ELYZA-japanese-Llama-2-7b-instruct-gguf"
API_URL = "https://api-inference.huggingface.co/models/"
#API_URL = f"https://api-inference.huggingface.co/models/{model_name}"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}

# Global variable to control debug printing
DEBUG_MODE = True

def debug_print(*args, **kwargs):
    if DEBUG_MODE:
        print(*args, **kwargs)

# Load the Japanese spaCy pipeline once at import time; reloading the model on
# every call is expensive. Note: despite the function name, this loads spaCy's
# ja_core_news_sm model rather than the GiNZA (ja_ginza) pipeline.
nlp = spacy.load("ja_core_news_sm")

def split_sentences_ginza(input_text):
    doc = nlp(input_text)
    sentences = [sent.text for sent in doc.sents]
    return sentences
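
# A quick sanity check for the splitter (assumes ja_core_news_sm has been
# downloaded, e.g. via `python -m spacy download ja_core_news_sm`):
#   split_sentences_ginza("これは例です。これも例です。")
#   # expected: ["これは例です。", "これも例です。"]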

def query_hf(payload, model_name):
    # HTTP POST request to the Hugging Face Inference API
    response = requests.post(API_URL + model_name, headers=headers, json=payload)
    response.raise_for_status()  # surface HTTP-level errors instead of parsing an error body
    return response.json()
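
# For a translation model such as Helsinki-NLP/opus-mt-ja-en, the Inference API
# normally returns a list of the form [{"translation_text": "..."}]; failures
# (e.g. a model still loading without "wait_for_model") come back as a dict
# with an "error" key instead, which translate_hf below relies on not seeing.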

def translate_hf(input_text):
    debug_print("Translating... ", input_text)

    sentences = split_sentences_ginza(input_text)  # split into sentences
    translated_sentences = []

    debug_print("Split sentences... ", sentences)

    for sentence in sentences:
        if sentence.strip():  # Ensure sentence is not empty
            # API Request for each sentence:
            response = query_hf({
                "inputs": sentence.strip(),
                "options": {"wait_for_model": True}
            }, "Helsinki-NLP/opus-mt-ja-en")

            debug_print("response: ", response)
            translated_sentence = response[0]["translation_text"]
            translated_sentences.append(translated_sentence)

    # Join the translated sentences
    translation = ' '.join(translated_sentences)

    return translation


def translate_openai(input_text):
    prompt = "Translate the following text into Japanese: " + input_text

    response = client.chat.completions.create( # get translation from GPT
        messages=[
            {
                "role": "user",
                "content": prompt,
            }
        ],
        model="gpt-3.5-turbo",
        temperature=0 # should be the same translation every time
      )
    translation = response.choices[0].message.content
    debug_print("GPT translation:", translation)

    return translation
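
# Note: translate_openai is not wired into the Gradio interface below; the
# reference translation in assess() comes from translate_hf. A GPT-generated
# reference could be swapped in by reversing the prompt direction (Japanese to
# English) and calling this helper from assess() instead.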

def assess(original_japanese, student_translation):
    try:
        # Get the machine-generated English reference translation
        generated_translation = translate_hf(original_japanese)
        debug_print("Generated translation:", generated_translation)
    except Exception as e:
        return "Error in processing translation.", str(e)

    try:
        prompt = (f"Evaluate the student's English translation of Japanese for accuracy and naturalness. "
                  f"Original: {original_japanese}, "
                  f"Reference Translation: {generated_translation}, "
                  f"Student Translation: {student_translation}. "
                  "Highlight errors, suggest improvements, and note any nuances. Provide concise and very simple "
                  "feedback for an English language learner aimed at improving their translation skills. "
                  "Where possible, give concrete examples.")

        debug_print(prompt)

        # Evaluate the student's translation attempt
        response = client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            model="gpt-3.5-turbo",
        )

        debug_print("Full GPT response:", response)

        evaluation_feedback = response.choices[0].message.content

        return generated_translation, evaluation_feedback
    except Exception as e:
        return "Error in processing evaluation.", str(e)

assessor = gr.Interface(
    fn=assess,
    inputs=[
        gr.Textbox(label="Japanese Sentence Input", placeholder="Input text to be translated", lines=1, value="これは例です"),
        gr.Textbox(label="Student's Translation Attempt", placeholder="Input your English translation", lines=1, value="This is an example")
    ],
    outputs=[
        gr.Textbox(label="Machine Generated Translation"),
        gr.Textbox(label="Evaluation Feedback")
    ],
    title="Translation Practice",
    description="Enter a Japanese sentence and your English translation attempt to receive evaluation feedback."
)

assessor.launch(debug=True, share=True)