steve7909 committed on
Commit 90deb69 • 1 Parent(s): 3856c5b

clean up code, add global debug print

Files changed (1)
  1. app.py +19 -40
app.py CHANGED
@@ -30,29 +30,12 @@ API_URL = f"https://api-inference.huggingface.co/models/"
 #API_URL = f"https://api-inference.huggingface.co/models/{model_name}"
 headers = {"Authorization": f"Bearer {HF_TOKEN}"}

-example_Japanese = '''こんにちは！
-
-みなさん、私たちのプレゼンテーションにワクワクしていますか？
-おなかがペコペコではありませんか？
-では、ニコニコして、聞いてください！
-
-今日のプレゼンテーションのテーマはオノマトペのくりかえすことばです。
-
-そのようなことばをみなさん何か知っていますか？
-
-日本語はくりかえすことばを毎日つかいます。気持ちやようすをよくひょうげんできるし、わかりやすいし、いんしょうにのこりやすいからです。たとえば、「ぴかぴか」と聞いたら、どうおもいますか？どんなイメージですか？やっぱりきれいやでんきのひかりですね。'''
-
-example_English = '''Hello!
-
-Are you all excited about our presentation?
-Aren't you hungry?
-So, smile and listen!
-
-The theme of today's presentation is onomatopoeic repetition.
-
-Do you know any such words?
-
-In Japanese, we use repeated words every day. It's easy to understand, easy to understand, and easy to follow. For example, what do you think when you hear the word "pikapika"? What kind of image do you have? After all, it is a beautiful and electric light.'''
+# Global variable to control debug printing
+DEBUG_MODE = True
+
+def debug_print(*args, **kwargs):
+    if DEBUG_MODE:
+        print(*args, **kwargs)

 def split_sentences_ginza(input_text):
     nlp = spacy.load("ja_core_news_sm")
@@ -63,16 +46,15 @@ def split_sentences_ginza(input_text):
 def query_hf(payload, model_name):
     # HTTP POST Request
     response = requests.post(API_URL+model_name, headers=headers, json=payload)
-
     return response.json()

 def translate_hf(input_text):
-    print("Translating... ", input_text)
+    debug_print("Translating... ", input_text)

     sentences = split_sentences_ginza(input_text) # split into sentences
     translated_sentences = []

-    print("Split sentences... ", sentences)
+    debug_print("Split sentences... ", sentences)

     for sentence in sentences:
         if sentence.strip(): # Ensure sentence is not empty
@@ -82,7 +64,7 @@ def translate_hf(input_text):
                 "options": {"wait_for_model": True}
             }, "Helsinki-NLP/opus-mt-ja-en")

-            print("response: ", response)
+            debug_print("response: ", response)
             translated_sentence = response[0]["translation_text"]
             translated_sentences.append(translated_sentence)

@@ -96,7 +78,7 @@ def translate_openai(input_text):

     prompt = "Translate the following text into Japanese language: " + input_text

-    response = client.chat.completions.create(
+    response = client.chat.completions.create( # get translation from GPT
         messages=[
             {
                 "role": "user",
@@ -104,12 +86,10 @@ def translate_openai(input_text):
             }
         ],
         model="gpt-3.5-turbo",
-        temperature=0 # should be the same every time
+        temperature=0 # should be the same translation every time
     )
-
     translation = response.choices[0].message.content
-
-    print("GPT translation:", translation)
+    debug_print("GPT translation:", translation)

     return translation

@@ -118,11 +98,10 @@ def assess(original_japanese, student_translation):
    try:
        # get the English translation
        generated_translation = translate_hf(original_japanese)
+        debug_print("Generated translation:", generated_translation)
    except Exception as e:
        return "Error in processing translation.", str(e)
-
-
-    print("Generated translation:", generated_translation)
+
    try:
        prompt = (f"Evaluate the student's English translation of Japanese for accuracy and naturalness. "
                  f"Original: {original_japanese}, "
@@ -130,7 +109,7 @@
                  f"Student Translation: {student_translation}. "
                  "Highlight errors, suggest improvements, and note any nuances. Provide concise and very simple feedback for an English language learner aimed at improving their translation skills. Where possible, give concrete examples.")

-        print(prompt)
+        debug_print(prompt)

        # Evaluating the student's translation attempt
        response = client.chat.completions.create(
@@ -142,11 +121,13 @@
            ],
            model="gpt-3.5-turbo",
        )
-        print("Full GPT response:", response)

-        evaluation_feedback = response.choices[0].message.content
+        debug_print("Full GPT response:", response)

-        print("GPT feedback:", evaluation_feedback)
+        debug_print("Generated translation:", generated_translation)
+
+        evaluation_feedback = response.choices[0].message.content
+
        return generated_translation, evaluation_feedback
    except Exception as e:
        return "Error in processing evaluation.", str(e)
@@ -165,5 +146,3 @@ assessor = gr.Interface(fn=assess,
                        )

 assessor.launch(debug=True, share=True)
-
-#assessor.launch(debug=True)
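
The change boils down to a module-level flag plus a thin wrapper around print, so flipping one flag silences every debug_print call site at once instead of editing each print. A minimal standalone sketch of the same pattern follows; the environment-variable override and the sample call are illustrative assumptions, not part of app.py:

import os

# Module-level flag controlling debug output, like the DEBUG_MODE added in this commit.
# Reading it from an environment variable is an assumed convenience, not in the commit.
DEBUG_MODE = os.environ.get("DEBUG_MODE", "1") == "1"

def debug_print(*args, **kwargs):
    # Forward to print only while debugging is enabled.
    if DEBUG_MODE:
        print(*args, **kwargs)

debug_print("Translating... ", "こんにちは！")  # emits output only when DEBUG_MODE is true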