steve7909 committed on
Commit 6e81085 • 1 Parent(s): 857261b

Upload folder using huggingface_hub

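The commit message indicates the folder was pushed with the huggingface_hub client. A minimal sketch of such an upload follows; the repo id, local path, and token handling are placeholders for illustration, not values taken from this commit.

# Illustrative sketch only: pushing a local folder to a Space with huggingface_hub.
# The repo_id and folder_path are assumptions, not read from this commit.
from huggingface_hub import upload_folder

upload_folder(
    repo_id="steve7909/EvaLingo",   # assumed Space id
    repo_type="space",              # upload to a Space rather than a model repo
    folder_path=".",                # local folder to push
    commit_message="Upload folder using huggingface_hub",
)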
Files changed (4)
  1. .DS_Store +0 -0
  2. .gitignore +1 -0
  3. README.md +1 -7
  4. app.py +169 -0
.DS_Store ADDED
Binary file (6.15 kB).
 
.gitignore ADDED
@@ -0,0 +1 @@
+ .env
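The ignored .env file is where app.py (added below) expects its credentials, loaded via python-dotenv. A minimal sketch of checking that setup, with placeholder values shown in the comments:

# Illustrative check that the ignored .env file provides the variables app.py reads.
# Example .env contents (placeholders only):
#   HUGGING_FACE_TOKEN=hf_xxx
#   OPENAI_API_KEY=sk-xxx
from dotenv import load_dotenv
import os

load_dotenv()  # reads .env from the working directory
assert os.getenv("HUGGING_FACE_TOKEN") is not None
assert os.getenv("OPENAI_API_KEY") is not None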
README.md CHANGED
@@ -1,12 +1,6 @@
  ---
  title: EvaLingo
- emoji: ⚡
- colorFrom: green
- colorTo: gray
+ app_file: app.py
  sdk: gradio
  sdk_version: 4.23.0
- app_file: app.py
- pinned: false
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
app.py ADDED
@@ -0,0 +1,169 @@
+ # -*- coding: utf-8 -*-
+ """translation practice.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+     https://colab.research.google.com/drive/1KrnodZGBZrUFdaJ9FIn8IhtWtCL7peoE
+ """
+ import requests
+ import gradio as gr
+ from dotenv import load_dotenv
+ import os
+ from openai import OpenAI
+ import spacy
+
+ # Load environment variables from .env file
+ load_dotenv()
+
+ # Access the environment variables
+ HF_TOKEN = os.getenv('HUGGING_FACE_TOKEN')
+
+ # OpenAI setup
+ client = OpenAI(
+     api_key=os.getenv('OPENAI_API_KEY')
+ )
+
+ # Hugging Face Inference API setup
+ #model_name = "mmnga/ELYZA-japanese-Llama-2-7b-instruct-gguf"
+ API_URL = "https://api-inference.huggingface.co/models/"
+ #API_URL = f"https://api-inference.huggingface.co/models/{model_name}"
+ headers = {"Authorization": f"Bearer {HF_TOKEN}"}
+
+ example_Japanese = '''こんにちは！
+
+ みなさん、私たちのプレゼンテーションにワクワクしていますか？
+ おなかがペコペコではありませんか？
+ では、ニコニコして、聞いてください！
+
+ 今日のプレゼンテーションのテーマはオノマトペのくりかえすことばです。
+
+ そのようなことばをみなさん何か知っていますか？
+
+ 日本語はくりかえすことばを毎日つかいます。気持ちやようすをよくひょうげんできるし、わかりやすいし、いんしょうにのこりやすいからです。たとえば、「ぴかぴか」と聞いたら、どうおもいますか？どんなイメージですか？やっぱりきれいやでんきのひかりですね。'''
+
+ example_English = '''Hello!
+
+ Are you all excited about our presentation?
+ Aren't you hungry?
+ So, smile and listen!
+
+ The theme of today's presentation is onomatopoeic repetition.
+
+ Do you know any such words?
+
+ In Japanese, we use repeated words every day. It's easy to understand, easy to understand, and easy to follow. For example, what do you think when you hear the word "pikapika"? What kind of image do you have? After all, it is a beautiful and electric light.'''
+
+ def split_sentences_ginza(input_text):
+     nlp = spacy.load("ja_core_news_sm")
+     doc = nlp(input_text)
+     sentences = [sent.text for sent in doc.sents]
+     return sentences
+
+ def query_hf(payload, model_name):
+     # HTTP POST Request
+     response = requests.post(API_URL+model_name, headers=headers, json=payload)
+
+     return response.json()
+
+ def translate_hf(input_text):
+     print("Translating... ", input_text)
+
+     sentences = split_sentences_ginza(input_text) # split into sentences
+     translated_sentences = []
+
+     print("Split sentences... ", sentences)
+
+     for sentence in sentences:
+         if sentence.strip(): # Ensure sentence is not empty
+             # API Request for each sentence:
+             response = query_hf({
+                 "inputs": sentence.strip(),
+                 "options": {"wait_for_model": True}
+             }, "Helsinki-NLP/opus-mt-ja-en")
+
+             print("response: ", response)
+             translated_sentence = response[0]["translation_text"]
+             translated_sentences.append(translated_sentence)
+
+     # Join the translated sentences
+     translation = ' '.join(translated_sentences)
+
+     return translation
+
+
+ def translate_openai(input_text):
+
+     prompt = "Translate the following text into Japanese language: " + input_text
+
+     response = client.chat.completions.create(
+         messages=[
+             {
+                 "role": "user",
+                 "content": prompt,
+             }
+         ],
+         model="gpt-3.5-turbo",
+         temperature=0 # should be the same every time
+     )
+
+     translation = response.choices[0].message.content
+
+     print("GPT translation:", translation)
+
+     return translation
+
+ def assess(original_japanese, student_translation):
+
+     try:
+         # get the English translation
+         generated_translation = translate_hf(original_japanese)
+     except Exception as e:
+         return "Error in processing translation.", str(e)
+
+
+     print("Generated translation:", generated_translation)
+     try:
+         prompt = (f"Evaluate the student's English translation of Japanese for accuracy and naturalness. "
+                   f"Original: {original_japanese}, "
+                   f"Reference Translation: {generated_translation}, "
+                   f"Student Translation: {student_translation}. "
+                   "Highlight errors, suggest improvements, and note any nuances. Provide concise and very simple feedback for an English language learner aimed at improving their translation skills. Where possible, give concrete examples.")
+
+         print(prompt)
+
+         # Evaluating the student's translation attempt
+         response = client.chat.completions.create(
+             messages=[
+                 {
+                     "role": "user",
+                     "content": prompt,
+                 }
+             ],
+             model="gpt-3.5-turbo",
+         )
+         print("Full GPT response:", response)
+
+         evaluation_feedback = response.choices[0].message.content
+
+         print("GPT feedback:", evaluation_feedback)
+         return generated_translation, evaluation_feedback
+     except Exception as e:
+         return "Error in processing evaluation.", str(e)
+
+ assessor = gr.Interface(fn=assess,
+                         inputs=[
+                             gr.Textbox(label="Japanese Sentence Input", placeholder="Input text to be translated", lines=1, value="これは例です"),  # or value=example_Japanese
+                             gr.Textbox(label="Student's Translation Attempt", placeholder="Input your English translation", lines=1, value="This is an example")
+                         ],
+                         outputs=[
+                             gr.Textbox(label="Machine Generated Translation"),
+                             gr.Textbox(label="Evaluation Feedback")
+                         ],
+                         title="Translation Practice",
+                         description="Enter a Japanese sentence and your English translation attempt to receive evaluation feedback."
+                         )
+
+ assessor.launch(debug=True, share=True)
+
+ #assessor.launch(debug=True)
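Note that split_sentences_ginza loads the ja_core_news_sm spaCy pipeline on every call, so that pipeline must be installed in the Space. A small sketch of ensuring it is available before launch, assuming it is not already pinned in the Space's requirements:

# Illustrative one-time setup: make sure the Japanese spaCy pipeline used by
# split_sentences_ginza is installed before app.py tries to load it.
import spacy

try:
    spacy.load("ja_core_news_sm")
except OSError:
    from spacy.cli import download
    download("ja_core_news_sm")  # downloads and installs the pipeline package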