before share button attempt
Browse files
app.py
CHANGED
@@ -1,11 +1,3 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
"""translation practice.ipynb
|
3 |
-
|
4 |
-
Automatically generated by Colaboratory.
|
5 |
-
|
6 |
-
Original file is located at
|
7 |
-
https://colab.research.google.com/drive/1KrnodZGBZrUFdaJ9FIn8IhtWtCL7peoE
|
8 |
-
"""
|
9 |
import requests
|
10 |
import gradio as gr
|
11 |
from dotenv import load_dotenv
|
@@ -90,6 +82,7 @@ def predict(message, history):
|
|
90 |
Encourage the student by specifying the strengths of their writing.
|
91 |
DO NOT PROVIDE THE CORRECT ENGLISH TRANSLATION until the student gets the correct translation. Let the student work it out.
|
92 |
Provide your feedback as a list in the format: a, b, c etc.
|
|
|
93 |
|
94 |
Execute the following tasks step by step:
|
95 |
1. Ask the student to translate the following sentence from Japanese to English: {japanese_sentence}. Here is the English translation for reference: {english_sentence}
|
@@ -114,7 +107,7 @@ def predict(message, history):
|
|
114 |
|
115 |
history_langchain_format.append(HumanMessage(content=message))
|
116 |
|
117 |
-
debug_print("### Full history: ", history_langchain_format)
|
118 |
gpt_response = llm(history_langchain_format)
|
119 |
return gpt_response.content
|
120 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import requests
|
2 |
import gradio as gr
|
3 |
from dotenv import load_dotenv
|
|
|
82 |
Encourage the student by specifying the strengths of their writing.
|
83 |
DO NOT PROVIDE THE CORRECT ENGLISH TRANSLATION until the student gets the correct translation. Let the student work it out.
|
84 |
Provide your feedback as a list in the format: a, b, c etc.
|
85 |
+
Do not respond in Japanese - always respond in English even if the student uses Japanese with you.
|
86 |
|
87 |
Execute the following tasks step by step:
|
88 |
1. Ask the student to translate the following sentence from Japanese to English: {japanese_sentence}. Here is the English translation for reference: {english_sentence}
|
|
|
107 |
|
108 |
history_langchain_format.append(HumanMessage(content=message))
|
109 |
|
110 |
+
#debug_print("### Full history: ", history_langchain_format)
|
111 |
gpt_response = llm(history_langchain_format)
|
112 |
return gpt_response.content
|
113 |
|