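"""Gradio chatbot for Japanese-to-English translation practice.

A sentence pair is drawn from a CSV of Japanese/English pairs, and an OpenAI
chat model (via LangChain) coaches the student through translating it.
"""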
import requests
import gradio as gr
from dotenv import load_dotenv
import os
#from openai import OpenAI
from langchain_openai import OpenAI
import spacy
#from langchain.chat_models import ChatOpenAI
from langchain_openai import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
import pandas as pd
import uuid
import json

# Load environment variables from .env file
load_dotenv()

# Read API tokens from the environment
HF_TOKEN = os.getenv('HUGGING_FACE_TOKEN')

GITHUB_TOKEN = "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" # move to env

# openai setup
# client = OpenAI(
#   api_key=os.getenv('OPENAI_API_KEY')
# )

# hugging face setup
#model_name = "mmnga/ELYZA-japanese-Llama-2-7b-instruct-gguf"
API_URL = f"https://api-inference.huggingface.co/models/"
#API_URL = f"https://api-inference.huggingface.co/models/{model_name}"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}

# Global variable to control debug printing
DEBUG_MODE = True


def share_to_gist(content, public=False):
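    """Post the chat history to a GitHub Gist and return its html_url."""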
    url = "https://api.github.com/gists"
    headers = {
        "Authorization": f"token {os.getenv(GITHUB_TOKEN)}",
        "Accept": "application/vnd.github.v3+json",
    }
    data = {
        "public": public,
        "description": "Chat history",
        "files": {
            "chat.txt": {
                "content": content
            }
        }
    }
    response = requests.post(url, headers=headers, data=json.dumps(data))
    gist_url = response.json().get('html_url', '')
    return gist_url

def generate_unique_id():
    return str(uuid.uuid4())

def debug_print(*args, **kwargs):
    if DEBUG_MODE:
        print(*args, **kwargs)

def split_sentences_ginza(input_text):
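    """Split Japanese text into sentences with spaCy's ja_core_news_sm model."""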
    nlp = spacy.load("ja_core_news_sm")
    doc = nlp(input_text)
    sentences = [sent.text for sent in doc.sents]
    return sentences


file_path = 'anki_japanese_english_pairs.csv'  # column 0: Japanese sentence, column 1: English translation

def load_csv(file_path):
    # Load the CSV file into a DataFrame
    df = pd.read_csv(file_path)
    
    return df

def get_sentence_pair(df):
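    """Return a random (Japanese, English) sentence pair from the DataFrame."""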

    # Get a random row from the DataFrame
    random_row = df.sample(1)
    #debug_print("### random_row:", random_row)

    #print(random_row.shape)

    japanese_sentence = str(random_row.iloc[0, 0])
    english_sentence = str(random_row.iloc[0, 1])

    debug_print("### Japanese sentence:", japanese_sentence)
    debug_print("### English sentence:", english_sentence)


    return japanese_sentence, english_sentence


# Pick one sentence pair at import time; the same pair is used for the whole session
japanese_sentence, english_sentence = get_sentence_pair(load_csv(file_path))


llm = ChatOpenAI(temperature=0.7, model='gpt-3.5-turbo')

def predict(message, history):
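    """Chat callback for gr.ChatInterface.

    Rebuilds the conversation as LangChain messages (setup prompt first,
    then the prior turns, then the new message) and returns the model's reply.
    """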

    # Define your initial setup prompt here
    initial_setup = f'''
    Japanese students are learning to translate Japanese text to English text. They will be given a Japanese sentence to translate, and will provide an English translation attempt. 
    Based on the feedback you provide, they will revise their translation. This process will continue until their translation is accurate. 

    Encourage the student by specifying the strengths of their writing.
    DO NOT PROVIDE THE CORRECT ENGLISH TRANSLATION until the student gets the correct translation. Let the student work it out.
    Provide your feedback as a list in the format: a, b, c etc.
    Do not respond in Japanese - always respond in English even if the student uses Japanese with you.
    
    Execute the following tasks step by step:
    1. Ask the student to translate the following sentence from Japanese to English: {japanese_sentence}. Here is the English translation for reference: {english_sentence}
    2. Suggest only mechanical corrections (i.e., spelling, grammar, and punctuation) for the student. Ask for another translation attempt.

    Start by asking the student to translate the Japanese sentence.
    '''

    # removed from prompt
    # The student's translation need not match the provided English translation exactly, but it should be accurate to the Japanese text.

    # Start the history with a SystemMessage containing the setup prompt
    history_langchain_format = [SystemMessage(content=initial_setup)]
    #history_langchain_format.append(HumanMessage(content="Let's start."))


    for human, ai in history:
        if human is not None:  # skip None entries (e.g. the initial welcome message has no user turn)
            history_langchain_format.append(HumanMessage(content=human))
        if ai is not None:
            history_langchain_format.append(AIMessage(content=ai))

    history_langchain_format.append(HumanMessage(content=message))

    #debug_print("### Full history: ", history_langchain_format)
    gpt_response = llm.invoke(history_langchain_format)
    return gpt_response.content

welcome_message = "Hi! 👋. Are you ready to practise translation?"

# with gr.Blocks() as app:
#     chatbot = gr.Chatbot()
#     message = gr.Textbox()
#     clear = gr.ClearButton([message, chatbot])
#     message.submit(predict, [message, chatbot], [message, chatbot])

app = gr.ChatInterface(
    fn=predict,
    title="Translation Chatbot",
    chatbot=gr.Chatbot(value=[(None, welcome_message)]),
    # multimodal=True,
)



app.launch()