rkoushikroy2 committed on
Commit
54d25d7
β€’
1 Parent(s): de43c3a

Upload 3 files

Browse files
Files changed (4) hide show
  1. .gitattributes +1 -0
  2. app.py +34 -0
  3. data_with_ada_embedding.csv +3 -0
  4. helper_functions.py +46 -0
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ data_with_ada_embedding.csv filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import gradio as gr

from helper_functions import *
4
def get_prompt(user_message):
    """Assemble the full LLM prompt for *user_message*.

    Joins the current system pre-text and the retrieved context for the
    message with a blank line in between.
    """
    context = get_context(user_message)
    return "\n\n".join([pre_text, context])
6
+
7
def set_pre_text(system_prompt):
    """Replace the module-level system prompt used by ``get_prompt``.

    Wired to the Gradio system-prompt textbox's ``change`` event, so edits in
    the UI take effect for every subsequent prompt generation.
    """
    # Rebinds this module's `pre_text` (originally star-imported from
    # helper_functions); helper_functions.pre_text itself is untouched.
    global pre_text
    pre_text = system_prompt
10
+
11
# Gradio UI: tab 1 turns a user message into a full prompt (system pre-text +
# retrieved context); tab 2 lets the operator edit the system pre-text live.
with gr.Blocks() as app:
    gr.Markdown('# Prompt Generator for FundedNext')
    with gr.Tab("Generate Prompt"):
        user_message = gr.Textbox(label = "Enter your message")
        # Output box is editable so the operator can tweak the generated
        # prompt before copying it out.
        prompt = gr.Textbox(label="Generated Prompt", interactive=True, lines=20)
    with gr.Tab("Edit System Prompt"):
        system_prompt = gr.Textbox(
            label="System Prompt", interactive=True, lines=15
        )
        gr.Markdown("## System Prompt Examples")
        # One-click example restoring the default pre_text from helper_functions.
        gr.Examples(
            examples = [[pre_text]],
            inputs = [system_prompt]
        )

    # On Enter: render the prompt, then clear the input textbox.
    user_message.submit(
        fn = get_prompt, inputs = user_message, outputs = prompt
    ).then(lambda:"", inputs=None, outputs=user_message)
    # Every edit to the system-prompt textbox immediately replaces pre_text.
    system_prompt.change(
        fn = set_pre_text, inputs = system_prompt, outputs = None
    )


# NOTE(review): auth becomes (None, None) when the `id`/`password` env vars
# are unset — confirm whether that should disable auth or fail fast.
app.launch(auth=(os.getenv("id"), os.getenv("password")), show_api=False)
data_with_ada_embedding.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b258b46cc12de730c67e6650fda4e5139b701d91529ec2f50e12e2580aecbb7
3
+ size 19996735
helper_functions.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from ast import literal_eval

from openai.embeddings_utils import get_embedding, cosine_similarity
import os
import openai
import pandas as pd
import numpy as np

# Authenticate against the OpenAI API from the environment.
openai.api_key = os.getenv("OPENAI_API_KEY")

# Knowledge base: one row per summarised document chunk, with a precomputed
# text-embedding-ada-002 vector stored as a stringified list of floats.
df = pd.read_csv('data_with_ada_embedding.csv')
# Rough token estimate (~4 characters per token) used for context budgeting.
df["token"] = df.combined_summarised.map(len)//4
# Parse each stringified embedding into a numpy vector. literal_eval accepts
# only Python literals — unlike the original eval(), which would execute
# arbitrary code smuggled into the CSV.
df['ada_embedding'] = df.ada_embedding.apply(literal_eval).apply(np.array)
15
# Default system prompt: explains FundedNext's two account models (Express /
# Evaluation) and sets the customer-service persona for the downstream LLM.
# app.py star-imports this value and lets the operator replace it at runtime.
# (Wording/typos are part of the deployed prompt and deliberately untouched.)
pre_text = """FundedNext funds promising traders from all over the world. This is how it works: traders come to the platform, and sign up for different challenges. If they are able to reach the trading targets without breaching any of the rules and pass the challenge, then they can get funded by your company FundedNext.
Fundednext has two account models. Users can go for Either Express Model or Evaluation Model, To get a real funded account. Each model has challenge phase and real phase. After sucessfully completing the challenge phase without violating any rules, users are eligible for their real trading account.
Express model has two phases. Express Demo and Express Real. Express Demo is the challenge phase. Express users need to pass only one challenge phase to get to Express Real phase.
While traders in the Evaluation model need to pass two challenge phases called Phase 1 and Phase 2. The final phase in Evaluation model is Evaluation Real.
You are supposed to help the users of FundedNext with their questions and provide them with helpful answers.
Your Approach to the conversation will be as follows :
As an AI customer service agent, your main job is to help customers with their questions and concerns. This requires you to be empathetic and understanding of their situation. You should always begin the conversation by asking how you can help and listening carefully to their response.
It's important to make the customer feel welcome and comfortable by using pleasant greetings and respectful language throughout the interaction. You should always remain professional and respectful, even if the customer is upset or frustrated.
Remember to ask clarifying questions if necessary to fully understand the customer's issue. Once you have identified the problem, work to find a solution that meets their needs. End the conversation on a positive note by thanking the customer for their time and offering any additional assistance they may need.
Overall, your goal is to provide top-notch customer service that leaves a positive impression on every customer. By following these guidelines, you can ensure that you are doing just that.
To help you with the necessary information needed to properly serve the client, relevant context and information are shared with you up next. Use the context to ask follow up questions to the user and respond to the queries. You should only answer the question if you are sure of the answer based on the provided context."""
27
def search(df, query, max_n, max_token, min_similarity=0.77):
    """Return the document chunks most relevant to *query*, joined by blank lines.

    Embeds the query with text-embedding-ada-002, ranks rows of *df* by cosine
    similarity, keeps at most *max_n* rows at or above *min_similarity*, and
    concatenates their ``combined_summarised`` text in relevance order until
    the estimated token budget *max_token* would be exceeded.

    Args:
        df: DataFrame with ``ada_embedding`` (np.ndarray), ``token``
            (estimated token count) and ``combined_summarised`` (text) columns.
        query: Free-text user question to embed and match.
        max_n: Maximum number of candidate rows to consider.
        max_token: Estimated-token budget for the returned context.
        min_similarity: Cosine-similarity cutoff below which rows are dropped
            (default 0.77, the previously hard-coded threshold).

    Returns:
        str: The selected chunks separated by blank lines (possibly empty).
    """
    query_embedding = get_embedding(
        query,
        engine="text-embedding-ada-002"
    )
    # Work on a copy so the caller's (module-level) DataFrame is not mutated
    # by the scratch columns added below — the original wrote "similarity"
    # straight into the shared frame on every call.
    df = df.copy()
    df["similarity"] = df.ada_embedding.apply(lambda x: cosine_similarity(x, query_embedding))
    df = df.sort_values("similarity", ascending=False).head(max_n)
    df = df[df['similarity'] >= min_similarity]
    # Running token total in most-relevant-first order; rows past the budget
    # are cut off below.
    df["cumulative_sum"] = df.token.cumsum()
    return '\n\n'.join(df[(df['cumulative_sum'] < max_token)]["combined_summarised"])
37
+
38
def get_context(query):
    """Wrap the top search results for *query* in a delimited question template.

    Runs ``search`` over the module-level DataFrame (top 10 chunks, ~500-token
    budget) and returns an instruction block containing the retrieved context
    followed by the user's question, ready to append to the system prompt.
    """
    results = search(df, query, max_n = 10, max_token = 500)
    # NOTE(review): the delimiters and quotes below appeared as mojibake
    # ("β€”", "β€œ", "”") in the original — reconstructed here as the intended
    # em dashes and curly quotes; confirm against the deployed file's bytes.
    return f"""I will ask you questions based on the following context:
— Start of Context —
{results}
— End of Context —
My question is: “{query}”
"""