hectorjelly committed on
Commit
1d54e0d
1 Parent(s): 5236b9e

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -0
app.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# NOTE: install dependencies from the shell, NOT inside this script:
#   pip install gradio openai wandb
import os

import gradio as gr
import openai
from wandb.integration.openai import autolog

# SECURITY: never hard-code API keys in source (the original committed a live
# key). Read it from the environment instead; set OPENAI_API_KEY before running.
openai_api_key = os.environ.get("OPENAI_API_KEY", "")
openai.api_key = openai_api_key

# Start logging all OpenAI calls to Weights & Biases.
autolog({"project": "Joe1", "job_type": "introduction"})

# Conversation history, seeded with the system prompt that defines the
# assistant's persona and answering style. Mutated by ask_joe() on each turn.
conversation_history = [
    {
        "role": "system",
        "content": "Your name is Joe Chip, a world class poker player. Keep your answers succinct but cover important areas."
        "If you need more context ask for it."
        " Make sure you know what the effective stack is and whether its a cash game or mtt"
        "Concentrate more on GTO play rather than exploiting other players."
        "Consider blockers when applicable"
        "Always discuss how to play your range, not just the hand in question"
        "Remember to keep your answers brief"
        "Only answer questions on poker topics",
    }
]
def ask_joe(text):
    """Send the user's message to the model and return its reply.

    Appends both the user's message and the assistant's reply to the
    module-level ``conversation_history`` so context accumulates across calls.
    """
    # Record the user's turn before querying the model.
    conversation_history.append({"role": "user", "content": text})

    # Query GPT-4 with the entire running conversation as context.
    reply = openai.ChatCompletion.create(
        model="gpt-4",
        messages=conversation_history,
        max_tokens=500,
        temperature=0.5,
    )

    # Pull the assistant's text out of the first choice.
    answer = reply.choices[0].message['content'].strip()

    # Record the assistant's turn so the next call sees it.
    conversation_history.append({"role": "assistant", "content": answer})

    return answer
# Build the Gradio UI: a single text box in, a single text box out,
# backed by ask_joe.
iface = gr.Interface(
    fn=ask_joe,
    inputs="text",
    outputs="text",
)

iface.launch()


# iface.launch(share=True)