bbz662 committed on
Commit
bd6ff35
1 Parent(s): e391ae2
Files changed (3) hide show
  1. .gitignore +1 -0
  2. app.py +127 -0
  3. requirements.txt +2 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ flagged
app.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import tiktoken
from decimal import getcontext

# NOTE(review): this sets decimal precision to 2 significant digits, but no
# Decimal value is ever created below — all cost arithmetic uses plain floats —
# so this call has no observable effect. Confirm intent before removing.
getcontext().prec = 2
6
+
7
def num_tokens_from_messages(messages, model):
    """Return the number of tokens used by a list of messages.

    Follows the OpenAI cookbook counting recipe: a fixed overhead per
    message, an extra token when a "name" key is present, and a final
    3-token priming for the assistant reply.

    Raises:
        NotImplementedError: if *model* is not one of the supported models.
    """
    # Resolve the tokenizer first (matches the original behavior: an
    # unknown-to-tiktoken model fails here, before the support check).
    encoding = tiktoken.encoding_for_model(model)

    supported = {"gpt-3.5-turbo-1106", "gpt-4-1106-preview", "gpt-4"}
    if model not in supported:
        raise NotImplementedError(
            f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
        )
    tokens_per_message = 3
    tokens_per_name = 1

    total = 0
    for message in messages:
        total += tokens_per_message
        for key, value in message.items():
            total += len(encoding.encode(value))
            if key == "name":
                total += tokens_per_name
    # every reply is primed with <|start|>assistant<|message|>
    total += 3
    return total
30
+
31
# https://openai.com/pricing
def calc_input_cost(tokens, model):
    """Return the input cost of tokens"""
    # Per-model input price in USD per token-thousand (output prices noted
    # for reference): gpt-3.5-turbo-1106 in $0.001 / out $0.002,
    # gpt-4-1106-preview in $0.01 / out $0.03, gpt-4 in $0.03 / out $0.06.
    input_rates = {
        "gpt-3.5-turbo-1106": 0.001,
        "gpt-4-1106-preview": 0.01,
        "gpt-4": 0.03,
    }
    if model not in input_rates:
        raise NotImplementedError(
            f"""calc_input_cost() is not implemented for model {model}."""
        )
    return float(input_rates[model] * tokens / 1000)
49
+
50
# https://openai.com/pricing
def calc_output_cost(tokens, model="gpt-3.5-turbo-1106"):
    """Return the output cost of tokens

    Args:
        tokens: number of completion (output) tokens.
        model: model name whose pricing to apply.

    Raises:
        NotImplementedError: if pricing for *model* is unknown.
    """
    cost = 0
    if model == "gpt-3.5-turbo-1106":
        # input $0.001 / 1K tokens, output $0.002 / 1K tokens
        cost = 0.002 * tokens / 1000
    elif model == "gpt-4-1106-preview":
        # input $0.01 / 1K tokens, output $0.03 / 1K tokens
        # (comment fixed: input price is $0.01, not $0.03)
        cost = 0.03 * tokens / 1000
    elif model == "gpt-4":
        # input $0.03 / 1K tokens, output $0.06 / 1K tokens
        cost = 0.06 * tokens / 1000
    else:
        # BUG FIX: the original error message named calc_input_cost(),
        # a copy-paste slip that misdirects debugging.
        raise NotImplementedError(
            f"""calc_output_cost() is not implemented for model {model}."""
        )
    return float(cost)
68
+
69
def make_input_messages(system, user):
    """Build the request side of a chat: a system message then a user message."""
    system_message = {"role": "system", "content": system}
    user_message = {"role": "user", "content": user}
    return [system_message, user_message]
80
+
81
def make_output_messages(output):
    """Wrap the model's reply text as a one-element assistant message list."""
    return [{"role": "assistant", "content": output}]
88
+
89
def calc(request_count, system_prompt, user_prompt, output):
    """Build a per-model token/cost report for the given prompts.

    For each supported model, counts input tokens (system + user messages)
    and output tokens (assistant message) with tiktoken, prices them, and
    appends a human-readable summary line group to the result string.

    Args:
        request_count: number of requests to multiply the per-request cost by
            (comes from a gr.Number, so presumably a float — TODO confirm).
        system_prompt: system message text.
        user_prompt: user message text.
        output: expected assistant reply text.

    Returns:
        A multi-line report string, one section per model.
    """
    result = ""
    input_messages = make_input_messages(system_prompt, user_prompt)
    output_messages = make_output_messages(output)
    for model in [
        "gpt-3.5-turbo-1106",
        "gpt-4-1106-preview",
        "gpt-4"
    ]:

        # example token count from the function defined above
        input_token = num_tokens_from_messages(input_messages, model)
        input_cost = calc_input_cost(input_token, model)
        output_token = num_tokens_from_messages(output_messages, model)
        output_cost = calc_output_cost(output_token, model)
        total_cost = input_cost + output_cost

        # Continuation lines of the f-string start at column 0 on purpose:
        # their text is emitted verbatim in the report.
        result += f"""{model}
input token:{input_token}, output token: {output_token} tokens counted by tiktoken.
input cost: ${format(input_cost, 'f')}, output cost: ${format(output_cost, 'f')}, total cost: ${format(total_cost, 'f')} per 1 request
{int(request_count)} request cost is ${format(total_cost * request_count, 'f')}\n"""
    return result
111
+
112
# Gradio UI: one form (request count, prompts, expected output) -> cost report.
iface = gr.Interface(
    fn=calc,
    inputs=[
        gr.Number(label="Request Count", value=500),
        gr.Textbox(label="System Prompt", value="System Prompt"),
        gr.Textbox(label="User Prompt", value="User Prompt"),
        gr.Textbox(label="Output", value="Output Text"),
    ],
    outputs=[
        gr.Textbox(label="Result")
    ],
    title="ChatGPT Token and Cost Calculator",
    # f-prefix removed: the string has no placeholders, so it was misleading.
    description="Enter each form value, then click the button to see the results.\nThe results of this calculation should be considered for reference only.",
)

iface.launch()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ gradio
2
+ tiktoken