skinapi committed on
Commit
e4809ff
·
1 Parent(s): 7c6803d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -13
app.py CHANGED
@@ -1,19 +1,82 @@
1
- from flask import Flask, request
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
  app = Flask(__name__)
5
 
6
- model_name = "openai-gpt"
7
- tokenizer = AutoTokenizer.from_pretrained(model_name)
8
- model = AutoModelForCausalLM.from_pretrained(model_name)
9
-
10
- @app.route('/generate_text', methods=['POST'])
11
- def generate_text():
12
- input_text = request.json['input_text']
13
- input_ids = tokenizer.encode(input_text, return_tensors='pt')
14
- output_ids = model(**input_ids)
15
- output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
16
- return {'generated_text': output_text}
 
 
 
 
 
 
 
 
 
 
 
17
 
18
  if __name__ == '__main__':
19
  app.run()
 
from flask import Flask, request, jsonify
from typing import Dict, Any
import os
import openai

# SECURITY: never commit a real API key to source control. Read it from the
# environment; the placeholder default keeps existing local setups working.
openai.api_key = os.environ.get("OPENAI_API_KEY", "YOUR_API_KEY")
def submit_message(user_token, prompt, prompt_template, temperature, max_tokens, state):
    """Send `prompt` to the OpenAI chat API and update conversation `state` in place.

    Parameters:
        user_token: opaque caller token; when set, the token-usage message is
            suppressed in the returned payload.
        prompt: user's message text; falsy prompt returns the current
            transcript without calling the API.
        prompt_template: key into a module-level `prompt_templates` mapping
            (NOTE(review): not defined in this file — lookup is guarded so a
            missing mapping/key degrades to "no system prompt").
        temperature, max_tokens: forwarded to the OpenAI API.
        state: dict with 'messages' (chat history) and 'total_tokens' (int),
            both mutated in place.

    Returns:
        dict with 'chat_messages' — list of (user, reply) content pairs —
        and 'total_tokens_used_msg'.
    """
    history = state['messages']

    def _pairs(msgs):
        # History alternates user/reply; zip consecutive entries into tuples.
        return [(msgs[i]['content'], msgs[i + 1]['content'])
                for i in range(0, len(msgs) - 1, 2)]

    if not prompt:
        # Nothing to send: report the transcript and usage as-is.
        return {
            'chat_messages': _pairs(history),
            'total_tokens_used_msg': f"Total tokens used: {state['total_tokens']} / 3000",
        }

    # Guarded lookup: the original `prompt_templates[prompt_template]` raised
    # NameError/KeyError when the mapping or key was absent.
    try:
        template_text = prompt_templates[prompt_template]
    except (NameError, KeyError, TypeError):
        template_text = ""

    system_prompt = [{"role": "system", "content": template_text}] if template_text else []
    prompt_msg = {"role": "user", "content": prompt}

    try:
        # Chat-style message lists require the ChatCompletion endpoint —
        # Completion.create takes a plain string prompt, so the original call
        # could never succeed with a list of role/content dicts.
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=system_prompt + history + [prompt_msg],
            temperature=temperature,
            max_tokens=max_tokens,
        )
        history.append(prompt_msg)
        history.append({
            # Model replies carry role 'assistant' (the original wrote 'system').
            "role": "assistant",
            "content": response.choices[0].message['content'],
        })
        # Token usage lives under usage.total_tokens; 'total_characters'
        # does not exist in the API response.
        state['total_tokens'] += response['usage']['total_tokens']
    except Exception as e:
        # Best-effort: surface the error in the transcript instead of crashing
        # the request (matches the original's intent).
        history.append(prompt_msg)
        history.append({"role": "assistant", "content": f"Error: {e}"})

    total_tokens_used_msg = (
        f"Total tokens used: {state['total_tokens']} / 3000" if not user_token else ""
    )
    return {
        'chat_messages': _pairs(history),
        'total_tokens_used_msg': total_tokens_used_msg,
    }
 
app = Flask(__name__)

# Conversation state for the /chat endpoint. This is a single module-level
# dict mutated in place by submit_message, so it is shared by ALL requests
# served by this process. NOTE(review): not per-user and not thread-safe —
# confirm single-user usage or move to per-session storage.
state = {
    'messages': [],      # alternating user/reply message dicts
    'total_tokens': 0    # running token count reported back to the client
}
@app.route('/chat', methods=['POST'])
def chat():
    """Handle a chat POST: extract parameters from the JSON body, delegate to
    submit_message, and return its transcript/usage payload as JSON.

    Expects a JSON object with 'user_token', 'prompt', 'prompt_template',
    and optional 'temperature' (default 0.5) and 'max_tokens' (default 50).
    """
    data = request.json
    user_token = data.get('user_token')
    prompt = data.get('prompt')
    prompt_template = data.get('prompt_template')
    temperature = data.get('temperature', 0.5)
    max_tokens = data.get('max_tokens', 50)

    # submit_message returns a 2-key dict and mutates the module-level `state`
    # in place. The original unpacked a 4-tuple AND rebound a local `state`,
    # which made `state` an unbound local at the call site (UnboundLocalError).
    result = submit_message(user_token, prompt, prompt_template,
                            temperature, max_tokens, state)

    return jsonify({
        'chat_messages': result['chat_messages'],
        'total_tokens_used_msg': result['total_tokens_used_msg'],
    })
80
 
if __name__ == '__main__':
    # Start Flask's built-in development server when run directly.
    app.run()