Rooni committed
Commit a78ddb1 · verified · 1 Parent(s): 981f35a

Update app.py

Files changed (1)
  1. app.py +34 -64
app.py CHANGED
@@ -2,7 +2,7 @@ import os
 import random
 import requests
 from huggingface_hub import InferenceClient
-from flask import Flask, request, jsonify
+from flask import Flask, request, jsonify, Response, stream_with_context
 from flask_cors import CORS
 import json
 
@@ -16,7 +16,7 @@ def get_random_api_key():
     else:
         raise ValueError("API keys not found. Please set the KEYS environment variable.")
 
-def generate_story(prompt, style, stream=False):
+def generate_story(prompt, style):
     try:
         client = InferenceClient(api_key=get_random_api_key())
 
@@ -25,20 +25,14 @@ def generate_story(prompt, style, stream=False):
             {"role": "user", "content": prompt}
         ]
 
-        completion = client.chat.completions.create(model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, temperature=0.7, max_tokens=1200, stream=stream)
-        if stream:
-            story = ""
-            for chunk in completion:
-                if chunk.choices:
-                    story += chunk.choices[0].delta.content or ""
-                    yield story
-        else:
-            story = completion.choices[0].message.content
-            yield story
+        completion = client.chat.completions.create(model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, temperature=0.7, max_tokens=1200, stream=True)
+        for chunk in completion:
+            if chunk.choices:
+                yield chunk.choices[0].delta.content or ""
     except Exception as e:
         yield f"Ошибка генерации: {e}"
 
-def edit_story(original_story, edited_prompt, stream=False):
+def edit_story(original_story, edited_prompt):
     if not original_story:
         yield f"Сначала сгенерируйте историю!"
         return
@@ -53,20 +47,14 @@ def edit_story(original_story, edited_prompt, stream=False):
             {"role": "user", "content": edited_prompt},
             {"role": "assistant", "content": original_story}
         ]
-        completion = client.chat.completions.create(model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, temperature=0.7, max_tokens=155000, stream=stream)
-        if stream:
-            edited_story = ""
-            for chunk in completion:
-                if chunk.choices:
-                    edited_story += chunk.choices[0].delta.content or ""
-                    yield edited_story
-        else:
-            edited_story = completion.choices[0].message.content
-            yield edited_story
+        completion = client.chat.completions.create(model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, temperature=0.7, max_tokens=155000, stream=True)
+        for chunk in completion:
+            if chunk.choices:
+                yield chunk.choices[0].delta.content or ""
     except Exception as e:
         yield f"Ошибка редактирования: {e}"
 
-def next_story_func(original_story, next_prompt, continuation_type="Продолжение", stream=False):
+def next_story_func(original_story, next_prompt, continuation_type="Продолжение"):
    if not original_story:
         yield f"Сначала сгенерируйте историю!"
         return
@@ -85,19 +73,22 @@ def next_story_func(original_story, next_prompt, continuation_type="Продолжение", stream=False):
             {"role": "user", "content": continuation_prompt},
             {"role": "assistant", "content": original_story}
         ]
-        completion = client.chat.completions.create(model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, temperature=0.7, max_tokens=1200, stream=stream)
-        if stream:
-            next_story = ""
-            for chunk in completion:
-                if chunk.choices:
-                    next_story += chunk.choices[0].delta.content or ""
-                    yield next_story
-        else:
-            next_story = completion.choices[0].message.content
-            yield next_story
+        completion = client.chat.completions.create(model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, temperature=0.7, max_tokens=1200, stream=True)
+        for chunk in completion:
+            if chunk.choices:
+                yield chunk.choices[0].delta.content or ""
     except Exception as e:
         yield f"Ошибка продления: {e}"
 
+def stream_sse(generator):
+    def event_stream():
+        try:
+            for chunk in generator:
+                yield f"data: {json.dumps({'message': chunk})}\n\n"
+        except Exception as e:
+            yield f"data: {json.dumps({'error': True, 'message': str(e)})}\n\n"
+    return Response(stream_with_context(event_stream()), content_type='text/event-stream')
+
 @app.route('/generate', methods=['POST'])
 def api_generate_story():
     data = request.get_json()
@@ -105,17 +96,10 @@ def api_generate_story():
         return jsonify({"error": True, "message": "Missing 'input' in request body"}), 400
 
     prompt = data['input']
-    style = data.get('style', 'Приключенческая') # Default style
-    stream = data.get('stream', False)
+    style = data.get('style', 'Приключенческая')
 
-    try:
-        story_generator = generate_story(prompt, style, stream)
-        if stream:
-            return jsonify({"error": False, "message": "".join(story_generator)})
-        else:
-            return jsonify({"error": False, "message": next(story_generator)})
-    except Exception as e:
-        return jsonify({"error": True, "message": str(e)}), 500
+    story_generator = generate_story(prompt, style)
+    return stream_sse(story_generator)
 
 @app.route('/edit', methods=['POST'])
 def api_edit_story():
@@ -125,16 +109,9 @@ def api_edit_story():
 
     original_story = data['original']
     edited_prompt = data['input']
-    stream = data.get('stream', False)
 
-    try:
-        edited_story_generator = edit_story(original_story, edited_prompt, stream)
-        if stream:
-            return jsonify({"error": False, "message": "".join(edited_story_generator)})
-        else:
-            return jsonify({"error": False, "message": next(edited_story_generator)})
-    except Exception as e:
-        return jsonify({"error": True, "message": str(e)}), 500
+    edited_story_generator = edit_story(original_story, edited_prompt)
+    return stream_sse(edited_story_generator)
 
 @app.route('/continue', methods=['POST'])
 def api_continue_story():
@@ -144,18 +121,11 @@ def api_continue_story():
 
     original_story = data['original']
     next_prompt = data['input']
-    continuation_type = data.get('type', 'Продолжение') # Default continuation type
-    stream = data.get('stream', False)
+    continuation_type = data.get('type', 'Продолжение')
 
-    try:
-        next_story_generator = next_story_func(original_story, next_prompt, continuation_type, stream)
-        if stream:
-            return jsonify({"error": False, "message": "".join(next_story_generator)})
-        else:
-            return jsonify({"error": False, "message": next(next_story_generator)})
-    except Exception as e:
-        return jsonify({"error": True, "message": str(e)}), 500
+    next_story_generator = next_story_func(original_story, next_prompt, continuation_type)
+    return stream_sse(next_story_generator)
+
 
 if __name__ == '__main__':
     app.run(host='0.0.0.0', port=7860, debug=True)
-
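Note on consuming the new API: after this change every route responds with text/event-stream, and each event is a data: {"message": ...} frame produced by stream_sse(). The snippet below is a minimal client sketch, not part of this commit; it assumes the server is running locally on port 7860 as configured in app.py, and the helper name stream_generate and the example prompt are made up for illustration.

import json
import requests

def stream_generate(prompt, style="Приключенческая", base_url="http://localhost:7860"):
    # POST the same JSON body api_generate_story() expects and keep the
    # connection open so SSE frames can be read as they arrive.
    payload = {"input": prompt, "style": style}
    with requests.post(f"{base_url}/generate", json=payload, stream=True) as resp:
        resp.raise_for_status()
        for line in resp.iter_lines(decode_unicode=True):
            if not line or not line.startswith("data: "):
                continue  # skip the blank separator lines between SSE events
            event = json.loads(line[len("data: "):])
            if event.get("error"):
                raise RuntimeError(event["message"])
            yield event["message"]  # one streamed fragment per event

if __name__ == "__main__":
    for fragment in stream_generate("Write a short story about a lighthouse keeper"):
        print(fragment, end="", flush=True)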
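The /edit and /continue routes stream in the same format and differ only in their JSON bodies, which follow the keys read in api_edit_story() and api_continue_story(). The payloads below are illustrative examples; the placeholder strings stand in for real data.

# Hypothetical request bodies for the /edit and /continue endpoints; both
# return the same text/event-stream format as /generate.
edit_payload = {
    "original": "<previously generated story>",  # required by api_edit_story()
    "input": "Make the ending happier",          # the edit instruction
}
continue_payload = {
    "original": "<previously generated story>",  # required by api_continue_story()
    "input": "The hero finds an old map",        # the continuation prompt
    "type": "Продолжение",                       # optional; default used by next_story_func()
}
# e.g. requests.post(f"{base_url}/edit", json=edit_payload, stream=True)
#      requests.post(f"{base_url}/continue", json=continue_payload, stream=True)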