yangtb24 committed on
Commit 8cec638 · verified · 1 Parent(s): be2060b

Update app.py

Files changed (1):
  app.py (+36 -81)
app.py CHANGED
@@ -7,9 +7,6 @@ import random
 import uuid
 import concurrent.futures
 import threading
-import base64
-import io
-from PIL import Image
 from datetime import datetime, timedelta
 from apscheduler.schedulers.background import BackgroundScheduler
 from flask import Flask, request, jsonify, Response, stream_with_context
@@ -633,6 +630,8 @@ def handsome_chat_completions():
 
     if data.get("stream", False):
         def generate():
+            first_chunk_time = None
+            full_response_content = ""
             try:
                 response.raise_for_status()
                 end_time = time.time()
@@ -651,12 +650,6 @@ def handsome_chat_completions():
                 logging.info(f"Extracted image URL: {image_url}")
 
                 if image_url:
-                    image_response = requests.get(image_url, stream=True)
-                    image_response.raise_for_status()
-
-
-                    first_chunk_time = time.time()
-
                     chunk_data = {
                         "id": f"chatcmpl-{uuid.uuid4()}",
                         "object": "chat.completion.chunk",
@@ -667,67 +660,14 @@ def handsome_chat_completions():
                                 "index": 0,
                                 "delta": {
                                     "role": "assistant",
-                                    "content": ""
+                                    "content": image_url
                                 },
                                 "finish_reason": None
                             }
                         ]
                     }
                     yield f"data: {json.dumps(chunk_data)}\n\n".encode('utf-8')
-
-                    for chunk in image_response.iter_content(chunk_size=1024):
-                        if chunk:
-                            base64_chunk = base64.b64encode(chunk).decode('utf-8')
-                            chunk_data = {
-                                "id": f"chatcmpl-{uuid.uuid4()}",
-                                "object": "chat.completion.chunk",
-                                "created": int(time.time()),
-                                "model": model_name,
-                                "choices": [
-                                    {
-                                        "index": 0,
-                                        "delta": {
-                                            "role": "assistant",
-                                            "content": base64_chunk
-                                        },
-                                        "finish_reason": None
-                                    }
-                                ]
-                            }
-                            yield f"data: {json.dumps(chunk_data)}\n\n".encode('utf-8')
-
-                    end_chunk_data = {
-                        "id": f"chatcmpl-{uuid.uuid4()}",
-                        "object": "chat.completion.chunk",
-                        "created": int(time.time()),
-                        "model": model_name,
-                        "choices": [
-                            {
-                                "index": 0,
-                                "delta": {},
-                                "finish_reason": "stop"
-                            }
-                        ]
-                    }
-                    yield f"data: {json.dumps(end_chunk_data)}\n\n".encode('utf-8')
-
-                    first_token_time = (
-                        first_chunk_time - start_time
-                        if first_chunk_time else 0
-                    )
-                    total_time = end_time - start_time
-
-                    logging.info(
-                        f"API key used: {api_key}, "
-                        f"Time to first token: {first_token_time:.4f}s, "
-                        f"Total time: {total_time:.4f}s, "
-                        f"Model used: {model_name}"
-                    )
-
-                    with data_lock:
-                        request_timestamps.append(time.time())
-                        token_counts.append(0)  # Image generation doesn't use tokens
-
+                    full_response_content = image_url
                 else:
                     chunk_data = {
                         "id": f"chatcmpl-{uuid.uuid4()}",
@@ -746,20 +686,26 @@ def handsome_chat_completions():
                         ]
                     }
                     yield f"data: {json.dumps(chunk_data)}\n\n".encode('utf-8')
-                    end_chunk_data = {
-                        "id": f"chatcmpl-{uuid.uuid4()}",
-                        "object": "chat.completion.chunk",
-                        "created": int(time.time()),
-                        "model": model_name,
-                        "choices": [
-                            {
-                                "index": 0,
-                                "delta": {},
-                                "finish_reason": "stop"
-                            }
-                        ]
-                    }
-                    yield f"data: {json.dumps(end_chunk_data)}\n\n".encode('utf-8')
+                    full_response_content = "Failed to generate image"
+
+                end_chunk_data = {
+                    "id": f"chatcmpl-{uuid.uuid4()}",
+                    "object": "chat.completion.chunk",
+                    "created": int(time.time()),
+                    "model": model_name,
+                    "choices": [
+                        {
+                            "index": 0,
+                            "delta": {},
+                            "finish_reason": "stop"
+                        }
+                    ]
+                }
+                yield f"data: {json.dumps(end_chunk_data)}\n\n".encode('utf-8')
+
+                with data_lock:
+                    request_timestamps.append(time.time())
+                    token_counts.append(0)  # Image generation doesn't use tokens
             except requests.exceptions.RequestException as e:
                 logging.error(f"Request forwarding exception: {e}")
                 error_chunk_data = {
@@ -793,9 +739,12 @@ def handsome_chat_completions():
                     ]
                 }
                 yield f"data: {json.dumps(end_chunk_data)}\n\n".encode('utf-8')
-
+
+            logging.info(
+                f"API key used: {api_key}, "
+                f"Model used: {model_name}"
+            )
             yield "data: [DONE]\n\n".encode('utf-8')
-
         return Response(stream_with_context(generate()), content_type='text/event-stream')
     else:
         response.raise_for_status()
@@ -1321,6 +1270,10 @@ def handsome_embeddings():
     except requests.exceptions.RequestException as e:
         return jsonify({"error": str(e)}), 500
 
+import base64
+import io
+from PIL import Image
+
 @app.route('/handsome/v1/images/generations', methods=['POST'])
 def handsome_images_generations():
     if not check_authorization(request):
@@ -1359,6 +1312,7 @@ def handsome_images_generations():
     response_data = {}
 
     if "stable-diffusion" in model_name:
+        # Map OpenAI-style parameters to SiliconFlow's parameters
        siliconflow_data = {
            "model": model_name,
            "prompt": data.get("prompt"),
@@ -1371,6 +1325,7 @@ def handsome_images_generations():
            "prompt_enhancement": False,
        }
 
+        # Parameter validation and adjustments
        if siliconflow_data["batch_size"] < 1:
            siliconflow_data["batch_size"] = 1
        if siliconflow_data["batch_size"] > 4:
@@ -1455,7 +1410,7 @@ def handsome_images_generations():
 
         with data_lock:
             request_timestamps.append(time.time())
-            token_counts.append(0)
+            token_counts.append(0)  # Image generation doesn't use tokens
 
         return jsonify(response_data)
 
 
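A minimal Python client sketch for consuming that stream. Assumptions: the app listens on a Space's usual port 7860, and the chat route is /handsome/v1/chat/completions (the route decorator sits outside these hunks, so the path is inferred from the handler name handsome_chat_completions); the Authorization value is a placeholder.

    import json
    import requests

    url = "http://localhost:7860/handsome/v1/chat/completions"  # path and port assumed, see above
    payload = {
        "model": "stable-diffusion-xl",  # illustrative model name
        "messages": [{"role": "user", "content": "a red fox in the snow"}],
        "stream": True,
    }
    headers = {"Authorization": "Bearer <your key>"}  # placeholder credential

    with requests.post(url, json=payload, headers=headers, stream=True) as resp:
        for line in resp.iter_lines():
            if not line or not line.startswith(b"data: "):
                continue  # skip keep-alives and non-data lines
            body = line[len(b"data: "):]
            if body == b"[DONE]":
                break
            chunk = json.loads(body)
            # After this commit the first delta carries the image URL directly.
            print(chunk["choices"][0]["delta"].get("content", ""), end="")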
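On the non-streaming side, the diff confirms the route /handsome/v1/images/generations and that, for models whose name contains "stable-diffusion", the request is mapped to SiliconFlow parameters with batch_size clamped server-side to the range 1-4 and prompt_enhancement forced to False. A hedged example call; only "model" and "prompt" are visible in these hunks, so no other request fields are invented:

    import requests

    resp = requests.post(
        "http://localhost:7860/handsome/v1/images/generations",  # host and port assumed
        headers={"Authorization": "Bearer <your key>"},  # placeholder credential
        json={
            "model": "stable-diffusion-xl",  # must contain "stable-diffusion" to hit this branch
            "prompt": "a watercolor lighthouse at dusk",
        },
    )
    print(resp.json())  # the request is also counted, with a token count of 0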