yasserrmd committed on
Commit 3d6604f
1 Parent(s): 43827a1

Update app.py

Files changed (1)
  1. app.py +38 -16
app.py CHANGED
@@ -73,9 +73,8 @@ async def serve_frontend():
 async def generate_infographic(request: InfographicRequest):
     description =await generate_infographic_details(request)
     prompt = PROMPT_TEMPLATE.format(description=description)
-
-    try:
-        messages = [{"role": "user", "content": prompt}]
+
+    messages = [{"role": "user", "content": prompt}]
     stream = client.chat.completions.create(
         model="Qwen/Qwen2.5-Coder-32B-Instruct",
         messages=messages,
@@ -86,16 +85,39 @@ async def generate_infographic(request: InfographicRequest):
     )
 
 
-        generated_text = ""
-        for chunk in stream:
-            generated_text += chunk.choices[0].delta.content
-
-        print(generated_text)
-        code_blocks= await extract_code_blocks(generated_text)
-        if code_blocks:
-            return JSONResponse(content={"html": code_blocks[0]})
-        else:
-            return JSONResponse(content={"error": "No generation"},status_code=500)
-
-    except Exception as e:
-        return JSONResponse(content={"error": str(e)}, status_code=500)
+    generated_text = ""
+    for chunk in stream:
+        generated_text += chunk.choices[0].delta.content
+
+    print(generated_text)
+    code_blocks= await extract_code_blocks(generated_text)
+    if code_blocks:
+        return JSONResponse(content={"html": code_blocks[0]})
+    else:
+        return JSONResponse(content={"error": "No generation"},status_code=500)
+
+    # try:
+    #     messages = [{"role": "user", "content": prompt}]
+    #     stream = client.chat.completions.create(
+    #         model="Qwen/Qwen2.5-Coder-32B-Instruct",
+    #         messages=messages,
+    #         temperature=0.4,
+    #         max_tokens=6000,
+    #         top_p=0.7,
+    #         stream=True,
+    #     )
+
+
+    #     generated_text = ""
+    #     for chunk in stream:
+    #         generated_text += chunk.choices[0].delta.content
+
+    #     print(generated_text)
+    #     code_blocks= await extract_code_blocks(generated_text)
+    #     if code_blocks:
+    #         return JSONResponse(content={"html": code_blocks[0]})
+    #     else:
+    #         return JSONResponse(content={"error": "No generation"},status_code=500)
+
+    # except Exception as e:
+    #     return JSONResponse(content={"error": str(e)}, status_code=500)
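
For context, a minimal sketch of the streaming-accumulation pattern the updated handler relies on. It assumes an OpenAI-compatible chat client (shown here as huggingface_hub.InferenceClient pointed at the same model); the collect_stream helper name and the guard against empty deltas are illustrative additions, not part of app.py:

from huggingface_hub import InferenceClient

# Assumption: `client` in app.py is an OpenAI-compatible chat client; an
# InferenceClient targeting the same model is used here purely for illustration.
client = InferenceClient(model="Qwen/Qwen2.5-Coder-32B-Instruct")

def collect_stream(prompt: str) -> str:
    # Hypothetical helper: accumulate a streamed chat completion into one string,
    # mirroring the generated_text loop in generate_infographic.
    stream = client.chat.completions.create(
        model="Qwen/Qwen2.5-Coder-32B-Instruct",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.4,
        max_tokens=6000,
        top_p=0.7,
        stream=True,
    )
    generated_text = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:  # some providers send a final chunk whose content is None
            generated_text += delta
    return generated_text

Collecting the chunks eagerly like this keeps the endpoint's reply a single JSON payload; a StreamingResponse would be the alternative if the generated HTML needed to reach the browser incrementally.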