yasserrmd committed on
Commit
11511b6
1 Parent(s): 90eb196

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -8
app.py CHANGED
@@ -4,7 +4,7 @@ from fastapi.responses import HTMLResponse, JSONResponse
4
  from fastapi.staticfiles import StaticFiles
5
  from pydantic import BaseModel
6
  from huggingface_hub import InferenceClient
7
- import uvicorn
8
 
9
  # Initialize FastAPI app
10
  app = FastAPI()
@@ -22,6 +22,25 @@ class InfographicRequest(BaseModel):
22
  # Load prompt template from environment variable
23
  PROMPT_TEMPLATE = os.getenv("PROMPT_TEMPLATE")
24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  # Route to serve the HTML template
26
  @app.get("/", response_class=HTMLResponse)
27
  async def serve_frontend():
@@ -35,7 +54,6 @@ async def generate_infographic(request: InfographicRequest):
35
  prompt = PROMPT_TEMPLATE.format(description=description)
36
 
37
  try:
38
- # Query Hugging Face model
39
  messages = [{"role": "user", "content": prompt}]
40
  stream = client.chat.completions.create(
41
  model="Qwen/Qwen2.5-Coder-32B-Instruct",
@@ -46,13 +64,16 @@ async def generate_infographic(request: InfographicRequest):
46
  stream=True,
47
  )
48
 
49
- # Collect the HTML content from the stream
50
- generated_html = ""
51
  for chunk in stream:
52
- generated_html += chunk.choices[0].delta.content
53
-
54
- # Return the generated HTML content
55
- return JSONResponse(content={"html": generated_html})
 
 
 
56
 
57
  except Exception as e:
58
  return JSONResponse(content={"error": str(e)}, status_code=500)
 
4
  from fastapi.staticfiles import StaticFiles
5
  from pydantic import BaseModel
6
  from huggingface_hub import InferenceClient
7
+ import re
8
 
9
  # Initialize FastAPI app
10
  app = FastAPI()
 
22
  # Load prompt template from environment variable
23
  PROMPT_TEMPLATE = os.getenv("PROMPT_TEMPLATE")
24
 
25
+
26
def extract_code_blocks(markdown_text):
    """
    Extract fenced code blocks from the given Markdown text.

    NOTE(fix): this was declared ``async`` but performs no awaiting, and the
    caller invokes it synchronously (``code_blocks = extract_code_blocks(...)``),
    which would bind a coroutine object — always truthy, and unsubscriptable —
    so ``code_blocks[0]`` would raise ``TypeError``. A plain function matches
    the call site and the purely CPU-bound work.

    Args:
        markdown_text (str): The Markdown content as a string.

    Returns:
        list: The body of each triple-backtick fenced block, in order of
        appearance (the opening fence's language tag is not included).
    """
    # ``` optionally followed by a language tag, then a newline, then the
    # block body up to the closing fence. DOTALL lets '.' span newlines;
    # non-greedy quantifiers keep each match to a single fenced block.
    code_block_pattern = re.compile(r'```.*?\n(.*?)```', re.DOTALL)

    return code_block_pattern.findall(markdown_text)
43
+
44
  # Route to serve the HTML template
45
  @app.get("/", response_class=HTMLResponse)
46
  async def serve_frontend():
 
54
  prompt = PROMPT_TEMPLATE.format(description=description)
55
 
56
  try:
 
57
  messages = [{"role": "user", "content": prompt}]
58
  stream = client.chat.completions.create(
59
  model="Qwen/Qwen2.5-Coder-32B-Instruct",
 
64
  stream=True,
65
  )
66
 
67
+
68
+ generated_text = ""
69
  for chunk in stream:
70
+ generated_text += chunk.choices[0].delta.content
71
+
72
+ code_blocks=extract_code_blocks(generated_text)
73
+ if code_blocks:
74
+ return JSONResponse(content={"html": code_blocks[0]})
75
+ else:
76
+ return JSONResponse(content={"error": "No generation"},status_code=500)
77
 
78
  except Exception as e:
79
  return JSONResponse(content={"error": str(e)}, status_code=500)