acecalisto3 committed
Commit 9700ac4
1 Parent(s): 11b8f4f

Update app.py

Files changed (1)
  1. app.py +12 -5
app.py CHANGED
@@ -2,6 +2,7 @@ from huggingface_hub import InferenceClient
 import gradio as gr
 import random
 import prompts
+
 client = InferenceClient(
     "mistralai/Mixtral-8x7B-Instruct-v0.1"
 )
@@ -13,6 +14,7 @@ def format_prompt(message, history):
         prompt += f" {bot_response}</s> "
     prompt += f"[INST] {message} [/INST]"
     return prompt
+
 agents =[
     "WEB_DEV",
     "AI_SYSTEM_PROMPT",
@@ -26,12 +28,12 @@ agents =[
     "HUGGINGFACE_FILE_DEV",

 ]
+
 def generate(
-    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
+    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0, file=None
 ):
     seed = random.randint(1,1111111111111111)

-
     agent=prompts.WEB_DEV
     if agent_name == "WEB_DEV":
         agent = prompts.WEB_DEV_SYSTEM_PROMPT
@@ -69,6 +71,11 @@ def generate(
         seed=seed,
     )

+    # Process the uploaded file
+    if file:
+        file_content = file.read().decode("utf-8")
+        prompt = f"Here's the file I uploaded: {file_content} \n{prompt}"
+
     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
@@ -76,7 +83,9 @@
     for response in stream:
         output += response.token.text
         yield output
-    return output
+
+    # This line should be indented at the same level as the 'for' loop
+    return output


 additional_inputs=[
@@ -128,8 +137,6 @@ additional_inputs=[
         interactive=True,
         info="Penalize repeated tokens",
     ),
-
-
 ]

 examples=[
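For reference, here is a small, self-contained illustration of the [INST]-style prompt string that format_prompt assembles for Mixtral-Instruct. Only the last two prompt += lines are visible in this diff; the opening "<s>" token and the user-turn line are assumed from the commonly used Mixtral chat template, and the history/message values are invented.

# Illustration only: the prompt string format_prompt builds for one prior turn.
# The "<s>" start token and the user-turn line are assumptions; only the last
# two `prompt +=` statements appear in this commit. Example values are invented.
history = [("hi", "Hello! How can I help?")]
message = "WEB_DEV system prompt, build me a landing page"

prompt = "<s>"
for user_prompt, bot_response in history:
    prompt += f"[INST] {user_prompt} [/INST]"
    prompt += f" {bot_response}</s> "
prompt += f"[INST] {message} [/INST]"

print(prompt)
# <s>[INST] hi [/INST] Hello! How can I help?</s> [INST] WEB_DEV system prompt, build me a landing page [/INST]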
 
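A minimal sketch of how the new file argument could reach generate() from the UI. The interface code is not part of this commit, so the gr.File component, its type="binary" setting, and the decoding helper below are assumptions; with binary mode Gradio passes raw bytes, so they are decoded directly rather than via file.read().

# Hypothetical wiring for the new `file` parameter; not part of this commit.
# Assumes Gradio's gr.File with type="binary", which hands the handler raw
# bytes (or None when nothing is uploaded).
import gradio as gr

file_input = gr.File(
    label="Upload a file",
    type="binary",
)

def file_to_text(file_bytes):
    # Decode uploaded bytes to text before splicing them into the prompt.
    if not file_bytes:
        return ""
    return file_bytes.decode("utf-8", errors="replace")

If the app builds its UI with gr.ChatInterface, appending file_input to additional_inputs would make Gradio pass the upload through as the extra file argument that generate() now accepts; whether this Space does so is not shown in the diff.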