johann22 committed
Commit 52168c1 • 1 Parent(s): 2743076

Update app.py

Files changed (1):
  app.py  +40 -13
app.py CHANGED
@@ -2,13 +2,15 @@ import os
 import random
 from huggingface_hub import InferenceClient
 import gradio as gr
+from utils import parse_action, parse_file_content, read_python_module_structure
 from datetime import datetime
+from PIL import Image
 import agent
 from models import models
+import urllib.request
+import uuid
 import requests
 import io
-import uuid
-base_url="https://johann22-chat-diffusion.hf.space/"
 loaded_model=[]
 for i,model in enumerate(models):
     loaded_model.append(gr.load(f'models/{model}'))
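
Note: gr.load(f'models/{model}') wraps each Hub-hosted model as a callable Gradio interface, which is why run() below can invoke loaded_model[int(model_drop)] directly with a prompt string to get an image back.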
@@ -24,6 +26,9 @@ client = InferenceClient(
 ############################################
 model = gr.load("models/stabilityai/sdxl-turbo")
 
+VERBOSE = True
+MAX_HISTORY = 10000
+#MODEL = "gpt-3.5-turbo" # "gpt-4"
 history = []
 
 def infer(txt):
@@ -37,20 +42,33 @@ def format_prompt(message, history):
     prompt += f"[INST] {message} [/INST]"
     return prompt
 
-def run_gpt(in_prompt,history):
+
+
+def run_gpt(
+    in_prompt,
+    history,
+    ):
+    print(f'history :: {history}')
     prompt=format_prompt(in_prompt,history)
     seed = random.randint(1,1111111111111111)
     print (seed)
     generate_kwargs = dict(
         temperature=1.0,
-        max_new_tokens=256,
+        max_new_tokens=1048,
         top_p=0.99,
         repetition_penalty=1.0,
         do_sample=True,
         seed=seed,
     )
+
+
     content = agent.GENERATE_PROMPT + prompt
-    #print(content)
+
+    print(content)
+
+    #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    #formatted_prompt = format_prompt(f'{content}', history)
+
     stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
     resp = ""
     for response in stream:
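
Note: the hunk ends inside the streaming loop. With stream=True and details=True, InferenceClient.text_generation yields token events; a minimal sketch of the usual consumption pattern follows (an assumption for illustration, since the diff does not show the loop body):

    resp = ""
    for response in stream:
        # each streamed event carries a .token; skip special tokens
        # such as </s> and accumulate the visible text
        if not response.token.special:
            resp += response.token.text
    return resp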
@@ -59,15 +77,15 @@ def run_gpt(in_prompt,history):
 
 
 def run(purpose,history,model_drop):
-
+    print (history)
     #print(purpose)
     #print(hist)
     task=None
     directory="./"
-    if history:
-        history=str(history).strip("[]")
-    if not history:
-        history = ""
+    #if history:
+    #    history=str(history).strip("[]")
+    #if not history:
+    #    history = ""
 
     #action_name, action_input = parse_action(line)
     out_prompt = run_gpt(
@@ -81,7 +99,7 @@ def run(purpose,history,model_drop):
     model=loaded_model[int(model_drop)]
     out_img=model(out_prompt)
     print(out_img)
-    url=f'https://johann22-chat-diffusion.hf.space/file={out_img}'
+    url=f'https://johann22-mixtral-chat-diffusion.hf.space/file={out_img}'
     print(url)
     uid = uuid.uuid4()
     #urllib.request.urlretrieve(image, 'tmp.png')
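
Note: the diff cuts off before url and uid are used. Given the imports this commit adds (PIL's Image, uuid, urllib.request, alongside the existing requests and io), one plausible continuation is sketched below; this is hypothetical, as the diff does not show it:

    # fetch the rendered image from the Space's file endpoint and
    # save it under a unique, uuid-derived filename
    r = requests.get(url)
    img = Image.open(io.BytesIO(r.content))
    img.save(f'{uid}.png')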
@@ -94,11 +112,11 @@ def run(purpose,history,model_drop):
 
 
 
-
 ################################################
 
 with gr.Blocks() as iface:
     gr.HTML("""<center><h1>Chat Diffusion</h1><br><h3>This chatbot will generate images</h3></center>""")
+    #chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
     with gr.Row():
         with gr.Column():
             chatbot=gr.Chatbot()
@@ -109,10 +127,19 @@ with gr.Blocks() as iface:
             stop_b = gr.Button("Stop")
             clear = gr.ClearButton([msg, chatbot])
 
-    sumbox=gr.Image(label="Image",type="filepath")
+    sumbox=gr.Image(label="Image")
 
 
     sub_b = submit_b.click(run, [msg,chatbot,model_drop],[msg,chatbot,sumbox])
     sub_e = msg.submit(run, [msg, chatbot,model_drop], [msg, chatbot,sumbox])
     stop_b.click(None,None,None, cancels=[sub_b,sub_e])
 iface.launch()
+'''
+gr.ChatInterface(
+    fn=run,
+    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+    title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
+    examples=examples,
+    concurrency_limit=20,
+).launch(show_api=False)
+'''
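
Note: stop_b.click(None, None, None, cancels=[sub_b, sub_e]) uses Gradio's cancels parameter, which aborts the in-flight events returned by submit_b.click and msg.submit, so the Stop button can halt a generation that is still running.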