prlabs2023 committed on
Commit
7e1aade
1 Parent(s): 272e197

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -19
app.py CHANGED
@@ -32,6 +32,13 @@ from fastapi import Form
32
  class Query(BaseModel):
33
  text: str
34
  code:str
 
 
 
 
 
 
 
35
 
36
  # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
37
  # tokenizer = BertTokenizerFast.from_pretrained('mrm8488/bert-small2bert-small-finetuned-cnn_daily_mail-summarization')
@@ -88,6 +95,7 @@ async def get_answer(q: Query ):
88
 
89
  text = q.text
90
  code= q.code
 
91
 
92
 
93
  N = 20
@@ -100,7 +108,7 @@ async def get_answer(q: Query ):
100
 
101
  filename= res
102
 
103
- t = threading.Thread(target=do_ML, args=(filename,text,code))
104
  t.start()
105
 
106
  return JSONResponse({"id": filename})
@@ -108,6 +116,25 @@ async def get_answer(q: Query ):
108
  return "hello"
109
 
110
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
  import requests
112
  import io
113
  import torch
@@ -120,33 +147,36 @@ client = InferenceClient()
120
  # client = InferenceClient(model="SG161222/Realistic_Vision_V1.4")
121
 
122
 
123
- def do_ML(filename:str,text:str,code:str):
 
 
124
 
125
- global client
126
 
127
- imagei = client.text_to_image(text)
128
-
129
- byte_array = io.BytesIO()
130
- imagei.save(byte_array, format='JPEG')
131
- image_bytes = byte_array.getvalue()
132
 
133
 
134
- files = {'file': image_bytes}
135
 
136
- global audio_space
137
- url = audio_space+code
138
 
139
- data = {"filename": filename}
140
- response = requests.post(url, files=files,data= data)
141
 
142
- print(response.text)
143
 
144
- if response.status_code == 200:
145
- print("File uploaded successfully.")
146
  # Handle the response as needed
147
- else:
148
- print("File upload failed.")
149
-
150
 
 
 
 
 
151
 
152
 
 
32
class Query(BaseModel):
    """Request body for the image-generation endpoint.

    Attributes (all required):
        text -- prompt forwarded to the text-to-image model
        code -- suffix appended to the upload URL
        host -- caller's base URL, used to report failures
    """
    text: str
    code: str
    host: str


class Query2(BaseModel):
    """Request body for the /error retry endpoint.

    Same fields as Query plus `filename`, the id of the job being retried.
    """
    text: str
    code: str
    filename: str
    host: str
42
 
43
  # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
44
  # tokenizer = BertTokenizerFast.from_pretrained('mrm8488/bert-small2bert-small-finetuned-cnn_daily_mail-summarization')
 
95
 
96
  text = q.text
97
  code= q.code
98
+ host= q.host
99
 
100
 
101
  N = 20
 
108
 
109
  filename= res
110
 
111
+ t = threading.Thread(target=do_ML, args=(filename,text,code,host))
112
  t.start()
113
 
114
  return JSONResponse({"id": filename})
 
116
  return "hello"
117
 
118
 
119
+
120
+
121
@app.post("/error")
async def handle_error(q: Query2):
    """Retry hook: re-run the ML pipeline for an already-assigned filename.

    NOTE(review): the original named this handler `get_answer`, shadowing the
    earlier endpoint function of the same name at module level. FastAPI
    registers the route at decoration time so both worked, but the duplicate
    name hides the first function; renamed — the route path is unchanged.

    Returns a JSON object echoing the job id; the heavy work happens on a
    background thread so the response is immediate.
    """
    text = q.text
    code = q.code
    filename = q.filename
    host = q.host

    # Fire-and-forget: do_ML posts its result (or an error report) elsewhere.
    t = threading.Thread(target=do_ML, args=(filename, text, code, host))
    t.start()

    return JSONResponse({"id": filename})
134
+
135
+
136
+
137
+
138
  import requests
139
  import io
140
  import torch
 
147
  # client = InferenceClient(model="SG161222/Realistic_Vision_V1.4")
148
 
149
 
150
def do_ML(filename: str, text: str, code: str, host: str):
    """Generate an image for *text* and upload it under *filename*.

    Runs on a worker thread (started by the endpoint handlers). On any
    failure, the error is reported back to the caller's host so the job
    is not silently lost.

    Args:
        filename: id under which the result is stored remotely.
        text: prompt passed to the text-to-image model.
        code: suffix appended to ``audio_space`` to build the upload URL.
        host: base URL used to report failures.
    """
    try:
        # `client` and `audio_space` are module-level globals; reading them
        # needs no `global` statement (the original declarations were
        # redundant — `global` is only required for assignment).
        imagei = client.text_to_image(text)

        # Serialize the PIL image to an in-memory JPEG for the upload.
        byte_array = io.BytesIO()
        imagei.save(byte_array, format='JPEG')
        image_bytes = byte_array.getvalue()

        files = {'file': image_bytes}
        url = audio_space + code
        data = {"filename": filename}

        # Bounded timeout: the original had none, so a stalled upload would
        # hang this worker thread forever.
        response = requests.post(url, files=files, data=data, timeout=300)
        print(response.text)

        if response.status_code == 200:
            print("File uploaded successfully.")
            # Handle the response as needed
        else:
            print("File upload failed.")

    except Exception as exc:
        # Was a bare `except:` — that also swallowed SystemExit and
        # KeyboardInterrupt and discarded the actual error. Log it, then
        # report the failure back to the caller so it can be retried.
        print(f"do_ML failed for {filename}: {exc}")
        data = {"text": text, "filename": filename}
        requests.post(host + "texttoimage2handleerror", data=data, timeout=60)
181
 
182