Removed the RGB ndarray-to-PIL conversion on the image
app.py CHANGED
@@ -52,11 +52,11 @@ def krypton(input,
     # Image is not defined at all
     gr.Error("Uplaod an image for Krypton to work")
 
-
+    image = Image.open(image)
     # image = Image.open(requests.get(url, stream=True).raw)
     prompt = ("<|start_header_id|>user<|end_header_id|>\n\n<image>\n{input['text']}<|eot_id|>"
               "<|start_header_id|>assistant<|end_header_id|>\n\n")
-    inputs = processor(prompt,
+    inputs = processor(prompt, image, return_tensors='pt').to('cuda', torch.float16)
     outputs = model.generate(**inputs, max_new_tokens=200, do_sample=False)
     output_text = processor.decode(outputs[0], skip_special_tokens=True)
     print(output_text)
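For context, a minimal sketch of the inference path this commit converges on: open the uploaded file as a PIL image (instead of converting an ndarray to RGB, the step removed here), build the Llama-3-style chat prompt, and run it through the processor and model. The checkpoint name, the AutoProcessor/LlavaForConditionalGeneration loading, and the shape of the Gradio input dict are assumptions for illustration; they are not shown in this diff. Note also that the prompt in the diff is a plain string, so {input['text']} would be sent literally; the sketch below uses an f-string.

# Hedged sketch, not the Space's actual app.py.
# Assumptions: the checkpoint id, that `processor`/`model` come from
# transformers' AutoProcessor / LlavaForConditionalGeneration, and that
# `input` is a Gradio multimodal dict with "text" and "files" keys.
import torch
from PIL import Image
from transformers import AutoProcessor, LlavaForConditionalGeneration

model_id = "xtuner/llava-llama-3-8b-v1_1-transformers"  # assumed checkpoint
processor = AutoProcessor.from_pretrained(model_id)
model = LlavaForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.float16
).to("cuda")

def krypton(input):
    # Gradio hands back a file path; open it directly as a PIL image,
    # matching the `image = Image.open(image)` line added in this commit.
    image = Image.open(input["files"][0])
    prompt = (
        "<|start_header_id|>user<|end_header_id|>\n\n<image>\n"
        f"{input['text']}<|eot_id|>"
        "<|start_header_id|>assistant<|end_header_id|>\n\n"
    )
    # Same call as the added line 59: text first, then the image.
    inputs = processor(prompt, image, return_tensors="pt").to("cuda", torch.float16)
    outputs = model.generate(**inputs, max_new_tokens=200, do_sample=False)
    return processor.decode(outputs[0], skip_special_tokens=True)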