SeyedAli committed on
Commit
4d335eb
1 Parent(s): 09e78c8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -12
app.py CHANGED
@@ -7,18 +7,15 @@ from transformers import ViTImageProcessor,pipeline
7
  model = ViTImageProcessor.from_pretrained('SeyedAli/Food-Image-Classification-VIT')
8
 
9
  def FoodClassification(image):
10
- image = read_image(image)
11
- # Encode your PIL Image as a JPEG without writing to disk
12
- # buffer = io.BytesIO(image)
13
- # YourImage.save(buffer, format='JPEG', quality=75)
14
-
15
- # # You probably want
16
- # desiredObject = buffer.getbuffer()
17
-
18
- pipline = pipeline(task="image-classification", model=model)
19
- #output=pipline(model(Image.open(desiredObject), return_tensors='pt'))
20
- output=pipline(image, return_tensors='pt')
21
- return output
22
 
23
  iface = gr.Interface(fn=FoodClassification, inputs="image", outputs="text")
24
  iface.launch(share=False)
 
7
# NOTE(review): ViTImageProcessor is an image *preprocessor*, not a model, yet
# this object is later passed as `model=` to pipeline(). Presumably this should
# be ViTForImageClassification.from_pretrained(...) (or just the repo id string)
# — confirm against the Space's runtime behavior before changing.
 model = ViTImageProcessor.from_pretrained('SeyedAli/Food-Image-Classification-VIT')
8
 
9
def FoodClassification(image):
    """Classify a food image and return the raw pipeline predictions.

    Args:
        image: filesystem path to the uploaded image file (Gradio hands the
               handler a path here — TODO confirm the configured input type).

    Returns:
        Output of the transformers image-classification pipeline invoked
        with return_tensors='pt' (label/score predictions).

    Raises:
        OSError: if the uploaded file cannot be read.
    """
    with tempfile.NamedTemporaryFile(suffix=".png") as temp_image_file:
        # Bug fix: the original bound the context manager to `temp_audio_file`
        # but then wrote to the undefined name `temp_image_file`, raising
        # NameError on every call. The single, consistent name is used here.
        # Copy the uploaded file into the temp file so torchvision can read
        # it from a stable path; `with` closes the source handle (the
        # original leaked it via a bare open()).
        with open(image, "rb") as uploaded:
            temp_image_file.write(uploaded.read())
        temp_image_file.flush()
        # Load the image file using torchvision.
        img = read_image(temp_image_file.name)
        # NOTE(review): `model` is a ViTImageProcessor (see top of file);
        # passing a preprocessor as `model=` looks wrong — confirm.
        pipline = pipeline(task="image-classification", model=model)
        output = pipline(img, return_tensors='pt')
        return output
 
 
 
19
 
20
# Gradio UI: single image input, the classifier's raw output rendered as text.
iface = gr.Interface(fn=FoodClassification, inputs="image", outputs="text")
# share=False keeps the app local to the host (no public gradio.live tunnel).
iface.launch(share=False)