melindakhosasih committed on
Commit c970970
1 Parent(s): 4809d91

add price and examples

.gitignore ADDED
@@ -0,0 +1,80 @@
+ # Python build
+ .eggs/
+ gradio.egg-info
+ dist/
+ *.pyc
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ build/
+ __tmp/*
+ *.pyi
+ py.typed
+
+ # JS build
+ gradio/templates/*
+ gradio/node/*
+ gradio/_frontend_code/*
+ js/gradio-preview/test/*
+
+ # Secrets
+ .env
+
+ # Gradio run artifacts
+ *.db
+ *.sqlite3
+ gradio/launches.json
+ flagged/
+ gradio_cached_examples/
+ tmp.zip
+
+ # Tests
+ .coverage
+ coverage.xml
+ test.txt
+ **/snapshots/**/*.png
+ playwright-report/
+
+ # Demos
+ demo/tmp.zip
+ demo/files/*.avi
+ demo/files/*.mp4
+ demo/all_demos/demos/*
+ demo/all_demos/requirements.txt
+ demo/*/config.json
+ demo/annotatedimage_component/*.png
+
+ # Etc
+ .idea/*
+ .DS_Store
+ *.bak
+ workspace.code-workspace
+ *.h5
+
+ # dev containers
+ .pnpm-store/
+
+ # log files
+ .pnpm-debug.log
+
+ # Local virtualenv for devs
+ .venv*
+
+ # FRP
+ gradio/frpc_*
+ .vercel
+
+ # js
+ node_modules
+ public/build/
+ test-results
+ client/js/test.js
+ .config/test.py
+
+ # storybook
+ storybook-static
+ build-storybook.log
+ js/storybook/theme.css
+
+ # playwright
+ .config/playwright/.cache
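Note on the *.py[cod] entry above: the brackets are a character class, so the single pattern covers .pyc, .pyo, and .pyd artifacts. An illustrative check with Python's fnmatch, whose globs behave the same way for this pattern (illustration only, not part of the commit):

from fnmatch import fnmatch

# *.py[cod] matches compiled (.pyc), optimized (.pyo), and extension (.pyd) files
for name in ["module.pyc", "module.pyo", "module.pyd", "module.py"]:
    print(name, fnmatch(name, "*.py[cod]"))
# module.pyc True / module.pyo True / module.pyd True / module.py False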
app.py CHANGED
@@ -1,9 +1,11 @@
  from hubconf import custom
  model = custom(path_or_model='best.pt') # custom example
+ model.eval()
  # model = create(name='yolov7', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example

  # Verify inference
  import numpy as np
+ import torch
  from PIL import Image
  import gradio as gr

@@ -13,19 +15,47 @@ import gradio as gr
  # results = model(imgs) # batched inference
  # results.print()
  # results.save()
+ def total_price(predicted):
+     price = 0
+     for name in predicted:
+         if name == "side dish":
+             price += 10
+         elif name == "purple rice" or name == "white rice" or name == "brown rice":
+             price += 20
+         elif name == "40dollars meal":
+             price += 40
+         elif name == "30dollars meal":
+             price += 30
+         elif name == "25dollars meal":
+             price += 25
+
+     return price
+
  def predict(input_image):
      """
      Predict model output
      """
-     results = model(input_image)
+     # Disable gradient computation
+     with torch.no_grad():
+         results = model(input_image)
+
+     predicted = results.pandas().xyxy[0]["name"]
+
      output_image = results.render()[0]
-     price = "0"
+     price = total_price(predicted)

      # Return the output image and price
      return [output_image, price]
-     # return [input_image, price]
-

+ with gr.Blocks() as demo:
+     # Title
+     gr.HTML(
+         """
+         <h1 align="center">Group 29 - AI Cafeteria Price Evaluator</h1>
+         """)
+     examples = ["./examples/img_1.jpg", "./examples/img_2.jpg", "./examples/img_3.jpg", "./examples/img_4.jpg"]
+     # gr.Interface(inputs=["image"],outputs=["image"],fn=lambda img:model(img).render()[0]).launch()
+     gr.Interface(inputs=["image"], outputs=["image", "text"], fn=predict, examples=examples)

- # gr.Interface(inputs=["image"],outputs=["image"],fn=lambda img:model(img).render()[0]).launch()
- gr.Interface(inputs=["image"], outputs=["image", "text"], fn=predict).launch()
+ if __name__ == "__main__":
+     demo.launch()
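The new predict path runs inference under torch.no_grad(), pulls the detected class names from the YOLO-style results.pandas().xyxy[0] DataFrame, and sums a fixed tariff per label. A minimal sketch of that pricing step in isolation, with the chained or comparisons folded into a membership test and a hand-built Series standing in for real detections (the labels are the ones in the diff; the detection list itself is made up for illustration):

import pandas as pd

def total_price(predicted):
    # Same tariff as the commit: side dish 10, any rice 20,
    # and the fixed-price meals 25/30/40 dollars.
    price = 0
    for name in predicted:
        if name == "side dish":
            price += 10
        elif name in ("purple rice", "white rice", "brown rice"):
            price += 20
        elif name == "40dollars meal":
            price += 40
        elif name == "30dollars meal":
            price += 30
        elif name == "25dollars meal":
            price += 25
    return price

# Stand-in for results.pandas().xyxy[0]["name"] on one tray photo
predicted = pd.Series(["white rice", "side dish", "side dish", "30dollars meal"])
print(total_price(predicted))  # 20 + 10 + 10 + 30 = 70

A dict mapping label to price would shorten the chain and make new menu items a one-line change.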
best.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e649c278fc179eb0b75e136f2ea86a58a2dd84a7f77dba4fe2b93987d56e63df
- size 75247994
+ oid sha256:2fdd53a30b200533858ef900c88f230ff0707ebde17309ba3c09fa9fd7cd5086
+ size 74876410
best_old.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e649c278fc179eb0b75e136f2ea86a58a2dd84a7f77dba4fe2b93987d56e63df
+ size 75247994
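Both .pt entries are Git LFS pointers, so the diff records only the blob's SHA-256 oid and byte size rather than the weights themselves. A small, hypothetical check (not part of the repo) that a locally downloaded checkpoint matches the updated pointer above:

import hashlib
from pathlib import Path

def matches_lfs_pointer(path, expected_oid, expected_size):
    # Compare the file's byte length and SHA-256 digest against the pointer fields.
    data = Path(path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# oid/size taken from the new best.pt pointer in this commit
print(matches_lfs_pointer(
    "best.pt",
    "2fdd53a30b200533858ef900c88f230ff0707ebde17309ba3c09fa9fd7cd5086",
    74876410,
))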
examples/img_1.jpg ADDED
examples/img_2.jpg ADDED
examples/img_3.jpg ADDED
examples/img_4.jpg ADDED
requirements.txt CHANGED
@@ -1,6 +1,7 @@
  # Usage: pip install -r requirements.txt

  # Base ----------------------------------------
+ gradio
  matplotlib>=3.2.2
  numpy>=1.18.5,<1.24.0
  opencv-python>=4.1.1
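The new gradio entry is unpinned, so installs will pick up whatever release is current. An illustrative post-install check that the dependency resolves, and that prints the version actually in use (handy if you later want to pin it):

from importlib.metadata import version

# Prints the installed gradio version; copy it into requirements.txt to pin
print("gradio", version("gradio"))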