Omnibus committed
Commit: 387ecb3
Parent: d140aac

Update app.py

Files changed (1)
  1. app.py +40 -27
app.py CHANGED
@@ -4,52 +4,65 @@ from transformers import AutoFeatureExtractor, AutoModelForImageClassification,
 
 models=[
     "Nahrawy/AIorNot",
-    "RishiDarkDevil/ai-image-det-resnet152",
     "arnolfokam/ai-generated-image-detector",
     "umm-maybe/AI-image-detector",
     ]
-#pipe = pipeline("image-classification", "umm-maybe/AI-image-detector")
 
-def image_classifier(image):
-    outputs = pipe(image)
-    results = {}
-    for result in outputs:
-        results[result['label']] = result['score']
-    return results
-
-
-
-#demo = gr.Interface(fn=image_classifier, inputs=gr.Image(type="pil"), outputs="label", title=title, description=description)
-#demo.launch(show_api=False)
-
-
-
-def aiornot(image,mod_choose):
+def aiornot0(image):
     labels = ["Real", "AI"]
-
-    #feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/resnet-50")
-    mod=models[int(mod_choose)]
+    mod=models[0]
     feature_extractor = AutoFeatureExtractor.from_pretrained(mod)
     model = AutoModelForImageClassification.from_pretrained(mod)
-
     input = feature_extractor(image, return_tensors="pt")
     with torch.no_grad():
         outputs = model(**input)
-    print(outputs)
-    print(dir(outputs))
+    print (outputs)
     logits = outputs.logits
     print (logits)
     prediction = logits.argmax(-1).item()
-    print(prediction)
     label = labels[prediction]
     return label
-
+def aiornot1(image):
+    labels = ["Real", "AI"]
+    mod=models[1]
+    feature_extractor = AutoFeatureExtractor.from_pretrained(mod)
+    model = AutoModelForImageClassification.from_pretrained(mod)
+    input = feature_extractor(image, return_tensors="pt")
+    with torch.no_grad():
+        outputs = model(**input)
+    print (outputs)
+    logits = outputs.logits
+    print (logits)
+    prediction = logits.argmax(-1).item()
+    label = labels[prediction]
+    return label
+def aiornot2(image):
+    labels = ["Real", "AI"]
+    mod=models[2]
+    feature_extractor = AutoFeatureExtractor.from_pretrained(mod)
+    model = AutoModelForImageClassification.from_pretrained(mod)
+    input = feature_extractor(image, return_tensors="pt")
+    with torch.no_grad():
+        outputs = model(**input)
+    print (outputs)
+    logits = outputs.logits
+    print (logits)
+    prediction = logits.argmax(-1).item()
+    label = labels[prediction]
+    return label
 with gr.Blocks() as app:
     with gr.Row():
         with gr.Column():
             inp = gr.Image()
             mod_choose=gr.Number(value=0)
             btn = gr.Button()
-    outp = gr.Textbox()
-    btn.click(aiornot,[inp,mod_choose],outp)
+
+        with gr.Column():
+            outp0 = gr.Textbox()
+            outp1 = gr.Textbox()
+            outp2 = gr.Textbox()
+    btn.click(aiornot0,[inp],outp0)
+    btn.click(aiornot1,[inp],outp1)
+    btn.click(aiornot2,[inp],outp2)
+
 app.launch()
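
The three handlers this commit adds (aiornot0, aiornot1, aiornot2) are copies of each other that differ only in which entry of models they load, and the mod_choose Number stays in the layout but is no longer wired to anything. Below is a minimal sketch, not the committed code, of how the same three-Textbox layout could be driven by one handler with the model index bound via functools.partial; the names aiornot, model_index and outps are illustrative assumptions.

# Illustrative refactor only (not the committed code): one handler
# parameterized by model index, bound per output box with functools.partial.
from functools import partial

import gradio as gr
import torch
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

models = [
    "Nahrawy/AIorNot",
    "arnolfokam/ai-generated-image-detector",
    "umm-maybe/AI-image-detector",
]
labels = ["Real", "AI"]

def aiornot(image, model_index):
    # Same steps as the committed handlers: extract features, run one
    # forward pass, argmax over the logits, map to a label string.
    mod = models[model_index]
    feature_extractor = AutoFeatureExtractor.from_pretrained(mod)
    model = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor(image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return labels[logits.argmax(-1).item()]

with gr.Blocks() as app:
    with gr.Row():
        with gr.Column():
            inp = gr.Image()
            btn = gr.Button()
        with gr.Column():
            outps = [gr.Textbox(label=m) for m in models]
    for i, outp in enumerate(outps):
        # partial fixes the model index, so the UI only passes the image.
        btn.click(partial(aiornot, model_index=i), [inp], outp)

app.launch()

As in the commit, each checkpoint is loaded inside the click handler, so every click reloads the models; caching the extractor/model pairs at startup would be the natural follow-up.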
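
Separately, the hard-coded labels = ["Real", "AI"] assumes every checkpoint orders its classes the same way, while each model ships its own mapping in its config. A small sketch (an assumption, not part of the commit) that reads the label names from model.config.id2label and reports softmax confidences:

# Assumption, not part of the commit: take label names from the checkpoint's
# config instead of a fixed ["Real", "AI"] order, and return confidences.
import torch
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

def classify(image, checkpoint="umm-maybe/AI-image-detector"):
    feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint)
    model = AutoModelForImageClassification.from_pretrained(checkpoint)
    inputs = feature_extractor(image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.softmax(logits, dim=-1)[0]
    # Map every class id to its confidence; the label strings come from the
    # checkpoint itself, so they may differ between the three models.
    return {model.config.id2label[i]: float(p) for i, p in enumerate(probs)}

The returned dict is in the label-to-confidence format that gr.Label renders directly, which would surface how confident each detector is rather than only its argmax.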