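"""Gradio demo: is an uploaded image real or AI-generated?

The uploaded image is run through three image-classification checkpoints from
the Hugging Face Hub (see `models` below), and each model's verdict is shown
in its own output component.
"""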
import gradio as gr
import torch
from transformers import AutoFeatureExtractor, AutoModelForImageClassification
from numpy import exp
import pandas as pd

def softmax(vector):
    # Turn a vector of logits into probabilities that sum to 1
    e = exp(vector)
    return e / e.sum()


models = [
    "Nahrawy/AIorNot",
    "arnolfokam/ai-generated-image-detector",
    "umm-maybe/AI-image-detector",
]

def aiornot0(image):
    labels = ["Real", "AI"]
    mod = models[0]
    feature_extractor0 = AutoFeatureExtractor.from_pretrained(mod)
    model0 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor0(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model0(**inputs)
        logits = outputs.logits
        # Class probabilities as a 1x2 DataFrame (column order follows `labels`)
        probability = softmax(logits.numpy())
        px = pd.DataFrame(probability)

    prediction = logits.argmax(-1).item()
    label = labels[prediction]

    html_out = f"""
    <h3>Model used: <a href='https://huggingface.co/{mod}'>{mod}</a><br>
    This image is likely: {label}<br>
    Probabilities<br>
    {labels[0]}: {px[0][0]:.3f}<br>
    {labels[1]}: {px[1][0]:.3f}</h3>"""

    return html_out

def aiornot1(image):
    labels = ["Real", "AI"]
    mod = models[1]
    feature_extractor1 = AutoFeatureExtractor.from_pretrained(mod)
    model1 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor1(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model1(**inputs)
        logits = outputs.logits
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    return label

def aiornot2(image):
    labels = ["Real", "AI"]
    mod = models[2]
    feature_extractor2 = AutoFeatureExtractor.from_pretrained(mod)
    model2 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor2(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model2(**inputs)
        logits = outputs.logits
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    return label

with gr.Blocks() as app:
    with gr.Row():
        with gr.Column():
            inp = gr.Image()
            mod_choose = gr.Number(value=0)  # currently unused model selector
            btn = gr.Button()

        with gr.Column():
            #outp0 = gr.Textbox(label=f'{models[0]}')
            outp0 = gr.HTML()
            outp1 = gr.Textbox(label=f'{models[1]}')
            outp2 = gr.Textbox(label=f'{models[2]}')
    # One click runs the image through all three detectors
    btn.click(aiornot0, [inp], outp0)
    btn.click(aiornot1, [inp], outp1)
    btn.click(aiornot2, [inp], outp2)

app.launch()
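# To try the demo locally (assuming this file is saved as app.py):
#   pip install gradio torch transformers pandas
#   python app.py
# launch() prints the local URL it is serving (Gradio defaults to port 7860).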