File size: 4,224 Bytes
fd1dd45
e158b55
 
 
 
 
 
 
c2aa8fe
e158b55
 
 
 
 
 
 
 
 
 
61d589d
 
30b2d94
61d589d
e158b55
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
597a3e1
 
 
 
 
 
 
 
 
 
 
 
 
 
e158b55
 
 
b16d75f
e158b55
2da4724
597a3e1
 
e158b55
 
 
d3036fa
 
 
2da4724
d3036fa
 
 
 
e158b55
 
b16d75f
bb4cd3c
fd1dd45
4ae11c9
fd1dd45
 
 
 
30b2d94
 
 
 
fd1dd45
30b2d94
b16d75f
fd1dd45
b16d75f
 
30b2d94
2da7508
b16d75f
c9a473f
30b2d94
 
fd1dd45
 
 
d3036fa
2da7508
d3036fa
 
 
 
597a3e1
d3036fa
 
 
597a3e1
d3036fa
 
 
597a3e1
c93d198
f94b187
 
 
 
30b2d94
 
f94b187
 
 
30b2d94
f94b187
 
61d589d
da55d71
61d589d
 
 
 
 
 
 
 
 
f94b187
 
 
 
9a15952
f94b187
2da7508
c2aa8fe
be4dbd7
e158b55
 
 
 
 
 
61d589d
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
import json
import logging
import os
import shutil
import subprocess
import uuid

import torch
from diffusers import (
    DPMSolverMultistepScheduler,
    EulerDiscreteScheduler,
    StableDiffusionPipeline,
)
from fastapi import FastAPI, UploadFile
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles

# Application instance; routes are registered on it below and static files
# are mounted at the end of the module.
app = FastAPI()

def file_extension(filename):
    """Return the lowercased extension of *filename* (text after the last dot).

    Raises:
        IndexError: if *filename* contains no dot (same behavior as before
            for extensionless names).
    """
    # rsplit takes the segment after the LAST dot, so multi-dot names work:
    # "archive.tar.gz" -> "gz".  The old split(".")[1] returned "tar".
    return filename.rsplit(".", 1)[1].lower()


@app.get("/generate")
def generate_image(prompt, model):
    """Generate an image from *prompt*, C2PA-sign it, and publish it.

    Args:
        prompt: text prompt passed to the diffusion pipeline.
        model: "name,version" string; the name selects the checkpoint loaded
            by diffusers, the version is only recorded in the signed
            assertion.

    Returns:
        dict with the generated file name under the "response" key.

    Raises:
        ValueError: if *model* does not contain a comma-separated version.
    """
    # Release VRAM left over from a previous request before loading a model.
    torch.cuda.empty_cache()

    # Fail fast with a clear message instead of an opaque IndexError when
    # the version part is missing.
    try:
        model_name, model_version = model.split(",", 1)
    except ValueError:
        raise ValueError("model must be of the form 'name,version'")

    pipeline = StableDiffusionPipeline.from_pretrained(
        model_name, torch_dtype=torch.float16
    )
    pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
    pipeline = pipeline.to("cuda")

    image = pipeline(prompt, num_inference_steps=50, height=512, width=512).images[0]

    # Random name avoids collisions between concurrent requests.
    filename = str(uuid.uuid4()) + ".jpg"
    image.save(filename)

    # C2PA assertion embedded into the image by the signing script.
    assertion = {
        "assertions": [
            {
                "label": "com.truepic.custom.ai",
                "data": {
                    "model_name": model_name,
                    "model_version": model_version,
                    "prompt": prompt,
                },
            }
        ]
    }

    subprocess.check_output(
        [
            "./scripts/sign.sh",
            filename,
            filename,
            "--assertions-inline",
            json.dumps(assertion),
        ]
    )

    # Copy in-process instead of spawning `cp` (portable, no extra process).
    shutil.copy(filename, os.path.join("static", filename))

    return {"response": filename}


@app.post("/verify")
def verify_image(fileUpload: UploadFile):
    """Verify an uploaded image's C2PA manifest and watermark.

    Saves the upload under a random name, runs scripts/verify.sh on it,
    parses the script's "label: 'value'" output lines (c2pa, watermark,
    original media), and publishes the relevant result file to static/.

    Returns:
        dict with the original upload name, the c2pa and watermark flags
        (strings), and the published result media name ('n/a' when nothing
        could be produced).
    """
    logging.warning("in verify")
    logging.warning(fileUpload.filename)

    # Defaults prevent the NameError the old code hit when the upload had
    # no file name: the return statement referenced variables assigned only
    # inside the `if` branch.
    c2pa = "false"
    watermark = "false"
    result_media = "n/a"

    # check if the file has been uploaded
    if fileUpload.filename:
        extension = file_extension(fileUpload.filename)
        input_filename = str(uuid.uuid4()) + "." + extension
        output_filename = str(uuid.uuid4()) + "." + extension

        # input_filename is a fresh UUID, so basename() is effectively a
        # no-op kept as defense in depth against path separators.
        fn = os.path.basename(input_filename)

        # Context manager guarantees the handle is flushed and closed
        # before the verification script reads the file.
        with open(fn, "wb") as destination:
            destination.write(fileUpload.file.read())

        response = subprocess.check_output(
            ["./scripts/verify.sh", input_filename, output_filename]
        )
        logging.warning(response)

        # check_output returns bytes; decode before parsing so the quote
        # stripping below works.  The old str(bytes_line) kept the b"..."
        # repr, leaving a stray trailing quote in every parsed value.
        # NOTE(review): assumes verify.sh emits utf-8 "label: 'value'"
        # lines in the order c2pa, watermark, original media — confirm.
        lines = response.decode("utf-8", errors="replace").splitlines()

        def _field(line):
            # Keep text after the first colon, drop padding and quotes.
            return line.split(":", 1)[1].strip(" ").strip("'")

        c2pa = _field(lines[0])
        watermark = _field(lines[1])
        original_media = _field(lines[2])

        if c2pa == "true":
            # The image carries C2PA data: publish it under the output name.
            shutil.copy(input_filename, os.path.join("static", output_filename))
            result_media = output_filename
        elif original_media != "n/a":
            # No C2PA data, but the verifier matched an original asset:
            # publish a copy of it under a fresh random name.
            fresh_name = str(uuid.uuid4()) + "." + file_extension(original_media)
            shutil.copy(original_media, os.path.join("static", fresh_name))
            result_media = fresh_name

    return {
        "response": fileUpload.filename,
        "contains_c2pa": c2pa,
        "contains_watermark": watermark,
        "result_media": result_media,
    }



# Serve generated/verified media and the front-end from static/ at the site
# root.  NOTE(review): mounting at "/" can shadow routes registered after
# this line (e.g. the "/" handler below) — confirm the intended precedence.
app.mount("/", StaticFiles(directory="static", html=True), name="static")


@app.get("/")
def index() -> FileResponse:
    """Serve the single-page front-end shell."""
    index_page = "/app/static/index.html"
    return FileResponse(path=index_page, media_type="text/html")