<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8"/>
    <meta name="viewport" content="width=device-width, initial-scale=1.0"/>
    <script src="https://cdn.tailwindcss.com"></script>
    <!-- polyfill for firefox + import maps -->
    <script src="https://unpkg.com/es-module-shims@1.7.0/dist/es-module-shims.js"></script>
    <script type="importmap">
        {
            "imports": {
                "@huggingface/inference": "https://cdn.jsdelivr.net/npm/@huggingface/inference@2.1.1/+esm"
            }
        }
    </script>
</head>
<body class="bg-gray-100 min-h-screen flex items-center justify-center">
    <form class="w-[90%] max-w-md mx-auto bg-white p-8 rounded-lg shadow-md" onsubmit="launch(); return false;">
        <h1 class="text-3xl font-bold mb-8 text-center text-gradient from-pink-500 to-violet-500">
            Document & visual question answering demo with
            <a href="https://github.com/huggingface/huggingface.js" class="text-blue-500">
                <kbd>@huggingface/inference</kbd>
            </a>
        </h1>

        <p class="text-center text-gray-500 mb-8">
            First, enter your token if you have one! Otherwise, you may run into rate limiting.
            You can create a token for free at
            <a href="https://huggingface.co/settings/tokens" class="underline text-blue-500" target="_blank">
                hf.co/settings/tokens
            </a>
        </p>

        <input
            type="text"
            id="token"
            class="rounded border-2 border-blue-500 shadow-md px-3 py-2 w-full mb-6"
            placeholder="Token (optional)"
        />

        <p class="text-center text-gray-500 mb-8">
            Pick the model type and the model you want to run. Check out models for
            <a href="https://huggingface.co/tasks/document-question-answering" class="underline text-blue-500" target="_blank">
                document
            </a> and
            <a href="https://huggingface.co/tasks/visual-question-answering" class="underline text-blue-500" target="_blank">
                image
            </a> question answering.
        </p>

        <div class="space-x-2 flex text-sm mb-8 justify-center">
            <label class="flex items-center">
                <input class="sr-only peer" name="type" type="radio" value="document" onclick="update_model(this.value)" checked />
                <div class="px-3 py-3 rounded-lg shadow-md flex items-center justify-center text-slate-700 bg-gradient-to-r peer-checked:font-semibold peer-checked:from-pink-500 peer-checked:to-violet-500 peer-checked:text-white">
                    Document
                </div>
            </label>
            <label class="flex items-center">
                <input class="sr-only peer" name="type" type="radio" value="image" onclick="update_model(this.value)" />
                <div class="px-3 py-3 rounded-lg shadow-md flex items-center justify-center text-slate-700 bg-gradient-to-r peer-checked:font-semibold peer-checked:from-pink-500 peer-checked:to-violet-500 peer-checked:text-white">
                    Image
                </div>
            </label>
        </div>

        <input
            id="model"
            class="rounded border-2 border-blue-500 shadow-md px-3 py-2 w-full mb-6"
            value="impira/layoutlm-document-qa"
            required
        />

        <p class="text-center text-gray-500 mb-8">The input image</p>

        <input type="file" required accept="image/*" class="rounded border-blue-500 shadow-md px-3 py-2 w-full mb-6 block" id="image" />

        <p class="text-center text-gray-500 mb-8">The question</p>

        <input
            type="text"
            id="question"
            class="rounded border-2 border-blue-500 shadow-md px-3 py-2 w-full mb-6"
            required
        />

        <button
            id="submit"
            class="my-8 bg-green-500 rounded py-3 px-5 text-white shadow-md disabled:bg-slate-300 w-full"
        >
            Run
        </button>

        <p class="text-gray-400 text-sm text-center">Output logs</p>
        <div id="logs" class="bg-gray-100 rounded p-3 mb-8 text-sm">
            Output will appear here
        </div>

        <p class="text-center text-blue-500 text-sm">
            Check out the
            <a href="https://huggingface.co/spaces/huggingfacejs/doc-vis-qa/blob/main/index.html" class="underline" target="_blank">
                source code
            </a>
        </p>
    </form>

<script type="module">
    import {HfInference} from "@huggingface/inference";

    const default_models = {
        "document": "impira/layoutlm-document-qa",
        "image": "dandelin/vilt-b32-finetuned-vqa",
    };
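    // defaults matching the Document / Image radio buttons above; update_model() below
    // swaps between them when the user toggles the task type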

    // guards against overlapping requests when the form is submitted repeatedly
    let running = false;

    async function launch() {
        if (running) {
            return;
        }
        running = true;
        // grey out the Run button while a request is in flight (it has disabled: styles)
        document.getElementById("submit").disabled = true;
        try {
            // the token is optional: without one, requests run unauthenticated and may be rate limited
            const hf = new HfInference(
                document.getElementById("token").value.trim() || undefined
            );
            const model = document.getElementById("model").value.trim();
            const model_type = document.querySelector("[name=type]:checked").value;
            const image = document.getElementById("image").files[0];
            const question = document.getElementById("question").value.trim();
            document.getElementById("logs").textContent = "";

            // both tasks are expected to resolve to an object with `answer` and `score`;
            // call the task method on `hf` directly so it keeps its instance binding
            const {answer, score} = model_type === "document"
                ? await hf.documentQuestionAnswering({model, inputs: {image, question}})
                : await hf.visualQuestionAnswering({model, inputs: {image, question}});

            document.getElementById("logs").textContent = answer + ": " + score;
        } catch (err) {
            alert("Error: " + err.message);
        } finally {
            running = false;
            document.getElementById("submit").disabled = false;
        }
    }

    window.launch = launch;
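    // inline onsubmit/onclick handlers in the markup above can't see module-scope bindings,
    // so both functions are exposed on window explicitly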

    window.update_model = (model_type) => {
        // keep the model field in sync with the selected task type: if it is empty or still
        // holds the other type's default, swap in the default for the new type; otherwise clear it
        const model_input = document.getElementById("model");
        const cur_model = model_input.value.trim();
        let new_model = "";
        if (
            model_type === "document" && cur_model === default_models["image"]
            || model_type === "image" && cur_model === default_models["document"]
            || cur_model === ""
        ) {
            new_model = default_models[model_type];
        }
        model_input.value = new_model;
    };
</script>
</body>
</html>