Ron Au committed on
Commit db6e2f8
1 Parent(s): 26d3451

Initial Commit

README.md CHANGED
@@ -1,13 +1,16 @@
  ---
- title: Flask
- emoji: 🌖
- colorFrom: purple
- colorTo: pink
+ title: Flask + dev server
+ emoji: ⚗️
+ colorFrom: gray
+ colorTo: gray
  sdk: gradio
  sdk_version: 2.9.1
+ python_version: 3.10.4
  app_file: app.py
- pinned: false
+ models: [osanseviero/BigGAN-deep-128, t5-small]
+ datasets: [emotion]
  license: mit
+ pinned: false
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py ADDED
@@ -0,0 +1,60 @@
+ import os
+ import requests
+ import json
+ from io import BytesIO
+
+ from flask import Flask, jsonify, render_template, request, send_file
+
+ from modules.inference import infer_t5
+ from modules.dataset import query_emotion
+
+ # https://huggingface.co/settings/tokens
+ # https://huggingface.co/spaces/{username}/{space}/settings
+ API_TOKEN = os.getenv("BIG_GAN_TOKEN")
+
+ app = Flask(__name__)
+
+
+ @app.route("/")
+ def index():
+     return render_template("index.html")
+
+
+ @app.route("/infer_biggan")
+ def biggan():
+     input = request.args.get("input")
+
+     output = requests.request(
+         "POST",
+         "https://api-inference.huggingface.co/models/osanseviero/BigGAN-deep-128",
+         headers={"Authorization": f"Bearer {API_TOKEN}"},
+         data=json.dumps(input),
+     )
+
+     return send_file(BytesIO(output.content), mimetype="image/png")
+
+
+ @app.route("/infer_t5")
+ def t5():
+     input = request.args.get("input")
+
+     output = infer_t5(input)
+
+     return jsonify({"output": output})
+
+
+ @app.route("/query_emotion")
+ def emotion():
+     start = request.args.get("start")
+     end = request.args.get("end")
+
+     print(start)
+     print(end)
+
+     output = query_emotion(int(start), int(end))
+
+     return jsonify({"output": output})
+
+
+ if __name__ == "__main__":
+     app.run(host="0.0.0.0", port=7860)
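For local testing, the three routes above can be exercised with plain HTTP requests. This is a hedged sketch, not part of the commit: it assumes `python app.py` is serving on http://localhost:7860 and the prompt strings ("coffee", the translation prompt) are only illustrative inputs.

```python
# Minimal smoke test for the Space's Flask routes, assuming the dev server
# from app.py is running locally on port 7860.
import requests

BASE = "http://localhost:7860"

# /infer_biggan returns raw PNG bytes for a class-label prompt.
png_bytes = requests.get(f"{BASE}/infer_biggan", params={"input": "coffee"}).content
with open("biggan.png", "wb") as f:
    f.write(png_bytes)

# /infer_t5 returns JSON: {"output": "<generated text>"}.
t5 = requests.get(
    f"{BASE}/infer_t5",
    params={"input": "translate English to German: Hello, world!"},
).json()
print(t5["output"])

# /query_emotion returns JSON: {"output": [{"text": ..., "emotion": ...}, ...]}.
rows = requests.get(f"{BASE}/query_emotion", params={"start": 0, "end": 5}).json()
print(rows["output"])
```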
modules/dataset.py ADDED
@@ -0,0 +1,19 @@
+ from datasets import load_dataset
+
+ dataset = load_dataset("emotion", split="train")
+
+ emotions = dataset.info.features["label"].names
+
+ def query_emotion(start, end):
+     rows = dataset[start:end]
+     texts, labels = [rows[k] for k in rows.keys()]
+
+     observations = []
+
+     for i, text in enumerate(texts):
+         observations.append({
+             "text": text,
+             "emotion": emotions[labels[i]],
+         })
+
+     return observations
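A note on the slice above: `dataset[start:end]` yields a dict mapping column names to lists, so the tuple unpacking relies on the "text" column preceding "label" in the emotion dataset's schema. A hypothetical, order-independent variant (a sketch only, not part of the commit) would index by column name:

```python
# Hedged sketch: same result as query_emotion, but keyed by column name
# instead of relying on the order of the dataset's columns.
def query_emotion_by_name(start, end):
    rows = dataset[start:end]  # dict: column name -> list of values
    return [
        {"text": text, "emotion": emotions[label]}
        for text, label in zip(rows["text"], rows["label"])
    ]
```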
modules/inference.py ADDED
@@ -0,0 +1,11 @@
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
+
+ tokenizer = T5Tokenizer.from_pretrained("t5-small")
+ model = T5ForConditionalGeneration.from_pretrained("t5-small")
+
+
+ def infer_t5(input):
+     input_ids = tokenizer(input, return_tensors="pt").input_ids
+     outputs = model.generate(input_ids)
+
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
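`infer_t5` passes its argument straight through to `t5-small`, so whatever string the front end sends is used verbatim as the prompt, task prefix included. A hedged usage sketch with illustrative prompts (not part of the commit):

```python
# Hedged usage sketch: t5-small expects the task prefix inside the prompt,
# e.g. "translate English to German: ..." or "summarize: ...".
print(infer_t5("translate English to German: The house is wonderful."))
print(infer_t5("summarize: Flask is a lightweight web framework for Python."))
```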
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ datasets==2.*
+ flask==2.1.*
+ requests==2.27.*
+ sentencepiece==0.1.*
+ torch==1.11.*
+ transformers==4.*
static/index.js ADDED
@@ -0,0 +1,126 @@
+ if (document.location.search.includes('dark-theme=true')) {
+   document.body.classList.add('dark-theme');
+ }
+
+ let cursor = 0;
+ const RANGE = 5;
+ const LIMIT = 16_000;
+
+ const textToImage = async (text) => {
+   const inferenceResponse = await fetch(`infer_biggan?input=${text}`);
+   const inferenceBlob = await inferenceResponse.blob();
+
+   return URL.createObjectURL(inferenceBlob);
+ };
+
+ const translateText = async (text) => {
+   const inferResponse = await fetch(`infer_t5?input=${text}`);
+   const inferJson = await inferResponse.json();
+
+   return inferJson.output;
+ };
+
+ const queryDataset = async (start, end) => {
+   const queryResponse = await fetch(`query_emotion?start=${start}&end=${end}`);
+   const queryJson = await queryResponse.json();
+
+   return queryJson.output;
+ };
+
+ const updateTable = async (cursor, range = RANGE) => {
+   const table = document.querySelector('.dataset-output');
+
+   const fragment = new DocumentFragment();
+
+   const observations = await queryDataset(cursor, cursor + range);
+
+   for (const observation of observations) {
+     let row = document.createElement('tr');
+     let text = document.createElement('td');
+     let emotion = document.createElement('td');
+
+     text.textContent = observation.text;
+     emotion.textContent = observation.emotion;
+
+     row.appendChild(text);
+     row.appendChild(emotion);
+     fragment.appendChild(row);
+   }
+
+   table.innerHTML = '';
+
+   table.appendChild(fragment);
+
+   table.insertAdjacentHTML(
+     'afterbegin',
+     `<thead>
+       <tr>
+         <td>text</td>
+         <td>emotion</td>
+       </tr>
+     </thead>`
+   );
+ };
+
+ const imageGenSelect = document.getElementById('image-gen-input');
+ const imageGenImage = document.querySelector('.image-gen-output');
+ const textGenForm = document.querySelector('.text-gen-form');
+ const tableButtonPrev = document.querySelector('.table-previous');
+ const tableButtonNext = document.querySelector('.table-next');
+
+ imageGenSelect.addEventListener('change', async (event) => {
+   const value = event.target.value;
+
+   try {
+     imageGenImage.src = await textToImage(value);
+     imageGenImage.alt = value + ' generated from BigGAN AI model';
+   } catch (err) {
+     console.error(err);
+   }
+ });
+
+ textGenForm.addEventListener('submit', async (event) => {
+   event.preventDefault();
+
+   const textGenInput = document.getElementById('text-gen-input');
+   const textGenParagraph = document.querySelector('.text-gen-output');
+
+   try {
+     textGenParagraph.textContent = await translateText(textGenInput.value);
+   } catch (err) {
+     console.error(err);
+   }
+ });
+
+ tableButtonPrev.addEventListener('click', () => {
+   cursor = cursor > RANGE ? cursor - RANGE : 0;
+
+   if (cursor < RANGE) {
+     tableButtonPrev.classList.add('hidden');
+   }
+   if (cursor < LIMIT - RANGE) {
+     tableButtonNext.classList.remove('hidden');
+   }
+
+   updateTable(cursor);
+ });
+
+ tableButtonNext.addEventListener('click', () => {
+   cursor = cursor < LIMIT - RANGE ? cursor + RANGE : cursor;
+
+   if (cursor >= RANGE) {
+     tableButtonPrev.classList.remove('hidden');
+   }
+   if (cursor >= LIMIT - RANGE) {
+     tableButtonNext.classList.add('hidden');
+   }
+
+   updateTable(cursor);
+ });
+
+ textToImage(imageGenSelect.value)
+   .then((image) => (imageGenImage.src = image))
+   .catch(console.error);
+
+ updateTable(cursor)
+   .catch(console.error);
static/style.css ADDED
@@ -0,0 +1,79 @@
+ body {
+   --text: hsl(0 0% 15%);
+   padding: 2.5rem;
+   font-family: sans-serif;
+   color: var(--text);
+ }
+ body.dark-theme {
+   --text: hsl(0 0% 90%);
+   background-color: hsl(223 39% 7%);
+ }
+
+ main {
+   max-width: 80rem;
+   text-align: center;
+ }
+
+ section {
+   display: flex;
+   flex-direction: column;
+   align-items: center;
+ }
+
+ a {
+   color: var(--text);
+ }
+
+ select, input, button, .text-gen-output {
+   padding: 0.5rem 1rem;
+ }
+
+ select, img, input {
+   margin: 0.5rem auto 1rem;
+ }
+
+ form {
+   width: 25rem;
+   margin: 0 auto;
+ }
+
+ input {
+   width: 70%;
+ }
+
+ button {
+   cursor: pointer;
+ }
+
+ .text-gen-output {
+   min-height: 1.2rem;
+   margin: 1rem;
+   border: 0.5px solid grey;
+ }
+
+ #dataset button {
+   width: 6rem;
+   margin: 0.5rem;
+ }
+
+ #dataset button.hidden {
+   visibility: hidden;
+ }
+
+ table {
+   max-width: 40rem;
+   text-align: left;
+   border-collapse: collapse;
+ }
+
+ thead {
+   font-weight: bold;
+ }
+
+ td {
+   padding: 0.5rem;
+ }
+
+ td:not(thead td) {
+   border: 0.5px solid grey;
+ }
templates/index.html ADDED
The diff for this file is too large to render. See raw diff