Ron Au committed on
Commit 5264665
1 Parent(s): babba8b

Initial Commit

Files changed (8)
  1. README.md +8 -5
  2. app.py +79 -0
  3. dataset.py +19 -0
  4. index.html +0 -0
  5. index.js +126 -0
  6. inference.py +11 -0
  7. requirements.txt +5 -0
  8. style.css +79 -0
README.md CHANGED
@@ -1,13 +1,16 @@
  ---
- title: Http Server
- emoji: 🐨
- colorFrom: yellow
- colorTo: red
+ title: Python + HTTP Server
+ emoji: 🐍
+ colorFrom: blue
+ colorTo: yellow
  sdk: gradio
  sdk_version: 2.9.1
+ python_version: 3.10.4
  app_file: app.py
- pinned: false
+ models: [osanseviero/BigGAN-deep-128, t5-small]
+ datasets: [emotion]
  license: mit
+ pinned: false
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
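The new front-matter keys are plain YAML, so the Space metadata (linked models, datasets, Python version) can be read programmatically. A minimal sketch, assuming PyYAML is installed and README.md is in the working directory; nothing below is part of the commit:

# Hypothetical check of the README front matter.
import yaml

with open("README.md", encoding="utf-8") as f:
    # Front matter sits between the first two "---" markers.
    _, front_matter, _ = f.read().split("---", 2)

meta = yaml.safe_load(front_matter)
print(meta["models"])    # ['osanseviero/BigGAN-deep-128', 't5-small']
print(meta["datasets"])  # ['emotion']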
app.py ADDED
@@ -0,0 +1,79 @@
+ import os
+ import json
+ import requests
+ from http.server import SimpleHTTPRequestHandler, ThreadingHTTPServer
+ from urllib.parse import parse_qs, urlparse
+
+ from inference import infer_t5
+ from dataset import query_emotion
+
+ # https://huggingface.co/settings/tokens
+ # https://huggingface.co/spaces/{username}/{space}/settings
+ API_TOKEN = os.getenv("BIG_GAN_TOKEN")
+
+
+ class RequestHandler(SimpleHTTPRequestHandler):
+     def do_GET(self):
+         if self.path == "/":
+             self.path = "index.html"
+
+             return SimpleHTTPRequestHandler.do_GET(self)
+
+         if self.path.startswith("/infer_biggan"):
+             url = urlparse(self.path)
+             query = parse_qs(url.query)
+             input = query.get("input", None)[0]
+
+             output = requests.request(
+                 "POST",
+                 "https://api-inference.huggingface.co/models/osanseviero/BigGAN-deep-128",
+                 headers={"Authorization": f"Bearer {API_TOKEN}"},
+                 data=json.dumps(input),
+             )
+
+             self.send_response(200)
+             self.send_header("Content-Type", "application/json")
+             self.end_headers()
+
+             self.wfile.write(output.content)
+
+             return SimpleHTTPRequestHandler
+
+         elif self.path.startswith("/infer_t5"):
+             url = urlparse(self.path)
+             query = parse_qs(url.query)
+             input = query.get("input", None)[0]
+
+             output = infer_t5(input)
+
+             self.send_response(200)
+             self.send_header("Content-Type", "application/json")
+             self.end_headers()
+
+             self.wfile.write(json.dumps({"output": output}).encode("utf-8"))
+
+             return SimpleHTTPRequestHandler
+
+         elif self.path.startswith("/query_emotion"):
+             url = urlparse(self.path)
+             query = parse_qs(url.query)
+             start = int(query.get("start", None)[0])
+             end = int(query.get("end", None)[0])
+
+             output = query_emotion(start, end)
+
+             self.send_response(200)
+             self.send_header("Content-Type", "application/json")
+             self.end_headers()
+
+             self.wfile.write(json.dumps({"output": output}).encode("utf-8"))
+
+             return SimpleHTTPRequestHandler
+
+         else:
+             return SimpleHTTPRequestHandler.do_GET(self)
+
+
+ server = ThreadingHTTPServer(("", 7860), RequestHandler)
+
+ server.serve_forever()
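The handler above serves index.html plus three GET endpoints on port 7860: /infer_biggan, /infer_t5 and /query_emotion, each taking its parameters from the query string. A hedged client sketch, assuming the server is already running locally; the routes and parameter names come from app.py, everything else is illustrative:

# Illustrative client for the routes defined in app.py.
import requests

BASE = "http://localhost:7860"  # assumed local address

# The BigGAN route relays whatever the Inference API returns;
# the front end treats the response body as an image blob.
image = requests.get(f"{BASE}/infer_biggan", params={"input": "dog"})
with open("biggan_output.bin", "wb") as f:
    f.write(image.content)

# The T5 and dataset routes return JSON with an "output" key.
t5 = requests.get(f"{BASE}/infer_t5", params={"input": "translate English to German: Hello"})
print(t5.json()["output"])

rows = requests.get(f"{BASE}/query_emotion", params={"start": 0, "end": 5})
print(rows.json()["output"])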
dataset.py ADDED
@@ -0,0 +1,19 @@
+ from datasets import load_dataset
+
+ dataset = load_dataset("emotion", split="train")
+
+ emotions = dataset.info.features["label"].names
+
+ def query_emotion(start, end):
+     rows = dataset[start:end]
+     texts, labels = [rows[k] for k in rows.keys()]
+
+     observations = []
+
+     for i, text in enumerate(texts):
+         observations.append({
+             "text": text,
+             "emotion": emotions[labels[i]],
+         })
+
+     return observations
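query_emotion slices the cached train split and pairs each text with its decoded label name, returning a list of {"text", "emotion"} dicts. A short usage sketch; the function comes from the commit, the loop is illustrative, and importing the module downloads the emotion dataset on first run:

from dataset import query_emotion

for row in query_emotion(0, 5):
    print(f'{row["emotion"]:>8}: {row["text"]}')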
index.html ADDED
The diff for this file is too large to render. See raw diff
 
index.js ADDED
@@ -0,0 +1,126 @@
+ if (document.location.search.includes('dark-theme=true')) {
+   document.body.classList.add('dark-theme');
+ }
+
+ let cursor = 0;
+ const RANGE = 5;
+ const LIMIT = 16_000;
+
+ const textToImage = async (text) => {
+   const inferenceResponse = await fetch(`infer_biggan?input=${text}`);
+   const inferenceBlob = await inferenceResponse.blob();
+
+   return URL.createObjectURL(inferenceBlob);
+ };
+
+ const translateText = async (text) => {
+   const inferResponse = await fetch(`infer_t5?input=${text}`);
+   const inferJson = await inferResponse.json();
+
+   return inferJson.output;
+ };
+
+ const queryDataset = async (start, end) => {
+   const queryResponse = await fetch(`query_emotion?start=${start}&end=${end}`);
+   const queryJson = await queryResponse.json();
+
+   return queryJson.output;
+ };
+
+ const updateTable = async (cursor, range = RANGE) => {
+   const table = document.querySelector('.dataset-output');
+
+   const fragment = new DocumentFragment();
+
+   const observations = await queryDataset(cursor, cursor + range);
+
+   for (const observation of observations) {
+     let row = document.createElement('tr');
+     let text = document.createElement('td');
+     let emotion = document.createElement('td');
+
+     text.textContent = observation.text;
+     emotion.textContent = observation.emotion;
+
+     row.appendChild(text);
+     row.appendChild(emotion);
+     fragment.appendChild(row);
+   }
+
+   table.innerHTML = '';
+
+   table.appendChild(fragment);
+
+   table.insertAdjacentHTML(
+     'afterbegin',
+     `<thead>
+       <tr>
+         <td>text</td>
+         <td>emotion</td>
+       </tr>
+     </thead>`
+   );
+ };
+
+ const imageGenSelect = document.getElementById('image-gen-input');
+ const imageGenImage = document.querySelector('.image-gen-output');
+ const textGenForm = document.querySelector('.text-gen-form');
+ const tableButtonPrev = document.querySelector('.table-previous');
+ const tableButtonNext = document.querySelector('.table-next');
+
+ imageGenSelect.addEventListener('change', async (event) => {
+   const value = event.target.value;
+
+   try {
+     imageGenImage.src = await textToImage(value);
+     imageGenImage.alt = value + ' generated from BigGAN AI model';
+   } catch (err) {
+     console.error(err);
+   }
+ });
+
+ textGenForm.addEventListener('submit', async (event) => {
+   event.preventDefault();
+
+   const textGenInput = document.getElementById('text-gen-input');
+   const textGenParagraph = document.querySelector('.text-gen-output');
+
+   try {
+     textGenParagraph.textContent = await translateText(textGenInput.value);
+   } catch (err) {
+     console.error(err);
+   }
+ });
+
+ tableButtonPrev.addEventListener('click', () => {
+   cursor = cursor > RANGE ? cursor - RANGE : 0;
+
+   if (cursor < RANGE) {
+     tableButtonPrev.classList.add('hidden');
+   }
+   if (cursor < LIMIT - RANGE) {
+     tableButtonNext.classList.remove('hidden');
+   }
+
+   updateTable(cursor);
+ });
+
+ tableButtonNext.addEventListener('click', () => {
+   cursor = cursor < LIMIT - RANGE ? cursor + RANGE : cursor;
+
+   if (cursor >= RANGE) {
+     tableButtonPrev.classList.remove('hidden');
+   }
+   if (cursor >= LIMIT - RANGE) {
+     tableButtonNext.classList.add('hidden');
+   }
+
+   updateTable(cursor);
+ });
+
+ textToImage(imageGenSelect.value)
+   .then((image) => (imageGenImage.src = image))
+   .catch(console.error);
+
+ updateTable(cursor)
+   .catch(console.error);
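Note that LIMIT (16_000) matches the number of rows in the emotion train split, so the next/previous buttons hide themselves at the ends of the dataset instead of requesting out-of-range slices.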
inference.py ADDED
@@ -0,0 +1,11 @@
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
+
+ tokenizer = T5Tokenizer.from_pretrained("t5-small")
+ model = T5ForConditionalGeneration.from_pretrained("t5-small")
+
+
+ def infer_t5(input):
+     input_ids = tokenizer(input, return_tensors="pt").input_ids
+     outputs = model.generate(input_ids)
+
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
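infer_t5 is a plain generate call on t5-small, so the caller supplies the task prefix as part of the input text (the front end just forwards whatever the user types). A small hedged example; the prompt and expected output are illustrative:

from inference import infer_t5

# Downloads the t5-small weights on first use.
print(infer_t5("translate English to German: The house is wonderful."))
# Expected output along the lines of: "Das Haus ist wunderbar."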
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ datasets==2.*
+ requests==2.27.*
+ sentencepiece==0.1.*
+ torch==1.11.*
+ transformers==4.*
style.css ADDED
@@ -0,0 +1,79 @@
+ body {
+   --text: hsl(0 0% 15%);
+   padding: 2.5rem;
+   font-family: sans-serif;
+   color: var(--text);
+ }
+ body.dark-theme {
+   --text: hsl(0 0% 90%);
+   background-color: hsl(223 39% 7%);
+ }
+
+ main {
+   max-width: 80rem;
+   text-align: center;
+ }
+
+ section {
+   display: flex;
+   flex-direction: column;
+   align-items: center;
+ }
+
+ a {
+   color: var(--text);
+ }
+
+ select, input, button, .text-gen-output {
+   padding: 0.5rem 1rem;
+ }
+
+ select, img, input {
+   margin: 0.5rem auto 1rem;
+ }
+
+ form {
+   width: 25rem;
+   margin: 0 auto;
+ }
+
+ input {
+   width: 70%;
+ }
+
+ button {
+   cursor: pointer;
+ }
+
+ .text-gen-output {
+   min-height: 1.2rem;
+   margin: 1rem;
+   border: 0.5px solid grey;
+ }
+
+ #dataset button {
+   width: 6rem;
+   margin: 0.5rem;
+ }
+
+ #dataset button.hidden {
+   visibility: hidden;
+ }
+
+ table {
+   max-width: 40rem;
+   text-align: left;
+   border-collapse: collapse;
+ }
+
+ thead {
+   font-weight: bold;
+ }
+
+ td {
+   padding: 0.5rem;
+ }
+
+ td:not(thead td) {
+   border: 0.5px solid grey;
+ }