areeb-h committed on
Commit
b431032
·
verified ·
1 Parent(s): 729b76d

Update index.js

Browse files
Files changed (1) hide show
  1. index.js +111 -63
index.js CHANGED
@@ -1,3 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';
2
 
3
  // Since we will download the model from the Hugging Face Hub, we can skip the local model check
@@ -5,75 +86,42 @@ env.allowLocalModels = false;
5
 
6
  // Reference the elements that we will need
7
  const status = document.getElementById('status');
8
- const fileUpload = document.getElementById('upload');
9
- const imageContainer = document.getElementById('container');
10
- const example = document.getElementById('example');
11
 
12
- const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg';
13
-
14
- // Create a new object detection pipeline
15
  status.textContent = 'Loading model...';
16
- const detector = await pipeline('object-detection', 'Xenova/detr-resnet-50');
17
- status.textContent = 'Ready';
18
 
19
- example.addEventListener('click', (e) => {
20
- e.preventDefault();
21
- detect(EXAMPLE_URL);
22
- });
23
 
24
- fileUpload.addEventListener('change', function (e) {
25
- const file = e.target.files[0];
26
- if (!file) {
27
  return;
28
  }
29
 
30
- const reader = new FileReader();
31
-
32
- // Set up a callback when the file is loaded
33
- reader.onload = e2 => detect(e2.target.result);
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
- reader.readAsDataURL(file);
 
36
  });
37
-
38
-
39
- // Detect objects in the image
40
- async function detect(img) {
41
- imageContainer.innerHTML = '';
42
- imageContainer.style.backgroundImage = `url(${img})`;
43
-
44
- status.textContent = 'Analysing...';
45
- const output = await detector(img, {
46
- threshold: 0.5,
47
- percentage: true,
48
- });
49
- status.textContent = '';
50
- output.forEach(renderBox);
51
- }
52
-
53
- // Render a bounding box and label on the image
54
- function renderBox({ box, label }) {
55
- const { xmax, xmin, ymax, ymin } = box;
56
-
57
- // Generate a random color for the box
58
- const color = '#' + Math.floor(Math.random() * 0xFFFFFF).toString(16).padStart(6, 0);
59
-
60
- // Draw the box
61
- const boxElement = document.createElement('div');
62
- boxElement.className = 'bounding-box';
63
- Object.assign(boxElement.style, {
64
- borderColor: color,
65
- left: 100 * xmin + '%',
66
- top: 100 * ymin + '%',
67
- width: 100 * (xmax - xmin) + '%',
68
- height: 100 * (ymax - ymin) + '%',
69
- })
70
-
71
- // Draw label
72
- const labelElement = document.createElement('span');
73
- labelElement.textContent = label;
74
- labelElement.className = 'bounding-box-label';
75
- labelElement.style.backgroundColor = color;
76
-
77
- boxElement.appendChild(labelElement);
78
- imageContainer.appendChild(boxElement);
79
- }
 
1
+ // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';
2
+
3
+ // // Since we will download the model from the Hugging Face Hub, we can skip the local model check
4
+ // env.allowLocalModels = false;
5
+
6
+ // // Reference the elements that we will need
7
+ // const status = document.getElementById('status');
8
+ // const fileUpload = document.getElementById('upload');
9
+ // const imageContainer = document.getElementById('container');
10
+ // const example = document.getElementById('example');
11
+
12
+ // const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg';
13
+
14
+ // // Create a new object detection pipeline
15
+ // status.textContent = 'Loading model...';
16
+ // const detector = await pipeline('object-detection', 'Xenova/detr-resnet-50');
17
+ // status.textContent = 'Ready';
18
+
19
+ // example.addEventListener('click', (e) => {
20
+ // e.preventDefault();
21
+ // detect(EXAMPLE_URL);
22
+ // });
23
+
24
+ // fileUpload.addEventListener('change', function (e) {
25
+ // const file = e.target.files[0];
26
+ // if (!file) {
27
+ // return;
28
+ // }
29
+
30
+ // const reader = new FileReader();
31
+
32
+ // // Set up a callback when the file is loaded
33
+ // reader.onload = e2 => detect(e2.target.result);
34
+
35
+ // reader.readAsDataURL(file);
36
+ // });
37
+
38
+
39
+ // // Detect objects in the image
40
+ // async function detect(img) {
41
+ // imageContainer.innerHTML = '';
42
+ // imageContainer.style.backgroundImage = `url(${img})`;
43
+
44
+ // status.textContent = 'Analysing...';
45
+ // const output = await detector(img, {
46
+ // threshold: 0.5,
47
+ // percentage: true,
48
+ // });
49
+ // status.textContent = '';
50
+ // output.forEach(renderBox);
51
+ // }
52
+
53
+ // // Render a bounding box and label on the image
54
+ // function renderBox({ box, label }) {
55
+ // const { xmax, xmin, ymax, ymin } = box;
56
+
57
+ // // Generate a random color for the box
58
+ // const color = '#' + Math.floor(Math.random() * 0xFFFFFF).toString(16).padStart(6, 0);
59
+
60
+ // // Draw the box
61
+ // const boxElement = document.createElement('div');
62
+ // boxElement.className = 'bounding-box';
63
+ // Object.assign(boxElement.style, {
64
+ // borderColor: color,
65
+ // left: 100 * xmin + '%',
66
+ // top: 100 * ymin + '%',
67
+ // width: 100 * (xmax - xmin) + '%',
68
+ // height: 100 * (ymax - ymin) + '%',
69
+ // })
70
+
71
+ // // Draw label
72
+ // const labelElement = document.createElement('span');
73
+ // labelElement.textContent = label;
74
+ // labelElement.className = 'bounding-box-label';
75
+ // labelElement.style.backgroundColor = color;
76
+
77
+ // boxElement.appendChild(labelElement);
78
+ // imageContainer.appendChild(boxElement);
79
+ // }
80
+
81
+
82
// Browser chat demo built on transformers.js: load a text-generation model
// in the browser and run user prompts through it.
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';

// Since we will download the model from the Hugging Face Hub, we can skip the local model check
env.allowLocalModels = false;

// Reference the elements that we will need
const status = document.getElementById('status');
const userInput = document.getElementById('user-input');
const outputContainer = document.getElementById('output');
const submitButton = document.getElementById('submit-button');

// Single source of truth for the idle status line (was duplicated).
const READY_MESSAGE = 'Model loaded. Ready to chat!';

// Load the text-generation pipeline.
// NOTE: transformers.js v2 can only run ONNX-converted models. The previous
// id 'meta-llama/Llama-2-7b-hf' is a gated, PyTorch-only 7B checkpoint and
// would fail to load in the browser; use a small ONNX model from the Xenova
// namespace instead.
status.textContent = 'Loading model...';
const generator = await pipeline('text-generation', 'Xenova/llama2.c-stories15M');
status.textContent = READY_MESSAGE;

// Generate a completion for the submitted prompt and render it.
submitButton.addEventListener('click', async () => {
    const inputText = userInput.value.trim();

    if (!inputText) {
        outputContainer.textContent = 'Please enter a prompt.';
        return;
    }

    // Prevent overlapping generations while one is already in flight.
    submitButton.disabled = true;
    status.textContent = 'Generating response...';

    try {
        // Generate text from the user input
        const response = await generator(inputText, {
            max_new_tokens: 100,
            temperature: 0.7, // Controls randomness; lower = more deterministic
            top_p: 0.95,      // Nucleus sampling
        });

        // Display the generated response (textContent avoids interpreting
        // model output as HTML and skips layout-dependent innerText cost).
        outputContainer.textContent = response[0].generated_text;
    } catch (error) {
        console.error(error);
        outputContainer.textContent = 'Error generating response. Please try again.';
    } finally {
        // Always restore the UI, even if generation failed.
        submitButton.disabled = false;
        status.textContent = READY_MESSAGE;
    }
});