areeb-h committed
Commit f854c7a
1 Parent(s): 7f07f49

Update index.js

Files changed (1)
  1. index.js +14 -89
index.js CHANGED
@@ -1,90 +1,9 @@
-// import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';
-
-// // Since we will download the model from the Hugging Face Hub, we can skip the local model check
-// env.allowLocalModels = false;
-
-// // Reference the elements that we will need
-// const status = document.getElementById('status');
-// const fileUpload = document.getElementById('upload');
-// const imageContainer = document.getElementById('container');
-// const example = document.getElementById('example');
-
-// const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg';
-
-// // Create a new object detection pipeline
-// status.textContent = 'Loading model...';
-// const detector = await pipeline('object-detection', 'Xenova/detr-resnet-50');
-// status.textContent = 'Ready';
-
-// example.addEventListener('click', (e) => {
-//     e.preventDefault();
-//     detect(EXAMPLE_URL);
-// });
-
-// fileUpload.addEventListener('change', function (e) {
-//     const file = e.target.files[0];
-//     if (!file) {
-//         return;
-//     }
-
-//     const reader = new FileReader();
-
-//     // Set up a callback when the file is loaded
-//     reader.onload = e2 => detect(e2.target.result);
-
-//     reader.readAsDataURL(file);
-// });
-
-
-// // Detect objects in the image
-// async function detect(img) {
-//     imageContainer.innerHTML = '';
-//     imageContainer.style.backgroundImage = `url(${img})`;
-
-//     status.textContent = 'Analysing...';
-//     const output = await detector(img, {
-//         threshold: 0.5,
-//         percentage: true,
-//     });
-//     status.textContent = '';
-//     output.forEach(renderBox);
-// }
-
-// // Render a bounding box and label on the image
-// function renderBox({ box, label }) {
-//     const { xmax, xmin, ymax, ymin } = box;
-
-//     // Generate a random color for the box
-//     const color = '#' + Math.floor(Math.random() * 0xFFFFFF).toString(16).padStart(6, 0);
-
-//     // Draw the box
-//     const boxElement = document.createElement('div');
-//     boxElement.className = 'bounding-box';
-//     Object.assign(boxElement.style, {
-//         borderColor: color,
-//         left: 100 * xmin + '%',
-//         top: 100 * ymin + '%',
-//         width: 100 * (xmax - xmin) + '%',
-//         height: 100 * (ymax - ymin) + '%',
-//     })
-
-//     // Draw label
-//     const labelElement = document.createElement('span');
-//     labelElement.textContent = label;
-//     labelElement.className = 'bounding-box-label';
-//     labelElement.style.backgroundColor = color;
-
-//     boxElement.appendChild(labelElement);
-//     imageContainer.appendChild(boxElement);
-// }
-
-
 import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';
 
-// Since we will download the model from the Hugging Face Hub, we can skip the local model check
+// Disable local models
 env.allowLocalModels = false;
 
-// Reference the elements that we will need
+// Reference the elements
 const status = document.getElementById('status');
 const userInput = document.getElementById('user-input');
 const outputContainer = document.getElementById('output');
@@ -92,8 +11,15 @@ const submitButton = document.getElementById('submit-button');
 
 // Load the text-generation pipeline
 status.textContent = 'Loading model...';
-const generator = await pipeline('text-generation', 'meta-llama/Llama-2-7b-hf');
-status.textContent = 'Model loaded. Ready to chat!';
+
+let generator;
+try {
+    generator = await pipeline('text-generation', 'gpt2');
+    status.textContent = 'Model loaded. Ready to chat!';
+} catch (error) {
+    console.error('Error loading model:', error);
+    status.textContent = 'Failed to load model. Check the console for details.';
+}
 
 // Add event listener to the submit button
 submitButton.addEventListener('click', async () => {
@@ -104,15 +30,14 @@ submitButton.addEventListener('click', async () => {
         return;
     }
 
-    // Update status to show the user we're processing
+    // Update status to show we're processing
     status.textContent = 'Generating response...';
 
     try {
-        // Generate text from the user input
         const response = await generator(inputText, {
             max_new_tokens: 100,
-            temperature: 0.7, // Controls randomness; lower = more deterministic
-            top_p: 0.95, // Nucleus sampling
+            temperature: 0.7,
+            top_p: 0.95,
         });
 
         // Display the generated response
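The last hunk ends at the "Display the generated response" comment, so the rendering step itself is not part of the shown diff. As a rough, illustrative sketch only (not code from this commit): a transformers.js text-generation pipeline typically resolves to an array of objects with a generated_text field, which could be written into the outputContainer element referenced above.

// Illustrative sketch, not part of the commit. Assumes `generator`, `inputText`,
// `outputContainer`, and `status` from index.js are in scope, and that the
// pipeline resolves to the usual [{ generated_text }] shape.
const response = await generator(inputText, {
    max_new_tokens: 100,
    temperature: 0.7,
    top_p: 0.95,
});

// Take the first generation and show it to the user.
const generatedText = response[0].generated_text;
outputContainer.textContent = generatedText;
status.textContent = '';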