Update demo-site dependencies and installation instructions #1129

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open · wants to merge 2 commits into main
969 changes: 969 additions & 0 deletions examples/demo-site/package-lock.json

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions examples/demo-site/package.json
@@ -9,11 +9,11 @@
     "preview": "vite preview"
   },
   "devDependencies": {
-    "vite": "^4.3.2"
+    "vite": "latest"
   },
   "dependencies": {
-    "@xenova/transformers": "^2.0.0-alpha.3",
-    "chart.js": "^4.3.0",
-    "prismjs": "^1.29.0"
+    "@huggingface/transformers": "../..",
+    "chart.js": "latest",
+    "prismjs": "latest"
   }
 }
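A note on the new specifiers: `"@huggingface/transformers": "../.."` is a relative file dependency, so the demo now builds against the in-repo library two directories up rather than the published `@xenova/transformers` 2.x alpha, while `latest` lets npm resolve `vite`, `chart.js`, and `prismjs` to their newest published releases on a fresh install; the newly committed `package-lock.json` above records the resolved versions.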
8 changes: 4 additions & 4 deletions examples/demo-site/src/index.html
@@ -535,16 +535,16 @@ <h2 class="fw-bolder">Quick tour</h2>
 </div>
 <div class="mb-3">
     <h5 class="mb-2">Installation</h5>
-    To install via <a href="https://www.npmjs.com/package/@xenova/transformers">NPM</a>, run:
-    <pre><code class="language-bash">npm i @xenova/transformers</code></pre>
+    To install via <a href="https://www.npmjs.com/package/@huggingface/transformers">NPM</a>, run:
+    <pre><code class="language-bash">npm i @huggingface/transformers</code></pre>

     Alternatively, you can use it in vanilla JS, without any bundler, by using a CDN
     or static hosting. For example, using
     <a href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Modules">ES Modules</a>,
     you can import the library with:

     <pre><code class="language-html">&lt;script type="module"&gt;
-import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
+import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers';
 &lt;/script&gt;</code></pre>
 </div>

@@ -564,7 +564,7 @@ <h5 class="mb-2">Basic example</h5>

 </div>
 <div class="col-lg-6 mb-4 mb-lg-0">
-    <pre><code class="language-js">import { pipeline } from '@xenova/transformers';
+    <pre><code class="language-js">import { pipeline } from '@huggingface/transformers';

 // Allocate a pipeline for sentiment-analysis
 let pipe = await pipeline('sentiment-analysis');
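For reference, the quick-tour snippet continues beyond this hunk. A minimal runnable version against the renamed package (the sample input and the logged output shape follow the library's documented pipeline API) would be:

```js
import { pipeline } from '@huggingface/transformers';

// Allocate a pipeline for sentiment-analysis
let pipe = await pipeline('sentiment-analysis');

// Run it on some text; the result is an array of { label, score } objects,
// e.g. [{ label: 'POSITIVE', score: 0.99... }]
let out = await pipe('I love transformers!');
console.log(out);
```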
129 changes: 83 additions & 46 deletions examples/demo-site/src/worker.js
@@ -4,7 +4,7 @@
 // Needed to ensure the UI thread is not blocked when running //
 /////////////////////////////////////////////////////////////////

-import { pipeline, env } from "@xenova/transformers";
+import { pipeline, env, TextStreamer } from "@huggingface/transformers";
 env.allowLocalModels = false;

 // Define task function mapping
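The new import is the substance of this PR: instead of the old `callback_function` generation option, whose callback received raw beams and had to call `pipeline.tokenizer.decode(beams[0].output_token_ids, ...)` itself, each task below now builds a `TextStreamer` and passes it via the `streamer` option, so the callback receives already-decoded text chunks. A condensed sketch of the pattern, using only option names that appear in this diff (the task, prompt, and `max_new_tokens` value are placeholders):

```js
import { pipeline, TextStreamer } from '@huggingface/transformers';

// Inside a web worker, as in worker.js below.
const generator = await pipeline('text-generation');

let allText = '';
const streamer = new TextStreamer(generator.tokenizer, {
    skip_prompt: true,          // don't re-emit the input prompt
    skip_special_tokens: true,  // strip markers like </s> from the output
    callback_function: (newText) => {
        // newText is an already-decoded text chunk; accumulate it and
        // push the running result to the UI thread.
        allText += newText;
        self.postMessage({ type: 'update', data: allText.trim() });
    },
});

// The streamer rides alongside the usual generation options.
await generator('Once upon a time', { max_new_tokens: 50, streamer });
```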
@@ -158,20 +158,26 @@ async function translate(data) {
     // Doing it this way prevents the same model from being loaded multiple times
     pipeline.task = `translation_${data.languageFrom}_to_${data.languageTo}`;

-    return await pipeline(data.text, {
-        ...data.generation,
-        callback_function: function (beams) {
-            const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
-                skip_special_tokens: true,
-            })
+    let allText = "";
+
+    const streamer = new TextStreamer(pipeline.tokenizer, {
+        skip_prompt: true,
+        skip_special_tokens: true,
+        callback_function: function (newText) {
+            allText += newText;

             self.postMessage({
                 type: 'update',
                 target: data.elementIdToUpdate,
-                data: decodedText
+                data: allText.trim()
             });
         }
     })
+
+    return await pipeline(data.text, {
+        ...data.generation,
+        streamer
+    })
 }

 async function text_generation(data) {
@@ -186,20 +192,26 @@ async function text_generation(data) {

     let text = data.text.trim();

-    return await pipeline(text, {
-        ...data.generation,
-        callback_function: function (beams) {
-            const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
-                skip_special_tokens: true,
-            })
+    let allText = "";
+
+    const streamer = new TextStreamer(pipeline.tokenizer, {
+        skip_prompt: true,
+        skip_special_tokens: true,
+        callback_function: function (newText) {
+            allText += newText;

             self.postMessage({
                 type: 'update',
                 target: data.elementIdToUpdate,
-                data: decodedText
+                data: allText.trim()
             });
         }
     })
+
+    return await pipeline(text, {
+        ...data.generation,
+        streamer
+    })
 }

 async function code_completion(data) {
@@ -214,21 +226,27 @@ async function code_completion(data) {

     let text = data.text;

-    return await pipeline(text, {
-        ...data.generation,
-        callback_function: function (beams) {
-            const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
-                skip_special_tokens: true,
-            })
+    let allText = "";
+
+    const streamer = new TextStreamer(pipeline.tokenizer, {
+        skip_prompt: true,
+        skip_special_tokens: true,
+        callback_function: function (newText) {
+            allText += newText;

             self.postMessage({
                 type: 'update',
                 target: data.elementIdToUpdate,
                 targetType: data.targetType,
-                data: decodedText
+                data: allText.trim()
             });
         }
     })
+
+    return await pipeline(text, {
+        ...data.generation,
+        streamer
+    })
 }

 async function masked_lm(data) {
@@ -395,20 +413,26 @@ async function summarize(data) {
         });
     })

-    return await pipeline(data.text, {
-        ...data.generation,
-        callback_function: function (beams) {
-            const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
-                skip_special_tokens: true,
-            })
+    let allText = "";
+
+    const streamer = new TextStreamer(pipeline.tokenizer, {
+        skip_prompt: true,
+        skip_special_tokens: true,
+        callback_function: function (newText) {
+            allText += newText;

             self.postMessage({
                 type: 'update',
                 target: data.elementIdToUpdate,
-                data: decodedText.trim()
+                data: allText
             });
         }
     })
+
+    return await pipeline(data.text, {
+        ...data.generation,
+        streamer
+    })
 }

 async function speech_to_text(data) {
@@ -420,24 +444,31 @@
         });
     })

-    return await pipeline(data.audio, {
-        // Choose good defaults for the demo
-        chunk_length_s: 30,
-        stride_length_s: 5,
+    let allText = "";

-        ...data.generation,
-        callback_function: function (beams) {
-            const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
-                skip_special_tokens: true,
-            })
+    const streamer = new TextStreamer(pipeline.tokenizer, {
+        skip_prompt: true,
+        skip_special_tokens: true,
+        callback_function: function (newText) {
+            allText += newText;

             self.postMessage({
                 type: 'update',
                 target: data.elementIdToUpdate,
-                data: decodedText.trim()
+                data: allText
             });
         }
     })
+
+    return await pipeline(data.audio, {
+        // Choose good defaults for the demo
+        chunk_length_s: 30,
+        stride_length_s: 5,
+
+        ...data.generation,
+
+        streamer
+    })
 }

 async function image_to_text(data) {
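The speech_to_text hunk keeps the demo's chunking defaults while swapping in the streamer. Roughly, `chunk_length_s: 30` splits long audio into 30-second windows and `stride_length_s: 5` overlaps neighbouring windows so words straddling a boundary are not lost. A minimal sketch outside the worker, assuming the task's default ASR model and a placeholder audio URL:

```js
import { pipeline } from '@huggingface/transformers';

// Transcribe long-form audio by processing overlapping 30-second windows.
const transcriber = await pipeline('automatic-speech-recognition');
const { text } = await transcriber('https://example.com/long-audio.wav', {
    chunk_length_s: 30,   // window size in seconds
    stride_length_s: 5,   // overlap between neighbouring windows
});
console.log(text);
```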
@@ -449,20 +480,26 @@
         });
     })

-    return await pipeline(data.image, {
-        ...data.generation,
-        callback_function: function (beams) {
-            const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
-                skip_special_tokens: true,
-            })
+    let allText = "";
+
+    const streamer = new TextStreamer(pipeline.tokenizer, {
+        skip_prompt: true,
+        skip_special_tokens: true,
+        callback_function: function (newText) {
+            allText += newText;

             self.postMessage({
                 type: 'update',
                 target: data.elementIdToUpdate,
-                data: decodedText.trim()
+                data: allText
             });
         }
     })
+
+    return await pipeline(data.image, {
+        ...data.generation,
+        streamer
+    })
 }

 async function image_classification(data) {
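This diff does not touch the receiving side, but for orientation: every task now posts `{ type: 'update', target, data }` messages carrying progressively longer text. A hypothetical main-thread counterpart (the worker path and routing logic are assumptions, not part of this PR) could look like:

```js
// main.js (hypothetical): consume the worker's streamed updates.
const worker = new Worker(new URL('./worker.js', import.meta.url), { type: 'module' });

worker.onmessage = (event) => {
    const message = event.data;
    if (message.type === 'update') {
        // Write the latest partial generation into the element the
        // request identified via elementIdToUpdate.
        document.getElementById(message.target).textContent = message.data;
    }
};
```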