Enter two texts and compare their meaning using vector embeddings. The model runs inside a Web Worker — the page stays fully responsive during inference. First run downloads ~90 MB; subsequent runs are instant.
// Load the feature-extraction pipeline via the worker pool
// (presumably cached after the first ~90 MB download — see note above; verify pool semantics).
const model = await pool.load('feature-extraction', {
model: 'mixedbread-ai/mxbai-embed-xsmall-v1',
});
// Embed both texts in parallel. Mean-pool token embeddings into a single
// vector and L2-normalize it, so cosine similarity reduces to a dot product.
const [e1, e2] = await Promise.all([
model.run(textA, { pooling: 'mean', normalize: true }),
model.run(textB, { pooling: 'mean', normalize: true }),
]);
// Cosine similarity is in [-1, 1] (not 0..1); values near 1 mean similar meaning.
// In practice embedding similarities are usually positive, but that is not guaranteed.
const similarity = cosine(e1.data, e2.data); // range [-1, 1]