Xenova (HF Staff) committed
Commit e706b13 · verified · 1 Parent(s): 4a39f84

Update README.md

Files changed (1)
  1. README.md +31 -0
README.md CHANGED
@@ -48,6 +48,37 @@ For more details refer to our blog post: https://hf.co/blog/smollm3
 
 ## How to use
 
+ ### Transformers.js
+
+ ```js
+ import { pipeline, TextStreamer } from "@huggingface/transformers";
+
+ // Create a text generation pipeline
+ const generator = await pipeline(
+   "text-generation",
+   "HuggingFaceTB/SmolLM3-3B-ONNX",
+   { dtype: "q4f16", device: "webgpu" },
+ );
+
+ // Define the model inputs
+ const thinking = true; // Whether the model should think before answering
+ const messages = [
+   {
+     role: "system",
+     content: "You are SmolLM, a language model created by Hugging Face."
+       + (thinking ? "/think" : "/no_think")
+   },
+   { role: "user", content: "Solve the equation x^2 - 3x + 2 = 0" },
+ ];
+
+ // Generate a response
+ const output = await generator(messages, {
+   max_new_tokens: 1024,
+   streamer: new TextStreamer(generator.tokenizer, { skip_prompt: true, skip_special_tokens: true }),
+ });
+ console.log(output[0].generated_text.at(-1).content);
+ ```
+
 ### ONNXRuntime
 
 ```py
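
The trailing context of this hunk stops at the opening ```py fence, so the body of the README's existing ONNXRuntime example is not visible in the commit. Purely as a reference sketch (not the README's actual code), a manual onnxruntime generation loop for this checkpoint typically looks like the following; the `onnx/model_q4f16.onnx` file name and the `input_ids` / `attention_mask` / `position_ids` / `past_key_values.*` tensor names are assumptions based on common Transformers.js-style ONNX exports, not details taken from this commit.

```py
# Reference sketch only: assumes a Transformers.js-style export with an
# `onnx/model_q4f16.onnx` file and inputs named `input_ids`, `attention_mask`,
# `position_ids`, and `past_key_values.{layer}.{key|value}`.
import numpy as np
import onnxruntime
from huggingface_hub import hf_hub_download
from transformers import AutoConfig, AutoTokenizer

model_id = "HuggingFaceTB/SmolLM3-3B-ONNX"
config = AutoConfig.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Assumed file name; larger exports may also need a matching external-data file
model_path = hf_hub_download(model_id, "onnx/model_q4f16.onnx")
session = onnxruntime.InferenceSession(model_path)

# Build the chat prompt; "/think" in the system message enables reasoning mode
messages = [
    {"role": "system", "content": "You are SmolLM, a language model created by Hugging Face./think"},
    {"role": "user", "content": "Solve the equation x^2 - 3x + 2 = 0"},
]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_dict=True, return_tensors="np"
)
input_ids = inputs["input_ids"].astype(np.int64)
attention_mask = inputs["attention_mask"].astype(np.int64)
position_ids = np.cumsum(attention_mask, axis=-1).astype(np.int64) - 1

# Empty KV cache, with its dtype read from the graph (fp16 for a q4f16 export)
kv_type = next(i.type for i in session.get_inputs() if i.name.startswith("past_key_values"))
kv_dtype = np.float16 if "float16" in kv_type else np.float32
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
past = {
    f"past_key_values.{layer}.{kv}": np.zeros((1, config.num_key_value_heads, 0, head_dim), dtype=kv_dtype)
    for layer in range(config.num_hidden_layers)
    for kv in ("key", "value")
}

# Greedy decoding loop (assumes present.* outputs follow the past_key_values.* order)
generated = []
for _ in range(1024):
    logits, *present = session.run(None, {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "position_ids": position_ids,
        **past,
    })
    next_token = logits[:, -1].argmax(-1, keepdims=True).astype(np.int64)
    generated.append(int(next_token[0, 0]))
    if generated[-1] == tokenizer.eos_token_id:
        break
    input_ids = next_token
    attention_mask = np.concatenate([attention_mask, np.ones_like(next_token)], axis=-1)
    position_ids = position_ids[:, -1:] + 1
    past = dict(zip(past.keys(), present))

print(tokenizer.decode(generated, skip_special_tokens=True))
```

The Transformers.js snippet added in this commit covers the in-browser/WebGPU path; the sketch above is a plain-Python counterpart against the same repository.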