Felladrin committed
Commit d894377
1 Parent(s): d25f4a1

Create README.md

Files changed (1)
  1. README.md +31 -0
README.md ADDED
@@ -0,0 +1,31 @@
+ ---
+ library_name: "transformers.js"
+ base_model: Felladrin/llama2_xs_460M_experimental_evol_instruct
+ ---
+
+ INT8 ONNX version of [Felladrin/llama2_xs_460M_experimental_evol_instruct](https://huggingface.co/Felladrin/llama2_xs_460M_experimental_evol_instruct) to use with [Transformers.js](https://huggingface.co/docs/transformers.js).
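+
+ The examples below assume Transformers.js is installed from npm as `@xenova/transformers` (e.g. `npm i @xenova/transformers`).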
+
+ ### Example usage
+
+ #### Pipeline API
+
+ ```js
+ import { pipeline } from '@xenova/transformers';
+
+ const generator = await pipeline('text-generation', 'Felladrin/onnx-int8-llama2_xs_460M_experimental_evol_instruct');
+ const output = await generator('Once upon a time,', { add_special_tokens: true, max_new_tokens: 60, repetition_penalty: 1.2 });
+ console.log(output);
+ ```
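+
+ As with the Python pipeline, the output should be an array of generated sequences, e.g. `[{ generated_text: '...' }]`.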
+
+ #### Auto Classes
+
+ ```js
+ import { AutoModelForCausalLM, AutoTokenizer } from '@xenova/transformers';
+
+ const model_path = 'Felladrin/onnx-int8-llama2_xs_460M_experimental_evol_instruct';
+ const model = await AutoModelForCausalLM.from_pretrained(model_path);
+ const tokenizer = await AutoTokenizer.from_pretrained(model_path);
+
+ const prompt = 'Once upon a time,';
+ const { input_ids } = tokenizer(prompt);
+ const tokens = await model.generate(input_ids, { max_new_tokens: 60, repetition_penalty: 1.2 });
+ console.log(tokenizer.decode(tokens[0], { skip_special_tokens: true }));
+ ```
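+
+ Note that, as in the Python API, `model.generate` returns the full token sequence, so the decoded string typically starts with the original prompt followed by the generated continuation.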