michaelfeil committed
Commit • 7d66d2f
1 Parent(s): a3ae4db

Upload Salesforce/codegen-2B-mono ctranslate fp16 weights
- .gitattributes +2 -8
- README.md +108 -0
- added_tokens.json +1 -0
- config.json +5 -0
- merges.txt +0 -0
- model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
- vocabulary.txt +0 -0
.gitattributes
CHANGED
```diff
@@ -1,28 +1,22 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
@@ -30,5 +24,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
-*.
+*.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
```
README.md
ADDED
---
tags:
- ctranslate2
- int8
- float16

license: bsd-3-clause
---
# Fast Inference with CTranslate2

Speed up inference while reducing memory by 2x-4x using int8 inference in C++ on CPU or GPU.

Quantized version of [Salesforce/codegen-2B-mono](https://huggingface.co/Salesforce/codegen-2B-mono).

```bash
pip install hf-hub-ctranslate2>=2.0.8
```

Converted on 2023-05-21 using:

```bash
ct2-transformers-converter --model Salesforce/codegen-2B-mono --output_dir /home/michael/tmp-ct2fast-codegen-2B-mono --force --copy_files merges.txt tokenizer.json README.md tokenizer_config.json vocab.json special_tokens_map.json added_tokens.json .gitattributes --quantization float16
```
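
The same conversion can also be scripted through CTranslate2's Python converter API. A minimal sketch, assuming `ctranslate2>=3.13` is installed; the output directory name is illustrative:

```python
# Sketch: programmatic equivalent of the CLI conversion above.
# Assumes ctranslate2>=3.13; the output directory name is illustrative.
from ctranslate2.converters import TransformersConverter

converter = TransformersConverter(
    "Salesforce/codegen-2B-mono",
    copy_files=["merges.txt", "tokenizer.json", "tokenizer_config.json",
                "vocab.json", "special_tokens_map.json", "added_tokens.json"],
)
# --quantization float16 on the CLI maps to the quantization argument here.
converter.convert("ct2fast-codegen-2B-mono", quantization="float16", force=True)
```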

Checkpoint compatible with [ctranslate2>=3.13.0](https://github.com/OpenNMT/CTranslate2) and [hf-hub-ctranslate2>=2.0.6](https://github.com/michaelfeil/hf-hub-ctranslate2):

- `compute_type=int8_float16` for `device="cuda"`
- `compute_type=int8` for `device="cpu"`

```python
from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub
from transformers import AutoTokenizer

model_name = "michaelfeil/ct2fast-codegen-2B-mono"
# Use either TranslatorCT2fromHfHub or GeneratorCT2fromHfHub here, depending on the model.
model = GeneratorCT2fromHfHub(
    # load in int8_float16 on CUDA
    model_name_or_path=model_name,
    device="cuda",
    compute_type="int8_float16",
    # tokenizer=AutoTokenizer.from_pretrained("Salesforce/codegen-2B-mono")
)
outputs = model.generate(
    text=["def print_hello_world():", "def hello_name(name:"],
    max_length=64,
)
print(outputs)
```
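
For CPU-only inference, the compute-type list above pairs `compute_type="int8"` with `device="cpu"`. A minimal sketch of the same loader on CPU (the prompt is illustrative; not separately benchmarked here):

```python
# Sketch: CPU variant of the example above, per the compute-type list
# (int8 on device="cpu"); the prompt is illustrative.
from hf_hub_ctranslate2 import GeneratorCT2fromHfHub

model_cpu = GeneratorCT2fromHfHub(
    model_name_or_path="michaelfeil/ct2fast-codegen-2B-mono",
    device="cpu",
    compute_type="int8",
)
print(model_cpu.generate(text=["def fibonacci(n):"], max_length=64))
```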

# Licence and other remarks:

This is just a quantized version. Licence conditions are intended to be identical to those of the original Hugging Face repo.

# Original description

# CodeGen (CodeGen-Mono 2B)

## Model description

CodeGen is a family of autoregressive language models for **program synthesis** from the paper: [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. The models were originally released in [this repository](https://github.com/salesforce/CodeGen), under 3 pre-training data variants (`NL`, `Multi`, `Mono`) and 4 model size variants (`350M`, `2B`, `6B`, `16B`).

The checkpoint included in this repository is denoted as **CodeGen-Mono 2B** in the paper, where "Mono" means the model is initialized with *CodeGen-Multi 2B* and further pre-trained on a Python programming language dataset, and "2B" refers to the number of trainable parameters.

## Training data

This checkpoint (CodeGen-Mono 2B) was first initialized with *CodeGen-Multi 2B*, and then pre-trained on the BigPython dataset. The data consists of 71.7B tokens of Python programming language. See Section 2.1 of the [paper](https://arxiv.org/abs/2203.13474) for more details.

## Training procedure

CodeGen was trained using cross-entropy loss to maximize the likelihood of sequential inputs.
The family of models was trained using multiple TPU-v4-512 by Google, leveraging data and model parallelism.
See Section 2.3 of the [paper](https://arxiv.org/abs/2203.13474) for more details.

## Evaluation results

We evaluate our models on two code generation benchmarks: HumanEval and MTPB. Please refer to the [paper](https://arxiv.org/abs/2203.13474) for more details.

## Intended Use and Limitations

As an autoregressive language model, CodeGen is capable of extracting features from given natural language and programming language texts, and calculating their likelihood.
However, the model is intended for, and best at, **program synthesis**: generating executable code given English prompts, where the prompts should be in the form of a comment string. The model can complete partially-generated code as well.

## How to use

This model can be easily loaded using the `AutoModelForCausalLM` functionality:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-2B-mono")
model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-2B-mono")

text = "def hello_world():"
input_ids = tokenizer(text, return_tensors="pt").input_ids

generated_ids = model.generate(input_ids, max_length=128)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
```
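
As noted under intended use, prompts work best as comment strings. A minimal sketch extending the example above; the prompt text is illustrative:

```python
# Sketch: program synthesis from an English comment-string prompt,
# reusing the tokenizer and model loaded above; the prompt is illustrative.
prompt = "# write a function that computes the sum of squares of a list\n"
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
generated_ids = model.generate(input_ids, max_length=128)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
```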

## BibTeX entry and citation info

```bibtex
@article{Nijkamp2022ACP,
  title={A Conversational Paradigm for Program Synthesis},
  author={Nijkamp, Erik and Pang, Bo and Hayashi, Hiroaki and Tu, Lifu and Wang, Huan and Zhou, Yingbo and Savarese, Silvio and Xiong, Caiming},
  journal={arXiv preprint},
  year={2022}
}
```
added_tokens.json
ADDED
{" ": 50280, " ": 50284, " ": 50262, " ": 50266, "\t\t\t\t\t\t\t": 50289, " ": 50264, " ": 50279, " ": 50281, "\t\t\t\t\t\t\t\t\t": 50287, " ": 50286, "\t\t\t": 50293, " ": 50261, " ": 50282, " ": 50283, " ": 50269, " ": 50273, " ": 50271, "\t\t\t\t\t\t\t\t": 50288, " ": 50285, " ": 50276, "\t\t\t\t\t\t": 50290, "\t\t\t\t\t": 50291, " ": 50263, " ": 50278, " ": 50258, " ": 50270, " ": 50259, " ": 50272, " ": 50274, " ": 50267, " ": 50268, "\t\t": 50294, " ": 50257, " ": 50277, "\t\t\t\t": 50292, " ": 50260, " ": 50265, " ": 50275}
config.json
ADDED
{
  "bos_token": "<|endoftext|>",
  "eos_token": "<|endoftext|>",
  "unk_token": "<|endoftext|>"
}
merges.txt
ADDED
The diff for this file is too large to render.
model.bin
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:22ecbeadde7ad1fae48c1f07006ca51df4aba43bbdb56278e2343f89ca3577ae
size 5558730208
special_tokens_map.json
ADDED
{"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
{"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 2048, "special_tokens_map_file": null, "name_or_path": "gpt2", "tokenizer_class": "CodeGenTokenizer"}
vocab.json
ADDED
The diff for this file is too large to render.
vocabulary.txt
ADDED
The diff for this file is too large to render.