danielhanchen committed
Commit c267bc2 · verified · 1 Parent(s): 82fed4e

Add files using upload-large-folder tool

Files changed (2)
  1. config.json +21 -1
  2. generation_config.json +1 -1
config.json CHANGED
@@ -17,12 +17,32 @@
  "num_hidden_layers": 40,
  "num_key_value_heads": 8,
  "pad_token_id": 11,
+ "quantization_config": {
+   "_load_in_4bit": true,
+   "_load_in_8bit": false,
+   "bnb_4bit_compute_dtype": "bfloat16",
+   "bnb_4bit_quant_storage": "uint8",
+   "bnb_4bit_quant_type": "nf4",
+   "bnb_4bit_use_double_quant": true,
+   "llm_int8_enable_fp32_cpu_offload": false,
+   "llm_int8_has_fp16_weight": false,
+   "llm_int8_skip_modules": [
+     "lm_head",
+     "multi_modal_projector",
+     "merger",
+     "modality_projection"
+   ],
+   "llm_int8_threshold": 6.0,
+   "load_in_4bit": true,
+   "load_in_8bit": false,
+   "quant_method": "bitsandbytes"
+ },
  "rms_norm_eps": 1e-05,
  "rope_theta": 100000000.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
- "transformers_version": "4.48.2",
+ "transformers_version": "4.49.0.dev0",
  "unsloth_fixed": true,
  "use_cache": true,
  "vocab_size": 131072
generation_config.json CHANGED
@@ -6,5 +6,5 @@
  "max_length": 32768,
  "pad_token_id": 11,
  "temperature": 0.15,
- "transformers_version": "4.48.2"
+ "transformers_version": "4.49.0.dev0"
 }
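
The defaults in generation_config.json (max_length 32768, temperature 0.15, pad_token_id 11) are picked up automatically by model.generate unless overridden per call. A minimal sketch, again with a placeholder repo id:

```python
from transformers import GenerationConfig

# Inspect the generation defaults shipped with the checkpoint.
gen_config = GenerationConfig.from_pretrained("your-org/your-model")
print(gen_config.max_length, gen_config.temperature, gen_config.pad_token_id)

# model.generate(**inputs, do_sample=True) would use these values unless
# overridden; temperature only takes effect when sampling is enabled.
```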