{
	"_name_or_path": "./llama-7b-hf",
	"architectures": ["LlamaForCausalLM"],
	"bos_token_id": 0,
	"eos_token_id": 1,
	"hidden_act": "silu",
	"hidden_size": 4096,
	"initializer_range": 0.02,
	"intermediate_size": 11008,
	"max_sequence_length": 2048,
	"model_type": "llama",
	"num_attention_heads": 32,
	"num_hidden_layers": 32,
	"pad_token_id": -1,
	"rms_norm_eps": 1e-6,
	"tie_word_embeddings": false,
	"torch_dtype": "float32",
	"transformers_version": "4.28.0.dev0",
	"use_cache": true,
	"vocab_size": 32001,
	"quantization_config": {
		"bits": 4,
		"group_size": 128,
		"damp_percent": 0.01,
		"desc_act": false,
		"sym": true,
		"true_sequential": true,
		"model_name_or_path": null,
		"model_file_base_name": "model",
		"quant_method": "gptq"
	}
}
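
For reference, a minimal sketch of how a checkpoint carrying this config and its embedded GPTQ quantization_config might be loaded for inference with the Hugging Face transformers library. The local path ./llama-7b-gptq is a placeholder, and recent versions of the transformers, accelerate, optimum, and auto-gptq packages are assumed to be installed; this is illustrative usage, not part of the file above.

from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder directory assumed to contain this config.json plus the
# 4-bit GPTQ-quantized weights (e.g. model.safetensors).
model_dir = "./llama-7b-gptq"

tokenizer = AutoTokenizer.from_pretrained(model_dir)

# transformers reads the quantization_config block (quant_method "gptq",
# bits 4, group_size 128) from config.json and loads the quantized weights;
# the GPTQ kernels dequantize them on the fly during the forward pass.
model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto")

prompt = "The LLaMA architecture uses"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))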