DongfuJiang committed
Commit b8e7374 · 1 Parent(s): e096a5f

Training in progress, step 400

README.md ADDED
@@ -0,0 +1,70 @@
+ ---
+ license: other
+ base_model: microsoft/Phi-3-mini-128k-instruct
+ tags:
+ - llama-factory
+ - full
+ - generated_from_trainer
+ model-index:
+ - name: sft
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/dongfu/huggingface/runs/4r5mtf1q)
+ # sft
+
+ This model is a fine-tuned version of [microsoft/Phi-3-mini-128k-instruct](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) on the pairrm-data dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4639
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 128
+ - total_eval_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.05
+ - num_epochs: 2
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:------:|:----:|:---------------:|
+ | 0.4483 | 0.3469 | 500 | 0.4420 |
+ | 0.453 | 0.6938 | 1000 | 0.4246 |
+ | 0.2827 | 1.0407 | 1500 | 0.4557 |
+ | 0.3098 | 1.3876 | 2000 | 0.4560 |
+ | 0.2522 | 1.7345 | 2500 | 0.4652 |
+
+
+ ### Framework versions
+
+ - Transformers 4.43.1
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.20.0
+ - Tokenizers 0.19.1
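
Editor's note: the hyperparameters above imply an effective batch size of 1 (per device) × 16 (gradient-accumulation steps) × 8 (GPUs) = 128, which matches the reported total_train_batch_size. Because the repo ships its own Phi-3 configuration and modeling files, loading the full checkpoint needs `trust_remote_code=True`. The sketch below is a minimal, illustrative example only; the repo id is a placeholder assumption, not something defined in this commit.

```python
# Minimal loading sketch (illustrative only; "your-org/sft" is a placeholder repo id).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-org/sft"  # assumption: replace with the actual Hub repo id or a local path
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,   # config.json stores the weights in bfloat16
    trust_remote_code=True,       # picks up the bundled configuration_phi3.py / modeling_phi3.py
)
print(f"{sum(p.numel() for p in model.parameters()) / 1e9:.2f}B parameters")
```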
adapter_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "microsoft/Phi-3-mini-128k-instruct",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_up_proj",
+ "qkv_proj",
+ "o_proj",
+ "down_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
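
Editor's note: adapter_config.json describes a LoRA adapter (r=8, lora_alpha=16) over qkv_proj, o_proj, gate_up_proj and down_proj, which pairs with adapter_model.safetensors below. A hedged sketch of attaching it with `peft` follows; the adapter path is a placeholder assumption.

```python
# Illustrative only: attach the LoRA adapter described above to the base Phi-3 model.
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-128k-instruct",
    trust_remote_code=True,
)
# "path/to/adapter" is a placeholder for the directory (or Hub repo) holding
# adapter_config.json and adapter_model.safetensors.
model = PeftModel.from_pretrained(base, "path/to/adapter")
model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights
```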
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54f5f419c1266309c1e7823b7e7fde2909dd8355fec825493980b66b14bd687d
+ size 50365768
added_tokens.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "<|assistant|>": 32001,
+ "<|endoftext|>": 32000,
+ "<|end|>": 32007,
+ "<|placeholder1|>": 32002,
+ "<|placeholder2|>": 32003,
+ "<|placeholder3|>": 32004,
+ "<|placeholder4|>": 32005,
+ "<|placeholder5|>": 32008,
+ "<|placeholder6|>": 32009,
+ "<|system|>": 32006,
+ "<|user|>": 32010
+ }
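
Editor's note: added_tokens.json registers Phi-3's chat-control tokens at ids 32000 to 32010. A quick, illustrative check that the tokenizer resolves them to the ids listed above:

```python
# Illustrative sanity check of the special-token ids listed in added_tokens.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct")
for token in ["<|endoftext|>", "<|assistant|>", "<|system|>", "<|end|>", "<|user|>"]:
    print(token, tok.convert_tokens_to_ids(token))
# Expected ids: 32000, 32001, 32006, 32007, 32010
```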
all_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "epoch": 1.9994796635157401,
+ "eval_loss": 0.4639037847518921,
+ "eval_runtime": 767.9829,
+ "eval_samples_per_second": 12.644,
+ "eval_steps_per_second": 1.581,
+ "total_flos": 420396754255872.0,
+ "train_loss": 0.3626429720482505,
+ "train_runtime": 106139.7498,
+ "train_samples_per_second": 3.476,
+ "train_steps_per_second": 0.027
+ }
config.json ADDED
@@ -0,0 +1,139 @@
1
+ {
2
+ "_name_or_path": "microsoft/Phi-3-mini-128k-instruct",
3
+ "architectures": [
4
+ "Phi3ForCausalLM"
5
+ ],
6
+ "attention_bias": false,
7
+ "attention_dropout": 0.0,
8
+ "auto_map": {
9
+ "AutoConfig": "configuration_phi3.Phi3Config",
10
+ "AutoModel": "modeling_phi3.Phi3ForCausalLM",
11
+ "AutoModelForCausalLM": "modeling_phi3.Phi3ForCausalLM"
12
+ },
13
+ "bos_token_id": 1,
14
+ "embd_pdrop": 0.0,
15
+ "eos_token_id": 32000,
16
+ "hidden_act": "silu",
17
+ "hidden_size": 3072,
18
+ "initializer_range": 0.02,
19
+ "intermediate_size": 8192,
20
+ "max_position_embeddings": 131072,
21
+ "model_type": "phi3",
22
+ "num_attention_heads": 32,
23
+ "num_hidden_layers": 32,
24
+ "num_key_value_heads": 32,
25
+ "original_max_position_embeddings": 4096,
26
+ "pad_token_id": 32000,
27
+ "resid_pdrop": 0.0,
28
+ "rms_norm_eps": 1e-05,
29
+ "rope_scaling": {
30
+ "long_factor": [
31
+ 1.0700000524520874,
32
+ 1.1200000047683716,
33
+ 1.149999976158142,
34
+ 1.4199999570846558,
35
+ 1.5699999332427979,
36
+ 1.7999999523162842,
37
+ 2.129999876022339,
38
+ 2.129999876022339,
39
+ 3.009999990463257,
40
+ 5.910000324249268,
41
+ 6.950000286102295,
42
+ 9.070000648498535,
43
+ 9.930000305175781,
44
+ 10.710000038146973,
45
+ 11.130000114440918,
46
+ 14.609999656677246,
47
+ 15.409998893737793,
48
+ 19.809999465942383,
49
+ 37.279998779296875,
50
+ 38.279998779296875,
51
+ 38.599998474121094,
52
+ 40.12000274658203,
53
+ 46.20000457763672,
54
+ 50.940006256103516,
55
+ 53.66000747680664,
56
+ 54.9373893737793,
57
+ 56.89738845825195,
58
+ 57.28738784790039,
59
+ 59.98738479614258,
60
+ 60.86738586425781,
61
+ 60.887386322021484,
62
+ 61.71739196777344,
63
+ 62.91739273071289,
64
+ 62.957393646240234,
65
+ 63.41739273071289,
66
+ 63.8173942565918,
67
+ 63.83739471435547,
68
+ 63.897396087646484,
69
+ 63.93739700317383,
70
+ 64.06739807128906,
71
+ 64.11434936523438,
72
+ 64.12435150146484,
73
+ 64.15435028076172,
74
+ 64.19435119628906,
75
+ 64.24435424804688,
76
+ 64.57435607910156,
77
+ 64.69000244140625,
78
+ 64.76000213623047
79
+ ],
80
+ "short_factor": [
81
+ 1.1,
82
+ 1.1,
83
+ 1.1,
84
+ 1.3000000000000003,
85
+ 1.3500000000000003,
86
+ 1.3500000000000003,
87
+ 1.4000000000000004,
88
+ 1.5500000000000005,
89
+ 2.000000000000001,
90
+ 2.000000000000001,
91
+ 2.000000000000001,
92
+ 2.000000000000001,
93
+ 2.000000000000001,
94
+ 2.000000000000001,
95
+ 2.000000000000001,
96
+ 2.000000000000001,
97
+ 2.000000000000001,
98
+ 2.000000000000001,
99
+ 2.000000000000001,
100
+ 2.000000000000001,
101
+ 2.000000000000001,
102
+ 2.000000000000001,
103
+ 2.000000000000001,
104
+ 2.000000000000001,
105
+ 2.000000000000001,
106
+ 2.0500000000000007,
107
+ 2.0500000000000007,
108
+ 2.0500000000000007,
109
+ 2.0500000000000007,
110
+ 2.0500000000000007,
111
+ 2.0500000000000007,
112
+ 2.1000000000000005,
113
+ 2.1000000000000005,
114
+ 2.1500000000000004,
115
+ 2.25,
116
+ 2.25,
117
+ 2.25,
118
+ 2.25,
119
+ 2.25,
120
+ 2.3999999999999995,
121
+ 2.4499999999999993,
122
+ 2.499999999999999,
123
+ 2.6999999999999984,
124
+ 2.6999999999999984,
125
+ 2.7499999999999982,
126
+ 2.799999999999998,
127
+ 2.8999999999999977,
128
+ 3.049999999999997
129
+ ],
130
+ "type": "longrope"
131
+ },
132
+ "rope_theta": 10000.0,
133
+ "sliding_window": 262144,
134
+ "tie_word_embeddings": false,
135
+ "torch_dtype": "bfloat16",
136
+ "transformers_version": "4.43.1",
137
+ "use_cache": false,
138
+ "vocab_size": 32064
139
+ }
configuration_phi3.py ADDED
@@ -0,0 +1,227 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """ Phi-3 model configuration"""
17
+
18
+
19
+ from transformers.configuration_utils import PretrainedConfig
20
+ from transformers.utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+ PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
26
+ "microsoft/Phi-3-mini-4k-instruct": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/config.json",
27
+ "microsoft/Phi-3-mini-128k-instruct": "https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/config.json",
28
+ }
29
+
30
+
31
+ class Phi3Config(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
34
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
35
+ defaults will yield a similar configuration to that of the
36
+ [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 32064):
43
+ Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
44
+ `inputs_ids` passed when calling [`Phi3Model`].
45
+ hidden_size (`int`, *optional*, defaults to 3072):
46
+ Dimension of the hidden representations.
47
+ intermediate_size (`int`, *optional*, defaults to 8192):
48
+ Dimension of the MLP representations.
49
+ num_hidden_layers (`int`, *optional*, defaults to 32):
50
+ Number of hidden layers in the Transformer decoder.
51
+ num_attention_heads (`int`, *optional*, defaults to 32):
52
+ Number of attention heads for each attention layer in the Transformer decoder.
53
+ num_key_value_heads (`int`, *optional*):
54
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
55
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
56
+ `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
57
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
58
+ by meanpooling all the original heads within that group. For more details checkout [this
59
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
60
+ `num_attention_heads`.
61
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
62
+ Dropout probability for mlp outputs.
63
+ embd_pdrop (`int`, *optional*, defaults to 0.0):
64
+ The dropout ratio for the embeddings.
65
+ attention_dropout (`float`, *optional*, defaults to 0.0):
66
+ The dropout ratio after computing the attention scores.
67
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
68
+ The non-linear activation function (function or string) in the decoder.
69
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
70
+ The maximum sequence length that this model might ever be used with.
71
+ original_max_position_embeddings (`int`, *optional*, defaults to 4096):
72
+ The maximum sequence length that this model was trained with. This is used to determine the size of the
73
+ original RoPE embeddings when using long scaling.
74
+ initializer_range (`float`, *optional*, defaults to 0.02):
75
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
76
+ rms_norm_eps (`float`, *optional*, defaults to 1e-05):
77
+ The epsilon value used for the RMSNorm.
78
+ use_cache (`bool`, *optional*, defaults to `True`):
79
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
80
+ relevant if `config.is_decoder=True`. Whether to tie weight embeddings or not.
81
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
82
+ Whether to tie weight embeddings
83
+ rope_theta (`float`, *optional*, defaults to 10000.0):
84
+ The base period of the RoPE embeddings.
85
+ rope_scaling (`dict`, *optional*):
86
+ The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
87
+ contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be `longrope` and
88
+ the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
89
+ divided by the number of attention heads divided by 2.
90
+ bos_token_id (`int`, *optional*, defaults to 1):
91
+ The id of the "beginning-of-sequence" token.
92
+ eos_token_id (`int`, *optional*, defaults to 32000):
93
+ The id of the "end-of-sequence" token.
94
+ pad_token_id (`int`, *optional*, defaults to 32000):
95
+ The id of the padding token.
96
+ sliding_window (`int`, *optional*):
97
+ Sliding window attention window size. If `None`, no sliding window is applied.
98
+
99
+ Example:
100
+
101
+ ```python
102
+ >>> from transformers import Phi3Model, Phi3Config
103
+
104
+ >>> # Initializing a Phi-3 style configuration
105
+ >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
106
+
107
+ >>> # Initializing a model from the configuration
108
+ >>> model = Phi3Model(configuration)
109
+
110
+ >>> # Accessing the model configuration
111
+ >>> configuration = model.config
112
+ ```"""
113
+
114
+ model_type = "phi3"
115
+ keys_to_ignore_at_inference = ["past_key_values"]
116
+
117
+ def __init__(
118
+ self,
119
+ vocab_size=32064,
120
+ hidden_size=3072,
121
+ intermediate_size=8192,
122
+ num_hidden_layers=32,
123
+ num_attention_heads=32,
124
+ num_key_value_heads=None,
125
+ resid_pdrop=0.0,
126
+ embd_pdrop=0.0,
127
+ attention_dropout=0.0,
128
+ hidden_act="silu",
129
+ max_position_embeddings=4096,
130
+ original_max_position_embeddings=4096,
131
+ initializer_range=0.02,
132
+ rms_norm_eps=1e-5,
133
+ use_cache=True,
134
+ tie_word_embeddings=False,
135
+ rope_theta=10000.0,
136
+ rope_scaling=None,
137
+ bos_token_id=1,
138
+ eos_token_id=32000,
139
+ pad_token_id=32000,
140
+ sliding_window=None,
141
+ **kwargs,
142
+ ):
143
+ self.vocab_size = vocab_size
144
+ self.hidden_size = hidden_size
145
+ self.intermediate_size = intermediate_size
146
+ self.num_hidden_layers = num_hidden_layers
147
+ self.num_attention_heads = num_attention_heads
148
+
149
+ if num_key_value_heads is None:
150
+ num_key_value_heads = num_attention_heads
151
+
152
+ self.num_key_value_heads = num_key_value_heads
153
+ self.resid_pdrop = resid_pdrop
154
+ self.embd_pdrop = embd_pdrop
155
+ self.attention_dropout = attention_dropout
156
+ self.hidden_act = hidden_act
157
+ self.max_position_embeddings = max_position_embeddings
158
+ self.original_max_position_embeddings = original_max_position_embeddings
159
+ self.initializer_range = initializer_range
160
+ self.rms_norm_eps = rms_norm_eps
161
+ self.use_cache = use_cache
162
+ self.rope_theta = rope_theta
163
+ self.rope_scaling = rope_scaling
164
+ self._rope_scaling_adjustment()
165
+ self._rope_scaling_validation()
166
+ self.sliding_window = sliding_window
167
+
168
+ super().__init__(
169
+ bos_token_id=bos_token_id,
170
+ eos_token_id=eos_token_id,
171
+ pad_token_id=pad_token_id,
172
+ tie_word_embeddings=tie_word_embeddings,
173
+ **kwargs,
174
+ )
175
+
176
+ def _rope_scaling_adjustment(self):
177
+ """
178
+ Adjust the `type` of the `rope_scaling` configuration for backward compatibility.
179
+ """
180
+ if self.rope_scaling is None:
181
+ return
182
+
183
+ rope_scaling_type = self.rope_scaling.get("type", None)
184
+
185
+ # For backward compatibility if previous version used "su" or "yarn"
186
+ if rope_scaling_type is not None and rope_scaling_type in ["su", "yarn"]:
187
+ self.rope_scaling["type"] = "longrope"
188
+
189
+ def _rope_scaling_validation(self):
190
+ """
191
+ Validate the `rope_scaling` configuration.
192
+ """
193
+ if self.rope_scaling is None:
194
+ return
195
+
196
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
197
+ raise ValueError(
198
+ "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, "
199
+ f"got {self.rope_scaling}"
200
+ )
201
+ rope_scaling_type = self.rope_scaling.get("type", None)
202
+ rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
203
+ rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
204
+ if rope_scaling_type is None or rope_scaling_type not in ["longrope"]:
205
+ raise ValueError(f"`rope_scaling`'s type field must be one of ['longrope'], got {rope_scaling_type}")
206
+ if not (
207
+ isinstance(rope_scaling_short_factor, list)
208
+ and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
209
+ ):
210
+ raise ValueError(
211
+ f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
212
+ )
213
+ if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2:
214
+ raise ValueError(
215
+ f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}"
216
+ )
217
+ if not (
218
+ isinstance(rope_scaling_long_factor, list)
219
+ and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
220
+ ):
221
+ raise ValueError(
222
+ f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
223
+ )
224
+ if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2:
225
+ raise ValueError(
226
+ f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}"
227
+ )
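
Editor's note: `_rope_scaling_validation` above requires `short_factor` and `long_factor` to have length `hidden_size // num_attention_heads // 2`. For this checkpoint that is 3072 // 32 // 2 = 48, exactly the number of entries in each factor list in config.json. A small, illustrative restatement of that check (not part of the committed file):

```python
# Illustrative restatement of the length rule enforced by _rope_scaling_validation.
hidden_size = 3072
num_attention_heads = 32
expected_len = hidden_size // num_attention_heads // 2  # half the per-head rotary dimension
assert expected_len == 48

def check_rope_scaling(rope_scaling: dict) -> None:
    # Mirrors the bundled validation: "longrope" type plus two factor lists of the expected length.
    assert rope_scaling["type"] == "longrope"
    assert len(rope_scaling["short_factor"]) == expected_len
    assert len(rope_scaling["long_factor"]) == expected_len
```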
eval_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "epoch": 1.9994796635157401,
+ "eval_loss": 0.4639037847518921,
+ "eval_runtime": 767.9829,
+ "eval_samples_per_second": 12.644,
+ "eval_steps_per_second": 1.581
+ }
generation_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": [
+ 32000,
+ 32001,
+ 32007
+ ],
+ "pad_token_id": 32000,
+ "transformers_version": "4.43.1"
+ }
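
Editor's note: because eos_token_id is a list, generation stops on whichever of <|endoftext|> (32000), <|assistant|> (32001) or <|end|> (32007) appears first. A hedged usage sketch, again with a placeholder repo id:

```python
# Illustrative: generation halts on any of the three EOS ids from generation_config.json.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-org/sft"  # placeholder, as in the earlier loading sketch
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16, trust_remote_code=True)

prompt = "<|user|>\nSummarize LoRA in one sentence.<|end|>\n<|assistant|>\n"
output_ids = model.generate(
    **tokenizer(prompt, return_tensors="pt"),
    max_new_tokens=64,
    eos_token_id=[32000, 32001, 32007],  # already set by generation_config.json; shown explicitly here
    pad_token_id=32000,
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```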
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e51913b5d4b2c5b9f556692dde82a887ccf41ed386680e92d00e1589ed61b0b
+ size 4972489328
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff6b8aa0650a11fcfab7d2f27cacc25b029ff1dd10a83ff74e8dd2e04476c9c7
+ size 2669692552
model.safetensors.index.json ADDED
@@ -0,0 +1,202 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 7642159104
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "model-00002-of-00002.safetensors",
7
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
8
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
9
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
10
+ "model.layers.0.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
11
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
12
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
13
+ "model.layers.0.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
14
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
15
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
16
+ "model.layers.1.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
17
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
18
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
19
+ "model.layers.1.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
20
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
21
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
22
+ "model.layers.10.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
23
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
24
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
25
+ "model.layers.10.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
26
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
27
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
28
+ "model.layers.11.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
29
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
30
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
31
+ "model.layers.11.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
32
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
33
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
34
+ "model.layers.12.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
35
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
36
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
37
+ "model.layers.12.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
38
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
39
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
40
+ "model.layers.13.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
41
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
42
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
43
+ "model.layers.13.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
44
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
45
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
46
+ "model.layers.14.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
47
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
48
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
49
+ "model.layers.14.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
50
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
51
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
52
+ "model.layers.15.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
53
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
54
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
55
+ "model.layers.15.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
56
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
57
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
58
+ "model.layers.16.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
59
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
60
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
61
+ "model.layers.16.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
62
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
63
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
64
+ "model.layers.17.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
65
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
66
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
67
+ "model.layers.17.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
68
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
69
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
70
+ "model.layers.18.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
71
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
72
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
73
+ "model.layers.18.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
74
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
75
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
76
+ "model.layers.19.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
77
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
78
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
79
+ "model.layers.19.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
80
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
81
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
82
+ "model.layers.2.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
83
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
84
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
85
+ "model.layers.2.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
86
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
87
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
88
+ "model.layers.20.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
89
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
90
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
91
+ "model.layers.20.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
92
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
93
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
94
+ "model.layers.21.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
95
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
96
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
97
+ "model.layers.21.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
98
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
99
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
100
+ "model.layers.22.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
101
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
102
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
103
+ "model.layers.22.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
104
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
105
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
106
+ "model.layers.23.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
107
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
108
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
109
+ "model.layers.23.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
110
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
111
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
112
+ "model.layers.24.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
113
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
114
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
115
+ "model.layers.24.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
116
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
117
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
118
+ "model.layers.25.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
119
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
120
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
121
+ "model.layers.25.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
122
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
123
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
124
+ "model.layers.26.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
125
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
126
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
127
+ "model.layers.26.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
128
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
129
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
130
+ "model.layers.27.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
131
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
132
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
133
+ "model.layers.27.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
134
+ "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
135
+ "model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
136
+ "model.layers.28.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
137
+ "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
138
+ "model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
139
+ "model.layers.28.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
140
+ "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
141
+ "model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
142
+ "model.layers.29.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
143
+ "model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
144
+ "model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
145
+ "model.layers.29.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
146
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
147
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
148
+ "model.layers.3.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
149
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
150
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
151
+ "model.layers.3.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
152
+ "model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
153
+ "model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
154
+ "model.layers.30.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
155
+ "model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
156
+ "model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
157
+ "model.layers.30.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
158
+ "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
159
+ "model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
160
+ "model.layers.31.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
161
+ "model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
162
+ "model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
163
+ "model.layers.31.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
164
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
165
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
166
+ "model.layers.4.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
167
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
168
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
169
+ "model.layers.4.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
170
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
171
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
172
+ "model.layers.5.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
173
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
174
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
175
+ "model.layers.5.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
176
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
177
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
178
+ "model.layers.6.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
179
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
180
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
181
+ "model.layers.6.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
182
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
183
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
184
+ "model.layers.7.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
185
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
186
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
187
+ "model.layers.7.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
188
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
189
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
190
+ "model.layers.8.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
191
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
192
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
193
+ "model.layers.8.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
194
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
195
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
196
+ "model.layers.9.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
197
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
198
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
199
+ "model.layers.9.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
200
+ "model.norm.weight": "model-00002-of-00002.safetensors"
201
+ }
202
+ }
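
Editor's note: the index maps every tensor name to one of the two shards (total_size 7,642,159,104 bytes, roughly 3.8B bfloat16 parameters). A hedged sketch of using it to read a single tensor with `safetensors`, assuming the shards are already on disk:

```python
# Illustrative: resolve a tensor to its shard via the index, then read just that tensor.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.0.self_attn.qkv_proj.weight"
shard_file = index["weight_map"][name]            # e.g. "model-00001-of-00002.safetensors"
with safe_open(shard_file, framework="pt") as shard:
    tensor = shard.get_tensor(name)
print(name, tuple(tensor.shape), tensor.dtype)
```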
modeling_phi3.py ADDED
@@ -0,0 +1,1563 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """ PyTorch Phi-3 model."""
17
+
18
+ import inspect
19
+ import math
20
+ import warnings
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from transformers.activations import ACT2FN
30
+ from transformers.cache_utils import Cache, DynamicCache
31
+ from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
32
+ from transformers.modeling_outputs import (
33
+ BaseModelOutputWithPast,
34
+ CausalLMOutputWithPast,
35
+ SequenceClassifierOutputWithPast,
36
+ TokenClassifierOutput,
37
+ )
38
+ from transformers.modeling_utils import PreTrainedModel
39
+ from transformers.utils import (
40
+ add_code_sample_docstrings,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ is_flash_attn_2_available,
44
+ is_flash_attn_greater_or_equal_2_10,
45
+ logging,
46
+ replace_return_docstrings,
47
+ )
48
+ from .configuration_phi3 import Phi3Config
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ # Transformers scans dependencies in the modeling file, causing issues on conditional loading. The regex only ignores try/catch blocks, but not if statements
54
+ # if is_flash_attn_2_available():
55
+ _flash_supports_window_size = False
56
+ try:
57
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
58
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
59
+
60
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
61
+ except ImportError as error:
62
+ logger.warning(
63
+ f"`flash-attention` package not found, consider installing for better performance: {error}."
64
+ )
65
+ if not _flash_supports_window_size:
66
+ logger.warning(
67
+ "Current `flash-attention` does not support `window_size`. Either upgrade or use `attn_implementation='eager'`."
68
+ )
69
+
70
+ _CHECKPOINT_FOR_DOC = "microsoft/Phi-3-mini-4k-instruct"
71
+ _CONFIG_FOR_DOC = "Phi3Config"
72
+
73
+ PHI3_PRETRAINED_MODEL_ARCHIVE_LIST = [
74
+ "microsoft/Phi-3-mini-4k-instruct",
75
+ "microsoft/Phi-3-mini-128k-instruct",
76
+ # See all Phi-3 models at https://huggingface.co/models?filter=Phi-3
77
+ ]
78
+
79
+
80
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Phi3
81
+ class Phi3RMSNorm(nn.Module):
82
+ def __init__(self, hidden_size, eps=1e-6):
83
+ """
84
+ Phi3RMSNorm is equivalent to T5LayerNorm
85
+ """
86
+ super().__init__()
87
+ self.weight = nn.Parameter(torch.ones(hidden_size))
88
+ self.variance_epsilon = eps
89
+
90
+ def forward(self, hidden_states):
91
+ input_dtype = hidden_states.dtype
92
+ hidden_states = hidden_states.to(torch.float32)
93
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
94
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
95
+ return self.weight * hidden_states.to(input_dtype)
96
+
97
+
98
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
99
+ def _get_unpad_data(attention_mask):
100
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
101
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
102
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
103
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
104
+ return (
105
+ indices,
106
+ cu_seqlens,
107
+ max_seqlen_in_batch,
108
+ )
109
+
110
+
111
+ # Copied from transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding with gemma->phi3, Gemma->Phi3
112
+ class Phi3RotaryEmbedding(nn.Module):
113
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
114
+ super().__init__()
115
+
116
+ self.dim = dim
117
+ self.max_position_embeddings = max_position_embeddings
118
+ self.base = base
119
+ self.register_buffer("inv_freq", None, persistent=False)
120
+
121
+ @torch.no_grad()
122
+ def forward(self, x, position_ids, seq_len=None):
123
+ # x: [bs, num_attention_heads, seq_len, head_size]
124
+ if self.inv_freq is None:
125
+ self.inv_freq = 1.0 / (
126
+ self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim)
127
+ )
128
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
129
+ position_ids_expanded = position_ids[:, None, :].float()
130
+ # Force float32 since bfloat16 loses precision on long contexts
131
+ # See https://github.com/huggingface/transformers/pull/29285
132
+ device_type = x.device.type
133
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
134
+ with torch.autocast(device_type=device_type, enabled=False):
135
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
136
+ emb = torch.cat((freqs, freqs), dim=-1)
137
+ cos = emb.cos()
138
+ sin = emb.sin()
139
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
140
+
141
+
142
+ class Phi3LongRoPEScaledRotaryEmbedding(Phi3RotaryEmbedding):
143
+ def __init__(self, dim, config, device=None):
144
+ super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)
145
+
146
+ self.short_factor = config.rope_scaling["short_factor"]
147
+ self.long_factor = config.rope_scaling["long_factor"]
148
+ self.original_max_position_embeddings = config.original_max_position_embeddings
149
+
150
+ @torch.no_grad()
151
+ def forward(self, x, position_ids, seq_len=None):
152
+ seq_len = torch.max(position_ids) + 1
153
+ if seq_len > self.original_max_position_embeddings:
154
+ ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device)
155
+ else:
156
+ ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device)
157
+
158
+ inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
159
+ self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)
160
+
161
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
162
+ position_ids_expanded = position_ids[:, None, :].float()
163
+
164
+ # Force float32 since bfloat16 loses precision on long contexts
165
+ # See https://github.com/huggingface/transformers/pull/29285
166
+ device_type = x.device.type
167
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
168
+ with torch.autocast(device_type=device_type, enabled=False):
169
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
170
+ emb = torch.cat((freqs, freqs), dim=-1)
171
+
172
+ scale = self.max_position_embeddings / self.original_max_position_embeddings
173
+ if scale <= 1.0:
174
+ scaling_factor = 1.0
175
+ else:
176
+ scaling_factor = math.sqrt(1 + math.log(scale) / math.log(self.original_max_position_embeddings))
177
+
178
+ cos = emb.cos() * scaling_factor
179
+ sin = emb.sin() * scaling_factor
180
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
181
+
182
+
183
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
184
+ def rotate_half(x):
185
+ """Rotates half the hidden dims of the input."""
186
+ x1 = x[..., : x.shape[-1] // 2]
187
+ x2 = x[..., x.shape[-1] // 2 :]
188
+ return torch.cat((-x2, x1), dim=-1)
189
+
190
+
191
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
192
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
193
+ """Applies Rotary Position Embedding to the query and key tensors.
194
+
195
+ Args:
196
+ q (`torch.Tensor`): The query tensor.
197
+ k (`torch.Tensor`): The key tensor.
198
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
199
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
200
+ position_ids (`torch.Tensor`, *optional*):
201
+ Deprecated and unused.
202
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
203
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
204
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
205
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
206
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
207
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
208
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
209
+ Returns:
210
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
211
+ """
212
+ cos = cos.unsqueeze(unsqueeze_dim)
213
+ sin = sin.unsqueeze(unsqueeze_dim)
214
+ q_embed = (q * cos) + (rotate_half(q) * sin)
215
+ k_embed = (k * cos) + (rotate_half(k) * sin)
216
+ return q_embed, k_embed
217
+
218
+
219
+ class Phi3MLP(nn.Module):
220
+ def __init__(self, config):
221
+ super().__init__()
222
+
223
+ self.config = config
224
+ self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
225
+ self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
226
+
227
+ self.activation_fn = ACT2FN[config.hidden_act]
228
+
229
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
230
+ up_states = self.gate_up_proj(hidden_states)
231
+
232
+ gate, up_states = up_states.chunk(2, dim=-1)
233
+ up_states = up_states * self.activation_fn(gate)
234
+
235
+ return self.down_proj(up_states)
236
+
237
+
238
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv with llama->phi
239
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
240
+ """
241
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
242
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
243
+ """
244
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
245
+ if n_rep == 1:
246
+ return hidden_states
247
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
248
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
249
+
250
+
251
+ class Phi3Attention(nn.Module):
252
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
253
+
254
+ def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
255
+ super().__init__()
256
+ self.config = config
257
+ self.layer_idx = layer_idx
258
+ if layer_idx is None:
259
+ logger.warning_once(
260
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
261
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
262
+ "when creating this class."
263
+ )
264
+
265
+ self.attention_dropout = config.attention_dropout
266
+ self.hidden_size = config.hidden_size
267
+ self.num_heads = config.num_attention_heads
268
+ self.head_dim = self.hidden_size // self.num_heads
269
+ self.num_key_value_heads = config.num_key_value_heads
270
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
271
+ self.max_position_embeddings = config.max_position_embeddings
272
+ self.original_max_position_embeddings = config.original_max_position_embeddings
273
+ self.rope_theta = config.rope_theta
274
+ self.rope_scaling = config.rope_scaling
275
+ self.is_causal = True
276
+
277
+ if (self.head_dim * self.num_heads) != self.hidden_size:
278
+ raise ValueError(
279
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
280
+ f" and `num_heads`: {self.num_heads})."
281
+ )
282
+
283
+ op_size = self.num_heads * self.head_dim + 2 * (self.num_key_value_heads * self.head_dim)
284
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
285
+ self.qkv_proj = nn.Linear(self.hidden_size, op_size, bias=False)
286
+ self._init_rope()
287
+
288
+ def _init_rope(self):
289
+ if self.rope_scaling is None:
290
+ self.rotary_emb = Phi3RotaryEmbedding(
291
+ self.head_dim,
292
+ max_position_embeddings=self.max_position_embeddings,
293
+ base=self.rope_theta,
294
+ )
295
+ else:
296
+ scaling_type = self.config.rope_scaling["type"]
297
+ if scaling_type == "longrope":
298
+ self.rotary_emb = Phi3LongRoPEScaledRotaryEmbedding(self.head_dim, self.config)
299
+ else:
300
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
301
+
302
+ def forward(
303
+ self,
304
+ hidden_states: torch.Tensor,
305
+ attention_mask: Optional[torch.Tensor] = None,
306
+ position_ids: Optional[torch.LongTensor] = None,
307
+ past_key_value: Optional[Cache] = None,
308
+ output_attentions: bool = False,
309
+ use_cache: bool = False,
310
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
311
+ logger.warning_once("You are not running the flash-attention implementation, expect numerical differences.")
312
+
313
+ bsz, q_len, _ = hidden_states.size()
314
+
315
+ qkv = self.qkv_proj(hidden_states)
316
+ query_pos = self.num_heads * self.head_dim
317
+ query_states = qkv[..., :query_pos]
318
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
319
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
320
+
321
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
322
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
323
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
324
+
325
+ kv_seq_len = key_states.shape[-2]
326
+ if past_key_value is not None:
327
+ if self.layer_idx is None:
328
+ raise ValueError(
329
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
330
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
331
+ "with a layer index."
332
+ )
333
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
334
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)
335
+
336
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
337
+
338
+ if past_key_value is not None:
339
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
340
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
341
+
342
+ # repeat k/v heads if n_kv_heads < n_heads
343
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
344
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
345
+
346
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
347
+
348
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
349
+ raise ValueError(
350
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
351
+ f" {attn_weights.size()}"
352
+ )
353
+
354
+ if attention_mask is not None:
355
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
356
+ raise ValueError(
357
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
358
+ )
359
+ attn_weights = attn_weights + attention_mask
360
+
361
+ # upcast attention to fp32
362
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(value_states.dtype)
363
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
364
+
365
+ attn_output = torch.matmul(attn_weights, value_states)
366
+
367
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
368
+ raise ValueError(
369
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
370
+ f" {attn_output.size()}"
371
+ )
372
+
373
+ attn_output = attn_output.transpose(1, 2).contiguous()
374
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
375
+
376
+ attn_output = self.o_proj(attn_output)
377
+
378
+ if not output_attentions:
379
+ attn_weights = None
380
+
381
+ return attn_output, attn_weights, past_key_value
382
+
383
+
384
+ class Phi3FlashAttention2(Phi3Attention):
385
+ """
386
+ Phi-3 flash attention module. This module inherits from `Phi3Attention` as the weights of the module stay
387
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
388
+ flash attention and deal with padding tokens in case the input contains any of them.
389
+ """
390
+
391
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
392
+ def __init__(self, *args, **kwargs):
393
+ super().__init__(*args, **kwargs)
394
+
395
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
396
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which became the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
397
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
398
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
399
+
400
+ def forward(
401
+ self,
402
+ hidden_states: torch.Tensor,
403
+ attention_mask: Optional[torch.LongTensor] = None,
404
+ position_ids: Optional[torch.LongTensor] = None,
405
+ past_key_value: Optional[Cache] = None,
406
+ output_attentions: bool = False,
407
+ use_cache: bool = False,
408
+ **kwargs,
409
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
410
+ # Phi3FlashAttention2 attention does not support output_attentions
411
+
412
+ if not _flash_supports_window_size:
413
+ logger.warning_once(
414
+ "The current flash attention version does not support sliding window attention. Please use `attn_implementation='eager'` or upgrade flash-attn library."
415
+ )
416
+ raise ValueError("The current flash attention version does not support sliding window attention.")
417
+
418
+ output_attentions = False
419
+
420
+ if "padding_mask" in kwargs:
421
+ warnings.warn(
422
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
423
+ )
424
+
425
+ # overwrite attention_mask with padding_mask
426
+ attention_mask = kwargs.pop("padding_mask")
427
+
428
+ bsz, q_len, _ = hidden_states.size()
429
+
430
+ qkv = self.qkv_proj(hidden_states)
431
+ query_pos = self.num_heads * self.head_dim
432
+ query_states = qkv[..., :query_pos]
433
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
434
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
435
+
436
+ # Flash attention requires the input to have the shape
437
+ # batch_size x seq_length x num_heads x head_dim
438
+ # therefore we just need to keep the original shape
439
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
440
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
441
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
442
+
443
+ kv_seq_len = key_states.shape[-2]
444
+ if past_key_value is not None:
445
+ if self.layer_idx is None:
446
+ raise ValueError(
447
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
448
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
449
+ "with a layer index."
450
+ )
451
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
452
+
453
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
454
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
455
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=rotary_seq_len)
456
+
457
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
458
+
459
+ use_sliding_windows = (
460
+ _flash_supports_window_size
461
+ and getattr(self.config, "sliding_window", None) is not None
462
+ and kv_seq_len > self.config.sliding_window
463
+ )
464
+
465
+ if past_key_value is not None:
466
+ # Activate cache slicing only if the config has a `sliding_window` attribute
467
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
468
+ if (
469
+ getattr(self.config, "sliding_window", None) is not None
470
+ and kv_seq_len > self.config.sliding_window
471
+ and cache_has_contents
472
+ ):
473
+ slicing_tokens = 1 - self.config.sliding_window
474
+
475
+ past_key = past_key_value[self.layer_idx][0]
476
+ past_value = past_key_value[self.layer_idx][1]
477
+
478
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
479
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
480
+
481
+ if past_key.shape[-2] != self.config.sliding_window - 1:
482
+ raise ValueError(
483
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
484
+ f" {past_key.shape}"
485
+ )
486
+
487
+ if attention_mask is not None:
488
+ attention_mask = attention_mask[:, slicing_tokens:]
489
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
490
+
491
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
492
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
493
+
494
+ # repeat k/v heads if n_kv_heads < n_heads
495
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
496
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
497
+
498
+ attn_dropout = self.attention_dropout if self.training else 0.0
499
+
500
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
501
+ # therefore the input hidden states get silently cast to float32. Hence, we need
502
+ # to cast them back to the correct dtype just to be sure everything works as expected.
503
+ # This might slow down training & inference so it is recommended to not cast the LayerNorms
504
+ # in fp32.
505
+
506
+ if query_states.dtype == torch.float32:
507
+ if torch.is_autocast_enabled():
508
+ target_dtype = torch.get_autocast_gpu_dtype()
509
+ # Handle the case where the model is quantized
510
+ elif hasattr(self.config, "_pre_quantization_dtype"):
511
+ target_dtype = self.config._pre_quantization_dtype
512
+ else:
513
+ target_dtype = self.qkv_proj.weight.dtype
514
+
515
+ logger.warning_once(
516
+ f"The input hidden states seem to be silently cast to float32, this might be related to"
517
+ f" the fact that you have upcast embedding or layer norm layers to float32. We will cast back the input in"
518
+ f" {target_dtype}."
519
+ )
520
+
521
+ query_states = query_states.to(target_dtype)
522
+ key_states = key_states.to(target_dtype)
523
+ value_states = value_states.to(target_dtype)
524
+
525
+ # Reshape to the expected shape for Flash Attention
526
+ query_states = query_states.transpose(1, 2)
527
+ key_states = key_states.transpose(1, 2)
528
+ value_states = value_states.transpose(1, 2)
529
+
530
+ attn_output = self._flash_attention_forward(
531
+ query_states,
532
+ key_states,
533
+ value_states,
534
+ attention_mask,
535
+ q_len,
536
+ dropout=attn_dropout,
537
+ use_sliding_windows=use_sliding_windows,
538
+ )
539
+
540
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
541
+ attn_output = self.o_proj(attn_output)
542
+
543
+ if not output_attentions:
544
+ attn_weights = None
545
+
546
+ return attn_output, attn_weights, past_key_value
547
+
548
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._flash_attention_forward
549
+ def _flash_attention_forward(
550
+ self,
551
+ query_states,
552
+ key_states,
553
+ value_states,
554
+ attention_mask,
555
+ query_length,
556
+ dropout=0.0,
557
+ softmax_scale=None,
558
+ use_sliding_windows=False,
559
+ ):
560
+ """
561
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
562
+ first unpads the input, then computes the attention scores and pads the final attention scores.
563
+
564
+ Args:
565
+ query_states (`torch.Tensor`):
566
+ Input query states to be passed to Flash Attention API
567
+ key_states (`torch.Tensor`):
568
+ Input key states to be passed to Flash Attention API
569
+ value_states (`torch.Tensor`):
570
+ Input value states to be passed to Flash Attention API
571
+ attention_mask (`torch.Tensor`):
572
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
573
+ position of padding tokens and 1 for the position of non-padding tokens.
574
+ dropout (`float`):
575
+ Attention dropout
576
+ softmax_scale (`float`, *optional*):
577
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
578
+ use_sliding_windows (`bool`, *optional*):
579
+ Whether to activate sliding window attention.
580
+ """
581
+ if not self._flash_attn_uses_top_left_mask:
582
+ causal = self.is_causal
583
+ else:
584
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
585
+ causal = self.is_causal and query_length != 1
586
+
587
+ # Contains at least one padding token in the sequence
588
+ if attention_mask is not None:
589
+ batch_size = query_states.shape[0]
590
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
591
+ query_states, key_states, value_states, attention_mask, query_length
592
+ )
593
+
594
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
595
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
596
+
597
+ if not use_sliding_windows:
598
+ attn_output_unpad = flash_attn_varlen_func(
599
+ query_states,
600
+ key_states,
601
+ value_states,
602
+ cu_seqlens_q=cu_seqlens_q,
603
+ cu_seqlens_k=cu_seqlens_k,
604
+ max_seqlen_q=max_seqlen_in_batch_q,
605
+ max_seqlen_k=max_seqlen_in_batch_k,
606
+ dropout_p=dropout,
607
+ softmax_scale=softmax_scale,
608
+ causal=causal,
609
+ )
610
+ else:
611
+ attn_output_unpad = flash_attn_varlen_func(
612
+ query_states,
613
+ key_states,
614
+ value_states,
615
+ cu_seqlens_q=cu_seqlens_q,
616
+ cu_seqlens_k=cu_seqlens_k,
617
+ max_seqlen_q=max_seqlen_in_batch_q,
618
+ max_seqlen_k=max_seqlen_in_batch_k,
619
+ dropout_p=dropout,
620
+ softmax_scale=softmax_scale,
621
+ causal=causal,
622
+ window_size=(self.config.sliding_window, self.config.sliding_window),
623
+ )
624
+
625
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
626
+ else:
627
+ if not use_sliding_windows:
628
+ attn_output = flash_attn_func(
629
+ query_states,
630
+ key_states,
631
+ value_states,
632
+ dropout,
633
+ softmax_scale=softmax_scale,
634
+ causal=causal,
635
+ )
636
+ else:
637
+ attn_output = flash_attn_func(
638
+ query_states,
639
+ key_states,
640
+ value_states,
641
+ dropout,
642
+ softmax_scale=softmax_scale,
643
+ causal=causal,
644
+ window_size=(self.config.sliding_window, self.config.sliding_window),
645
+ )
646
+
647
+ return attn_output
648
+
649
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
650
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
651
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
652
+
653
+ # On the first iteration we need to properly re-create the padding mask
654
+ # by slicing it at the proper place
655
+ if kv_seq_len != attention_mask.shape[-1]:
656
+ attention_mask_num_tokens = attention_mask.shape[-1]
657
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
658
+
659
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
660
+
661
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
662
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
663
+
664
+ if query_length == kv_seq_len:
665
+ query_layer = index_first_axis(
666
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
667
+ )
668
+ cu_seqlens_q = cu_seqlens_k
669
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
670
+ indices_q = indices_k
671
+ elif query_length == 1:
672
+ max_seqlen_in_batch_q = 1
673
+ cu_seqlens_q = torch.arange(
674
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
675
+ ) # There is a memcpy here, that is very bad.
676
+ indices_q = cu_seqlens_q[:-1]
677
+ query_layer = query_layer.squeeze(1)
678
+ else:
679
+ # The -q_len: slice assumes left padding.
680
+ attention_mask = attention_mask[:, -query_length:]
681
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
682
+
683
+ return (
684
+ query_layer,
685
+ key_layer,
686
+ value_layer,
687
+ indices_q,
688
+ (cu_seqlens_q, cu_seqlens_k),
689
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
690
+ )
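For intuition, `_upad_input` above boils down to turning a 2D padding mask into flattened token indices, cumulative sequence lengths, and a maximum per-batch length, which is what `flash_attn_varlen_func` consumes. Below is a small self-contained sketch of those quantities (an illustration, not the library helper itself):

```python
# Sketch of what the unpad step derives from a (batch, seq_len) padding mask,
# where 1 marks real tokens and 0 marks padding.
import torch
import torch.nn.functional as F

def unpad_summary(attention_mask: torch.Tensor):
    seqlens = attention_mask.sum(dim=-1, dtype=torch.int32)                   # real tokens per row
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))
    return indices, cu_seqlens, int(seqlens.max())

mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
print(unpad_summary(mask))  # indices of the 5 real tokens, cu_seqlens = [0, 3, 5], max length 3
```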
691
+
692
+
693
+ # copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Phi3
694
+ # TODO @Arthur no longer copied from LLama after static cache
695
+ class Phi3SdpaAttention(Phi3Attention):
696
+ """
697
+ Phi3 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
698
+ `Phi3Attention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
699
+ the SDPA API.
700
+ """
701
+
702
+ # Adapted from Phi3Attention.forward
703
+ def forward(
704
+ self,
705
+ hidden_states: torch.Tensor,
706
+ attention_mask: Optional[torch.Tensor] = None,
707
+ position_ids: Optional[torch.LongTensor] = None,
708
+ past_key_value: Optional[Cache] = None,
709
+ output_attentions: bool = False,
710
+ use_cache: bool = False,
711
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
712
+ if output_attentions:
713
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
714
+ logger.warning_once(
715
+ "Phi3Model is using Phi3SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
716
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
717
+ )
718
+ return super().forward(
719
+ hidden_states=hidden_states,
720
+ attention_mask=attention_mask,
721
+ position_ids=position_ids,
722
+ past_key_value=past_key_value,
723
+ output_attentions=output_attentions,
724
+ use_cache=use_cache,
725
+ )
726
+
727
+ bsz, q_len, _ = hidden_states.size()
728
+
729
+ qkv = self.qkv_proj(hidden_states)
730
+ query_pos = self.num_heads * self.head_dim
731
+ query_states = qkv[..., :query_pos]
732
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
733
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
734
+
735
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
736
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
737
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
738
+
739
+ kv_seq_len = key_states.shape[-2]
740
+ if past_key_value is not None:
741
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
742
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)
743
+
744
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
745
+
746
+ if past_key_value is not None:
747
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
748
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
749
+
750
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
751
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
752
+
753
+ if attention_mask is not None:
754
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
755
+ raise ValueError(
756
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
757
+ )
758
+
759
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
760
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
761
+ if query_states.device.type == "cuda" and attention_mask is not None:
762
+ query_states = query_states.contiguous()
763
+ key_states = key_states.contiguous()
764
+ value_states = value_states.contiguous()
765
+
766
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
767
+ query_states,
768
+ key_states,
769
+ value_states,
770
+ attn_mask=attention_mask,
771
+ dropout_p=self.attention_dropout if self.training else 0.0,
772
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
773
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
774
+ )
775
+
776
+ attn_output = attn_output.transpose(1, 2).contiguous()
777
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
778
+
779
+ attn_output = self.o_proj(attn_output)
780
+
781
+ return attn_output, None, past_key_value
782
+
783
+
784
+ PHI3_ATTENTION_CLASSES = {
785
+ "eager": Phi3Attention,
786
+ "flash_attention_2": Phi3FlashAttention2,
787
+ "sdpa": Phi3SdpaAttention,
788
+ }
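The mapping above is what `config._attn_implementation` selects from at load time. A minimal, hedged sketch of picking a backend when loading a checkpoint (the checkpoint id is illustrative; note that `Phi3PreTrainedModel` further below sets `_supports_sdpa = False`, so "sdpa" may not be accepted for this remote code, and "flash_attention_2" additionally requires the flash-attn package and a supported GPU):

```python
# Minimal sketch: selecting one of the attention backends mapped in PHI3_ATTENTION_CLASSES.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-3-mini-4k-instruct",       # illustrative checkpoint id
    attn_implementation="flash_attention_2",  # or "eager" for the reference path
    torch_dtype="auto",
    trust_remote_code=True,                   # may be needed when loading this repo's custom modeling code
)
```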
789
+
790
+
791
+ class Phi3DecoderLayer(nn.Module):
792
+ def __init__(self, config: Phi3Config, layer_idx: int):
793
+ super().__init__()
794
+
795
+ self.config = config
796
+ self.self_attn = PHI3_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
797
+
798
+ self.mlp = Phi3MLP(config)
799
+ self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
800
+
801
+ self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
802
+ self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
803
+ self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
804
+
805
+ def forward(
806
+ self,
807
+ hidden_states: torch.Tensor,
808
+ attention_mask: Optional[torch.Tensor] = None,
809
+ position_ids: Optional[torch.LongTensor] = None,
810
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
811
+ output_attentions: Optional[bool] = False,
812
+ use_cache: Optional[bool] = False,
813
+ **kwargs,
814
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
815
+ if "padding_mask" in kwargs:
816
+ warnings.warn(
817
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
818
+ )
819
+ """
820
+ Args:
821
+ hidden_states (`torch.FloatTensor`):
822
+ input to the layer of shape `(batch, seq_len, embed_dim)`
823
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
824
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
825
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
826
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
827
+ `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
828
+ output_attentions (`bool`, *optional*):
829
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
830
+ returned tensors for more detail.
831
+ use_cache (`bool`, *optional*):
832
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
833
+ (see `past_key_values`).
834
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
835
+ """
836
+
837
+ residual = hidden_states
838
+
839
+ hidden_states = self.input_layernorm(hidden_states)
840
+
841
+ # Self Attention
842
+ attn_outputs, self_attn_weights, present_key_value = self.self_attn(
843
+ hidden_states=hidden_states,
844
+ attention_mask=attention_mask,
845
+ position_ids=position_ids,
846
+ past_key_value=past_key_value,
847
+ output_attentions=output_attentions,
848
+ use_cache=use_cache,
849
+ )
850
+
851
+ hidden_states = residual + self.resid_attn_dropout(attn_outputs)
852
+
853
+ residual = hidden_states
854
+ hidden_states = self.post_attention_layernorm(hidden_states)
855
+ hidden_states = self.mlp(hidden_states)
856
+ hidden_states = residual + self.resid_mlp_dropout(hidden_states)
857
+
858
+ outputs = (hidden_states,)
859
+
860
+ if output_attentions:
861
+ outputs += (self_attn_weights,)
862
+
863
+ if use_cache:
864
+ outputs += (present_key_value,)
865
+
866
+ return outputs
867
+
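In plain terms, `Phi3DecoderLayer.forward` above is a pre-norm residual block: normalize, attend, add the residual; then normalize, run the MLP, add the residual again. A schematic restatement (a sketch over stand-in callables, not the actual module):

```python
# Schematic of the residual structure in Phi3DecoderLayer.forward above.
def decoder_layer(x, self_attn, mlp, input_ln, post_attn_ln, drop_attn, drop_mlp):
    h = x + drop_attn(self_attn(input_ln(x)))    # pre-norm attention block + residual
    return h + drop_mlp(mlp(post_attn_ln(h)))    # pre-norm MLP block + residual
```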
868
+
869
+ PHI3_START_DOCSTRING = r"""
870
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
871
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
872
+ etc.)
873
+
874
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
875
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
876
+ and behavior.
877
+
878
+ Parameters:
879
+ config ([`Phi3Config`]):
880
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
881
+ load the weights associated with the model, only the configuration. Check out the
882
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
883
+ """
884
+
885
+
886
+ @add_start_docstrings(
887
+ "The bare Phi-3 model outputting raw hidden-states without any specific head on top.",
888
+ PHI3_START_DOCSTRING,
889
+ )
890
+ class Phi3PreTrainedModel(PreTrainedModel):
891
+ config_class = Phi3Config
892
+ base_model_prefix = "model"
893
+ supports_gradient_checkpointing = True
894
+ _no_split_modules = ["Phi3DecoderLayer"]
895
+ _skip_keys_device_placement = "past_key_values"
896
+ _supports_flash_attn_2 = True
897
+ _supports_sdpa = False
898
+ _supports_cache_class = True
899
+
900
+ _version = "0.0.5"
901
+
902
+ def _init_weights(self, module):
903
+ std = self.config.initializer_range
904
+ if isinstance(module, nn.Linear):
905
+ module.weight.data.normal_(mean=0.0, std=std)
906
+ if module.bias is not None:
907
+ module.bias.data.zero_()
908
+ elif isinstance(module, nn.Embedding):
909
+ module.weight.data.normal_(mean=0.0, std=std)
910
+ if module.padding_idx is not None:
911
+ module.weight.data[module.padding_idx].zero_()
912
+
913
+
914
+ PHI3_INPUTS_DOCSTRING = r"""
915
+ Args:
916
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
917
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
918
+ it.
919
+
920
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
921
+ [`PreTrainedTokenizer.__call__`] for details.
922
+
923
+ [What are input IDs?](../glossary#input-ids)
924
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
925
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
926
+
927
+ - 1 for tokens that are **not masked**,
928
+ - 0 for tokens that are **masked**.
929
+
930
+ [What are attention masks?](../glossary#attention-mask)
931
+
932
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
933
+ [`PreTrainedTokenizer.__call__`] for details.
934
+
935
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
936
+ `past_key_values`).
937
+
938
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
939
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
940
+ information on the default strategy.
941
+
942
+ - 1 indicates the head is **not masked**,
943
+ - 0 indicates the head is **masked**.
944
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
945
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
946
+ config.n_positions - 1]`.
947
+
948
+ [What are position IDs?](../glossary#position-ids)
949
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
950
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
951
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
952
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
953
+
954
+ Two formats are allowed:
955
+ - a [`~cache_utils.Cache`] instance;
956
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
957
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
958
+ cache format.
959
+
960
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
961
+ legacy cache format will be returned.
962
+
963
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
964
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
965
+ of shape `(batch_size, sequence_length)`.
966
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
967
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
968
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
969
+ model's internal embedding lookup matrix.
970
+ use_cache (`bool`, *optional*):
971
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
972
+ `past_key_values`).
973
+ output_attentions (`bool`, *optional*):
974
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
975
+ tensors for more detail.
976
+ output_hidden_states (`bool`, *optional*):
977
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
978
+ more detail.
979
+ return_dict (`bool`, *optional*):
980
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
981
+ """
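A minimal sketch of the cache behaviour documented above: the first call returns `past_key_values`, and subsequent calls can pass only the newly generated token together with that cache (checkpoint id is illustrative):

```python
# Sketch: incremental decoding with `use_cache` / `past_key_values` as described above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

name = "microsoft/phi-3-mini-4k-instruct"  # illustrative checkpoint id
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForCausalLM.from_pretrained(name)

inputs = tokenizer("Hello", return_tensors="pt")   # attention_mask: 1 = keep, 0 = padding
out = model(**inputs, use_cache=True)              # returns past_key_values
next_token = out.logits[:, -1].argmax(dim=-1, keepdim=True)

# Next step: feed only the new token plus the cache instead of the full sequence.
out2 = model(input_ids=next_token, past_key_values=out.past_key_values, use_cache=True)
```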
982
+
983
+
984
+ @add_start_docstrings(
985
+ "The bare Phi-3 model outputting raw hidden-states without any specific head on top.",
986
+ PHI3_START_DOCSTRING,
987
+ )
988
+ class Phi3Model(Phi3PreTrainedModel):
989
+ """
990
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Phi3DecoderLayer`]
991
+
992
+ Args:
993
+ config: Phi3Config
994
+ """
995
+
996
+ def __init__(self, config: Phi3Config):
997
+ super().__init__(config)
998
+ self.padding_idx = config.pad_token_id
999
+ self.vocab_size = config.vocab_size
1000
+
1001
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
1002
+ self.embed_dropout = nn.Dropout(config.embd_pdrop)
1003
+ self.layers = nn.ModuleList(
1004
+ [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
1005
+ )
1006
+ self._attn_implementation = config._attn_implementation
1007
+ self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1008
+
1009
+ self.gradient_checkpointing = False
1010
+ # Initialize weights and apply final processing
1011
+ self.post_init()
1012
+
1013
+ def get_input_embeddings(self):
1014
+ return self.embed_tokens
1015
+
1016
+ def set_input_embeddings(self, value):
1017
+ self.embed_tokens = value
1018
+
1019
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1020
+ def forward(
1021
+ self,
1022
+ input_ids: torch.LongTensor = None,
1023
+ attention_mask: Optional[torch.Tensor] = None,
1024
+ position_ids: Optional[torch.LongTensor] = None,
1025
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1026
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1027
+ use_cache: Optional[bool] = None,
1028
+ output_attentions: Optional[bool] = None,
1029
+ output_hidden_states: Optional[bool] = None,
1030
+ return_dict: Optional[bool] = None,
1031
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
1032
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1033
+ output_hidden_states = (
1034
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1035
+ )
1036
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1037
+
1038
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1039
+
1040
+ # retrieve input_ids and inputs_embeds
1041
+ if input_ids is not None and inputs_embeds is not None:
1042
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1043
+ elif input_ids is not None:
1044
+ batch_size, seq_length = input_ids.shape[:2]
1045
+ elif inputs_embeds is not None:
1046
+ batch_size, seq_length = inputs_embeds.shape[:2]
1047
+ else:
1048
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1049
+
1050
+ past_key_values_length = 0
1051
+
1052
+ if self.gradient_checkpointing and self.training:
1053
+ if use_cache:
1054
+ logger.warning_once(
1055
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1056
+ )
1057
+ use_cache = False
1058
+
1059
+ if use_cache:
1060
+ use_legacy_cache = not isinstance(past_key_values, Cache)
1061
+ if use_legacy_cache:
1062
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1063
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
1064
+
1065
+ if position_ids is None:
1066
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1067
+ position_ids = torch.arange(
1068
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
1069
+ )
1070
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
1071
+ else:
1072
+ position_ids = position_ids.view(-1, seq_length).long()
1073
+
1074
+ if inputs_embeds is None:
1075
+ inputs_embeds = self.embed_tokens(input_ids)
1076
+
1077
+ if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
1078
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
1079
+ if is_padding_right:
1080
+ raise ValueError(
1081
+ "You are attempting to perform batched generation with padding_side='right'"
1082
+ ", which may lead to unexpected behaviour for the Flash Attention version of Phi3. Make sure to"
1083
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
1084
+ )
1085
+
1086
+ if self._attn_implementation == "flash_attention_2":
1087
+ # 2d mask is passed through the layers
1088
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1089
+ else:
1090
+ # 4d mask is passed through the layers
1091
+ attention_mask = _prepare_4d_causal_attention_mask(
1092
+ attention_mask,
1093
+ (batch_size, seq_length),
1094
+ inputs_embeds,
1095
+ past_key_values_length,
1096
+ sliding_window=self.config.sliding_window,
1097
+ )
1098
+
1099
+ hidden_states = inputs_embeds
1100
+
1101
+ # decoder layers
1102
+ all_hidden_states = () if output_hidden_states else None
1103
+ all_self_attns = () if output_attentions else None
1104
+ next_decoder_cache = None
1105
+
1106
+ for decoder_layer in self.layers:
1107
+ if output_hidden_states:
1108
+ all_hidden_states += (hidden_states,)
1109
+
1110
+ if self.gradient_checkpointing and self.training:
1111
+ layer_outputs = self._gradient_checkpointing_func(
1112
+ decoder_layer.__call__,
1113
+ hidden_states,
1114
+ attention_mask,
1115
+ position_ids,
1116
+ past_key_values,
1117
+ output_attentions,
1118
+ use_cache,
1119
+ )
1120
+ else:
1121
+ layer_outputs = decoder_layer(
1122
+ hidden_states,
1123
+ attention_mask=attention_mask,
1124
+ position_ids=position_ids,
1125
+ past_key_value=past_key_values,
1126
+ output_attentions=output_attentions,
1127
+ use_cache=use_cache,
1128
+ )
1129
+
1130
+ hidden_states = layer_outputs[0]
1131
+
1132
+ if use_cache:
1133
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1134
+
1135
+ if output_attentions:
1136
+ all_self_attns += (layer_outputs[1],)
1137
+
1138
+ hidden_states = self.norm(hidden_states)
1139
+
1140
+ # add hidden states from the last decoder layer
1141
+ if output_hidden_states:
1142
+ all_hidden_states += (hidden_states,)
1143
+
1144
+ next_cache = None
1145
+ if use_cache:
1146
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
1147
+ if not return_dict:
1148
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1149
+ return BaseModelOutputWithPast(
1150
+ last_hidden_state=hidden_states,
1151
+ past_key_values=next_cache,
1152
+ hidden_states=all_hidden_states,
1153
+ attentions=all_self_attns,
1154
+ )
1155
+
1156
+
1157
+ class Phi3ForCausalLM(Phi3PreTrainedModel):
1158
+ _tied_weights_keys = ["lm_head.weight"]
1159
+
1160
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi3
1161
+ def __init__(self, config):
1162
+ super().__init__(config)
1163
+ self.model = Phi3Model(config)
1164
+ self.vocab_size = config.vocab_size
1165
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1166
+
1167
+ # Initialize weights and apply final processing
1168
+ self.post_init()
1169
+
1170
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
1171
+ def get_input_embeddings(self):
1172
+ return self.model.embed_tokens
1173
+
1174
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
1175
+ def set_input_embeddings(self, value):
1176
+ self.model.embed_tokens = value
1177
+
1178
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
1179
+ def get_output_embeddings(self):
1180
+ return self.lm_head
1181
+
1182
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
1183
+ def set_output_embeddings(self, new_embeddings):
1184
+ self.lm_head = new_embeddings
1185
+
1186
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
1187
+ def set_decoder(self, decoder):
1188
+ self.model = decoder
1189
+
1190
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
1191
+ def get_decoder(self):
1192
+ return self.model
1193
+
1194
+ # Ignore copy
1195
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1196
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1197
+ def forward(
1198
+ self,
1199
+ input_ids: torch.LongTensor = None,
1200
+ attention_mask: Optional[torch.Tensor] = None,
1201
+ position_ids: Optional[torch.LongTensor] = None,
1202
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1203
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1204
+ labels: Optional[torch.LongTensor] = None,
1205
+ use_cache: Optional[bool] = None,
1206
+ output_attentions: Optional[bool] = None,
1207
+ output_hidden_states: Optional[bool] = None,
1208
+ return_dict: Optional[bool] = None,
1209
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1210
+ r"""
1211
+ Args:
1212
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1213
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1214
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1215
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1216
+
1217
+ Returns:
1218
+
1219
+ Example:
1220
+
1221
+ ```python
1222
+ >>> from transformers import AutoTokenizer, Phi3ForCausalLM
1223
+
1224
+ >>> model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
1225
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")
1226
+
1227
+ >>> prompt = "This is an example script ."
1228
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1229
+
1230
+ >>> # Generate
1231
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1232
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1233
+ 'This is an example script .\n Certainly! Below is a sample script that demonstrates a simple task, such as calculating the sum'
1234
+ ```"""
1235
+
1236
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1237
+ output_hidden_states = (
1238
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1239
+ )
1240
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1241
+
1242
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
1243
+ outputs = self.model(
1244
+ input_ids=input_ids,
1245
+ attention_mask=attention_mask,
1246
+ position_ids=position_ids,
1247
+ past_key_values=past_key_values,
1248
+ inputs_embeds=inputs_embeds,
1249
+ use_cache=use_cache,
1250
+ output_attentions=output_attentions,
1251
+ output_hidden_states=output_hidden_states,
1252
+ return_dict=return_dict,
1253
+ )
1254
+
1255
+ hidden_states = outputs[0]
1256
+ logits = self.lm_head(hidden_states)
1257
+ logits = logits.float()
1258
+
1259
+ loss = None
1260
+ if labels is not None:
1261
+ # Shift so that tokens < n predict n
1262
+ shift_logits = logits[..., :-1, :].contiguous()
1263
+ shift_labels = labels[..., 1:].contiguous()
1264
+ # Flatten the tokens
1265
+ loss_fct = CrossEntropyLoss()
1266
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1267
+ shift_labels = shift_labels.view(-1)
1268
+ # Enable model parallelism
1269
+ shift_labels = shift_labels.to(shift_logits.device)
1270
+ loss = loss_fct(shift_logits, shift_labels)
1271
+
1272
+ if not return_dict:
1273
+ output = (logits,) + outputs[1:]
1274
+ return (loss,) + output if loss is not None else output
1275
+
1276
+ return CausalLMOutputWithPast(
1277
+ loss=loss,
1278
+ logits=logits,
1279
+ past_key_values=outputs.past_key_values,
1280
+ hidden_states=outputs.hidden_states,
1281
+ attentions=outputs.attentions,
1282
+ )
1283
+
1284
+ # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM.prepare_inputs_for_generation
1285
+ def prepare_inputs_for_generation(
1286
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1287
+ ):
1288
+ if past_key_values is not None:
1289
+ if isinstance(past_key_values, Cache):
1290
+ cache_length = past_key_values.get_seq_length()
1291
+ past_length = past_key_values.seen_tokens
1292
+ max_cache_length = past_key_values.get_max_length()
1293
+ else:
1294
+ cache_length = past_length = past_key_values[0][0].shape[2]
1295
+ max_cache_length = None
1296
+
1297
+ # Keep only the unprocessed tokens:
1298
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1299
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing inputs_embeds as
1300
+ # input)
1301
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1302
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1303
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1304
+ # input_ids based on the past_length.
1305
+ elif past_length < input_ids.shape[1]:
1306
+ input_ids = input_ids[:, past_length:]
1307
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1308
+
1309
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1310
+ if (
1311
+ max_cache_length is not None
1312
+ and attention_mask is not None
1313
+ and cache_length + input_ids.shape[1] > max_cache_length
1314
+ ):
1315
+ attention_mask = attention_mask[:, -max_cache_length:]
1316
+
1317
+ position_ids = kwargs.get("position_ids", None)
1318
+ if attention_mask is not None and position_ids is None:
1319
+ # create position_ids on the fly for batch generation
1320
+ position_ids = attention_mask.long().cumsum(-1) - 1
1321
+ position_ids.masked_fill_(attention_mask == 0, 1)
1322
+ if past_key_values:
1323
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1324
+
1325
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1326
+ if inputs_embeds is not None and past_key_values is None:
1327
+ model_inputs = {"inputs_embeds": inputs_embeds}
1328
+ else:
1329
+ model_inputs = {"input_ids": input_ids}
1330
+
1331
+ model_inputs.update(
1332
+ {
1333
+ "position_ids": position_ids,
1334
+ "past_key_values": past_key_values,
1335
+ "use_cache": kwargs.get("use_cache"),
1336
+ "attention_mask": attention_mask,
1337
+ }
1338
+ )
1339
+ return model_inputs
1340
+
1341
+ @staticmethod
1342
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
1343
+ def _reorder_cache(past_key_values, beam_idx):
1344
+ reordered_past = ()
1345
+ for layer_past in past_key_values:
1346
+ reordered_past += (
1347
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1348
+ )
1349
+ return reordered_past
1350
+
1351
+
1352
+ @add_start_docstrings(
1353
+ """
1354
+ The [`Phi3Model`] with a sequence classification head on top (linear layer).
1355
+
1356
+ [`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1357
+ (e.g. GPT-2) do.
1358
+
1359
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1360
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1361
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1362
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1363
+ each row of the batch).
1364
+ """,
1365
+ PHI3_START_DOCSTRING,
1366
+ )
1367
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Phi3, LLAMA->PHI3, self.transformer->self.model, transformer_outputs->model_outputs
1368
+ class Phi3ForSequenceClassification(Phi3PreTrainedModel):
1369
+ def __init__(self, config):
1370
+ super().__init__(config)
1371
+ self.num_labels = config.num_labels
1372
+ self.model = Phi3Model(config)
1373
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1374
+
1375
+ # Initialize weights and apply final processing
1376
+ self.post_init()
1377
+
1378
+ def get_input_embeddings(self):
1379
+ return self.model.embed_tokens
1380
+
1381
+ def set_input_embeddings(self, value):
1382
+ self.model.embed_tokens = value
1383
+
1384
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1385
+ def forward(
1386
+ self,
1387
+ input_ids: torch.LongTensor = None,
1388
+ attention_mask: Optional[torch.Tensor] = None,
1389
+ position_ids: Optional[torch.LongTensor] = None,
1390
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1391
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1392
+ labels: Optional[torch.LongTensor] = None,
1393
+ use_cache: Optional[bool] = None,
1394
+ output_attentions: Optional[bool] = None,
1395
+ output_hidden_states: Optional[bool] = None,
1396
+ return_dict: Optional[bool] = None,
1397
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1398
+ r"""
1399
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1400
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1401
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1402
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1403
+ """
1404
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1405
+
1406
+ model_outputs = self.model(
1407
+ input_ids,
1408
+ attention_mask=attention_mask,
1409
+ position_ids=position_ids,
1410
+ past_key_values=past_key_values,
1411
+ inputs_embeds=inputs_embeds,
1412
+ use_cache=use_cache,
1413
+ output_attentions=output_attentions,
1414
+ output_hidden_states=output_hidden_states,
1415
+ return_dict=return_dict,
1416
+ )
1417
+ hidden_states = model_outputs[0]
1418
+ logits = self.score(hidden_states)
1419
+
1420
+ if input_ids is not None:
1421
+ batch_size = input_ids.shape[0]
1422
+ else:
1423
+ batch_size = inputs_embeds.shape[0]
1424
+
1425
+ if self.config.pad_token_id is None and batch_size != 1:
1426
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1427
+ if self.config.pad_token_id is None:
1428
+ sequence_lengths = -1
1429
+ else:
1430
+ if input_ids is not None:
1431
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1432
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1433
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1434
+ sequence_lengths = sequence_lengths.to(logits.device)
1435
+ else:
1436
+ sequence_lengths = -1
1437
+
1438
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1439
+
1440
+ loss = None
1441
+ if labels is not None:
1442
+ labels = labels.to(logits.device)
1443
+ if self.config.problem_type is None:
1444
+ if self.num_labels == 1:
1445
+ self.config.problem_type = "regression"
1446
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1447
+ self.config.problem_type = "single_label_classification"
1448
+ else:
1449
+ self.config.problem_type = "multi_label_classification"
1450
+
1451
+ if self.config.problem_type == "regression":
1452
+ loss_fct = MSELoss()
1453
+ if self.num_labels == 1:
1454
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1455
+ else:
1456
+ loss = loss_fct(pooled_logits, labels)
1457
+ elif self.config.problem_type == "single_label_classification":
1458
+ loss_fct = CrossEntropyLoss()
1459
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1460
+ elif self.config.problem_type == "multi_label_classification":
1461
+ loss_fct = BCEWithLogitsLoss()
1462
+ loss = loss_fct(pooled_logits, labels)
1463
+ if not return_dict:
1464
+ output = (pooled_logits,) + model_outputs[1:]
1465
+ return ((loss,) + output) if loss is not None else output
1466
+
1467
+ return SequenceClassifierOutputWithPast(
1468
+ loss=loss,
1469
+ logits=pooled_logits,
1470
+ past_key_values=model_outputs.past_key_values,
1471
+ hidden_states=model_outputs.hidden_states,
1472
+ attentions=model_outputs.attentions,
1473
+ )
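The pooling logic above picks the last non-padding token per row (falling back to the final position when no pad token appears). A small worked sketch of that index computation, with an illustrative pad id:

```python
# Worked sketch of the last-non-padding-token selection used by Phi3ForSequenceClassification above.
import torch

pad_token_id = 0                                  # illustrative pad id
input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [8, 9, 0, 0, 0]])
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]   # modulo keeps it ONNX-friendly
print(sequence_lengths)                            # tensor([2, 1]) -> index of the last real token
```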
1474
+
1475
+
1476
+ @add_start_docstrings(
1477
+ """
1478
+ [`Phi3Model`] with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1479
+ Named-Entity-Recognition (NER) tasks.
1480
+ """,
1481
+ PHI3_START_DOCSTRING,
1482
+ )
1483
+ # Copied from transformers.models.mpt.modeling_mpt.MptForTokenClassification with Mpt->Phi3,MPT->PHI3,self.transformer->self.model,transformer_outputs->model_outputs
1484
+ class Phi3ForTokenClassification(Phi3PreTrainedModel):
1485
+ def __init__(self, config: Phi3Config):
1486
+ super().__init__(config)
1487
+ self.num_labels = config.num_labels
1488
+
1489
+ self.model = Phi3Model(config)
1490
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
1491
+ classifier_dropout = config.classifier_dropout
1492
+ elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
1493
+ classifier_dropout = config.hidden_dropout
1494
+ else:
1495
+ classifier_dropout = 0.1
1496
+ self.dropout = nn.Dropout(classifier_dropout)
1497
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1498
+
1499
+ # Initialize weights and apply final processing
1500
+ self.post_init()
1501
+
1502
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1503
+ @add_code_sample_docstrings(
1504
+ checkpoint=_CHECKPOINT_FOR_DOC,
1505
+ output_type=TokenClassifierOutput,
1506
+ config_class=_CONFIG_FOR_DOC,
1507
+ )
1508
+ def forward(
1509
+ self,
1510
+ input_ids: Optional[torch.LongTensor] = None,
1511
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1512
+ attention_mask: Optional[torch.Tensor] = None,
1513
+ inputs_embeds: Optional[torch.Tensor] = None,
1514
+ labels: Optional[torch.Tensor] = None,
1515
+ use_cache: Optional[bool] = None,
1516
+ output_attentions: Optional[bool] = None,
1517
+ output_hidden_states: Optional[bool] = None,
1518
+ return_dict: Optional[bool] = None,
1519
+ **deprecated_arguments,
1520
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1521
+ r"""
1522
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1523
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1524
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1525
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1526
+ """
1527
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1528
+
1529
+ model_outputs = self.model(
1530
+ input_ids,
1531
+ past_key_values=past_key_values,
1532
+ attention_mask=attention_mask,
1533
+ inputs_embeds=inputs_embeds,
1534
+ use_cache=use_cache,
1535
+ output_attentions=output_attentions,
1536
+ output_hidden_states=output_hidden_states,
1537
+ return_dict=return_dict,
1538
+ )
1539
+
1540
+ hidden_states = model_outputs[0]
1541
+ hidden_states = self.dropout(hidden_states)
1542
+ logits = self.classifier(hidden_states)
1543
+
1544
+ loss = None
1545
+ if labels is not None:
1546
+ # move labels to correct device to enable model parallelism
1547
+ labels = labels.to(logits.device)
1548
+ batch_size, seq_length = labels.shape
1549
+ loss_fct = CrossEntropyLoss()
1550
+ loss = loss_fct(
1551
+ logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
1552
+ )
1553
+
1554
+ if not return_dict:
1555
+ output = (logits,) + model_outputs[2:]
1556
+ return ((loss,) + output) if loss is not None else output
1557
+
1558
+ return TokenClassifierOutput(
1559
+ loss=loss,
1560
+ logits=logits,
1561
+ hidden_states=model_outputs.hidden_states,
1562
+ attentions=model_outputs.attentions,
1563
+ )
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|end|>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<|endoftext|>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
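As a quick sanity check (the repository path is a placeholder), the mapping above should surface directly on the loaded tokenizer:

```python
# Sketch: the special tokens defined above, as exposed by the tokenizer object.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/checkpoint")  # placeholder path
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
# expected per this file: <s> <|end|> <|endoftext|> <unk>
```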
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
3
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,132 @@
+ {
+ "add_bos_token": false,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": false
+ },
+ "32000": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32001": {
+ "content": "<|assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32002": {
+ "content": "<|placeholder1|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32003": {
+ "content": "<|placeholder2|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32004": {
+ "content": "<|placeholder3|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32005": {
+ "content": "<|placeholder4|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32006": {
+ "content": "<|system|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32007": {
+ "content": "<|end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32008": {
+ "content": "<|placeholder5|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32009": {
+ "content": "<|placeholder6|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32010": {
+ "content": "<|user|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "chat_template": "{{ '<s>' }}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|system|>\n' + system_message + '<|end|>\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|user|>\n' + content + '<|end|>\n<|assistant|>\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|end|>' + '\n' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end|>",
+ "legacy": false,
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
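
The `chat_template` above wraps conversations in the `<|system|>`, `<|user|>`, `<|assistant|>` and `<|end|>` markers, with `<|end|>` also serving as the EOS token. As a minimal sketch (not part of this commit) of how that template could be exercised through `AutoTokenizer.apply_chat_template`, assuming the tokenizer files are available in a local checkpoint directory (`./sft-checkpoint` below is a placeholder, not a published repo id):

```python
# Sketch only: render a two-turn conversation with the chat_template defined
# in this tokenizer_config.json. "./sft-checkpoint" is a hypothetical local
# directory containing these tokenizer files.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./sft-checkpoint")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize the training setup in one sentence."},
]

# tokenize=False returns the formatted prompt string:
# <s><|system|>\n...<|end|>\n<|user|>\n...<|end|>\n<|assistant|>\n
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
```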
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 1.9994796635157401,
+ "total_flos": 420396754255872.0,
+ "train_loss": 0.3626429720482505,
+ "train_runtime": 106139.7498,
+ "train_samples_per_second": 3.476,
+ "train_steps_per_second": 0.027
+ }
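
`train_results.json` condenses the run into a few aggregate numbers: roughly 2 epochs, a mean training loss of about 0.363, and about 3.5 samples/s over roughly 29.5 hours. As a rough illustration only, assuming the file is available locally, the totals could be read back like this:

```python
# Sketch only: read back the training summary written above.
# Assumes train_results.json sits in the current working directory.
import json

with open("train_results.json") as f:
    results = json.load(f)

# Approximate number of training samples processed across both epochs,
# derived from the throughput and runtime reported by the Trainer.
samples_seen = results["train_samples_per_second"] * results["train_runtime"]
print(f"epochs: {results['epoch']:.2f}")
print(f"mean train loss: {results['train_loss']:.4f}")
print(f"~samples processed: {samples_seen:,.0f}")
```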
trainer_log.jsonl ADDED
@@ -0,0 +1,205 @@
+ {"current_steps": 2, "total_steps": 780, "loss": 0.7593, "learning_rate": 5.128205128205128e-07, "epoch": 0.0025622547842101048, "percentage": 0.26, "elapsed_time": "0:00:23", "remaining_time": "2:34:45", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 4, "total_steps": 780, "loss": 0.8076, "learning_rate": 1.0256410256410257e-06, "epoch": 0.0051245095684202095, "percentage": 0.51, "elapsed_time": "0:00:48", "remaining_time": "2:35:48", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 6, "total_steps": 780, "loss": 0.7059, "learning_rate": 1.5384615384615387e-06, "epoch": 0.007686764352630314, "percentage": 0.77, "elapsed_time": "0:01:15", "remaining_time": "2:43:15", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 8, "total_steps": 780, "loss": 0.715, "learning_rate": 2.0512820512820513e-06, "epoch": 0.010249019136840419, "percentage": 1.03, "elapsed_time": "0:01:47", "remaining_time": "2:52:15", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 10, "total_steps": 780, "loss": 0.7175, "learning_rate": 2.564102564102564e-06, "epoch": 0.012811273921050524, "percentage": 1.28, "elapsed_time": "0:02:12", "remaining_time": "2:50:07", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 12, "total_steps": 780, "loss": 0.6976, "learning_rate": 3.0769230769230774e-06, "epoch": 0.015373528705260629, "percentage": 1.54, "elapsed_time": "0:02:43", "remaining_time": "2:54:28", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 14, "total_steps": 780, "loss": 0.7615, "learning_rate": 3.58974358974359e-06, "epoch": 0.017935783489470735, "percentage": 1.79, "elapsed_time": "0:03:07", "remaining_time": "2:51:00", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 16, "total_steps": 780, "loss": 0.6735, "learning_rate": 4.102564102564103e-06, "epoch": 0.020498038273680838, "percentage": 2.05, "elapsed_time": "0:03:34", "remaining_time": "2:50:58", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 18, "total_steps": 780, "loss": 0.6885, "learning_rate": 4.615384615384616e-06, "epoch": 0.023060293057890945, "percentage": 2.31, "elapsed_time": "0:03:58", "remaining_time": "2:48:30", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 20, "total_steps": 780, "loss": 0.768, "learning_rate": 5.128205128205128e-06, "epoch": 0.025622547842101048, "percentage": 2.56, "elapsed_time": "0:04:22", "remaining_time": "2:46:19", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 22, "total_steps": 780, "loss": 0.7092, "learning_rate": 5.641025641025641e-06, "epoch": 0.028184802626311154, "percentage": 2.82, "elapsed_time": "0:04:48", "remaining_time": "2:45:48", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 24, "total_steps": 780, "loss": 0.6764, "learning_rate": 6.153846153846155e-06, "epoch": 0.030747057410521257, "percentage": 3.08, "elapsed_time": "0:05:13", "remaining_time": "2:44:50", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 26, "total_steps": 780, "loss": 0.6386, "learning_rate": 6.666666666666667e-06, "epoch": 0.033309312194731364, "percentage": 3.33, "elapsed_time": "0:05:41", "remaining_time": "2:44:55", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 28, "total_steps": 780, "loss": 0.6218, "learning_rate": 7.17948717948718e-06, "epoch": 0.03587156697894147, "percentage": 3.59, "elapsed_time": "0:06:05", "remaining_time": "2:43:40", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 30, "total_steps": 780, "loss": 0.5853, "learning_rate": 7.692307692307694e-06, "epoch": 0.03843382176315158, "percentage": 3.85, "elapsed_time": "0:06:34", "remaining_time": "2:44:14", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 32, "total_steps": 780, "loss": 0.5441, "learning_rate": 8.205128205128205e-06, "epoch": 0.040996076547361676, "percentage": 4.1, "elapsed_time": "0:07:01", "remaining_time": "2:44:10", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 34, "total_steps": 780, "loss": 0.5458, "learning_rate": 8.717948717948719e-06, "epoch": 0.04355833133157178, "percentage": 4.36, "elapsed_time": "0:07:26", "remaining_time": "2:43:08", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 36, "total_steps": 780, "loss": 0.4589, "learning_rate": 9.230769230769232e-06, "epoch": 0.04612058611578189, "percentage": 4.62, "elapsed_time": "0:07:50", "remaining_time": "2:41:58", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 38, "total_steps": 780, "loss": 0.4386, "learning_rate": 9.743589743589744e-06, "epoch": 0.048682840899991996, "percentage": 4.87, "elapsed_time": "0:08:13", "remaining_time": "2:40:44", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 40, "total_steps": 780, "loss": 0.4215, "learning_rate": 9.99995506314361e-06, "epoch": 0.051245095684202095, "percentage": 5.13, "elapsed_time": "0:08:38", "remaining_time": "2:39:51", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 42, "total_steps": 780, "loss": 0.3888, "learning_rate": 9.999595573138845e-06, "epoch": 0.0538073504684122, "percentage": 5.38, "elapsed_time": "0:09:06", "remaining_time": "2:39:55", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 44, "total_steps": 780, "loss": 0.3749, "learning_rate": 9.99887661897616e-06, "epoch": 0.05636960525262231, "percentage": 5.64, "elapsed_time": "0:09:31", "remaining_time": "2:39:27", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 46, "total_steps": 780, "loss": 0.3543, "learning_rate": 9.997798252347382e-06, "epoch": 0.058931860036832415, "percentage": 5.9, "elapsed_time": "0:09:59", "remaining_time": "2:39:27", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 48, "total_steps": 780, "loss": 0.3565, "learning_rate": 9.996360550785619e-06, "epoch": 0.061494114821042514, "percentage": 6.15, "elapsed_time": "0:10:27", "remaining_time": "2:39:23", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 50, "total_steps": 780, "loss": 0.3242, "learning_rate": 9.994563617659665e-06, "epoch": 0.06405636960525263, "percentage": 6.41, "elapsed_time": "0:10:54", "remaining_time": "2:39:14", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 52, "total_steps": 780, "loss": 0.3334, "learning_rate": 9.992407582166582e-06, "epoch": 0.06661862438946273, "percentage": 6.67, "elapsed_time": "0:11:23", "remaining_time": "2:39:29", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 54, "total_steps": 780, "loss": 0.3741, "learning_rate": 9.989892599322404e-06, "epoch": 0.06918087917367283, "percentage": 6.92, "elapsed_time": "0:11:52", "remaining_time": "2:39:44", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 56, "total_steps": 780, "loss": 0.3331, "learning_rate": 9.987018849950996e-06, "epoch": 0.07174313395788294, "percentage": 7.18, "elapsed_time": "0:12:17", "remaining_time": "2:38:59", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 58, "total_steps": 780, "loss": 0.3335, "learning_rate": 9.983786540671052e-06, "epoch": 0.07430538874209304, "percentage": 7.44, "elapsed_time": "0:12:44", "remaining_time": "2:38:39", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 60, "total_steps": 780, "loss": 0.3344, "learning_rate": 9.980195903881231e-06, "epoch": 0.07686764352630315, "percentage": 7.69, "elapsed_time": "0:13:09", "remaining_time": "2:37:58", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 62, "total_steps": 780, "loss": 0.3055, "learning_rate": 9.976247197743465e-06, "epoch": 0.07942989831051325, "percentage": 7.95, "elapsed_time": "0:13:31", "remaining_time": "2:36:38", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 64, "total_steps": 780, "loss": 0.3187, "learning_rate": 9.97194070616438e-06, "epoch": 0.08199215309472335, "percentage": 8.21, "elapsed_time": "0:13:55", "remaining_time": "2:35:42", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 66, "total_steps": 780, "loss": 0.2998, "learning_rate": 9.967276738774897e-06, "epoch": 0.08455440787893347, "percentage": 8.46, "elapsed_time": "0:14:23", "remaining_time": "2:35:39", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 68, "total_steps": 780, "loss": 0.3251, "learning_rate": 9.962255630907964e-06, "epoch": 0.08711666266314357, "percentage": 8.72, "elapsed_time": "0:14:48", "remaining_time": "2:35:02", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 70, "total_steps": 780, "loss": 0.317, "learning_rate": 9.956877743574437e-06, "epoch": 0.08967891744735366, "percentage": 8.97, "elapsed_time": "0:15:15", "remaining_time": "2:34:48", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 72, "total_steps": 780, "loss": 0.31, "learning_rate": 9.951143463437145e-06, "epoch": 0.09224117223156378, "percentage": 9.23, "elapsed_time": "0:15:41", "remaining_time": "2:34:18", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 74, "total_steps": 780, "loss": 0.3158, "learning_rate": 9.94505320278307e-06, "epoch": 0.09480342701577388, "percentage": 9.49, "elapsed_time": "0:16:05", "remaining_time": "2:33:28", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 76, "total_steps": 780, "loss": 0.3163, "learning_rate": 9.938607399493714e-06, "epoch": 0.09736568179998399, "percentage": 9.74, "elapsed_time": "0:16:30", "remaining_time": "2:32:59", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 78, "total_steps": 780, "loss": 0.3132, "learning_rate": 9.931806517013612e-06, "epoch": 0.09992793658419409, "percentage": 10.0, "elapsed_time": "0:16:56", "remaining_time": "2:32:29", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 80, "total_steps": 780, "loss": 0.315, "learning_rate": 9.924651044317017e-06, "epoch": 0.10249019136840419, "percentage": 10.26, "elapsed_time": "0:17:20", "remaining_time": "2:31:48", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 82, "total_steps": 780, "loss": 0.2851, "learning_rate": 9.917141495872733e-06, "epoch": 0.1050524461526143, "percentage": 10.51, "elapsed_time": "0:17:45", "remaining_time": "2:31:13", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 84, "total_steps": 780, "loss": 0.3036, "learning_rate": 9.909278411607134e-06, "epoch": 0.1076147009368244, "percentage": 10.77, "elapsed_time": "0:18:10", "remaining_time": "2:30:36", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 86, "total_steps": 780, "loss": 0.3374, "learning_rate": 9.90106235686534e-06, "epoch": 0.11017695572103452, "percentage": 11.03, "elapsed_time": "0:18:38", "remaining_time": "2:30:29", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 88, "total_steps": 780, "loss": 0.316, "learning_rate": 9.892493922370575e-06, "epoch": 0.11273921050524462, "percentage": 11.28, "elapsed_time": "0:19:02", "remaining_time": "2:29:45", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 90, "total_steps": 780, "loss": 0.3284, "learning_rate": 9.883573724181683e-06, "epoch": 0.11530146528945472, "percentage": 11.54, "elapsed_time": "0:19:25", "remaining_time": "2:28:58", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 92, "total_steps": 780, "loss": 0.3266, "learning_rate": 9.87430240364885e-06, "epoch": 0.11786372007366483, "percentage": 11.79, "elapsed_time": "0:19:51", "remaining_time": "2:28:27", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 94, "total_steps": 780, "loss": 0.3104, "learning_rate": 9.864680627367476e-06, "epoch": 0.12042597485787493, "percentage": 12.05, "elapsed_time": "0:20:16", "remaining_time": "2:28:00", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 96, "total_steps": 780, "loss": 0.3221, "learning_rate": 9.854709087130261e-06, "epoch": 0.12298822964208503, "percentage": 12.31, "elapsed_time": "0:20:43", "remaining_time": "2:27:40", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 98, "total_steps": 780, "loss": 0.2913, "learning_rate": 9.844388499877457e-06, "epoch": 0.12555048442629513, "percentage": 12.56, "elapsed_time": "0:21:11", "remaining_time": "2:27:31", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 100, "total_steps": 780, "loss": 0.2974, "learning_rate": 9.833719607645325e-06, "epoch": 0.12811273921050526, "percentage": 12.82, "elapsed_time": "0:21:37", "remaining_time": "2:27:04", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 102, "total_steps": 780, "loss": 0.3698, "learning_rate": 9.822703177512783e-06, "epoch": 0.13067499399471535, "percentage": 13.08, "elapsed_time": "0:22:06", "remaining_time": "2:26:58", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 104, "total_steps": 780, "loss": 0.2995, "learning_rate": 9.811340001546252e-06, "epoch": 0.13323724877892545, "percentage": 13.33, "elapsed_time": "0:22:30", "remaining_time": "2:26:20", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 106, "total_steps": 780, "loss": 0.304, "learning_rate": 9.799630896742716e-06, "epoch": 0.13579950356313555, "percentage": 13.59, "elapsed_time": "0:22:57", "remaining_time": "2:25:58", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 108, "total_steps": 780, "loss": 0.3032, "learning_rate": 9.787576704970965e-06, "epoch": 0.13836175834734565, "percentage": 13.85, "elapsed_time": "0:23:20", "remaining_time": "2:25:13", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 110, "total_steps": 780, "loss": 0.2915, "learning_rate": 9.77517829291108e-06, "epoch": 0.14092401313155578, "percentage": 14.1, "elapsed_time": "0:23:44", "remaining_time": "2:24:38", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 112, "total_steps": 780, "loss": 0.3192, "learning_rate": 9.762436551992117e-06, "epoch": 0.14348626791576588, "percentage": 14.36, "elapsed_time": "0:24:09", "remaining_time": "2:24:04", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 114, "total_steps": 780, "loss": 0.2999, "learning_rate": 9.74935239832801e-06, "epoch": 0.14604852269997598, "percentage": 14.62, "elapsed_time": "0:24:33", "remaining_time": "2:23:28", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 116, "total_steps": 780, "loss": 0.31, "learning_rate": 9.735926772651703e-06, "epoch": 0.14861077748418608, "percentage": 14.87, "elapsed_time": "0:24:59", "remaining_time": "2:23:03", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 118, "total_steps": 780, "loss": 0.292, "learning_rate": 9.722160640247523e-06, "epoch": 0.15117303226839618, "percentage": 15.13, "elapsed_time": "0:25:25", "remaining_time": "2:22:35", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 120, "total_steps": 780, "loss": 0.3077, "learning_rate": 9.708054990881763e-06, "epoch": 0.1537352870526063, "percentage": 15.38, "elapsed_time": "0:25:51", "remaining_time": "2:22:12", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 122, "total_steps": 780, "loss": 0.3132, "learning_rate": 9.693610838731532e-06, "epoch": 0.1562975418368164, "percentage": 15.64, "elapsed_time": "0:26:16", "remaining_time": "2:21:41", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 124, "total_steps": 780, "loss": 0.3139, "learning_rate": 9.678829222311827e-06, "epoch": 0.1588597966210265, "percentage": 15.9, "elapsed_time": "0:26:40", "remaining_time": "2:21:05", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 126, "total_steps": 780, "loss": 0.2992, "learning_rate": 9.663711204400872e-06, "epoch": 0.1614220514052366, "percentage": 16.15, "elapsed_time": "0:27:08", "remaining_time": "2:20:50", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 128, "total_steps": 780, "loss": 0.3066, "learning_rate": 9.6482578719637e-06, "epoch": 0.1639843061894467, "percentage": 16.41, "elapsed_time": "0:27:36", "remaining_time": "2:20:36", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 130, "total_steps": 780, "loss": 0.3121, "learning_rate": 9.632470336074009e-06, "epoch": 0.1665465609736568, "percentage": 16.67, "elapsed_time": "0:27:59", "remaining_time": "2:19:59", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 132, "total_steps": 780, "loss": 0.2991, "learning_rate": 9.616349731834271e-06, "epoch": 0.16910881575786693, "percentage": 16.92, "elapsed_time": "0:28:24", "remaining_time": "2:19:26", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 134, "total_steps": 780, "loss": 0.3227, "learning_rate": 9.599897218294122e-06, "epoch": 0.17167107054207703, "percentage": 17.18, "elapsed_time": "0:28:48", "remaining_time": "2:18:53", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 136, "total_steps": 780, "loss": 0.3025, "learning_rate": 9.583113978367026e-06, "epoch": 0.17423332532628713, "percentage": 17.44, "elapsed_time": "0:29:16", "remaining_time": "2:18:37", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 138, "total_steps": 780, "loss": 0.3179, "learning_rate": 9.56600121874523e-06, "epoch": 0.17679558011049723, "percentage": 17.69, "elapsed_time": "0:29:46", "remaining_time": "2:18:33", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 140, "total_steps": 780, "loss": 0.2906, "learning_rate": 9.548560169812997e-06, "epoch": 0.17935783489470733, "percentage": 17.95, "elapsed_time": "0:30:14", "remaining_time": "2:18:15", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 142, "total_steps": 780, "loss": 0.2968, "learning_rate": 9.530792085558151e-06, "epoch": 0.18192008967891746, "percentage": 18.21, "elapsed_time": "0:30:38", "remaining_time": "2:17:40", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 144, "total_steps": 780, "loss": 0.3076, "learning_rate": 9.512698243481914e-06, "epoch": 0.18448234446312756, "percentage": 18.46, "elapsed_time": "0:31:06", "remaining_time": "2:17:21", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 146, "total_steps": 780, "loss": 0.292, "learning_rate": 9.49427994450705e-06, "epoch": 0.18704459924733766, "percentage": 18.72, "elapsed_time": "0:31:32", "remaining_time": "2:16:57", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 148, "total_steps": 780, "loss": 0.3337, "learning_rate": 9.47553851288434e-06, "epoch": 0.18960685403154776, "percentage": 18.97, "elapsed_time": "0:32:00", "remaining_time": "2:16:39", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 150, "total_steps": 780, "loss": 0.2854, "learning_rate": 9.45647529609736e-06, "epoch": 0.19216910881575786, "percentage": 19.23, "elapsed_time": "0:32:23", "remaining_time": "2:16:01", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 152, "total_steps": 780, "loss": 0.328, "learning_rate": 9.437091664765611e-06, "epoch": 0.19473136359996798, "percentage": 19.49, "elapsed_time": "0:32:46", "remaining_time": "2:15:23", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 154, "total_steps": 780, "loss": 0.2831, "learning_rate": 9.41738901254596e-06, "epoch": 0.19729361838417808, "percentage": 19.74, "elapsed_time": "0:33:14", "remaining_time": "2:15:09", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 156, "total_steps": 780, "loss": 0.2899, "learning_rate": 9.397368756032445e-06, "epoch": 0.19985587316838818, "percentage": 20.0, "elapsed_time": "0:33:39", "remaining_time": "2:14:39", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 158, "total_steps": 780, "loss": 0.2796, "learning_rate": 9.37703233465443e-06, "epoch": 0.20241812795259828, "percentage": 20.26, "elapsed_time": "0:34:03", "remaining_time": "2:14:05", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 160, "total_steps": 780, "loss": 0.2965, "learning_rate": 9.356381210573092e-06, "epoch": 0.20498038273680838, "percentage": 20.51, "elapsed_time": "0:34:27", "remaining_time": "2:13:32", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 162, "total_steps": 780, "loss": 0.2884, "learning_rate": 9.33541686857632e-06, "epoch": 0.2075426375210185, "percentage": 20.77, "elapsed_time": "0:34:55", "remaining_time": "2:13:14", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 164, "total_steps": 780, "loss": 0.297, "learning_rate": 9.31414081597194e-06, "epoch": 0.2101048923052286, "percentage": 21.03, "elapsed_time": "0:35:22", "remaining_time": "2:12:51", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 166, "total_steps": 780, "loss": 0.2862, "learning_rate": 9.292554582479349e-06, "epoch": 0.2126671470894387, "percentage": 21.28, "elapsed_time": "0:35:46", "remaining_time": "2:12:18", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 168, "total_steps": 780, "loss": 0.2958, "learning_rate": 9.270659720119533e-06, "epoch": 0.2152294018736488, "percentage": 21.54, "elapsed_time": "0:36:15", "remaining_time": "2:12:04", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 170, "total_steps": 780, "loss": 0.2988, "learning_rate": 9.248457803103476e-06, "epoch": 0.2177916566578589, "percentage": 21.79, "elapsed_time": "0:36:42", "remaining_time": "2:11:43", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 172, "total_steps": 780, "loss": 0.2803, "learning_rate": 9.225950427718974e-06, "epoch": 0.22035391144206903, "percentage": 22.05, "elapsed_time": "0:37:10", "remaining_time": "2:11:22", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 174, "total_steps": 780, "loss": 0.2957, "learning_rate": 9.203139212215868e-06, "epoch": 0.22291616622627913, "percentage": 22.31, "elapsed_time": "0:37:35", "remaining_time": "2:10:53", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 176, "total_steps": 780, "loss": 0.2933, "learning_rate": 9.180025796689692e-06, "epoch": 0.22547842101048923, "percentage": 22.56, "elapsed_time": "0:38:01", "remaining_time": "2:10:28", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 178, "total_steps": 780, "loss": 0.2926, "learning_rate": 9.156611842963753e-06, "epoch": 0.22804067579469933, "percentage": 22.82, "elapsed_time": "0:38:24", "remaining_time": "2:09:52", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 180, "total_steps": 780, "loss": 0.3111, "learning_rate": 9.132899034469648e-06, "epoch": 0.23060293057890943, "percentage": 23.08, "elapsed_time": "0:38:47", "remaining_time": "2:09:19", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 182, "total_steps": 780, "loss": 0.2948, "learning_rate": 9.108889076126226e-06, "epoch": 0.23316518536311953, "percentage": 23.33, "elapsed_time": "0:39:11", "remaining_time": "2:08:46", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 184, "total_steps": 780, "loss": 0.2872, "learning_rate": 9.084583694217012e-06, "epoch": 0.23572744014732966, "percentage": 23.59, "elapsed_time": "0:39:36", "remaining_time": "2:08:16", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 186, "total_steps": 780, "loss": 0.3073, "learning_rate": 9.059984636266082e-06, "epoch": 0.23828969493153976, "percentage": 23.85, "elapsed_time": "0:40:04", "remaining_time": "2:08:00", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 188, "total_steps": 780, "loss": 0.2866, "learning_rate": 9.035093670912424e-06, "epoch": 0.24085194971574986, "percentage": 24.1, "elapsed_time": "0:40:30", "remaining_time": "2:07:33", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 190, "total_steps": 780, "loss": 0.298, "learning_rate": 9.009912587782772e-06, "epoch": 0.24341420449995996, "percentage": 24.36, "elapsed_time": "0:40:54", "remaining_time": "2:07:01", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 192, "total_steps": 780, "loss": 0.2644, "learning_rate": 8.984443197362938e-06, "epoch": 0.24597645928417006, "percentage": 24.62, "elapsed_time": "0:41:18", "remaining_time": "2:06:31", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 194, "total_steps": 780, "loss": 0.2986, "learning_rate": 8.958687330867634e-06, "epoch": 0.24853871406838018, "percentage": 24.87, "elapsed_time": "0:41:44", "remaining_time": "2:06:05", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 196, "total_steps": 780, "loss": 0.2826, "learning_rate": 8.932646840108818e-06, "epoch": 0.25110096885259026, "percentage": 25.13, "elapsed_time": "0:42:09", "remaining_time": "2:05:36", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 198, "total_steps": 780, "loss": 0.2824, "learning_rate": 8.906323597362547e-06, "epoch": 0.2536632236368004, "percentage": 25.38, "elapsed_time": "0:42:37", "remaining_time": "2:05:16", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 200, "total_steps": 780, "loss": 0.2836, "learning_rate": 8.879719495234363e-06, "epoch": 0.2562254784210105, "percentage": 25.64, "elapsed_time": "0:43:02", "remaining_time": "2:04:48", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 202, "total_steps": 780, "loss": 0.2799, "learning_rate": 8.852836446523213e-06, "epoch": 0.2587877332052206, "percentage": 25.9, "elapsed_time": "0:43:30", "remaining_time": "2:04:29", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 204, "total_steps": 780, "loss": 0.3027, "learning_rate": 8.825676384083936e-06, "epoch": 0.2613499879894307, "percentage": 26.15, "elapsed_time": "0:43:55", "remaining_time": "2:04:01", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 206, "total_steps": 780, "loss": 0.3032, "learning_rate": 8.798241260688273e-06, "epoch": 0.2639122427736408, "percentage": 26.41, "elapsed_time": "0:44:20", "remaining_time": "2:03:33", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 208, "total_steps": 780, "loss": 0.3044, "learning_rate": 8.770533048884483e-06, "epoch": 0.2664744975578509, "percentage": 26.67, "elapsed_time": "0:44:45", "remaining_time": "2:03:06", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 210, "total_steps": 780, "loss": 0.2784, "learning_rate": 8.742553740855507e-06, "epoch": 0.26903675234206104, "percentage": 26.92, "elapsed_time": "0:45:12", "remaining_time": "2:02:42", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 212, "total_steps": 780, "loss": 0.3142, "learning_rate": 8.71430534827574e-06, "epoch": 0.2715990071262711, "percentage": 27.18, "elapsed_time": "0:45:42", "remaining_time": "2:02:26", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 214, "total_steps": 780, "loss": 0.2592, "learning_rate": 8.685789902166395e-06, "epoch": 0.27416126191048124, "percentage": 27.44, "elapsed_time": "0:46:08", "remaining_time": "2:02:01", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 216, "total_steps": 780, "loss": 0.2881, "learning_rate": 8.657009452749466e-06, "epoch": 0.2767235166946913, "percentage": 27.69, "elapsed_time": "0:46:32", "remaining_time": "2:01:31", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 218, "total_steps": 780, "loss": 0.3017, "learning_rate": 8.627966069300332e-06, "epoch": 0.27928577147890143, "percentage": 27.95, "elapsed_time": "0:46:58", "remaining_time": "2:01:05", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 220, "total_steps": 780, "loss": 0.2781, "learning_rate": 8.598661839998972e-06, "epoch": 0.28184802626311156, "percentage": 28.21, "elapsed_time": "0:47:22", "remaining_time": "2:00:35", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 222, "total_steps": 780, "loss": 0.296, "learning_rate": 8.569098871779828e-06, "epoch": 0.28441028104732163, "percentage": 28.46, "elapsed_time": "0:47:51", "remaining_time": "2:00:16", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 224, "total_steps": 780, "loss": 0.3161, "learning_rate": 8.539279290180315e-06, "epoch": 0.28697253583153176, "percentage": 28.72, "elapsed_time": "0:48:18", "remaining_time": "1:59:53", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 226, "total_steps": 780, "loss": 0.2948, "learning_rate": 8.509205239188017e-06, "epoch": 0.28953479061574183, "percentage": 28.97, "elapsed_time": "0:48:45", "remaining_time": "1:59:30", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 228, "total_steps": 780, "loss": 0.2736, "learning_rate": 8.478878881086505e-06, "epoch": 0.29209704539995196, "percentage": 29.23, "elapsed_time": "0:49:08", "remaining_time": "1:58:58", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 230, "total_steps": 780, "loss": 0.2954, "learning_rate": 8.448302396299906e-06, "epoch": 0.2946593001841621, "percentage": 29.49, "elapsed_time": "0:49:32", "remaining_time": "1:58:27", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 232, "total_steps": 780, "loss": 0.3134, "learning_rate": 8.417477983236107e-06, "epoch": 0.29722155496837216, "percentage": 29.74, "elapsed_time": "0:49:57", "remaining_time": "1:58:00", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 234, "total_steps": 780, "loss": 0.2767, "learning_rate": 8.386407858128707e-06, "epoch": 0.2997838097525823, "percentage": 30.0, "elapsed_time": "0:50:21", "remaining_time": "1:57:29", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 236, "total_steps": 780, "loss": 0.2783, "learning_rate": 8.355094254877665e-06, "epoch": 0.30234606453679236, "percentage": 30.26, "elapsed_time": "0:50:46", "remaining_time": "1:57:01", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 238, "total_steps": 780, "loss": 0.2871, "learning_rate": 8.323539424888695e-06, "epoch": 0.3049083193210025, "percentage": 30.51, "elapsed_time": "0:51:10", "remaining_time": "1:56:31", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 240, "total_steps": 780, "loss": 0.2747, "learning_rate": 8.291745636911382e-06, "epoch": 0.3074705741052126, "percentage": 30.77, "elapsed_time": "0:51:40", "remaining_time": "1:56:15", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 242, "total_steps": 780, "loss": 0.2737, "learning_rate": 8.259715176876069e-06, "epoch": 0.3100328288894227, "percentage": 31.03, "elapsed_time": "0:52:08", "remaining_time": "1:55:54", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 244, "total_steps": 780, "loss": 0.2889, "learning_rate": 8.2274503477295e-06, "epoch": 0.3125950836736328, "percentage": 31.28, "elapsed_time": "0:52:37", "remaining_time": "1:55:35", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 246, "total_steps": 780, "loss": 0.2822, "learning_rate": 8.19495346926924e-06, "epoch": 0.3151573384578429, "percentage": 31.54, "elapsed_time": "0:53:00", "remaining_time": "1:55:03", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 248, "total_steps": 780, "loss": 0.284, "learning_rate": 8.162226877976886e-06, "epoch": 0.317719593242053, "percentage": 31.79, "elapsed_time": "0:53:28", "remaining_time": "1:54:41", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 250, "total_steps": 780, "loss": 0.2915, "learning_rate": 8.129272926850079e-06, "epoch": 0.32028184802626314, "percentage": 32.05, "elapsed_time": "0:53:52", "remaining_time": "1:54:12", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 252, "total_steps": 780, "loss": 0.2842, "learning_rate": 8.096093985233323e-06, "epoch": 0.3228441028104732, "percentage": 32.31, "elapsed_time": "0:54:18", "remaining_time": "1:53:46", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 254, "total_steps": 780, "loss": 0.3203, "learning_rate": 8.062692438647628e-06, "epoch": 0.32540635759468334, "percentage": 32.56, "elapsed_time": "0:54:44", "remaining_time": "1:53:22", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 256, "total_steps": 780, "loss": 0.2828, "learning_rate": 8.029070688619013e-06, "epoch": 0.3279686123788934, "percentage": 32.82, "elapsed_time": "0:55:10", "remaining_time": "1:52:55", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 258, "total_steps": 780, "loss": 0.2672, "learning_rate": 7.995231152505815e-06, "epoch": 0.33053086716310354, "percentage": 33.08, "elapsed_time": "0:55:36", "remaining_time": "1:52:31", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 260, "total_steps": 780, "loss": 0.292, "learning_rate": 7.961176263324902e-06, "epoch": 0.3330931219473136, "percentage": 33.33, "elapsed_time": "0:56:04", "remaining_time": "1:52:09", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 262, "total_steps": 780, "loss": 0.2987, "learning_rate": 7.92690846957673e-06, "epoch": 0.33565537673152374, "percentage": 33.59, "elapsed_time": "0:56:29", "remaining_time": "1:51:41", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 264, "total_steps": 780, "loss": 0.2881, "learning_rate": 7.892430235069317e-06, "epoch": 0.33821763151573386, "percentage": 33.85, "elapsed_time": "0:56:58", "remaining_time": "1:51:22", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 266, "total_steps": 780, "loss": 0.2912, "learning_rate": 7.857744038741076e-06, "epoch": 0.34077988629994393, "percentage": 34.1, "elapsed_time": "0:57:22", "remaining_time": "1:50:51", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 268, "total_steps": 780, "loss": 0.2672, "learning_rate": 7.822852374482597e-06, "epoch": 0.34334214108415406, "percentage": 34.36, "elapsed_time": "0:57:51", "remaining_time": "1:50:32", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 270, "total_steps": 780, "loss": 0.2921, "learning_rate": 7.787757750957335e-06, "epoch": 0.34590439586836413, "percentage": 34.62, "elapsed_time": "0:58:17", "remaining_time": "1:50:06", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 272, "total_steps": 780, "loss": 0.2676, "learning_rate": 7.752462691421245e-06, "epoch": 0.34846665065257426, "percentage": 34.87, "elapsed_time": "0:58:42", "remaining_time": "1:49:38", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 274, "total_steps": 780, "loss": 0.2576, "learning_rate": 7.716969733541357e-06, "epoch": 0.3510289054367844, "percentage": 35.13, "elapsed_time": "0:59:05", "remaining_time": "1:49:06", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 276, "total_steps": 780, "loss": 0.2686, "learning_rate": 7.681281429213328e-06, "epoch": 0.35359116022099446, "percentage": 35.38, "elapsed_time": "0:59:35", "remaining_time": "1:48:49", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 278, "total_steps": 780, "loss": 0.2678, "learning_rate": 7.645400344377953e-06, "epoch": 0.3561534150052046, "percentage": 35.64, "elapsed_time": "1:00:01", "remaining_time": "1:48:24", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 280, "total_steps": 780, "loss": 0.2907, "learning_rate": 7.609329058836694e-06, "epoch": 0.35871566978941466, "percentage": 35.9, "elapsed_time": "1:00:28", "remaining_time": "1:47:59", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 282, "total_steps": 780, "loss": 0.298, "learning_rate": 7.5730701660661795e-06, "epoch": 0.3612779245736248, "percentage": 36.15, "elapsed_time": "1:00:52", "remaining_time": "1:47:30", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 284, "total_steps": 780, "loss": 0.263, "learning_rate": 7.536626273031747e-06, "epoch": 0.3638401793578349, "percentage": 36.41, "elapsed_time": "1:01:20", "remaining_time": "1:47:07", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 286, "total_steps": 780, "loss": 0.2733, "learning_rate": 7.500000000000001e-06, "epoch": 0.366402434142045, "percentage": 36.67, "elapsed_time": "1:01:46", "remaining_time": "1:46:42", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 288, "total_steps": 780, "loss": 0.3159, "learning_rate": 7.4631939803504215e-06, "epoch": 0.3689646889262551, "percentage": 36.92, "elapsed_time": "1:02:13", "remaining_time": "1:46:17", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 290, "total_steps": 780, "loss": 0.2878, "learning_rate": 7.426210860386032e-06, "epoch": 0.3715269437104652, "percentage": 37.18, "elapsed_time": "1:02:39", "remaining_time": "1:45:51", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 292, "total_steps": 780, "loss": 0.2829, "learning_rate": 7.3890532991431174e-06, "epoch": 0.3740891984946753, "percentage": 37.44, "elapsed_time": "1:03:04", "remaining_time": "1:45:25", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 294, "total_steps": 780, "loss": 0.2646, "learning_rate": 7.3517239682000675e-06, "epoch": 0.37665145327888544, "percentage": 37.69, "elapsed_time": "1:03:28", "remaining_time": "1:44:56", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 296, "total_steps": 780, "loss": 0.301, "learning_rate": 7.314225551485273e-06, "epoch": 0.3792137080630955, "percentage": 37.95, "elapsed_time": "1:03:53", "remaining_time": "1:44:27", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 298, "total_steps": 780, "loss": 0.2622, "learning_rate": 7.276560745084167e-06, "epoch": 0.38177596284730564, "percentage": 38.21, "elapsed_time": "1:04:18", "remaining_time": "1:44:01", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 300, "total_steps": 780, "loss": 0.2901, "learning_rate": 7.2387322570453724e-06, "epoch": 0.3843382176315157, "percentage": 38.46, "elapsed_time": "1:04:45", "remaining_time": "1:43:36", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 302, "total_steps": 780, "loss": 0.2576, "learning_rate": 7.2007428071860045e-06, "epoch": 0.38690047241572584, "percentage": 38.72, "elapsed_time": "1:05:10", "remaining_time": "1:43:10", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 304, "total_steps": 780, "loss": 0.2716, "learning_rate": 7.162595126896111e-06, "epoch": 0.38946272719993597, "percentage": 38.97, "elapsed_time": "1:05:35", "remaining_time": "1:42:42", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 306, "total_steps": 780, "loss": 0.2716, "learning_rate": 7.1242919589422974e-06, "epoch": 0.39202498198414604, "percentage": 39.23, "elapsed_time": "1:06:02", "remaining_time": "1:42:18", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 308, "total_steps": 780, "loss": 0.2978, "learning_rate": 7.085836057270521e-06, "epoch": 0.39458723676835616, "percentage": 39.49, "elapsed_time": "1:06:27", "remaining_time": "1:41:50", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 310, "total_steps": 780, "loss": 0.2499, "learning_rate": 7.047230186808085e-06, "epoch": 0.39714949155256624, "percentage": 39.74, "elapsed_time": "1:06:54", "remaining_time": "1:41:26", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 312, "total_steps": 780, "loss": 0.3018, "learning_rate": 7.008477123264849e-06, "epoch": 0.39971174633677636, "percentage": 40.0, "elapsed_time": "1:07:18", "remaining_time": "1:40:58", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 314, "total_steps": 780, "loss": 0.2834, "learning_rate": 6.96957965293365e-06, "epoch": 0.4022740011209865, "percentage": 40.26, "elapsed_time": "1:07:47", "remaining_time": "1:40:36", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 316, "total_steps": 780, "loss": 0.3008, "learning_rate": 6.9305405724899876e-06, "epoch": 0.40483625590519656, "percentage": 40.51, "elapsed_time": "1:08:14", "remaining_time": "1:40:12", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 318, "total_steps": 780, "loss": 0.2753, "learning_rate": 6.891362688790925e-06, "epoch": 0.4073985106894067, "percentage": 40.77, "elapsed_time": "1:08:39", "remaining_time": "1:39:44", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 320, "total_steps": 780, "loss": 0.2943, "learning_rate": 6.8520488186733e-06, "epoch": 0.40996076547361676, "percentage": 41.03, "elapsed_time": "1:09:03", "remaining_time": "1:39:16", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 322, "total_steps": 780, "loss": 0.2692, "learning_rate": 6.812601788751192e-06, "epoch": 0.4125230202578269, "percentage": 41.28, "elapsed_time": "1:09:29", "remaining_time": "1:38:50", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 324, "total_steps": 780, "loss": 0.2961, "learning_rate": 6.773024435212678e-06, "epoch": 0.415085275042037, "percentage": 41.54, "elapsed_time": "1:09:53", "remaining_time": "1:38:21", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 326, "total_steps": 780, "loss": 0.2898, "learning_rate": 6.733319603615941e-06, "epoch": 0.4176475298262471, "percentage": 41.79, "elapsed_time": "1:10:20", "remaining_time": "1:37:57", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 328, "total_steps": 780, "loss": 0.2555, "learning_rate": 6.693490148684654e-06, "epoch": 0.4202097846104572, "percentage": 42.05, "elapsed_time": "1:10:44", "remaining_time": "1:37:29", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 330, "total_steps": 780, "loss": 0.3043, "learning_rate": 6.653538934102743e-06, "epoch": 0.4227720393946673, "percentage": 42.31, "elapsed_time": "1:11:08", "remaining_time": "1:37:01", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 332, "total_steps": 780, "loss": 0.3098, "learning_rate": 6.6134688323084884e-06, "epoch": 0.4253342941788774, "percentage": 42.56, "elapsed_time": "1:11:39", "remaining_time": "1:36:41", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 334, "total_steps": 780, "loss": 0.276, "learning_rate": 6.573282724288001e-06, "epoch": 0.42789654896308754, "percentage": 42.82, "elapsed_time": "1:12:02", "remaining_time": "1:36:11", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 336, "total_steps": 780, "loss": 0.2893, "learning_rate": 6.532983499368078e-06, "epoch": 0.4304588037472976, "percentage": 43.08, "elapsed_time": "1:12:30", "remaining_time": "1:35:49", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 338, "total_steps": 780, "loss": 0.2522, "learning_rate": 6.492574055008474e-06, "epoch": 0.43302105853150774, "percentage": 43.33, "elapsed_time": "1:12:55", "remaining_time": "1:35:21", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 340, "total_steps": 780, "loss": 0.2556, "learning_rate": 6.452057296593568e-06, "epoch": 0.4355833133157178, "percentage": 43.59, "elapsed_time": "1:13:26", "remaining_time": "1:35:01", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 342, "total_steps": 780, "loss": 0.2795, "learning_rate": 6.411436137223479e-06, "epoch": 0.43814556809992794, "percentage": 43.85, "elapsed_time": "1:13:53", "remaining_time": "1:34:37", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 344, "total_steps": 780, "loss": 0.2619, "learning_rate": 6.370713497504607e-06, "epoch": 0.44070782288413807, "percentage": 44.1, "elapsed_time": "1:14:19", "remaining_time": "1:34:12", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 346, "total_steps": 780, "loss": 0.2748, "learning_rate": 6.329892305339659e-06, "epoch": 0.44327007766834814, "percentage": 44.36, "elapsed_time": "1:14:43", "remaining_time": "1:33:43", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 348, "total_steps": 780, "loss": 0.2731, "learning_rate": 6.288975495717124e-06, "epoch": 0.44583233245255827, "percentage": 44.62, "elapsed_time": "1:15:10", "remaining_time": "1:33:19", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 350, "total_steps": 780, "loss": 0.2797, "learning_rate": 6.247966010500258e-06, "epoch": 0.44839458723676834, "percentage": 44.87, "elapsed_time": "1:15:35", "remaining_time": "1:32:52", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 352, "total_steps": 780, "loss": 0.2724, "learning_rate": 6.206866798215571e-06, "epoch": 0.45095684202097847, "percentage": 45.13, "elapsed_time": "1:16:00", "remaining_time": "1:32:24", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 354, "total_steps": 780, "loss": 0.2728, "learning_rate": 6.165680813840822e-06, "epoch": 0.4535190968051886, "percentage": 45.38, "elapsed_time": "1:16:26", "remaining_time": "1:31:59", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 356, "total_steps": 780, "loss": 0.2733, "learning_rate": 6.124411018592568e-06, "epoch": 0.45608135158939866, "percentage": 45.64, "elapsed_time": "1:16:52", "remaining_time": "1:31:33", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 358, "total_steps": 780, "loss": 0.2688, "learning_rate": 6.0830603797132574e-06, "epoch": 0.4586436063736088, "percentage": 45.9, "elapsed_time": "1:17:16", "remaining_time": "1:31:05", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 360, "total_steps": 780, "loss": 0.2505, "learning_rate": 6.041631870257882e-06, "epoch": 0.46120586115781886, "percentage": 46.15, "elapsed_time": "1:17:42", "remaining_time": "1:30:39", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 362, "total_steps": 780, "loss": 0.2749, "learning_rate": 6.000128468880223e-06, "epoch": 0.463768115942029, "percentage": 46.41, "elapsed_time": "1:18:10", "remaining_time": "1:30:16", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 364, "total_steps": 780, "loss": 0.2541, "learning_rate": 5.958553159618693e-06, "epoch": 0.46633037072623906, "percentage": 46.67, "elapsed_time": "1:18:37", "remaining_time": "1:29:51", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 366, "total_steps": 780, "loss": 0.2721, "learning_rate": 5.916908931681781e-06, "epoch": 0.4688926255104492, "percentage": 46.92, "elapsed_time": "1:19:08", "remaining_time": "1:29:31", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 368, "total_steps": 780, "loss": 0.2774, "learning_rate": 5.8751987792331365e-06, "epoch": 0.4714548802946593, "percentage": 47.18, "elapsed_time": "1:19:33", "remaining_time": "1:29:04", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 370, "total_steps": 780, "loss": 0.2497, "learning_rate": 5.833425701176294e-06, "epoch": 0.4740171350788694, "percentage": 47.44, "elapsed_time": "1:19:59", "remaining_time": "1:28:38", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 372, "total_steps": 780, "loss": 0.2686, "learning_rate": 5.79159270093905e-06, "epoch": 0.4765793898630795, "percentage": 47.69, "elapsed_time": "1:20:25", "remaining_time": "1:28:12", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 374, "total_steps": 780, "loss": 0.2797, "learning_rate": 5.749702786257529e-06, "epoch": 0.4791416446472896, "percentage": 47.95, "elapsed_time": "1:20:52", "remaining_time": "1:27:47", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 376, "total_steps": 780, "loss": 0.2665, "learning_rate": 5.707758968959923e-06, "epoch": 0.4817038994314997, "percentage": 48.21, "elapsed_time": "1:21:17", "remaining_time": "1:27:20", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 378, "total_steps": 780, "loss": 0.2753, "learning_rate": 5.6657642647499545e-06, "epoch": 0.48426615421570984, "percentage": 48.46, "elapsed_time": "1:21:44", "remaining_time": "1:26:55", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 380, "total_steps": 780, "loss": 0.2445, "learning_rate": 5.62372169299004e-06, "epoch": 0.4868284089999199, "percentage": 48.72, "elapsed_time": "1:22:08", "remaining_time": "1:26:28", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 382, "total_steps": 780, "loss": 0.2933, "learning_rate": 5.581634276484211e-06, "epoch": 0.48939066378413004, "percentage": 48.97, "elapsed_time": "1:22:34", "remaining_time": "1:26:01", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 384, "total_steps": 780, "loss": 0.2502, "learning_rate": 5.539505041260779e-06, "epoch": 0.4919529185683401, "percentage": 49.23, "elapsed_time": "1:22:56", "remaining_time": "1:25:32", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 386, "total_steps": 780, "loss": 0.263, "learning_rate": 5.497337016354757e-06, "epoch": 0.49451517335255024, "percentage": 49.49, "elapsed_time": "1:23:24", "remaining_time": "1:25:08", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 388, "total_steps": 780, "loss": 0.2494, "learning_rate": 5.45513323359009e-06, "epoch": 0.49707742813676037, "percentage": 49.74, "elapsed_time": "1:23:49", "remaining_time": "1:24:41", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 390, "total_steps": 780, "loss": 0.2431, "learning_rate": 5.412896727361663e-06, "epoch": 0.49963968292097044, "percentage": 50.0, "elapsed_time": "1:24:11", "remaining_time": "1:24:11", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 392, "total_steps": 780, "loss": 0.248, "learning_rate": 5.370630534417133e-06, "epoch": 0.5022019377051805, "percentage": 50.26, "elapsed_time": "1:24:40", "remaining_time": "1:23:48", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 394, "total_steps": 780, "loss": 0.2522, "learning_rate": 5.328337693638591e-06, "epoch": 0.5047641924893906, "percentage": 50.51, "elapsed_time": "1:25:02", "remaining_time": "1:23:19", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 396, "total_steps": 780, "loss": 0.2856, "learning_rate": 5.286021245824075e-06, "epoch": 0.5073264472736008, "percentage": 50.77, "elapsed_time": "1:25:28", "remaining_time": "1:22:53", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 398, "total_steps": 780, "loss": 0.2626, "learning_rate": 5.243684233468933e-06, "epoch": 0.5098887020578109, "percentage": 51.03, "elapsed_time": "1:25:54", "remaining_time": "1:22:27", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 400, "total_steps": 780, "loss": 0.2738, "learning_rate": 5.201329700547077e-06, "epoch": 0.512450956842021, "percentage": 51.28, "elapsed_time": "1:26:19", "remaining_time": "1:22:00", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 402, "total_steps": 780, "loss": 0.2511, "learning_rate": 5.158960692292122e-06, "epoch": 0.515013211626231, "percentage": 51.54, "elapsed_time": "1:26:48", "remaining_time": "1:21:37", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 404, "total_steps": 780, "loss": 0.2957, "learning_rate": 5.116580254978447e-06, "epoch": 0.5175754664104412, "percentage": 51.79, "elapsed_time": "1:27:14", "remaining_time": "1:21:11", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 406, "total_steps": 780, "loss": 0.2704, "learning_rate": 5.074191435702155e-06, "epoch": 0.5201377211946513, "percentage": 52.05, "elapsed_time": "1:27:39", "remaining_time": "1:20:44", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 408, "total_steps": 780, "loss": 0.3206, "learning_rate": 5.031797282162007e-06, "epoch": 0.5226999759788614, "percentage": 52.31, "elapsed_time": "1:28:05", "remaining_time": "1:20:18", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 410, "total_steps": 780, "loss": 0.2536, "learning_rate": 4.98940084244029e-06, "epoch": 0.5252622307630715, "percentage": 52.56, "elapsed_time": "1:28:27", "remaining_time": "1:19:49", "throughput": "0.00", "total_tokens": 0}
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc69a558b63bc4deebc71791d74e928bce77047a97984154b8bece89dccb197a
+ size 5432
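
`training_args.bin` is tracked through Git LFS, so only its pointer (hash and size) appears in the diff. The Trainer normally writes this file with `torch.save`, so, assuming the real object has been pulled locally and the installed `transformers` version can unpickle it, the arguments could be inspected roughly like this:

```python
# Sketch only: load the serialized training arguments from training_args.bin.
# Requires the actual LFS object, not the pointer file; on recent PyTorch
# releases you may need torch.load(..., weights_only=False).
import torch

args = torch.load("training_args.bin")
print(type(args).__name__)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```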
training_eval_loss.png ADDED
training_loss.png ADDED