ccdv committed on
Commit
ed764b1
1 Parent(s): f0e03ef
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - summarization
4
+ - summary
5
+ - booksum
6
+ - long-document
7
+ - long-form
8
+ - lsg
9
+ datasets:
10
+ - kmfoda/booksum
11
+ metrics:
12
+ - rouge
13
+ model-index:
14
+ - name: ccdv/lsg-bart-base-4096-booksum
15
+ results: []
16
+ ---
17
+
18
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
19
+ should probably proofread and complete it, then remove this comment. -->
20
+
21
+ **This model relies on a custom modeling file, you need to add trust_remote_code=True**\
22
+ **See [\#13467](https://github.com/huggingface/transformers/pull/13467)**
23
+
24
+ ```python
25
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
26
+
27
+ tokenizer = AutoTokenizer.from_pretrained("ccdv/lsg-bart-base-4096-booksum", trust_remote_code=True)
28
+ model = AutoModelForSeq2SeqLM.from_pretrained("ccdv/lsg-bart-base-4096-booksum", trust_remote_code=True)
29
+
30
+ text = "Replace by what you want."
31
+ pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer, device=0)
32
+ generated_text = pipe(
33
+ text,
34
+ truncation=True,
35
+ max_length=64,
36
+ no_repeat_ngram_size=7,
37
+ num_beams=2,
38
+ early_stopping=True
39
+ )
40
+ ```
41
+
42
+ # ccdv/lsg-bart-base-4096-booksum
43
+
44
+ This model is a fine-tuned version of [ccdv/lsg-bart-base-4096](https://huggingface.co/ccdv/lsg-bart-base-4096) on the kmfoda/booksum kmfoda--booksum dataset.
45
+ It achieves the following results on the evaluation set:
46
+ - eval_loss: 3.2654
47
+ - eval_rouge1: 33.9468
48
+ - eval_rouge2: 6.7034
49
+ - eval_rougeL: 16.7879
50
+ - eval_rougeLsum: 31.7677
51
+ - eval_gen_len: 427.6918
52
+ - eval_runtime: 2910.3841
53
+ - eval_samples_per_second: 0.492
54
+ - eval_steps_per_second: 0.062
55
+ - eval_samples: 1431
56
+
57
+ ## Model description
58
+
59
+ More information needed
60
+
61
+ ## Intended uses & limitations
62
+
63
+ More information needed
64
+
65
+ ## Training and evaluation data
66
+
67
+ More information needed
68
+
69
+ ## Training procedure
70
+
71
+ ### Training hyperparameters
72
+
73
+ The following hyperparameters were used during training:
74
+ - learning_rate: 8e-05
75
+ - train_batch_size: 8
76
+ - seed: 42
77
+ - gradient_accumulation_steps: 4
78
+ - total_train_batch_size: 32
79
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
80
+ - lr_scheduler_type: linear
81
+ - lr_scheduler_warmup_ratio: 0.1
82
+ - num_epochs: 30.0
83
+
84
+
85
+ ### Generate hyperparameters
86
+
87
+ The following hyperparameters were used during generation:
88
+
89
+ - dataset_name: kmfoda/booksum
90
+ - dataset_config_name: kmfoda--booksum
91
+ - eval_batch_size: 8
92
+ - eval_samples: 1431
93
+ - early_stopping: True
94
+ - ignore_pad_token_for_loss: True
95
+ - length_penalty: 2.0
96
+ - max_length: 512
97
+ - min_length: 128
98
+ - num_beams: 5
99
+ - no_repeat_ngram_size: None
100
+ - seed: 123
101
+
102
+ ### Framework versions
103
+
104
+ - Transformers 4.23.1
105
+ - Pytorch 1.12.1
106
+ - Datasets 2.3.2
107
+ - Tokenizers 0.11.6
all_results.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "eval_gen_len": 427.6918,
3
+ "eval_loss": 3.2653846740722656,
4
+ "eval_rouge1": 33.9468,
5
+ "eval_rouge2": 6.7034,
6
+ "eval_rougeL": 16.7879,
7
+ "eval_rougeLsum": 31.7677,
8
+ "eval_runtime": 2910.3841,
9
+ "eval_samples": 1431,
10
+ "eval_samples_per_second": 0.492,
11
+ "eval_steps_per_second": 0.062
12
+ }
config.json ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "ccdv/lsg-bart-base-4096-booksum",
3
+ "activation_dropout": 0.1,
4
+ "activation_function": "gelu",
5
+ "adaptive": true,
6
+ "add_bias_logits": false,
7
+ "add_final_layer_norm": false,
8
+ "architectures": [
9
+ "LSGBartForConditionalGeneration"
10
+ ],
11
+ "attention_dropout": 0.1,
12
+ "auto_map": {
13
+ "AutoConfig": "modeling_lsg_bart.LSGBartConfig",
14
+ "AutoModel": "modeling_lsg_bart.LSGBartModel",
15
+ "AutoModelForCausalLM": "modeling_lsg_bart.LSGBartForCausalLM",
16
+ "AutoModelForQuestionAnswering": "modeling_lsg_bart.LSGBartForQuestionAnswering",
17
+ "AutoModelForSeq2SeqLM": "modeling_lsg_bart.LSGBartForConditionalGeneration",
18
+ "AutoModelForSequenceClassification": "modeling_lsg_bart.LSGBartForSequenceClassification"
19
+ },
20
+ "base_model_prefix": "lsg",
21
+ "block_size": 256,
22
+ "bos_token_id": 0,
23
+ "classif_dropout": 0.1,
24
+ "classifier_dropout": 0.0,
25
+ "d_model": 768,
26
+ "decoder_attention_heads": 12,
27
+ "decoder_ffn_dim": 3072,
28
+ "decoder_layerdrop": 0.0,
29
+ "decoder_layers": 6,
30
+ "decoder_start_token_id": 2,
31
+ "dropout": 0.1,
32
+ "early_stopping": true,
33
+ "encoder_attention_heads": 12,
34
+ "encoder_ffn_dim": 3072,
35
+ "encoder_layerdrop": 0.0,
36
+ "encoder_layers": 6,
37
+ "eos_token_id": 2,
38
+ "forced_bos_token_id": 0,
39
+ "forced_eos_token_id": 2,
40
+ "gradient_checkpointing": false,
41
+ "id2label": {
42
+ "0": "LABEL_0",
43
+ "1": "LABEL_1",
44
+ "2": "LABEL_2"
45
+ },
46
+ "init_std": 0.02,
47
+ "is_encoder_decoder": true,
48
+ "label2id": {
49
+ "LABEL_0": 0,
50
+ "LABEL_1": 1,
51
+ "LABEL_2": 2
52
+ },
53
+ "length_penalty": 2.0,
54
+ "lsh_num_pre_rounds": 1,
55
+ "mask_first_token": false,
56
+ "max_length": 512,
57
+ "max_position_embeddings": 4096,
58
+ "min_length": 128,
59
+ "model_type": "bart",
60
+ "no_repeat_ngram_size": null,
61
+ "normalize_before": false,
62
+ "normalize_embedding": true,
63
+ "num_beams": 5,
64
+ "num_global_tokens": 1,
65
+ "num_hidden_layers": 6,
66
+ "pad_token_id": 1,
67
+ "pass_global_tokens_to_decoder": true,
68
+ "pool_with_global": true,
69
+ "scale_embedding": false,
70
+ "sparse_block_size": 0,
71
+ "sparsity_factor": 4,
72
+ "sparsity_type": "none",
73
+ "task_specific_params": {
74
+ "summarization": {
75
+ "length_penalty": 1.0,
76
+ "max_length": 128,
77
+ "min_length": 12,
78
+ "num_beams": 4
79
+ },
80
+ "summarization_cnn": {
81
+ "length_penalty": 2.0,
82
+ "max_length": 142,
83
+ "min_length": 56,
84
+ "num_beams": 4
85
+ },
86
+ "summarization_xsum": {
87
+ "length_penalty": 1.0,
88
+ "max_length": 62,
89
+ "min_length": 11,
90
+ "num_beams": 6
91
+ }
92
+ },
93
+ "torch_dtype": "float32",
94
+ "transformers_version": "4.23.1",
95
+ "use_cache": true,
96
+ "vocab_size": 50265
97
+ }
eval_results.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "eval_gen_len": 427.6918,
3
+ "eval_loss": 3.2653846740722656,
4
+ "eval_rouge1": 33.9468,
5
+ "eval_rouge2": 6.7034,
6
+ "eval_rougeL": 16.7879,
7
+ "eval_rougeLsum": 31.7677,
8
+ "eval_runtime": 2910.3841,
9
+ "eval_samples": 1431,
10
+ "eval_samples_per_second": 0.492,
11
+ "eval_steps_per_second": 0.062
12
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
modeling_lsg_bart.py ADDED
@@ -0,0 +1,1047 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from logging import warn
2
+ import torch
3
+ from transformers.models.bart.modeling_bart import *
4
+ from transformers.models.bart.modeling_bart import _expand_mask
5
+ import torch.nn as nn
6
+ import sys
7
+
8
+ AUTO_MAP = {
9
+ "AutoModel": "modeling_lsg_bart.LSGBartModel",
10
+ "AutoModelForCausalLM": "modeling_lsg_bart.LSGBartForCausalLM",
11
+ "AutoModelForQuestionAnswering": "modeling_lsg_bart.LSGBartForQuestionAnswering",
12
+ "AutoModelForSequenceClassification": "modeling_lsg_bart.LSGBartForSequenceClassification",
13
+ "AutoModelForSeq2SeqLM": "modeling_lsg_bart.LSGBartForConditionalGeneration"
14
+ }
15
+
16
class LSGBartConfig(BartConfig):
    """
    This class overrides :class:`~transformers.BartConfig`. Please check the superclass for the appropriate
    documentation alongside usage examples.
    """

    base_model_prefix = "lsg"
    model_type = "bart"
    # Cached key/values are dropped from inference outputs.
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        adaptive=True,
        base_model_prefix="lsg",
        block_size=128,
        lsh_num_pre_rounds=1,
        mask_first_token=False,
        num_global_tokens=1,
        pass_global_tokens_to_decoder=True,
        pool_with_global=True,
        sparse_block_size=128,
        sparsity_factor=2,
        sparsity_type="norm",
        **kwargs
    ):
        """Constructs LSGConfig.

        LSG-specific arguments (all remaining kwargs are forwarded to BartConfig):
            adaptive: stored flag; its consumer is not visible in this chunk — TODO confirm usage.
            block_size: local attention block size.
            lsh_num_pre_rounds: preliminary LSH rounds, only read when sparsity_type == "lsh".
            mask_first_token: if True, forces pool_with_global=True (see check below).
            num_global_tokens: number of global tokens, clamped into [1, 512].
            pass_global_tokens_to_decoder: stored flag; consumer not visible here.
            pool_with_global: whether the pooled token is replaced by the first global token.
            sparse_block_size: sparse attention block size (0 disables sparse attention).
            sparsity_factor: sparse down-sampling factor; block_size must be divisible by it.
            sparsity_type: one of None/"none"/"norm"/"lsh"/"pooling"/"stride"/"block_stride".
        """
        super().__init__(**kwargs)

        self.adaptive = adaptive
        # Expose the auto_map so Auto* classes resolve to the LSG implementations
        # when loading with trust_remote_code=True.
        self.auto_map = AUTO_MAP
        self.base_model_prefix = base_model_prefix
        self.block_size = block_size
        self.lsh_num_pre_rounds = lsh_num_pre_rounds
        self.mask_first_token = mask_first_token
        self.num_global_tokens = num_global_tokens
        self.pass_global_tokens_to_decoder = pass_global_tokens_to_decoder
        self.pool_with_global = pool_with_global
        self.sparse_block_size = sparse_block_size
        self.sparsity_factor = sparsity_factor
        self.sparsity_type = sparsity_type

        # Unknown sparsity types fall back to dense-only attention (with a warning).
        if sparsity_type not in [None, "none", "norm", "lsh", "pooling", "stride", "block_stride"]:
            logger.warning(
                "[WARNING CONFIG]: sparsity_mode not in [None, 'none', 'norm', 'lsh', 'pooling', 'stride', 'block_stride'], \
                setting sparsity_type=None, computation will skip sparse attention")
            self.sparsity_type = None

        if self.sparsity_type in ["stride", "block_stride"]:
            # Stride-style sparsity rotates indices per head; more factor than heads
            # duplicates patterns across heads.
            if self.sparsity_factor > self.encoder_attention_heads:
                logger.warning(
                    "[WARNING CONFIG]: sparsity_factor > encoder_attention_heads is not recommended for stride/block_stride sparsity"
                )

        # Clamp the number of global tokens into the supported [1, 512] range.
        if self.num_global_tokens < 1:
            logger.warning(
                "[WARNING CONFIG]: num_global_tokens < 1 is not compatible, setting num_global_tokens=1"
            )
            self.num_global_tokens = 1
        elif self.num_global_tokens > 512:
            logger.warning(
                "[WARNING CONFIG]: num_global_tokens > 512 is not allowed, setting num_global_tokens=512"
            )
            self.num_global_tokens = 512

        # NOTE(review): assert statements are stripped under `python -O`;
        # these are sanity checks, not hard validation.
        if self.sparsity_factor > 0:
            assert self.block_size % self.sparsity_factor == 0, "[ERROR CONFIG]: block_size must be divisible by sparsity_factor"
            assert self.block_size//self.sparsity_factor >= 1, "[ERROR CONFIG]: make sure block_size >= sparsity_factor"

        if self.mask_first_token and not pool_with_global:
            logger.warning(
                "[WARNING CONFIG]: pool_with_global==False is not compatible with mask_first_token==True. Setting pool_with_global to True.")
            self.pool_with_global = True

        # LSG attention only supports absolute position embeddings.
        if hasattr(self, "position_embedding_type"):
            if self.position_embedding_type != "absolute":
                logger.warning(
                    "[WARNING CONFIG]: LSG Attention is not compatible with relative positional embedding and will skip its computation. Set position_embedding_type='absolute' to remove this warning.")
94
+
95
+
96
class BaseSelfAttention(nn.Module):
    """Shared Q/K/V/output projection plumbing for the LSG attention modules.

    Holds the four square linear projections and the head split/merge helpers;
    subclasses implement the actual attention computation.
    """

    def __init__(
        self,
        embed_dim,
        num_heads,
        dropout=0.0,
        is_decoder=False,
        bias=True,
    ):
        super().__init__()

        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.is_decoder = is_decoder
        self.head_dim = embed_dim // num_heads

        # Heads must partition the embedding dimension exactly.
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim ** -0.5

        # Creation order kept as in the original so parameter init draws
        # from the RNG in the same sequence.
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def transpose_for_scores(self, x):
        """Split heads: (..., t, embed_dim) -> (n, num_heads, t, head_dim)."""
        per_head = x.view(*x.size()[:-1], self.num_heads, self.head_dim)
        return per_head.permute(0, 2, 1, 3)

    def reshape_output(self, context_layer):
        """Merge heads back: (n, num_heads, t, head_dim) -> (n, t, embed_dim)."""
        merged = context_layer.permute(0, 2, 1, 3).contiguous()
        return merged.view(*merged.size()[:-2], self.embed_dim)

    def project_QKV(self, hidden_states):
        """Apply the Q, K and V projections and split heads for each."""
        query_layer, key_layer, value_layer = (
            self.transpose_for_scores(proj(hidden_states))
            for proj in (self.q_proj, self.k_proj, self.v_proj)
        )
        return query_layer, key_layer, value_layer
145
+
146
+
147
class BaseAttentionProduct(nn.Module):

    def __init__(self, config):
        """
        Compute attention: softmax(Q @ K.T) @ V
        """
        super().__init__()
        # Dropout applied to attention probabilities (drops whole tokens to attend to).
        self.dropout = nn.Dropout(config.attention_dropout)

    def forward(self, query_layer, key_layer, value_layer, attention_mask=None):
        """Scaled dot-product attention.

        Args:
            query_layer, key_layer, value_layer: tensors whose last two dims are
                (tokens, head_dim); leading dims must broadcast against each other.
            attention_mask: optional additive mask (large negative values at
                masked positions), broadcastable to the score matrix.

        Returns:
            Context tensor with the same leading dims as query_layer.
        """
        d = query_layer.shape[-1]

        # Raw attention scores, scaled by sqrt(head_dim).
        attention_scores = query_layer @ key_layer.transpose(-1, -2) / math.sqrt(d)

        # Free the inputs early — these intermediates can be large.
        del query_layer
        del key_layer

        if attention_mask is not None:
            # Additive mask (precomputed by the caller).
            attention_scores = attention_scores + attention_mask
            del attention_mask

        # Fix: use the functional softmax instead of instantiating a fresh
        # nn.Softmax module on every forward call; numerically identical.
        attention_probs = torch.nn.functional.softmax(attention_scores, dim=-1)

        # Dropout on probabilities, then weighted sum over values.
        context_layer = self.dropout(attention_probs) @ value_layer

        return context_layer
179
+
180
+
181
class LSGAttentionProduct(nn.Module):

    def __init__(self, config, block_size=None, sparse_block_size=None, sparsity_factor=4):
        """
        Compute block or overlapping blocks attention products
        """
        super().__init__()

        self.block_size = block_size
        self.sparse_block_size = sparse_block_size
        self.sparsity_factor = sparsity_factor

        # Fall back to the config values when sizes are not given explicitly.
        if self.block_size is None:
            self.block_size = config.block_size

        if self.sparse_block_size is None:
            self.sparse_block_size = config.sparse_block_size

        # Shape of blocks
        # (window size, step): each local window spans 3 blocks (previous,
        # current, next) and advances one block at a time.
        self.local_shapes = (self.block_size*3, self.block_size)
        if self.sparse_block_size and self.sparsity_factor > 0:
            # Sparse windows cover 3 sparse blocks stepping by a down-sampled block.
            self.sparse_shapes = (self.sparse_block_size*3, self.block_size//self.sparsity_factor)

        # Dense softmax(QK^T)V product applied per block.
        self.attention = BaseAttentionProduct(config)

    def build_lsg_inputs(self, hidden_states, sparse_hidden_states, global_hidden_states, is_attn_mask=False):
        # Assemble, per block, the concatenation [global | sparse | local] of
        # key/value (or mask) tokens each query block may attend to.

        # Build local tokens
        local_hidden_states = self.reshape_to_local_block(hidden_states, is_attn_mask)
        del hidden_states

        # Build sparse tokens
        if sparse_hidden_states is not None:
            sparse_hidden_states = self.reshape_to_sparse_block(sparse_hidden_states, is_attn_mask)

        return self.cat_global_sparse_local_tokens(global_hidden_states, sparse_hidden_states, local_hidden_states)

    def forward(
        self,
        query_layer,
        key_layer,
        value_layer,
        attention_mask=None,
        sparse_key=None,
        sparse_value=None,
        sparse_mask=None,
        global_key=None,
        global_value=None,
        global_mask=None
    ):
        """Blockwise attention over [global | sparse | local] keys/values.

        All main tensors are rank-4 (batch, heads, length, head_dim); length
        must be a multiple of block_size.
        """

        # Input batch, heads, length, hidden_size
        n, h, t, d = query_layer.size()
        n_blocks = t // self.block_size
        assert t % self.block_size == 0

        key_layer = self.build_lsg_inputs(
            key_layer,
            sparse_key,
            global_key
        )
        del sparse_key
        del global_key

        value_layer = self.build_lsg_inputs(
            value_layer,
            sparse_value,
            global_value
        )
        del sparse_value
        del global_value

        # Masks are stored transposed, hence the transpose in/out around the build.
        attention_mask = self.build_lsg_inputs(
            attention_mask,
            sparse_mask,
            global_mask.transpose(-1, -2),
            is_attn_mask=True
        ).transpose(-1, -2)
        del sparse_mask
        del global_mask

        # expect (..., t, d) shape
        # Compute attention
        context_layer = self.attention(
            query_layer=self.chunk(query_layer, n_blocks),
            key_layer=key_layer,
            value_layer=value_layer,
            attention_mask=attention_mask
        )

        # Flatten the block dimension back into the sequence dimension.
        return context_layer.reshape(n, h, -1, d)

    def reshape_to_local_block(self, hidden_states, is_attn_mask=False):
        # Turn (..., t, d) into overlapping windows of `size` tokens stepping
        # by `step`, padding the sequence edges so every block has full context.

        size, step = self.local_shapes
        s = (size - step) // 2

        # Pad before block reshaping
        if is_attn_mask:
            # Pad masks with the most negative value so padded positions are
            # never attended to; masks arrive transposed.
            pad_value = torch.finfo(hidden_states.dtype).min
            hidden_states = hidden_states.transpose(-1, -2)
        else:
            pad_value = 0

        hidden_states = torch.nn.functional.pad(
            hidden_states.transpose(-1, -2),
            pad=(s, s),
            value=pad_value
        ).transpose(-1, -2)

        # Make blocks
        hidden_states = hidden_states.unfold(-2, size=size, step=step).transpose(-1, -2)

        return hidden_states

    def reshape_to_sparse_block(self, hidden_states, is_attn_mask=False):
        # Same windowing idea as reshape_to_local_block, but over the
        # down-sampled (sparse) token sequence, then slicing out the tokens
        # on each side of the current block.

        size, step = self.sparse_shapes

        # In case of odd case
        odd_offset = (step % 2)

        # n, h, t, d*2 + 1
        size = size*2
        s = (size - step) // 2 + odd_offset

        # Pad before block reshaping
        if is_attn_mask:
            pad_value = torch.finfo(hidden_states.dtype).min
            hidden_states = hidden_states.transpose(-1, -2)
        else:
            pad_value = 0

        hidden_states = torch.nn.functional.pad(
            hidden_states.transpose(-1, -2),
            pad=(s, s),
            value=pad_value
        ).transpose(-1, -2)

        # Make blocks
        hidden_states = hidden_states.unfold(-2, size=size, step=step).transpose(-1, -2)

        # Fix case where block_size == sparsify_factor
        if odd_offset:
            hidden_states = hidden_states[..., :-1, :, :]

        # Indexes for selection
        # u locates the current block's center inside the window; keep
        # sparse_block_size tokens on each side of it.
        u = (size - self.block_size * 3 // self.sparsity_factor) // 2 + odd_offset
        s = self.sparse_block_size

        u_ = u + odd_offset
        return torch.cat([hidden_states[..., u-s:u, :], hidden_states[..., -u_:-u_+s, :]], dim=-2)

    def cat_global_sparse_local_tokens(self, x_global, x_sparse=None, x_local=None, dim=-2):
        # Broadcast the global tokens over the block dimension and concatenate
        # [global | sparse | local] along the token axis.

        n, h, b, t, d = x_local.size()
        x_global = x_global.unsqueeze(-3).expand(-1, -1, b, -1, -1)
        if x_sparse is not None:
            return torch.cat([x_global, x_sparse, x_local], dim=dim)
        return torch.cat([x_global, x_local], dim=dim)

    def chunk(self, x, n_blocks):
        # (..., t, d) -> (..., n_blocks, t // n_blocks, d)
        t, d = x.size()[-2:]
        return x.reshape(*x.size()[:-2], n_blocks, -1, d)
346
+
347
+
348
class LSGBartEncoderAttention(BaseSelfAttention):
    '''
    Compute local attention with overlapping blocs
    Use global attention for tokens with highest norm
    '''
    def __init__(
        self,
        config,
        embed_dim,
        num_heads,
        dropout
    ):

        super().__init__(embed_dim, num_heads, dropout)

        self.block_size = config.block_size
        self.sparse_block_size = config.sparse_block_size
        self.num_global_tokens = config.num_global_tokens
        self.sparsity_factor = config.sparsity_factor

        # Block-local + sparse + global attention product.
        self.attention = LSGAttentionProduct(
            config,
            block_size=config.block_size,
            sparse_block_size=config.sparse_block_size,
            sparsity_factor=self.sparsity_factor,
        )

        # Dense fallback used for short sequences and for the global tokens.
        self.full_attention = BaseAttentionProduct(config)

        # Dispatch table: sparsity_type -> sparse token selection strategy.
        sparse_functions = {
            "norm": self.get_sparse_tokens_with_norm,
            "pooling": self.get_sparse_tokens_with_pooling,
            "lsh": self.get_sparse_tokens_with_lsh,
            "stride": self.get_sparse_tokens_with_stride,
            "block_stride": self.get_sparse_tokens_with_block_stride,
        }

        self.sparsity_type = config.sparsity_type
        # Unknown/None sparsity types select nothing (sparse path disabled).
        self.get_sparse_elements = sparse_functions.get(self.sparsity_type, lambda x, y, z: (None, None, None))

        if config.sparsity_type == "lsh":
            self.lsh_num_pre_rounds = config.lsh_num_pre_rounds

    def get_sparse_tokens_with_norm(self, keys, values, mask):
        # Keep, per chunk, the keys/values with the highest L2 norm
        # (1 out of every sparsity_factor tokens).

        if self.sparsity_factor == 1:
            return keys, values, mask.expand(-1, keys.size()[1], -1, -1)

        with torch.no_grad():

            block_size = min(self.block_size, self.sparse_block_size)
            key_norm = keys.detach().norm(dim=-1, keepdim=True)
            # Zero out the norm of masked positions so they sort lowest.
            key_norm = key_norm * ~mask.transpose(-1, -2).bool()
            key_norm = self.chunk(key_norm, block_size)

            n, h, b, t, d = key_norm.size()

            idx = key_norm.argsort(dim=-2)
            del key_norm
            # Offset per-chunk indices into flat sequence positions.
            idx += (torch.arange(b, device=keys.device)*t).reshape(1, 1, b, 1, 1)

            # Take only the top block_size // sparsity_factor indices per chunk.
            split = (t - block_size // self.sparsity_factor, block_size // self.sparsity_factor)
            sparse_idx = idx.split(split, -2)[-1].reshape(n, h, -1, 1)

        d = keys.size()[-1]
        keys = keys.gather(dim=-2, index=sparse_idx.expand(-1, -1, -1, d))
        values = values.gather(dim=-2, index=sparse_idx.expand(-1, -1, -1, d))
        mask = mask.expand(-1, h, -1, -1).transpose(-1, -2).gather(dim=-2, index=sparse_idx).transpose(-1, -2)

        return keys, values, mask

    def get_sparse_tokens_with_pooling(self, keys, values, mask):
        # Average-pool every sparsity_factor consecutive tokens (masked
        # positions excluded from the mean).

        if self.sparsity_factor == 1:
            return keys, values, mask.expand(-1, keys.size()[1], -1, -1)

        keys = self.chunk(keys, self.sparsity_factor)
        values = self.chunk(values, self.sparsity_factor)

        n, h, b, t, d = keys.size()
        mask = mask.reshape(n, 1, b, 1, t)
        mask = ~mask.transpose(-1, -2).bool()

        keys = keys * mask
        values = values * mask

        mask = mask.sum(dim=-2)
        keys = keys.sum(dim=-2) / (mask + 1e-6)
        values = values.sum(dim=-2) / (mask + 1e-6)

        # Chunks with no valid token become fully masked (large negative value).
        mask = (1. - mask.clamp(0, 1)) * torch.finfo(mask.dtype).min
        return keys.reshape(n, h, -1, d), values.reshape(n, h, -1, d), mask.expand(-1, h, -1, -1).transpose(-1, -2)

    def get_sparse_tokens_with_stride(self, keys, values, mask):
        # Take every sparsity_factor-th token, with a per-head phase offset so
        # different heads see different strided subsets.

        if self.sparsity_factor == 1:
            return keys, values, mask.expand(-1, keys.size()[1], -1, -1)

        n, h, t, d = keys.size()
        sparse_idx = torch.arange(t // self.sparsity_factor, device=keys.device) * self.sparsity_factor
        sparse_idx = sparse_idx.reshape(1, 1, -1, 1) + (torch.arange(h, device=keys.device) % self.sparsity_factor).reshape(1, h, 1, 1)
        sparse_idx = sparse_idx.expand(n, h, -1, 1)

        keys = keys.gather(dim=-2, index=sparse_idx.expand(-1, -1, -1, d))
        values = values.gather(dim=-2, index=sparse_idx.expand(-1, -1, -1, d))
        mask = mask.expand(-1, h, -1, -1).transpose(-1, -2).gather(dim=-2, index=sparse_idx).transpose(-1, -2)

        return keys, values, mask

    def get_sparse_tokens_with_block_stride(self, keys, values, mask):
        # Like stride, but the per-head offset rotates within each block so the
        # selected positions wrap around block boundaries.

        if self.sparsity_factor == 1:
            return keys, values, mask.expand(-1, keys.size()[1], -1, -1)

        n, h, t, d = keys.size()

        t, b = self.block_size, t // self.block_size
        sparse_idx = torch.arange(t // self.sparsity_factor, device=keys.device)
        sparse_idx = sparse_idx.reshape(1, 1, 1, -1, 1) + torch.arange(h, device=keys.device).reshape(1, h, 1, 1, 1) * (t // self.sparsity_factor)
        sparse_idx = (sparse_idx % t)
        sparse_idx = sparse_idx + torch.arange(b, device=keys.device).reshape(1, 1, -1, 1, 1) * t
        sparse_idx = sparse_idx.reshape(1, h, -1, 1).expand(n, h, -1, 1)

        keys = keys.gather(dim=-2, index=sparse_idx.expand(-1, -1, -1, d))
        values = values.gather(dim=-2, index=sparse_idx.expand(-1, -1, -1, d))
        mask = mask.expand(-1, h, -1, -1).transpose(-1, -2).gather(dim=-2, index=sparse_idx).transpose(-1, -2)

        return keys, values, mask

    def get_sparse_tokens_with_lsh(self, keys, values, mask):
        # Bucket tokens with random-projection LSH and merge tokens that share
        # a bucket; pre-rounds merge at full resolution before down-sampling.
        # NOTE(review): relies on torch.randn, so results are non-deterministic
        # unless the global RNG is seeded.

        if self.sparsity_factor == 1:
            return keys, values, mask.expand(-1, keys.size()[1], -1, -1)

        block_size = min(self.block_size, self.sparse_block_size)
        keys = self.chunk(keys, block_size)
        values = self.chunk(values, block_size)

        n, h, b, t, d = keys.size()
        mask = mask.reshape(n, 1, b, 1, t)
        mask = ~mask.transpose(-1, -2).bool()

        keys = keys * mask
        values = values * mask
        mask = mask.expand(-1, h, -1, -1, -1).float()

        extra_factor = 1

        for _ in range(self.lsh_num_pre_rounds):
            keys, values, mask = self.lsh_round(keys, values, mask, t*extra_factor)

        keys, values, mask = self.lsh_round(keys, values, mask, t//self.sparsity_factor)
        # Normalize bucket sums into means; epsilon guards empty buckets.
        keys /= mask + 1e-8
        values /= mask + 1e-8

        mask = (1. - mask.clamp(0, 1)) * torch.finfo(mask.dtype).min
        return keys.reshape(n, h, -1, d), values.reshape(n, h, -1, d), mask.transpose(-1, -2).reshape(n, h, 1, -1)

    def lsh_round(self, keys, values, mask, output_size):
        # One LSH round: hash each token into one of output_size buckets via a
        # random projection, then scatter-add tokens sharing a bucket.

        with torch.no_grad():

            n_hashes = output_size // 2
            n, h, b, t, d = keys.size()
            binary_mask = mask.clamp(0, 1)

            indexes = (torch.nn.functional.normalize(keys, dim=-1) * binary_mask) @ torch.randn(1, h, 1, d, n_hashes, device=keys.device)
            # Sign trick doubles the bucket count: argmax over [proj, -proj].
            indexes = torch.cat([indexes, -indexes], dim=-1).argmax(dim=-1, keepdim=True)

        n, h, b, t, d = keys.size()

        x_ = torch.zeros(n, h, b, output_size, d, device=keys.device)
        mask_ = torch.zeros(n, h, b, output_size, 1, device=keys.device)
        keys = torch.scatter_add(x_, dim=-2, index=indexes.expand(-1, -1, -1, -1, d), src=keys)
        values = torch.scatter_add(x_, dim=-2, index=indexes.expand(-1, -1, -1, -1, d), src=values)
        mask = torch.scatter_add(mask_, dim=-2, index=indexes, src=mask)

        return keys[..., :output_size, :], values[..., :output_size, :], mask[..., :output_size, :]

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        layer_head_mask=None,
        output_attentions=False
    ):
        """Encoder self-attention entry point (non-causal only).

        Returns a (output, None, None) triple to mirror the BART attention
        interface; attention probs and past key/values are never returned.
        """

        query_layer, key_layer, value_layer = self.project_QKV(hidden_states)
        outputs = self.not_causal_forward(
            query_layer,
            key_layer,
            value_layer,
            # Masks are identical across query rows here; keep a single row.
            attention_mask=attention_mask[:, :, :1, :],
            head_mask=layer_head_mask,
            output_attentions=output_attentions
        )

        return self.out_proj(outputs), None, None

    def not_causal_forward(
        self,
        query_layer,
        key_layer,
        value_layer,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
    ):
        # NOTE(review): head_mask and output_attentions are accepted for
        # interface compatibility but not used in the computation below.

        n, h, t, d = query_layer.size()

        # Cat global mask
        attention_mask = torch.nn.functional.pad(attention_mask, (self.num_global_tokens, 0), value=0)

        # Use normal attention if local attention covers every tokens
        if t <= 2 * self.block_size + self.num_global_tokens:
            context_layer = self.full_attention(
                query_layer=query_layer,
                key_layer=key_layer,
                value_layer=value_layer,
                attention_mask=attention_mask
            )

            return self.reshape_output(context_layer)

        # Split input into global tokens and other tokens
        split = (self.num_global_tokens, t - self.num_global_tokens)
        global_query, query_layer = query_layer.split(split, dim=-2)

        # Get global_attention
        # Global queries attend densely over the full sequence.
        bos = self.full_attention(
            query_layer=global_query,
            key_layer=key_layer,
            value_layer=value_layer,
            attention_mask=attention_mask
        )

        # Split K Q M on global and non global
        global_key, key_layer = key_layer.split(split, dim=-2)
        global_value, value_layer = value_layer.split(split, dim=-2)
        global_mask, attention_mask = attention_mask.split(split, dim=-1)

        n, h, t, d = key_layer.size()

        # Get sparse idx
        sparse_key, sparse_value, sparse_mask = (None, None, None)

        if self.sparse_block_size and self.sparsity_factor > 0:
            sparse_key, sparse_value, sparse_mask = self.get_sparse_elements(key_layer, value_layer, attention_mask)

        # Expand masks on heads
        attention_mask = attention_mask.expand(-1, h, -1, -1)
        global_mask = global_mask.expand(-1, h, -1, -1)

        # Compute dot product attention
        context_layer = self.attention(
            query_layer,
            key_layer,
            value_layer,
            attention_mask,
            sparse_key=sparse_key,
            sparse_value=sparse_value,
            sparse_mask=sparse_mask,
            global_key=global_key,
            global_value=global_value,
            global_mask=global_mask
        )

        # Merge global and local-sparse tokens
        context_layer = torch.cat([bos, context_layer], dim=-2)
        context_layer = self.reshape_output(context_layer)

        return context_layer

    def chunk(self, x, chunk_size):
        # (n, h, t, d) -> (n, h, t // chunk_size, chunk_size, d)
        n, h, t, d = x.size()
        return x.reshape(n, h, -1, chunk_size, d)
626
+
627
+
628
class LSGBartEncoderLayer(BartEncoderLayer):
    """Standard BART encoder layer with its self-attention swapped for LSG attention."""

    def __init__(self, config):
        super().__init__(config)
        # Let BartEncoderLayer build everything, then replace the dense
        # self-attention module with the local-sparse-global variant.
        lsg_attention = LSGBartEncoderAttention(
            config=config,
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.self_attn = lsg_attention
639
+
640
+
641
class LSGBartPretrainedModel(BartPretrainedModel):
    """Base class for all LSG-BART models; binds the LSG config class."""

    config_class = LSGBartConfig

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle gradient checkpointing on every encoder/decoder stack,
        # including the LSG encoder.
        checkpointable = (BartDecoder, BartEncoder, LSGBartEncoder)
        if isinstance(module, checkpointable):
            module.gradient_checkpointing = value
649
+
650
+
651
class PretrainedLSGBartModel(LSGBartPretrainedModel):
    """Deprecated alias of :class:`LSGBartPretrainedModel`, kept for backward compatibility."""

    def __init_subclass__(self):
        # Fix: the original warning referred to the wrong class name
        # (`PretrainedBartModel`) and misspelled "deprecated" as "depreciated".
        warnings.warn(
            "The class `PretrainedLSGBartModel` has been deprecated, please use `LSGBartPretrainedModel` instead.",
            FutureWarning,
        )
658
+
659
+
660
class LSGBartEncoder(LSGBartPretrainedModel, BartEncoder):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    :class:`BartEncoderLayer`.

    Compared to the vanilla BART encoder, this version:
    - prepends ``config.num_global_tokens`` learned global token embeddings to each sequence,
    - optionally pads the input so its length is a multiple of ``config.block_size`` ("adaptive" mode),
    - strips (or pools) the global tokens from the output unless they are passed to the decoder.

    Args:
        config: BartConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config, embed_tokens=None):

        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop

        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        # Scale token embeddings by sqrt(d_model) when configured (standard BART behavior).
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0

        # Reuse a shared embedding table when the caller provides one.
        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)

        self.embed_positions = BartLearnedPositionalEmbedding(
            config.max_position_embeddings,
            embed_dim,
        )
        self.layers = nn.ModuleList([LSGBartEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layernorm_embedding = nn.LayerNorm(embed_dim)

        # LSG-specific configuration — fail fast if a plain BartConfig was passed.
        assert hasattr(config, "num_global_tokens")
        self.num_global_tokens = config.num_global_tokens
        self.pad_idx = config.pad_token_id

        assert hasattr(config, "block_size") and hasattr(config, "adaptive")
        self.block_size = config.block_size
        self.adaptive = config.adaptive
        self.mask_first_token = config.mask_first_token
        self.pool_with_global = config.pool_with_global
        self.pass_global_tokens_to_decoder = config.pass_global_tokens_to_decoder

        # Embedding table for the prepended global tokens (supports up to 512 of them).
        self.global_embeddings = nn.Embedding(512, embedding_dim=config.d_model)

        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def forward(self,
        input_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None
        ):
        # Wrapper around forward_with_adaptive: pads the input to a multiple of
        # block_size when adaptive mode applies, then restores the original
        # sequence length (and drops or pools global tokens) on the way out.

        inputs_ = input_ids if input_ids is not None else inputs_embeds
        n, t = inputs_.size()[:2]

        if attention_mask is None:
            attention_mask = torch.ones(n, t, device=inputs_.device, dtype=inputs_.dtype)
        if self.mask_first_token:
            # Mask out BOS so it does not attend / get attended to.
            attention_mask[:, 0] = 0

        b = self.block_size * 2
        pad = t % self.block_size

        # Check if t is a multiple of block_size; if not (and the sequence is long
        # enough to use block attention), right-pad ids/embeds and the mask.
        if self.adaptive and t > b and pad > 0:
            pad_length = self.block_size - pad
            if input_ids is not None:
                input_ids = torch.nn.functional.pad(input_ids, (0, pad_length), value=self.pad_idx)
            else:
                inputs_embeds = torch.nn.functional.pad(inputs_embeds.transpose(-1, -2), (0, pad_length), value=0.).transpose(-1, -2)
            attention_mask = torch.nn.functional.pad(attention_mask, (0, pad_length), value=0)

        # t_ is the (possibly padded) length actually fed to the encoder.
        n, t_ = attention_mask.size()

        encoder_outputs = self.forward_with_adaptive(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        context = encoder_outputs[0]
        diff = t - t_  # negative iff adaptive padding was added above

        if self.pass_global_tokens_to_decoder:
            # Keep the global tokens in the output; LSGBartModel pads the
            # decoder's cross-attention mask to match.
            offset = self.num_global_tokens
        else:
            if self.pool_with_global:
                # Copy the first global token over the first regular token
                # before stripping the global block.
                context[:, self.num_global_tokens] = context[:, 0]
            context = context[..., self.num_global_tokens:, :]
            offset = 0

        # Adapt sequence to initial shape (remove the adaptive padding).
        if diff < 0:
            context = context[:, :t + offset]

        if return_dict:
            encoder_outputs.last_hidden_state = context
        else:
            encoder_outputs = (context, ) + encoder_outputs[1:]

        return encoder_outputs

    def forward_with_adaptive(
        self,
        input_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        # Core encoder pass: embed tokens, prepend global token embeddings,
        # then run the LSG encoder layers. The caller (forward) has already
        # handled any adaptive padding.

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds (exactly one must be given)
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        embed_pos = self.embed_positions(inputs_embeds)
        hidden_states = inputs_embeds + embed_pos

        # Add global tokens: prepend num_global_tokens learned embeddings,
        # broadcast across the batch, ahead of the regular tokens.
        n, t, d = hidden_states.size()
        global_idx = torch.arange(self.num_global_tokens, device=hidden_states.device).reshape(1, -1)
        hidden_states = torch.cat([self.global_embeddings(global_idx).expand(n, -1, -1), hidden_states], dim=-2)

        hidden_states = self.layernorm_embedding(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            if head_mask.size()[0] != (len(self.layers)):
                raise ValueError(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
                )

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):  # skip the layer
                layer_outputs = (None, None)
            else:
                if self.gradient_checkpointing and self.training:

                    # Closure so checkpoint() can pass output_attentions positionally.
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(encoder_layer),
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
876
+
877
+
878
class LSGBartModel(LSGBartPretrainedModel, BartModel):
    """LSG-BART seq2seq backbone: LSG encoder for long inputs plus the vanilla
    BART decoder, sharing a single token-embedding table."""

    def __init__(self, config):

        LSGBartPretrainedModel.__init__(self, config)

        padding_idx, vocab_size = config.pad_token_id, config.vocab_size
        # Token embedding shared by both encoder and decoder.
        self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)

        # Cached so forward() can pad the cross-attention mask for global tokens.
        self.pass_global_tokens_to_decoder = config.pass_global_tokens_to_decoder
        self.num_global_tokens = config.num_global_tokens

        self.encoder = LSGBartEncoder(config, self.shared)
        self.decoder = BartDecoder(config, self.shared)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run the encoder (unless ``encoder_outputs`` is supplied), then the decoder.

        Returns a :class:`Seq2SeqModelOutput` when ``return_dict`` is true,
        otherwise the concatenated decoder + encoder output tuples.
        """

        # different to other models, Bart automatically creates decoder_input_ids from
        # input_ids if no decoder_input_ids are provided
        if decoder_input_ids is None and decoder_inputs_embeds is None:
            decoder_input_ids = shift_tokens_right(
                input_ids, self.config.pad_token_id, self.config.decoder_start_token_id
            )

        # Resolve output options from config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # Pad mask for global tokens: the encoder output keeps num_global_tokens
        # extra positions at the front, so mark them attendable (value=1).
        if self.pass_global_tokens_to_decoder and attention_mask is not None:
            attention_mask = torch.nn.functional.pad(attention_mask, pad=(self.num_global_tokens, 0), value=1)

        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
980
+
981
+
982
class LSGBartForConditionalGeneration(LSGBartPretrainedModel, BartForConditionalGeneration):
    """LSG-BART with a language-modeling head, for seq2seq generation."""

    base_model_prefix = "model"
    _keys_to_ignore_on_load_missing = [r"final_logits_bias", r"lm_head\.weight"]

    def __init__(self, config):
        LSGBartPretrainedModel.__init__(self, config)
        self.model = LSGBartModel(config)

        # LM head projects back to the shared vocabulary; the bias lives in a
        # buffer so it is saved/resized with the model but not trained.
        vocab_size = self.model.shared.num_embeddings
        self.register_buffer("final_logits_bias", torch.zeros((1, vocab_size)))
        self.lm_head = nn.Linear(config.d_model, vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()
996
+
997
+
998
class LSGBartForSequenceClassification(LSGBartPretrainedModel, BartForSequenceClassification):
    """LSG-BART with a pooled classification head on top of the backbone."""

    def __init__(self, config: LSGBartConfig, **kwargs):
        LSGBartPretrainedModel.__init__(self, config, **kwargs)
        self.model = LSGBartModel(config)

        head = BartClassificationHead(
            config.d_model,
            config.d_model,
            config.num_labels,
            config.classifier_dropout,
        )
        self.classification_head = head

        # The head is created after the backbone's weight init, so
        # initialize its layers explicitly.
        for module in (head.dense, head.out_proj):
            self.model._init_weights(module)
1012
+
1013
+
1014
class LSGBartForQuestionAnswering(LSGBartPretrainedModel, BartForQuestionAnswering):
    """LSG-BART with a span-extraction head (start/end logits) for extractive QA."""

    def __init__(self, config: LSGBartConfig):
        LSGBartPretrainedModel.__init__(self, config)

        # Span extraction always predicts exactly two labels: start and end.
        self.num_labels = 2
        config.num_labels = self.num_labels

        self.model = LSGBartModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # The QA head is created after the backbone's init, so initialize it here.
        self.model._init_weights(self.qa_outputs)
1027
+
1028
+
1029
class LSGBartForCausalLM(LSGBartPretrainedModel, BartForCausalLM):
    """Decoder-only LSG-BART for causal language modeling."""

    def __init__(self, config: LSGBartConfig):

        # Run both parents' initializers explicitly: the LSG base binds the
        # LSG config class, then BartForCausalLM builds the decoder + LM head.
        LSGBartPretrainedModel.__init__(self, config)
        BartForCausalLM.__init__(self, config)
1035
+
1036
+
1037
def str_to_class(classname):
    """Look up an object defined in this module by its (string) name."""
    this_module = sys.modules[__name__]
    return getattr(this_module, classname)
1039
+
1040
# Register model in Auto API so trust_remote_code loading resolves the LSG classes.
try:
    LSGBartConfig.register_for_auto_class()
    for key, value in AUTO_MAP.items():
        str_to_class(value.split(".")[-1]).register_for_auto_class(key)
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still propagate.
    # Registration is best-effort: older transformers lack register_for_auto_class.
    warn("AutoRegister isn't available, you'll have to manually copy modeling.py after .save_pretrained(...).")
    warn("Update to transformers >= 4.17.0 to fix.")
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:542d10798e56e6c2875ca4c9e756528d537de9ddc620e44704502a3eee1fd8cb
3
+ size 578416695
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "<unk>"
15
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "bos_token": "<s>",
4
+ "cls_token": "<s>",
5
+ "eos_token": "</s>",
6
+ "errors": "replace",
7
+ "mask_token": "<mask>",
8
+ "model_max_length": 4096,
9
+ "name_or_path": "/data/ccondevaux/lsg/text-summarization/tmp/booksum/lsg_local_2",
10
+ "pad_token": "<pad>",
11
+ "sep_token": "</s>",
12
+ "special_tokens_map_file": null,
13
+ "tokenizer_class": "BartTokenizer",
14
+ "trim_offsets": true,
15
+ "unk_token": "<unk>"
16
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff