Commit 42a0569
Zamoranesis committed
Parent(s): 0e15f28

clinical_transcripts_roberta_distilled

Files changed:
- README.md +97 -0
- added_tokens.json +7 -0
- config.json +27 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +15 -0
- tokenizer.json +0 -0
- tokenizer_config.json +57 -0
- training_args.bin +3 -0
- vocab.json +0 -0
README.md
ADDED
@@ -0,0 +1,97 @@
+---
+base_model: Zamoranesis/clinical_transcripts_roberta
+tags:
+- generated_from_trainer
+model-index:
+- name: clinical_transcripts_roberta_distilled
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# clinical_transcripts_roberta_distilled
+
+This model is a fine-tuned version of [Zamoranesis/clinical_transcripts_roberta](https://huggingface.co/Zamoranesis/clinical_transcripts_roberta) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Loss: 106.7275
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 0.0005
+- train_batch_size: 16
+- eval_batch_size: 16
+- seed: 42
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: linear
+- lr_scheduler_warmup_ratio: 0.1
+- lr_scheduler_warmup_steps: 100
+- training_steps: 4000
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:-----:|:----:|:---------------:|
+| 276.9792 | 0.31 | 100 | 198.7813 |
+| 202.4796 | 0.63 | 200 | 187.2836 |
+| 187.377 | 0.94 | 300 | 168.7091 |
+| 171.4996 | 1.25 | 400 | 171.7099 |
+| 162.2323 | 1.57 | 500 | 162.8250 |
+| 165.8647 | 1.88 | 600 | 161.5869 |
+| 151.5353 | 2.19 | 700 | 151.7282 |
+| 145.5493 | 2.51 | 800 | 146.7330 |
+| 148.6375 | 2.82 | 900 | 154.1694 |
+| 143.963 | 3.13 | 1000 | 152.8062 |
+| 141.3649 | 3.45 | 1100 | 149.8849 |
+| 134.6703 | 3.76 | 1200 | 141.6481 |
+| 136.8365 | 4.08 | 1300 | 142.7610 |
+| 127.3401 | 4.39 | 1400 | 134.1384 |
+| 128.1466 | 4.7 | 1500 | 137.5692 |
+| 130.1291 | 5.02 | 1600 | 131.3674 |
+| 123.2371 | 5.33 | 1700 | 133.7921 |
+| 128.0497 | 5.64 | 1800 | 137.7079 |
+| 121.8081 | 5.96 | 1900 | 131.1400 |
+| 118.9764 | 6.27 | 2000 | 136.2727 |
+| 111.3325 | 6.58 | 2100 | 125.8130 |
+| 112.32 | 6.9 | 2200 | 122.2134 |
+| 110.8909 | 7.21 | 2300 | 126.7264 |
+| 113.9796 | 7.52 | 2400 | 121.6689 |
+| 109.1709 | 7.84 | 2500 | 123.6003 |
+| 103.981 | 8.15 | 2600 | 115.3986 |
+| 99.9035 | 8.46 | 2700 | 118.1729 |
+| 102.7026 | 8.78 | 2800 | 116.7197 |
+| 102.889 | 9.09 | 2900 | 110.3246 |
+| 97.2037 | 9.4 | 3000 | 111.4095 |
+| 96.6495 | 9.72 | 3100 | 110.4597 |
+| 91.2564 | 10.03 | 3200 | 114.8320 |
+| 93.1662 | 10.34 | 3300 | 112.2192 |
+| 94.8274 | 10.66 | 3400 | 108.9920 |
+| 91.7985 | 10.97 | 3500 | 106.0877 |
+| 92.6536 | 11.29 | 3600 | 101.6935 |
+| 85.6407 | 11.6 | 3700 | 103.1658 |
+| 88.6192 | 11.91 | 3800 | 98.9863 |
+| 87.0916 | 12.23 | 3900 | 102.7780 |
+| 84.1347 | 12.54 | 4000 | 106.7275 |
+
+
+### Framework versions
+
+- Transformers 4.34.1
+- Pytorch 2.1.0+cu118
+- Datasets 2.14.5
+- Tokenizers 0.14.1
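The generated card stops short of a usage example. A minimal sketch for trying the checkpoint as a masked-language model with the `transformers` fill-mask pipeline follows; the repo id matches this commit, and the clinical example sentence is invented for illustration:

```python
# A minimal sketch: load the distilled checkpoint for masked-token
# prediction. The example sentence below is invented for illustration.
from transformers import pipeline

fill_mask = pipeline(
    "fill-mask",
    model="Zamoranesis/clinical_transcripts_roberta_distilled",
)

# RoBERTa-style tokenizers use <mask> as the mask token
# (see special_tokens_map.json below).
preds = fill_mask("The patient was prescribed <mask> for hypertension.")
for p in preds:
    print(f"{p['token_str']!r}: {p['score']:.4f}")
```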
added_tokens.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "</s>": 2,
+  "<mask>": 50264,
+  "<pad>": 1,
+  "<s>": 0,
+  "<unk>": 3
+}
config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "_name_or_path": "Zamoranesis/clinical_transcripts_roberta",
+  "architectures": [
+    "RobertaForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 6,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.34.1",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
+}
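Per this config, the distilled student is a 6-layer RoBERTa (hidden size 768, 12 attention heads), presumably half the depth of the roberta-base-sized model named in `_name_or_path`. A quick sanity check, assuming the checkpoint is on the Hub under this repo id:

```python
# Inspect the shipped config; values should match config.json above.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("Zamoranesis/clinical_transcripts_roberta_distilled")
print(cfg.num_hidden_layers, cfg.num_attention_heads, cfg.hidden_size)
# expected: 6 12 768
```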
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3db7a53776ceb4c8e822a2407beaa0af735d5d2b970b7aeea4c6fbe21028e467
+size 328715954
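The weights themselves live in Git LFS, so the diff records only the pointer file. A hedged sketch for verifying a locally downloaded `pytorch_model.bin` against the pointer's oid (the local path is an assumption):

```python
# Stream the ~329 MB file and compare its SHA-256 to the LFS pointer oid.
import hashlib

EXPECTED = "3db7a53776ceb4c8e822a2407beaa0af735d5d2b970b7aeea4c6fbe21028e467"

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:  # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "checksum mismatch"
print("checksum OK")
```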
special_tokens_map.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,57 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50264": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
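Note that `model_max_length` is 512 while `max_position_embeddings` in config.json is 514: RoBERTa reserves two extra positions because position ids are offset past the padding index. A small sketch confirming the special tokens load as declared, assuming the Hub repo id from this commit:

```python
# Load the tokenizer shipped in this commit and check the declared specials.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Zamoranesis/clinical_transcripts_roberta_distilled")
print(tok.mask_token, tok.mask_token_id)  # expected: <mask> 50264
print(tok.model_max_length)               # expected: 512
```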
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ed10eaa35732be06d286c76dd45e9bccb62ba86827fa15c530fe4dfcd711a30
+size 4600
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff