svenbl80 committed on
Commit 2e2dacd · verified · 1 Parent(s): 9ff4eb8

End of training

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,131 @@
+ ---
+ library_name: transformers
+ license: gemma
+ base_model: vidore/colpaligemma-3b-pt-448-base
+ tags:
+ - colpali
+ - generated_from_trainer
+ model-index:
+ - name: finetune_colpali_v1_2-german_ver3-4bit
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # finetune_colpali_v1_2-german_ver3-4bit
+
+ This model is a fine-tuned version of [vidore/colpaligemma-3b-pt-448-base](https://huggingface.co/vidore/colpaligemma-3b-pt-448-base) on the German_docx dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1136
+ - Model Preparation Time: 0.0145
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 16
+ - optimizer: adamw_torch with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 100
+ - num_epochs: 10
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Model Preparation Time |
+ |:-------------:|:------:|:----:|:---------------:|:----------------------:|
+ | No log | 0.0146 | 1 | 0.4346 | 0.0145 |
+ | 1.5435 | 0.1460 | 10 | 0.4079 | 0.0145 |
+ | 1.0614 | 0.2920 | 20 | 0.3614 | 0.0145 |
+ | 1.2701 | 0.4380 | 30 | 0.3264 | 0.0145 |
+ | 0.9991 | 0.5839 | 40 | 0.2962 | 0.0145 |
+ | 0.7682 | 0.7299 | 50 | 0.2620 | 0.0145 |
+ | 0.8224 | 0.8759 | 60 | 0.2321 | 0.0145 |
+ | 0.6615 | 1.0219 | 70 | 0.2152 | 0.0145 |
+ | 0.4141 | 1.1679 | 80 | 0.2102 | 0.0145 |
+ | 0.6246 | 1.3139 | 90 | 0.1958 | 0.0145 |
+ | 0.3975 | 1.4599 | 100 | 0.1798 | 0.0145 |
+ | 0.3532 | 1.6058 | 110 | 0.1644 | 0.0145 |
+ | 0.322 | 1.7518 | 120 | 0.1557 | 0.0145 |
+ | 0.1988 | 1.8978 | 130 | 0.1639 | 0.0145 |
+ | 0.2454 | 2.0438 | 140 | 0.1562 | 0.0145 |
+ | 0.2045 | 2.1898 | 150 | 0.1525 | 0.0145 |
+ | 0.3747 | 2.3358 | 160 | 0.1549 | 0.0145 |
+ | 0.2781 | 2.4818 | 170 | 0.1586 | 0.0145 |
+ | 0.4309 | 2.6277 | 180 | 0.1460 | 0.0145 |
+ | 0.3193 | 2.7737 | 190 | 0.1322 | 0.0145 |
+ | 0.1327 | 2.9197 | 200 | 0.1294 | 0.0145 |
+ | 0.1606 | 3.0657 | 210 | 0.1336 | 0.0145 |
+ | 0.1757 | 3.2117 | 220 | 0.1471 | 0.0145 |
+ | 0.1842 | 3.3577 | 230 | 0.1447 | 0.0145 |
+ | 0.0674 | 3.5036 | 240 | 0.1431 | 0.0145 |
+ | 0.1106 | 3.6496 | 250 | 0.1377 | 0.0145 |
+ | 0.1927 | 3.7956 | 260 | 0.1327 | 0.0145 |
+ | 0.18 | 3.9416 | 270 | 0.1355 | 0.0145 |
+ | 0.1177 | 4.0876 | 280 | 0.1324 | 0.0145 |
+ | 0.1028 | 4.2336 | 290 | 0.1240 | 0.0145 |
+ | 0.0926 | 4.3796 | 300 | 0.1161 | 0.0145 |
+ | 0.107 | 4.5255 | 310 | 0.1110 | 0.0145 |
+ | 0.026 | 4.6715 | 320 | 0.1053 | 0.0145 |
+ | 0.1152 | 4.8175 | 330 | 0.1070 | 0.0145 |
+ | 0.075 | 4.9635 | 340 | 0.1121 | 0.0145 |
+ | 0.1164 | 5.1095 | 350 | 0.1169 | 0.0145 |
+ | 0.099 | 5.2555 | 360 | 0.1128 | 0.0145 |
+ | 0.0542 | 5.4015 | 370 | 0.1121 | 0.0145 |
+ | 0.0177 | 5.5474 | 380 | 0.1133 | 0.0145 |
+ | 0.1006 | 5.6934 | 390 | 0.1146 | 0.0145 |
+ | 0.0708 | 5.8394 | 400 | 0.1133 | 0.0145 |
+ | 0.1511 | 5.9854 | 410 | 0.1063 | 0.0145 |
+ | 0.1232 | 6.1314 | 420 | 0.1101 | 0.0145 |
+ | 0.2041 | 6.2774 | 430 | 0.1116 | 0.0145 |
+ | 0.0376 | 6.4234 | 440 | 0.1112 | 0.0145 |
+ | 0.0149 | 6.5693 | 450 | 0.1134 | 0.0145 |
+ | 0.0437 | 6.7153 | 460 | 0.1108 | 0.0145 |
+ | 0.0862 | 6.8613 | 470 | 0.1093 | 0.0145 |
+ | 0.1215 | 7.0073 | 480 | 0.1101 | 0.0145 |
+ | 0.1635 | 7.1533 | 490 | 0.1073 | 0.0145 |
+ | 0.0207 | 7.2993 | 500 | 0.1067 | 0.0145 |
+ | 0.1199 | 7.4453 | 510 | 0.1075 | 0.0145 |
+ | 0.0094 | 7.5912 | 520 | 0.1078 | 0.0145 |
+ | 0.0543 | 7.7372 | 530 | 0.1089 | 0.0145 |
+ | 0.0584 | 7.8832 | 540 | 0.1069 | 0.0145 |
+ | 0.0672 | 8.0292 | 550 | 0.1089 | 0.0145 |
+ | 0.094 | 8.1752 | 560 | 0.1129 | 0.0145 |
+ | 0.0206 | 8.3212 | 570 | 0.1138 | 0.0145 |
+ | 0.0509 | 8.4672 | 580 | 0.1124 | 0.0145 |
+ | 0.081 | 8.6131 | 590 | 0.1115 | 0.0145 |
+ | 0.0727 | 8.7591 | 600 | 0.1141 | 0.0145 |
+ | 0.0548 | 8.9051 | 610 | 0.1136 | 0.0145 |
+ | 0.0356 | 9.0511 | 620 | 0.1139 | 0.0145 |
+ | 0.1291 | 9.1971 | 630 | 0.1142 | 0.0145 |
+ | 0.2415 | 9.3431 | 640 | 0.1121 | 0.0145 |
+ | 0.0088 | 9.4891 | 650 | 0.1118 | 0.0145 |
+ | 0.0136 | 9.6350 | 660 | 0.1130 | 0.0145 |
+ | 0.0327 | 9.7810 | 670 | 0.1137 | 0.0145 |
+ | 0.106 | 9.9270 | 680 | 0.1136 | 0.0145 |
+
+
+ ### Framework versions
+
+ - Transformers 4.46.1
+ - PyTorch 2.3.1
+ - Datasets 3.1.0
+ - Tokenizers 0.20.1
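
A minimal retrieval sketch follows for readers of this card. It is not part of this commit: it assumes the colpali-engine package is installed, and it guesses the adapter repo ID (`svenbl80/finetune_colpali_v1_2-german_ver3-4bit`) from the committer and model name, so adjust both to your setup.

```python
# Minimal usage sketch (not part of this commit). Assumes colpali-engine is
# installed and that the adapter is published under the repo ID below.
import torch
from PIL import Image
from colpali_engine.models import ColPali, ColPaliProcessor

adapter_id = "svenbl80/finetune_colpali_v1_2-german_ver3-4bit"  # assumed repo ID

# from_pretrained on an adapter-only repo resolves the base model named in
# adapter_config.json (vidore/colpaligemma-3b-pt-448-base) and attaches the LoRA weights.
model = ColPali.from_pretrained(
    adapter_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
).eval()
processor = ColPaliProcessor.from_pretrained(adapter_id)

images = [Image.new("RGB", (448, 448), "white")]       # replace with real page images
queries = ["Wie hoch war der Umsatz im Jahr 2023?"]     # German query, matching the training domain

batch_images = processor.process_images(images).to(model.device)
batch_queries = processor.process_queries(queries).to(model.device)

with torch.no_grad():
    image_embeddings = model(**batch_images)            # multi-vector page embeddings
    query_embeddings = model(**batch_queries)           # multi-vector query embeddings

# Late-interaction (MaxSim) scores: one row per query, one column per page.
scores = processor.score_multi_vector(query_embeddings, image_embeddings)
print(scores)
```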
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "vidore/colpaligemma-3b-pt-448-base",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": "gaussian",
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": "(.*(language_model).*(down_proj|gate_proj|up_proj|k_proj|q_proj|v_proj|o_proj).*$|.*(custom_text_proj).*$)",
+   "task_type": "FEATURE_EXTRACTION",
+   "use_dora": false,
+   "use_rslora": false
+ }
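
The target_modules regex above applies LoRA to the language model's attention and MLP projections plus ColPali's custom_text_proj head, leaving the vision tower frozen. The sketch below (not part of this commit) simply exercises that regex against a few illustrative module names; the names are assumptions, not taken from this repo.

```python
# Illustrative check of which module names the target_modules regex from
# adapter_config.json selects. PEFT matches a string pattern with re.fullmatch;
# the candidate module names below are assumed examples.
import re

pattern = (
    r"(.*(language_model).*(down_proj|gate_proj|up_proj|k_proj|q_proj|v_proj|o_proj).*$"
    r"|.*(custom_text_proj).*$)"
)

candidates = [
    "model.language_model.model.layers.0.self_attn.q_proj",               # language-model attention -> LoRA
    "model.language_model.model.layers.0.mlp.gate_proj",                  # language-model MLP -> LoRA
    "model.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj",  # vision tower -> frozen
    "custom_text_proj",                                                    # ColPali projection head -> LoRA
]

for name in candidates:
    hit = re.fullmatch(pattern, name) is not None
    print(f"{('LoRA' if hit else 'frozen'):<8}{name}")
```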
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e2e363c03eacc0d738c1f6db6ff87134b58c55b1be847a76676d226207e5a4b
+ size 157071680
preprocessor_config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "do_convert_rgb": null,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "SiglipImageProcessor",
+   "image_seq_length": 1024,
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "processor_class": "ColPaliProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 448,
+     "width": 448
+   }
+ }
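
In plain terms, this config resizes each page image to 448x448 (resample 3 is bicubic), rescales pixel values by 1/255, and normalizes with a per-channel mean and std of 0.5, mapping pixels into [-1, 1]. A minimal NumPy/PIL sketch of that transform follows; it is illustrative only, not this repo's SiglipImageProcessor code path.

```python
# Illustrative sketch of the pixel transform described by preprocessor_config.json.
import numpy as np
from PIL import Image

img = Image.new("RGB", (800, 600), (200, 30, 90))      # placeholder page image
img = img.resize((448, 448), Image.BICUBIC)            # "size": 448x448, "resample": 3 (bicubic)

x = np.asarray(img, dtype=np.float32)
x = x * 0.00392156862745098                            # "rescale_factor" = 1/255 -> [0, 1]
x = (x - 0.5) / 0.5                                    # "image_mean" / "image_std" = 0.5 -> [-1, 1]

print(x.shape, x.min(), x.max())                       # (448, 448, 3), values within [-1, 1]
```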
special_tokens_map.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<image>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ff84f53c290d0348c4e206da6094ef781cf8c0e482fec8b268a996b32257cfd
+ size 34600975
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa2e7013dd46674b889788fdb2a830153e400db843b217889b275ea242f84036
+ size 5240