introvoyz041 committed
Commit f8c7642 · verified · 1 parent: 0d2fee0

Migrated from GitHub

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. data/LICENSE +201 -0
  2. data/assets/cover.gif +3 -0
  3. data/assets/cover.mp4 +3 -0
  4. data/base_model/__init__.py +3 -0
  5. data/base_model/base.py +18 -0
  6. data/base_model/llama3instruct.py +18 -0
  7. data/base_model/mistral03instruct.py +18 -0
  8. data/cfgs/base_model/llama3i8b.yaml +15 -0
  9. data/cfgs/base_model/mistral03i7b.yaml +10 -0
  10. data/cfgs/config.yaml +38 -0
  11. data/cfgs/mode/eval.yaml +16 -0
  12. data/cfgs/mode/training.yaml +0 -0
  13. data/cfgs/optimization/cem.yaml +20 -0
  14. data/cfgs/optimization/reinforce.yaml +21 -0
  15. data/cfgs/optimization/rsm.yaml +20 -0
  16. data/cfgs/policy/default.yaml +9 -0
  17. data/cfgs/policy/wcomb.yaml +15 -0
  18. data/cfgs/task/ablation_tasks/few_shot_arc_challenge_20.yaml +15 -0
  19. data/cfgs/task/ablation_tasks/few_shot_arc_challenge_3.yaml +15 -0
  20. data/cfgs/task/ablation_tasks/few_shot_arc_challenge_5.yaml +15 -0
  21. data/cfgs/task/ai2_arc.yaml +6 -0
  22. data/cfgs/task/cls.yaml +6 -0
  23. data/cfgs/task/few_shot_arc_challenge.yaml +15 -0
  24. data/cfgs/task/few_shot_humaneval.yaml +14 -0
  25. data/cfgs/task/few_shot_math.yaml +15 -0
  26. data/cfgs/task/gsm8k.yaml +6 -0
  27. data/cfgs/task/math.yaml +6 -0
  28. data/cfgs/task/mbpp2.yaml +6 -0
  29. data/evaluation/fishfarm/fishfarm/__init__.py +14 -0
  30. data/evaluation/fishfarm/fishfarm/chat_templates.py +13 -0
  31. data/evaluation/fishfarm/fishfarm/imports.py +94 -0
  32. data/evaluation/fishfarm/fishfarm/logging.py +190 -0
  33. data/evaluation/fishfarm/fishfarm/models/__init__.py +12 -0
  34. data/evaluation/fishfarm/fishfarm/models/base.py +54 -0
  35. data/evaluation/fishfarm/fishfarm/models/tokenization_utils.py +62 -0
  36. data/evaluation/fishfarm/fishfarm/models/vllm_model.py +145 -0
  37. data/evaluation/fishfarm/fishfarm/tasks/__init__.py +8 -0
  38. data/evaluation/fishfarm/fishfarm/tasks/ai2_arc.py +118 -0
  39. data/evaluation/fishfarm/fishfarm/tasks/base.py +28 -0
  40. data/evaluation/fishfarm/fishfarm/tasks/competation_math.py +391 -0
  41. data/evaluation/fishfarm/fishfarm/tasks/evalplus/__init__.py +4 -0
  42. data/evaluation/fishfarm/fishfarm/tasks/evalplus/data.py +94 -0
  43. data/evaluation/fishfarm/fishfarm/tasks/evalplus/evaluation.py +257 -0
  44. data/evaluation/fishfarm/fishfarm/tasks/evalplus/generation.py +77 -0
  45. data/evaluation/fishfarm/fishfarm/tasks/evalplus/sanitization.py +195 -0
  46. data/evaluation/fishfarm/fishfarm/tasks/evalplus/task.py +54 -0
  47. data/evaluation/fishfarm/fishfarm/tasks/language_restricted_math.py +106 -0
  48. data/evaluation/fishfarm/fishfarm/version.py +1 -0
  49. data/evaluation/fishfarm/pyproject.toml +109 -0
  50. data/evaluation/fishfarm/tox.ini +8 -0
data/LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
data/assets/cover.gif ADDED

Git LFS Details

  • SHA256: a2d9c91447847e33f8add372e30b9a482aa6f4211d1f18828c4d981ecde64487
  • Pointer size: 131 Bytes
  • Size of remote file: 467 kB
data/assets/cover.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea293fa4363abed1ef51d27525a430942d273a4138e7968c23e03df07215cad3
+ size 522068
data/base_model/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .base import BaseModel
+ from .llama3instruct import Llama3Instruct8B
+ from .mistral03instruct import MistralV03Instruct7B
data/base_model/base.py ADDED
@@ -0,0 +1,18 @@
+ from abc import ABC, abstractmethod
+
+
+ class BaseModel(ABC):
+     def __init__(self):
+         pass
+
+     @abstractmethod
+     def get_model_id(self):
+         raise NotImplementedError
+
+     @abstractmethod
+     def get_model_name(self):
+         raise NotImplementedError
+
+     @abstractmethod
+     def get_param_file(self, param_folder_path=""):
+         raise NotImplementedError
data/base_model/llama3instruct.py ADDED
@@ -0,0 +1,18 @@
+ import os
+
+ from .base import BaseModel
+
+
+ class Llama3Instruct8B(BaseModel):
+     def __init__(self):
+         self.model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
+         self.dec_param_file_n = "llama3_decomposed_params.pt"
+
+     def get_model_id(self):
+         return self.model_id
+
+     def get_model_name(self):
+         return self.model_id.split("/")[1]
+
+     def get_param_file(self, param_folder_path=""):
+         return os.path.join(param_folder_path, self.dec_param_file_n)
data/base_model/mistral03instruct.py ADDED
@@ -0,0 +1,18 @@
+ import os
+
+ from .base import BaseModel
+
+
+ class MistralV03Instruct7B(BaseModel):
+     def __init__(self):
+         self.model_id = "mistralai/Mistral-7B-Instruct-v0.3"
+         self.dec_param_file_n = "mistral_decomposed_params.pt"
+
+     def get_model_id(self):
+         return self.model_id
+
+     def get_model_name(self):
+         return self.model_id.split("/")[1]
+
+     def get_param_file(self, param_folder_path=""):
+         return os.path.join(param_folder_path, self.dec_param_file_n)
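The three modules above form a small registry of base-model wrappers: each subclass pins a Hugging Face model id and the filename of its decomposed parameters. A minimal usage sketch, assuming the `data/base_model` package is importable as `base_model` (the "ckpts" folder argument below is illustrative):

    # Sketch: resolving a model id and its decomposed-parameter file.
    from base_model import Llama3Instruct8B

    model = Llama3Instruct8B()
    print(model.get_model_id())           # meta-llama/Meta-Llama-3-8B-Instruct
    print(model.get_model_name())         # Meta-Llama-3-8B-Instruct
    print(model.get_param_file("ckpts"))  # ckpts/llama3_decomposed_params.pt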
data/cfgs/base_model/llama3i8b.yaml ADDED
@@ -0,0 +1,15 @@
+ base_model:
+   _target_: base_model.Llama3Instruct8B
+
+
+ base_model_name: llama3i8b
+
+ # reference_params_results:
+ #   - 'saved_models/llama3i8b/gsm8k/learnable_params.pt'
+ #   - 'saved_models/llama3i8b/mbpp/learnable_params.pt'
+ #   - 'saved_models/llama3i8b/ai2arc/learnable_params.pt'
+
+ reference_params_results:
+   - "ckpts/learnable_params/llama3_8b_instruct_gsm8k_svd_pg_mlp.pt"
+   - "ckpts/learnable_params/llama3_8b_instruct_mbpp_pro_svd_pg_mlp.pt"
+   - "ckpts/learnable_params/llama3_8b_instruct_gsm8k_svd_pg_mlp.pt"
data/cfgs/base_model/mistral03i7b.yaml ADDED
@@ -0,0 +1,10 @@
+ base_model:
+   _target_: base_model.MistralV03Instruct7B
+
+
+ base_model_name: mistral03i7b
+
+ reference_params_results:
+   - 'saved_models/mistral03i7b/gsm8k/policy_params.pt'
+   - 'saved_models/mistral03i7b/mbpp/policy_params.pt'
+   - 'saved_models/mistral03i7b/ai2arc/policy_params.pt'
data/cfgs/config.yaml ADDED
@@ -0,0 +1,38 @@
+ defaults:
+   - _self_
+   - policy@_global_: default
+   - task@_global_: gsm8k
+   - base_model@_global_: llama3i8b
+   - optimization@_global_: reinforce
+   - mode@_global_: training
+
+ num_iters: 2000
+ test_interval: 10
+ lr: 2e-3
+ batch_size: 256
+ seed: 42
+ init_val: 0.1
+ test_only: false
+ model_dir: null
+ save_legacy_params: false
+ use_lora: false
+ prompt_based_eval: false
+ experts_path_dict: null
+
+ run_name: null
+
+ load_ckpt: null
+ exp_suffix: 'st'
+
+ exp_name: ${base_model_name}/${optim_name}-${exp_suffix}
+
+ wandb_log: true # enabled by default
+ wandb_project: shakeoff
+ wandb_group_name: ${exp_name}
+ extract_svd: false
+
+ out_dir: results
+
+ hydra:
+   run:
+     dir: ${out_dir}/
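config.yaml is a Hydra root config: the `defaults` list pulls one option from each config group (`policy`, `task`, `base_model`, `optimization`, `mode`) and merges it at global scope, which is why keys like `optim_name` and `task_name` interpolate freely into `exp_name`. A hedged sketch of an entry point consuming it (the script name `main.py` and the printed keys are illustrative, and `version_base=None` assumes Hydra >= 1.2):

    # Sketch of a Hydra entry point for cfgs/config.yaml.
    # Group selections can be overridden on the CLI, e.g.:
    #   python main.py task=math optimization=cem mode=eval
    import hydra
    from omegaconf import DictConfig

    @hydra.main(config_path="cfgs", config_name="config", version_base=None)
    def main(cfg: DictConfig) -> None:
        print(cfg.exp_name)   # e.g. llama3i8b/RL-lr0.002-...-st
        print(cfg.num_iters)  # 2000

    if __name__ == "__main__":
        main()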
data/cfgs/mode/eval.yaml ADDED
@@ -0,0 +1,16 @@
+ exp_name: eval_${base_model_name}/temp-lr${lr}-mGN${max_grad_norm}-klC${kl_ref_coeff}-r${rw_strategy}-${exp_suffix}-r
+
+ test_only: true
+ load_ckpt: null
+ use_lora: false
+
+ prompt_based_eval: false
+ experts_path_dict:
+   code: null
+   math: null
+   reasoning: null
+   other: null
+
+ wandb_project: T^2_eval
+ wandb_group_name: ${exp_name}
+ out_dir: results_eval
data/cfgs/mode/training.yaml ADDED
File without changes
data/cfgs/optimization/cem.yaml ADDED
@@ -0,0 +1,20 @@
+
+ optimization_algorithm:
+   _target_: optim_modules.CEM
+   elite_ratio: ${elite_ratio}
+   pop_size: ${pop_size}
+   min_trainable_param: ${min_trainable_param}
+   max_trainable_param: ${max_trainable_param}
+   optim_ema: ${optim_ema}
+   re_eval_best: ${re_eval_best}
+   use_loglikelihood_for_ties: ${use_loglikelihood_for_ties}
+
+
+ pop_size: 32
+ elite_ratio: 0.2
+ min_trainable_param: 0
+ max_trainable_param: 1
+ optim_ema: 0
+ re_eval_best: True
+ use_loglikelihood_for_ties: true
+ optim_name: CEM-pop${pop_size}e${elite_ratio}-[${min_trainable_param}-${max_trainable_param}]-tieswLL${use_loglikelihood_for_ties}
data/cfgs/optimization/reinforce.yaml ADDED
@@ -0,0 +1,21 @@
+
+ optimization_algorithm:
+   _target_: optim_modules.Reinforce
+   # policy: ${policy}
+   # gpu: ${gpu}
+   max_grad_norm: ${max_grad_norm}
+   lr: ${lr}
+   rw_norm: ${rw_norm}
+   rw_clip: ${rw_clip}
+   kl_ref_coeff: ${kl_ref_coeff}
+
+
+ # policy:
+ # gpu:
+ max_grad_norm: 1e-3
+ lr: 2e-3
+ rw_norm: 0
+ rw_clip: null
+ kl_ref_coeff: 0
+ rw_strategy: rN${rw_norm}C${rw_clip}
+ optim_name: RL-lr${lr}-mGN${max_grad_norm}-klC${kl_ref_coeff}-r${rw_strategy}
data/cfgs/optimization/rsm.yaml ADDED
@@ -0,0 +1,20 @@
+
+ optimization_algorithm:
+   _target_: optim_modules.RandomShooting
+   # policy: ${policy}
+   # gpu: ${gpu}
+   pop_size: ${pop_size}
+   min_trainable_param: ${min_trainable_param}
+   max_trainable_param: ${max_trainable_param}
+   optim_ema: ${optim_ema}
+   re_eval_best: ${re_eval_best}
+   use_loglikelihood_for_ties: ${use_loglikelihood_for_ties}
+
+
+ pop_size: 32
+ min_trainable_param: 0
+ max_trainable_param: 1
+ optim_ema: 0
+ re_eval_best: True
+ use_loglikelihood_for_ties: false
+ optim_name: RSML-pop${pop_size}-[${min_trainable_param}-${max_trainable_param}]-tieswLL${use_loglikelihood_for_ties}
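All three optimizer configs follow the same Hydra `_target_` convention: the node names a class in `optim_modules` (which is outside this 50-file view) and the remaining keys are forwarded as constructor arguments, with `${...}` interpolations resolved against the top-level values defined below the node. A hedged sketch of how such a node is typically turned into an object:

    # Sketch: instantiating an optimization_algorithm node with Hydra.
    # hydra.utils.instantiate() imports the _target_ class and calls it
    # with the interpolated keyword arguments.
    from hydra.utils import instantiate

    def build_optimizer(cfg):
        # For cfgs/optimization/cem.yaml this resolves roughly to
        # optim_modules.CEM(elite_ratio=0.2, pop_size=32, ...).
        return instantiate(cfg.optimization_algorithm)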
data/cfgs/policy/default.yaml ADDED
@@ -0,0 +1,9 @@
+ shakeoff_policy:
+   _target_: policy.Policy
+   init_val: ${init_val}
+   mode: ${policy_mode}
+   max_mult: ${max_mult}
+
+ policy_mode: 1
+ max_mult: 1
+ policy_name: ${policy_mode}_mm${max_mult}
data/cfgs/policy/wcomb.yaml ADDED
@@ -0,0 +1,15 @@
+
+
+ shakeoff_policy:
+   _target_: policy.WeightedCombination
+   base_policy_cfg: null
+   params_paths: ${reference_params_results}
+   norm_coeffs: ${norm_coeffs}
+   per_layer: ${per_layer}
+   init_values: ${init_values}
+
+ norm_coeffs: true
+ per_layer: false
+ init_values: null
+
+ policy_name: Wcomb_n${norm_coeffs}_p${per_layer}
data/cfgs/task/ablation_tasks/few_shot_arc_challenge_20.yaml ADDED
@@ -0,0 +1,15 @@
+ task_loader:
+   _target_: tasks.FewShotTask
+   wrapped_task:
+     _target_: tasks.AI2ArcTask
+   wrapped_split: ${wrapped_split}
+   shots: ${task_shots}
+   seed: ${task_loader_seed}
+
+
+ wrapped_split: transfer
+ task_shots: 20
+ task_loader_seed: 38
+
+ task_name: arc_chal_${task_shots}shots
+
data/cfgs/task/ablation_tasks/few_shot_arc_challenge_3.yaml ADDED
@@ -0,0 +1,15 @@
+ task_loader:
+   _target_: tasks.FewShotTask
+   wrapped_task:
+     _target_: tasks.AI2ArcTask
+   wrapped_split: ${wrapped_split}
+   shots: ${task_shots}
+   seed: ${task_loader_seed}
+
+
+ wrapped_split: transfer
+ task_shots: 3
+ task_loader_seed: 38
+
+ task_name: arc_chal_${task_shots}shots
+
data/cfgs/task/ablation_tasks/few_shot_arc_challenge_5.yaml ADDED
@@ -0,0 +1,15 @@
+ task_loader:
+   _target_: tasks.FewShotTask
+   wrapped_task:
+     _target_: tasks.AI2ArcTask
+   wrapped_split: ${wrapped_split}
+   shots: ${task_shots}
+   seed: ${task_loader_seed}
+
+
+ wrapped_split: transfer
+ task_shots: 5
+ task_loader_seed: 38
+
+ task_name: arc_chal_${task_shots}shots
+
data/cfgs/task/ai2_arc.yaml ADDED
@@ -0,0 +1,6 @@
+ task_loader:
+   _target_: tasks.AI2ArcTask
+
+
+ task_name: ai2_arc
+
data/cfgs/task/cls.yaml ADDED
@@ -0,0 +1,6 @@
+ task_loader:
+   _target_: tasks.ClsTask
+
+
+ task_name: Cls
+
data/cfgs/task/few_shot_arc_challenge.yaml ADDED
@@ -0,0 +1,15 @@
+ task_loader:
+   _target_: tasks.FewShotTask
+   wrapped_task:
+     _target_: tasks.AI2ArcTask
+   wrapped_split: ${wrapped_split}
+   shots: ${task_shots}
+   seed: ${task_loader_seed}
+
+
+ wrapped_split: transfer
+ task_shots: 10
+ task_loader_seed: 38
+
+ task_name: arc_chal_${task_shots}shots
+
data/cfgs/task/few_shot_humaneval.yaml ADDED
@@ -0,0 +1,14 @@
+ task_loader:
+   _target_: tasks.FewShotTask
+   wrapped_task:
+     _target_: tasks.Mbpp2Task2
+   wrapped_split: ${wrapped_split}
+   shots: ${task_shots}
+   seed: ${task_loader_seed}
+
+
+ wrapped_split: transfer
+ task_shots: 10
+ task_loader_seed: 16
+
+ task_name: humaneval_${task_shots}shots
data/cfgs/task/few_shot_math.yaml ADDED
@@ -0,0 +1,15 @@
+ task_loader:
+   _target_: tasks.FewShotTask
+   wrapped_task:
+     _target_: tasks.MathTask
+   wrapped_split: ${wrapped_split}
+   shots: ${task_shots}
+   seed: ${task_loader_seed}
+
+
+ wrapped_split: test
+ task_shots: 10
+ task_loader_seed: 27
+
+ task_name: math_${task_shots}shots
+
data/cfgs/task/gsm8k.yaml ADDED
@@ -0,0 +1,6 @@
+ task_loader:
+   _target_: tasks.Gsm8kTask
+
+
+ task_name: gsm8k
+
data/cfgs/task/math.yaml ADDED
@@ -0,0 +1,6 @@
+ task_loader:
+   _target_: tasks.MathTask
+
+
+ task_name: math
+
data/cfgs/task/mbpp2.yaml ADDED
@@ -0,0 +1,6 @@
+ task_loader:
+   _target_: tasks.Mbpp2Task
+
+
+ task_name: mbpp
+
data/evaluation/fishfarm/fishfarm/__init__.py ADDED
@@ -0,0 +1,14 @@
+ from . import chat_templates, models, tasks
+ from .models import Message, Model, Role
+ from .tasks import Task, TaskResult
+
+ __all__ = [
+     "chat_templates",
+     "tasks",
+     "models",
+     "Task",
+     "TaskResult",
+     "Model",
+     "Message",
+     "Role",
+ ]
data/evaluation/fishfarm/fishfarm/chat_templates.py ADDED
@@ -0,0 +1,13 @@
+ LLAMA3 = (
+     "{% set loop_messages = messages %}"
+     "{% for message in loop_messages %}"
+     "{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>"
+     "\n\n'+ message['content'] | trim + '<|eot_id|>' %}"
+     "{% if loop.index0 == 0 %}{% set content = bos_token + content %}"
+     "{% endif %}"
+     "{{ content }}"
+     "{% endfor %}"
+     "{% if add_generation_prompt %}"
+     "{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}"
+     "{% endif %}"
+ )
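LLAMA3 is a Jinja chat template string in the format expected by `tokenizer.apply_chat_template`. A small sketch of how it renders a conversation (the tokenizer id matches the one used elsewhere in this commit; the exact token layout is described in the comment):

    # Sketch: rendering messages with the LLAMA3 template above.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
    messages = [{"role": "user", "content": "What is 2 + 2?"}]
    prompt = tokenizer.apply_chat_template(
        messages,
        chat_template=LLAMA3,
        tokenize=False,
        add_generation_prompt=True,
    )
    # prompt starts with the BOS token, wraps each turn in
    # <|start_header_id|>role<|end_header_id|>\n\n...<|eot_id|>,
    # and ends with an open assistant header for generation.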
data/evaluation/fishfarm/fishfarm/imports.py ADDED
@@ -0,0 +1,94 @@
+ from types import TracebackType
+ from typing import Optional, Tuple, Type
+
+
+ class _DeferredImportExceptionContextManager:
+     """Context manager to defer exceptions from imports.
+
+     Catches :exc:`ImportError` and :exc:`SyntaxError`.
+     If any exception is caught, this class raises an :exc:`ImportError` when checked.
+
+     """
+
+     def __init__(self) -> None:
+         self._deferred: Optional[Tuple[Exception, str]] = None
+
+     def __enter__(self) -> "_DeferredImportExceptionContextManager":
+         """Enter the context manager.
+
+         Returns:
+             Itself.
+
+         """
+         return self
+
+     def __exit__(
+         self,
+         exc_type: Optional[Type[Exception]],
+         exc_value: Optional[Exception],
+         traceback: Optional[TracebackType],
+     ) -> Optional[bool]:
+         """Exit the context manager.
+
+         Args:
+             exc_type:
+                 Raised exception type. :obj:`None` if nothing is raised.
+             exc_value:
+                 Raised exception object. :obj:`None` if nothing is raised.
+             traceback:
+                 Associated traceback. :obj:`None` if nothing is raised.
+
+         Returns:
+             :obj:`None` if nothing is deferred, otherwise :obj:`True`.
+             :obj:`True` suppresses the exception, preventing it from propagating.
+
+         """
+         if isinstance(exc_value, (ImportError, SyntaxError)):
+             if isinstance(exc_value, ImportError):
+                 message = (
+                     "Tried to import '{}' but failed. Please make sure that the package is "
+                     "installed correctly to use this feature. Actual error: {}."
+                 ).format(exc_value.name, exc_value)
+             elif isinstance(exc_value, SyntaxError):
+                 message = (
+                     "Tried to import a package but failed due to a syntax error in {}. Please "
+                     "make sure that the Python version is correct to use this feature. Actual "
+                     "error: {}."
+                 ).format(exc_value.filename, exc_value)
+             else:
+                 assert False
+
+             self._deferred = (exc_value, message)
+             return True
+         return None
+
+     def is_successful(self) -> bool:
+         """Return whether the context manager has caught any exceptions.
+
+         Returns:
+             :obj:`True` if no exceptions are caught, :obj:`False` otherwise.
+
+         """
+         return self._deferred is None
+
+     def check(self) -> None:
+         """Check whether the context manager has caught any exceptions.
+
+         Raises:
+             :exc:`ImportError`:
+                 If an exception was deferred, it is raised as an :exc:`ImportError`
+                 chained from the original exception.
+
+         """
+         if self._deferred is not None:
+             exc_value, message = self._deferred
+             raise ImportError(message) from exc_value
+
+
+ def try_import() -> _DeferredImportExceptionContextManager:
+     """Create a context manager that can wrap imports of optional packages to defer exceptions.
+
+     Returns:
+         Deferred import context manager.
+
+     """
+     return _DeferredImportExceptionContextManager()
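try_import defers optional-dependency failures until the dependency is actually needed, mirroring Optuna's pattern; vllm_model.py below uses it exactly this way. A minimal sketch (the package name is a hypothetical placeholder):

    # Sketch: guarding an optional dependency with try_import().
    from fishfarm.imports import try_import

    with try_import() as _imports:
        import some_optional_package  # hypothetical optional dependency

    # No error so far; the deferred ImportError surfaces only on check().
    if _imports.is_successful():
        pass  # safe to use the package here
    else:
        _imports.check()  # raises ImportError with an installation hint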
data/evaluation/fishfarm/fishfarm/logging.py ADDED
@@ -0,0 +1,190 @@
+ """
+ Copied from the Optuna repo:
+ https://github.com/optuna/optuna/blob/2595653638506e1b7e025a966a220984a59ab936/optuna/logging.py
+ Removed some comments for less verbosity.
+
+ In general, `logger.info` is preferred over `print` since it contains the module name and
+ timestamp; we recommend that fishfarm developers use the logger object.
+
+ Inside fishfarm, we can call `get_logger(__name__)` from each python file.
+ Then the root logger format and level are applied to that logger object.
+ """
+
+ from __future__ import annotations
+
+ import logging
+ import os
+ import sys
+ import threading
+ from logging import CRITICAL, DEBUG, ERROR, FATAL, INFO, WARN, WARNING
+
+ import colorlog
+
+ __all__ = [
+     "CRITICAL",
+     "DEBUG",
+     "ERROR",
+     "FATAL",
+     "INFO",
+     "WARN",
+     "WARNING",
+ ]
+
+ _lock: threading.Lock = threading.Lock()
+ _default_handler: logging.Handler | None = None
+
+
+ def create_default_formatter() -> logging.Formatter:
+     """Create a default formatter of log messages.
+
+     This function is not supposed to be directly accessed by library users.
+     """
+     header = "[%(levelname)1.1s %(asctime)s %(name)s]"
+     message = "%(message)s"
+     if _color_supported():
+         return colorlog.ColoredFormatter(
+             f"%(log_color)s{header}%(reset)s {message}",
+         )
+     return logging.Formatter(f"{header} {message}")
+
+
+ def _color_supported() -> bool:
+     """Detection of color support."""
+     # NO_COLOR environment variable:
+     if os.environ.get("NO_COLOR", None):
+         return False
+
+     if not hasattr(sys.stderr, "isatty") or not sys.stderr.isatty():
+         return False
+     else:
+         return True
+
+
+ def _get_library_name() -> str:
+     return __name__.split(".")[0]
+
+
+ def _get_library_root_logger() -> logging.Logger:
+     return logging.getLogger(_get_library_name())
+
+
+ def _configure_library_root_logger() -> None:
+     global _default_handler
+
+     with _lock:
+         if _default_handler:
+             # This library has already configured the library root logger.
+             return
+         _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
+         _default_handler.setFormatter(create_default_formatter())
+
+         # Apply our default configuration to the library root logger.
+         library_root_logger: logging.Logger = _get_library_root_logger()
+         library_root_logger.addHandler(_default_handler)
+         library_root_logger.setLevel(logging.INFO)
+         library_root_logger.propagate = False
+
+
+ def _reset_library_root_logger() -> None:
+     global _default_handler
+
+     with _lock:
+         if not _default_handler:
+             return
+
+         library_root_logger: logging.Logger = _get_library_root_logger()
+         library_root_logger.removeHandler(_default_handler)
+         library_root_logger.setLevel(logging.NOTSET)
+         _default_handler = None
+
+
+ def get_logger(name: str) -> logging.Logger:
+     """Return a logger with the specified name.
+
+     The name's prefix should be `fishfarm.` (just like the `__name__` variable);
+     otherwise the root logger settings will not be reflected.
+     This function is not supposed to be directly accessed by library users.
+     """
+
+     _configure_library_root_logger()
+     return logging.getLogger(name)
+
+
+ def get_verbosity() -> int:
+     """Return the current level for fishfarm's root logger.
+
+     Returns:
+         Logging level, e.g., ``fishfarm.logging.DEBUG`` and ``fishfarm.logging.INFO``.
+
+     .. note::
+         fishfarm has the following logging levels:
+
+         - ``fishfarm.logging.CRITICAL``, ``fishfarm.logging.FATAL``
+         - ``fishfarm.logging.ERROR``
+         - ``fishfarm.logging.WARNING``, ``fishfarm.logging.WARN``
+         - ``fishfarm.logging.INFO``
+         - ``fishfarm.logging.DEBUG``
+     """
+
+     _configure_library_root_logger()
+     return _get_library_root_logger().getEffectiveLevel()
+
+
+ def set_verbosity(verbosity: int) -> None:
+     """Set the level for fishfarm's root logger.
+
+     Args:
+         verbosity:
+             Logging level, e.g., ``fishfarm.logging.DEBUG`` and ``fishfarm.logging.INFO``.
+
+     .. note::
+         fishfarm has the following logging levels:
+
+         - ``fishfarm.logging.CRITICAL``, ``fishfarm.logging.FATAL``
+         - ``fishfarm.logging.ERROR``
+         - ``fishfarm.logging.WARNING``, ``fishfarm.logging.WARN``
+         - ``fishfarm.logging.INFO``
+         - ``fishfarm.logging.DEBUG``
+     """
+
+     _configure_library_root_logger()
+     _get_library_root_logger().setLevel(verbosity)
+
+
+ def disable_default_handler() -> None:
+     """Disable the default handler of fishfarm's root logger."""
+
+     _configure_library_root_logger()
+
+     assert _default_handler is not None
+     _get_library_root_logger().removeHandler(_default_handler)
+
+
+ def enable_default_handler() -> None:
+     """Enable the default handler of fishfarm's root logger."""
+
+     _configure_library_root_logger()
+
+     assert _default_handler is not None
+     _get_library_root_logger().addHandler(_default_handler)
+
+
+ def disable_propagation() -> None:
+     """Disable propagation of the library log outputs.
+
+     Note that log propagation is disabled by default. You only need to use this function
+     to stop log propagation when you use :func:`~fishfarm.logging.enable_propagation()`.
+     """
+
+     _configure_library_root_logger()
+     _get_library_root_logger().propagate = False
+
+
+ def enable_propagation() -> None:
+     """Enable propagation of the library log outputs.
+
+     Please disable fishfarm's default handler to prevent double logging if the root logger has
+     been configured.
+     """
+
+     _configure_library_root_logger()
+     _get_library_root_logger().propagate = True
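The module mirrors Optuna's library-logging setup: a colorized stderr handler is lazily attached to the `fishfarm` root logger, and propagation is off by default. Typical use inside the library, as a sketch (the logger name and messages are illustrative):

    # Sketch: module-level logger usage inside fishfarm.
    from fishfarm import logging as fishfarm_logging

    logger = fishfarm_logging.get_logger("fishfarm.example")
    logger.info("evaluation started")  # formatted as [I <timestamp> fishfarm.example]

    fishfarm_logging.set_verbosity(fishfarm_logging.DEBUG)
    logger.debug("now visible at DEBUG level")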
data/evaluation/fishfarm/fishfarm/models/__init__.py ADDED
@@ -0,0 +1,12 @@
+ from .base import (GenerationRequest, GenerationResult, Message, Model,
+                    NLLRequest, NLLResult, Role)
+
+ __all__ = [
+     "GenerationRequest",
+     "GenerationResult",
+     "NLLRequest",
+     "NLLResult",
+     "Model",
+     "Role",
+     "Message",
+ ]
data/evaluation/fishfarm/fishfarm/models/base.py ADDED
@@ -0,0 +1,54 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from typing import Iterable, Literal, Optional, Sequence
+
+ Role = Literal["system", "user", "assistant", "assistant_prefill"]
+
+
+ @dataclass
+ class Message:
+
+     role: Role
+     content: str
+
+
+ @dataclass
+ class GenerationRequest:
+
+     messages: list[Message]
+
+     max_tokens: Optional[int] = None
+     stop: Sequence[str] = ()
+
+
+ @dataclass
+ class GenerationResult:
+
+     request: GenerationRequest
+     generation: str
+
+
+ @dataclass
+ class NLLRequest:
+
+     messages: list[Message]
+
+
+ @dataclass
+ class NLLResult:
+
+     request: NLLRequest
+     sum_nll: float
+     num_considered_tokens: int
+
+
+ class Model:
+
+     def generate(
+         self, requests: Sequence[GenerationRequest]
+     ) -> Iterable[GenerationResult]:
+         raise NotImplementedError()
+
+     def nll(self, requests: Sequence[NLLRequest]) -> Iterable[NLLResult]:
+         raise NotImplementedError()
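These dataclasses are the whole model-facing protocol: a backend implements `generate` and `nll` over `Message` sequences, and the `assistant_prefill` role lets a request seed the start of the assistant's reply. Constructing a request, as a sketch:

    # Sketch: building a generation request against the Model protocol.
    from fishfarm.models import GenerationRequest, Message

    request = GenerationRequest(
        messages=[
            Message(role="system", content="You are a helpful assistant."),
            Message(role="user", content="Name a prime number."),
        ],
        max_tokens=16,
        stop=["\n"],
    )
    # results = some_model.generate([request])  # any Model implementation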
data/evaluation/fishfarm/fishfarm/models/tokenization_utils.py ADDED
@@ -0,0 +1,62 @@
+ import dataclasses
+ from typing import Optional
+
+ from transformers import PreTrainedTokenizerBase
+
+ from .base import Message
+
+
+ class MaskedTokens:
+
+     text: str
+     token_ids: list[int]
+     mask: list[bool]
+
+     def __init__(self) -> None:
+         self.text = ""
+         self.token_ids = []
+         self.mask = []
+
+     def extend(
+         self,
+         messages: list[Message],
+         mask_value: bool,
+         tokenizer: PreTrainedTokenizerBase,
+         chat_template: Optional[str],
+         add_generation_prompt: bool,
+     ) -> None:
+         if len(messages) == 0:
+             # `tokenizer.apply_chat_template` does not accept an empty list.
+             raise ValueError("At least one message is required.")
+
+         all_text: str = tokenizer.apply_chat_template(
+             conversation=[dataclasses.asdict(message) for message in messages],
+             chat_template=chat_template,
+             tokenize=False,
+             add_generation_prompt=add_generation_prompt,
+         )
+         assert all_text.startswith(self.text)
+         new_text = all_text[len(self.text) :]
+         new_token_ids: list[int] = tokenizer.encode(new_text, add_special_tokens=False)
+
+         self.token_ids.extend(new_token_ids)
+         self.mask.extend([mask_value] * len(new_token_ids))
+         self.text = all_text
+
+
+ def tokenize_messages(
+     messages: list[Message],
+     tokenizer: PreTrainedTokenizerBase,
+     chat_template: Optional[str],
+ ) -> MaskedTokens:
+     masked_tokens = MaskedTokens()
+
+     for i, message in enumerate(messages):
+         if message.role != "assistant":
+             continue
+
+         masked_tokens.extend(messages[:i], False, tokenizer, chat_template, True)
+         masked_tokens.extend(messages[: i + 1], True, tokenizer, chat_template, False)
+
+     masked_tokens.extend(messages, False, tokenizer, chat_template, True)
+     return masked_tokens
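tokenize_messages builds a token-level mask that is True exactly on assistant tokens: for each assistant message it first extends with the preceding context (mask False, with generation prompt) and then with the assistant message itself (mask True). This mask is what nll() later uses to restrict the loss to assistant spans. An illustrative sketch (passing chat_template=None falls back to the tokenizer's built-in template):

    # Sketch: masking assistant tokens for NLL computation.
    from fishfarm.models.base import Message
    from fishfarm.models.tokenization_utils import tokenize_messages
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
    messages = [
        Message(role="user", content="2 + 2 = ?"),
        Message(role="assistant", content="4"),
    ]
    masked = tokenize_messages(messages, tokenizer, chat_template=None)
    # masked.mask is False over the user turn and True over the tokens of "4".
    assistant_tokens = [t for t, m in zip(masked.token_ids, masked.mask) if m]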
data/evaluation/fishfarm/fishfarm/models/vllm_model.py ADDED
@@ -0,0 +1,145 @@
+ import copy
+ import dataclasses
+ from typing import Any, Iterable, Optional, Sequence
+
+ from fishfarm.models.base import NLLRequest, NLLResult
+ from transformers import PreTrainedTokenizerBase
+
+ from ..imports import try_import
+ from .base import GenerationRequest, GenerationResult, Message, Model
+ from .tokenization_utils import tokenize_messages
+
+ with try_import() as _imports:
+     import vllm
+
+ _imports.check()
+
+
+ class VLLMModel(Model):
+
+     def __init__(
+         self,
+         llm: vllm.LLM,
+         sampling_params: vllm.SamplingParams,
+         chat_template: Optional[str],
+     ) -> None:
+         self.llm = llm
+         self.chat_template = chat_template
+         self.sampling_params = sampling_params
+
+     def get_tokenizer(self) -> PreTrainedTokenizerBase:
+         tokenizer = self.llm.get_tokenizer()
+
+         if not hasattr(tokenizer, "apply_chat_template"):
+             if hasattr(tokenizer, "tokenizer"):
+                 tokenizer = tokenizer.tokenizer
+             else:
+                 raise ValueError(
+                     "The tokenizer does not have the 'apply_chat_template' method. "
+                     "This is likely because of the versions of vLLM or transformers."
+                 )
+
+         return tokenizer
+
+     def _into_prompt(self, messages: Sequence[Message]) -> str:
+         tokenizer = self.get_tokenizer()
+         prefill_text = ""
+         n_assistant_prefill = sum([m.role == "assistant_prefill" for m in messages])
+         if n_assistant_prefill > 1:
+             raise ValueError(
+                 f"There must be at most one assistant_prefill role, but got {n_assistant_prefill}",
+             )
+         if n_assistant_prefill:
+             assert (
+                 messages[-1].role == "assistant_prefill"
+             ), "assistant_prefill role must be the last message"
+             prefill_text = messages[-1].content
+             messages = messages[:-1]
+         prompt: str = tokenizer.apply_chat_template(
+             conversation=[dataclasses.asdict(message) for message in messages],
+             chat_template=self.chat_template,
+             tokenize=False,
+             add_generation_prompt=True,
+         )
+         prompt += prefill_text
+         return prompt
+
+     def _predict_log_probs(self, token_ids_list: list[list[int]]) -> list[list[float]]:
+         sampling_params = copy.copy(self.sampling_params)
+         sampling_params.prompt_logprobs = 1
+         sampling_params.max_tokens = 1
+
+         completions = self.llm.generate(
+             prompt_token_ids=token_ids_list,
+             sampling_params=sampling_params,
+         )
+
+         log_probs_list = []
+         for token_ids, completion in zip(token_ids_list, completions):
+             log_probs = []
+             assert completion.prompt_logprobs is not None
+             assert token_ids == completion.prompt_token_ids
+             assert len(token_ids) == len(completion.prompt_logprobs)
+             for token_id, logprob_dict in zip(token_ids, completion.prompt_logprobs):
+                 if logprob_dict is None:
+                     log_probs.append(0.0)
+                 else:
+                     logprob_entry: Any = logprob_dict[token_id]
+
+                     if isinstance(logprob_entry, float):
+                         log_probs.append(logprob_entry)
+                     else:
+                         log_probs.append(logprob_entry.logprob)
+
+             log_probs_list.append(log_probs)
+
+         return log_probs_list
+
+     def generate(
+         self, requests: Sequence[GenerationRequest]
+     ) -> Iterable[GenerationResult]:
+
+         prompts = [self._into_prompt(request.messages) for request in requests]
+         completions = self.llm.generate(
+             prompts=prompts,
+             sampling_params=self.sampling_params,
+         )
+
+         for request, completion in zip(requests, completions):
+             yield GenerationResult(
+                 request=request, generation=completion.outputs[0].text
+             )
+
+     def nll(self, requests: Sequence[NLLRequest]) -> Iterable[NLLResult]:
+         masked_tokens_list = [
+             tokenize_messages(
+                 request.messages, self.get_tokenizer(), self.chat_template
+             )
+             for request in requests
+         ]
+         log_probs_list = self._predict_log_probs(
+             [masked_tokens.token_ids for masked_tokens in masked_tokens_list]
+         )
+
+         results = []
+         for log_probs, masked_tokens, request in zip(
+             log_probs_list, masked_tokens_list, requests
+         ):
+             assert len(log_probs) == len(masked_tokens.mask)
+
+             sum_nll = 0.0
+             num_considered_tokens = 0
+             for log_prob, mask_value in zip(log_probs, masked_tokens.mask):
+                 if mask_value:
+                     sum_nll += -log_prob
+                     num_considered_tokens += 1
+
+             results.append(
+                 NLLResult(
+                     request=request,
+                     sum_nll=sum_nll,
+                     num_considered_tokens=num_considered_tokens,
+                 )
+             )
+
+         return results
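VLLMModel adapts a vllm.LLM engine to the Model protocol: generate() renders each message list into a prompt (honoring at most one trailing assistant_prefill message), and nll() re-scores prompts with prompt_logprobs=1 and sums negative log-probabilities over the masked assistant tokens. A construction sketch (the sampling values are illustrative):

    # Sketch: wiring a vLLM engine into fishfarm's Model interface.
    import vllm
    from fishfarm.chat_templates import LLAMA3
    from fishfarm.models.vllm_model import VLLMModel

    llm = vllm.LLM(model="meta-llama/Meta-Llama-3-8B-Instruct")
    sampling = vllm.SamplingParams(temperature=0.0, max_tokens=512)
    model = VLLMModel(llm=llm, sampling_params=sampling, chat_template=LLAMA3)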
data/evaluation/fishfarm/fishfarm/tasks/__init__.py ADDED
@@ -0,0 +1,8 @@
+ from . import base
+ from .base import Task, TaskResult
+
+ __all__ = [
+     "base",
+     "TaskResult",
+     "Task",
+ ]
data/evaluation/fishfarm/fishfarm/tasks/ai2_arc.py ADDED
@@ -0,0 +1,118 @@
+ import random
+ import re
+ from dataclasses import dataclass
+ from typing import Iterable, Optional, Sequence
+
+ from ..models import GenerationRequest, Message, Model
+ from .base import Task, TaskResult
+
+
+ def extract_answer(text: str) -> Optional[str]:
+     pattern = r"answer is \(?([A-J])\)?"
+     match = re.search(pattern, text)
+     if match:
+         return match.group(1)
+     else:
+         return extract_again(text)
+
+
+ def extract_again(text: str) -> Optional[str]:
+     match = re.search(r".*[aA]nswer:\s*([A-J])", text)
+     if match:
+         return match.group(1)
+     else:
+         return extract_final(text)
+
+
+ def extract_final(text: str) -> Optional[str]:
+     pattern = r"\b[A-J]\b(?!.*\b[A-J]\b)"
+     match = re.search(pattern, text, re.DOTALL)
+     if match:
+         return match.group(0)
+     else:
+         return None
+
+
+ def is_correct(pred: Optional[str], answer: str, options: list[str]) -> bool:
+     if not pred:
+         random.seed(42)
+         x = random.randint(0, len(options) - 1)
+         if ["A", "B", "C", "D", "E"][x] == answer:
+             return True
+         else:
+             return False
+     elif pred == answer:
+         return True
+     else:
+         return False
+
+
+ @dataclass
+ class Ai2ArcSample:
+
+     question: str
+     question_id: str
+     options: list[str]
+     answer: str
+
+
+ def mean(iterable: Iterable[float]) -> float:
+     total, count = 0.0, 0
+     for x in iterable:
+         total += x
+         count += 1
+     return total / count
+
+
+ class Ai2ArcTask(Task):
+     def __init__(
+         self,
+         samples: Sequence[Ai2ArcSample],
+         context_messages: Sequence[Message] = (),
+     ):
+         self.samples = list(samples)
+         self.context_messages = context_messages
+
+     @property
+     def num_samples(self) -> int:
+         return len(self.samples)
+
+     def evaluate(
+         self,
+         model: Model,
+         sample_ids: Optional[Sequence[int]] = None,
+     ) -> TaskResult:
+         if sample_ids is None:
+             sample_ids = range(len(self.samples))
+         samples = [self.samples[sample_id] for sample_id in sample_ids]
+
+         requests = []
+         for sample in samples:
+             messages = list(self.context_messages)
+             messages.append(Message(role="user", content=sample.question))
+             requests.append(GenerationRequest(messages=messages))
+
+         sample_details = []
+         for sample, result in zip(samples, model.generate(requests)):
+             output = result.generation
+             prediction = extract_answer(result.generation)
+
+             sample_details.append(
+                 dict(
+                     problem=sample.question,
+                     output=output,
+                     answer=sample.answer,
+                     prediction=prediction,
+                     correct=is_correct(prediction, sample.answer, sample.options),
+                 )
+             )
+
+         aggregate_metrics = {
+             "acc": mean(
+                 float(sd["correct"]) if isinstance(sd["correct"], bool) else 0.0
+                 for sd in sample_details
+             )
+         }
+         return TaskResult(
+             aggregate_metrics=aggregate_metrics, sample_details=sample_details
+         )
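Answer extraction cascades through three regexes: an explicit "answer is (X)" pattern, then an "Answer: X" fallback, then the last standalone letter A-J in the output; unparseable outputs fall back to a seeded random guess inside is_correct. A few cases, evaluated in this module's namespace:

    # Sketch: the extraction cascade on example model outputs.
    assert extract_answer("The answer is (B).") == "B"
    assert extract_answer("Answer: C") == "C"          # second regex
    assert extract_answer("I pick D, final.") == "D"   # last-letter fallback
    assert extract_answer("no idea") is None           # triggers the random fallback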
data/evaluation/fishfarm/fishfarm/tasks/base.py ADDED
@@ -0,0 +1,28 @@
+ import abc
+ from dataclasses import dataclass
+ from typing import Any, Optional, Sequence
+
+ from ..models import Model
+
+
+ @dataclass
+ class TaskResult:
+
+     aggregate_metrics: dict[str, float]
+     sample_details: list[dict[str, Any]]
+
+
+ class Task(abc.ABC):
+
+     @property
+     @abc.abstractmethod
+     def num_samples(self) -> int:
+         raise NotImplementedError()
+
+     @abc.abstractmethod
+     def evaluate(
+         self,
+         model: Model,
+         sample_ids: Optional[Sequence[int]] = None,
+     ) -> TaskResult:
+         raise NotImplementedError()
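Task is the minimal evaluation contract: expose num_samples and map a Model (plus optional sample ids for subsampling) to a TaskResult of aggregate metrics and per-sample details. A toy implementation as a sketch (EchoTask is purely illustrative, not part of this commit):

    # Sketch: a trivial Task implementation against the contract above.
    from typing import Optional, Sequence
    from fishfarm.models import GenerationRequest, Message, Model
    from fishfarm.tasks.base import Task, TaskResult

    class EchoTask(Task):
        def __init__(self, prompts: Sequence[str]):
            self.prompts = list(prompts)

        @property
        def num_samples(self) -> int:
            return len(self.prompts)

        def evaluate(
            self, model: Model, sample_ids: Optional[Sequence[int]] = None
        ) -> TaskResult:
            ids = sample_ids if sample_ids is not None else range(len(self.prompts))
            requests = [
                GenerationRequest(
                    messages=[Message(role="user", content=self.prompts[i])]
                )
                for i in ids
            ]
            details = [{"output": r.generation} for r in model.generate(requests)]
            return TaskResult(
                aggregate_metrics={"n": float(len(details))},
                sample_details=details,
            )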
data/evaluation/fishfarm/fishfarm/tasks/competation_math.py ADDED
@@ -0,0 +1,391 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from math import isclose
3
+ from typing import Any, Iterable, Optional, Sequence, Union
4
+
5
+ from sympy import N, simplify
6
+ from sympy.parsing.latex import parse_latex
7
+ from sympy.parsing.sympy_parser import parse_expr
8
+
9
+ from ..models import GenerationRequest, Message, Model
10
+ from .base import Task, TaskResult
11
+
12
+
13
+ def _fix_fracs(string: str) -> str:
14
+ substrs = string.split("\\frac")
15
+ new_str = substrs[0]
16
+ if len(substrs) > 1:
17
+ substrs = substrs[1:]
18
+ for substr in substrs:
19
+ new_str += "\\frac"
20
+ if substr[0] == "{":
21
+ new_str += substr
22
+ else:
23
+ try:
24
+ assert len(substr) >= 2
25
+ except AssertionError:
26
+ return string
27
+ a = substr[0]
28
+ b = substr[1]
29
+ if b != "{":
30
+ if len(substr) > 2:
31
+ post_substr = substr[2:]
32
+ new_str += "{" + a + "}{" + b + "}" + post_substr
33
+ else:
34
+ new_str += "{" + a + "}{" + b + "}"
35
+ else:
36
+ if len(substr) > 2:
37
+ post_substr = substr[2:]
38
+ new_str += "{" + a + "}" + b + post_substr
39
+ else:
40
+ new_str += "{" + a + "}" + b
41
+ string = new_str
42
+ return string
43
+
44
+
45
+ def _fix_a_slash_b(string: str) -> str:
46
+ if len(string.split("/")) != 2:
47
+ return string
48
+ a: str = string.split("/")[0]
49
+ b: str = string.split("/")[1]
50
+ try:
51
+ a_int: int = int(a)
52
+ b_int: int = int(b)
53
+ assert string == "{}/{}".format(a_int, b_int)
54
+ new_string = "\\frac{" + str(a) + "}{" + str(b) + "}"
55
+ return new_string
56
+ except (AssertionError, ValueError):
57
+ return string
58
+
59
+
60
+ def _remove_right_units(string: str) -> str:
61
+ if "\\text{ " in string:
62
+ splits = string.split("\\text{ ")
63
+ assert len(splits) == 2
64
+ return splits[0]
65
+ else:
66
+ return string
67
+
68
+
69
+ def _fix_sqrt(string: str) -> str:
70
+ if "\\sqrt" not in string:
71
+ return string
72
+ splits = string.split("\\sqrt")
73
+ new_string = splits[0]
74
+ for split in splits[1:]:
75
+ if split[0] != "{":
76
+ a = split[0]
77
+ new_substr = "\\sqrt{" + a + "}" + split[1:]
78
+ else:
79
+ new_substr = "\\sqrt" + split
80
+ new_string += new_substr
81
+ return new_string
82
+
83
+
84
+ def _strip_string(string: str) -> str:
85
+ string = string.replace("\n", "")
86
+
87
+ string = string.replace("\\!", "")
88
+
89
+ string = string.replace("\\\\", "\\")
90
+
91
+ string = string.replace("tfrac", "frac")
92
+ string = string.replace("dfrac", "frac")
93
+
94
+ string = string.replace("\\left", "")
95
+ string = string.replace("\\right", "")
96
+
97
+ string = string.replace("^{\\circ}", "")
98
+ string = string.replace("^\\circ", "")
99
+
100
+ string = string.replace("\\$", "")
101
+
102
+ string = _remove_right_units(string)
103
+
104
+ string = string.replace(r"\\%", "")
105
+ string = string.replace(r"\%", "")
106
+
107
+ string = string.replace(" .", " 0.")
108
+ string = string.replace("{.", "{0.")
109
+ if len(string) == 0:
110
+ return string
111
+ if string[0] == ".":
112
+ string = "0" + string
113
+
114
+ if len(string.split("=")) == 2:
115
+ if len(string.split("=")[0]) <= 2:
116
+ string = string.split("=")[1]
117
+
118
+ string = _fix_sqrt(string)
119
+
120
+ string = string.replace(" ", "")
121
+
122
+ string = _fix_fracs(string)
123
+
124
+ if string == "0.5":
125
+ string = "\\frac{1}{2}"
126
+
127
+ string = _fix_a_slash_b(string)
128
+
129
+ return string
130
+
131
+
132
+ def is_digit(s: Union[bool, float, str]) -> bool:
133
+ try:
134
+ float(str(s).replace(",", ""))
135
+ return True
136
+ except ValueError:
137
+ return False
138
+
139
+
140
+ def symbolic_equal(a: str, b: str) -> bool:
141
+ def _parse(s: str) -> Any:
142
+ for f in [parse_latex, parse_expr]:
143
+ try:
144
+ return f(s)
145
+ except Exception:
146
+ pass
147
+ return s
148
+
149
+ a = _parse(a)
150
+ b = _parse(b)
151
+
152
+ try:
153
+ if simplify(a - b) == 0:
154
+ return True
155
+ except Exception:
156
+ pass
157
+
158
+ try:
159
+ if isclose(N(a), N(b), rel_tol=1e-3):
160
+ return True
161
+ except Exception:
162
+ pass
163
+ return False
164
+
165
+
166
+ def math_equal(
167
+ prediction: Union[bool, float, str],
168
+ reference: Union[float, str],
169
+ include_percentage: bool = True,
170
+ is_close: bool = True,
171
+ ) -> bool:
172
+ """
173
+ Exact match of math if and only if:
174
+ 1. numerical equal: both can convert to float and are equal
175
+ 2. symbolic equal: both can convert to sympy expression and are equal
176
+ """
177
+ try:
178
+ if is_digit(prediction) and is_digit(reference):
179
+ prediction = float(str(prediction).replace(",", ""))
180
+ reference = float(str(reference).replace(",", ""))
181
+ if include_percentage:
182
+ gt_result = [reference / 100, reference, reference * 100]
183
+ else:
184
+ gt_result = [reference]
185
+ for item in gt_result:
186
+ try:
187
+ if is_close:
188
+ if isclose(item, prediction, rel_tol=1e-4):
189
+ return True
190
+ else:
191
+ if item == prediction:
192
+ return True
193
+ except Exception:
194
+ continue
195
+ return False
196
+ except Exception:
197
+ pass
198
+
199
+ if not prediction and prediction not in [0, False]:
200
+ return False
201
+
202
+ reference = str(reference).strip()
203
+ prediction = str(prediction).strip()
204
+
205
+ pred_str, ref_str = prediction, reference
206
+ if (
207
+ prediction.startswith("[")
208
+ and prediction.endswith("]")
209
+ and not reference.startswith("(")
210
+ ) or (
211
+ prediction.startswith("(")
212
+ and prediction.endswith(")")
213
+ and not reference.startswith("[")
214
+ ):
215
+ pred_str = pred_str.strip("[]()")
216
+ ref_str = ref_str.strip("[]()")
217
+ for s in ["{", "}", "(", ")"]:
218
+ ref_str = ref_str.replace(s, "")
219
+ pred_str = pred_str.replace(s, "")
220
+ if pred_str == ref_str:
221
+ return True
222
+
223
+ if (
224
+ (prediction.startswith("[") and prediction.endswith("]"))
225
+ and (reference.startswith("[") and reference.endswith("]"))
226
+ or (prediction.startswith("(") and prediction.endswith(")"))
227
+ and (reference.startswith("(") and reference.endswith(")"))
228
+ ):
229
+ pred_parts = prediction[1:-1].split(",")
230
+ ref_parts = reference[1:-1].split(",")
231
+ if len(pred_parts) == len(ref_parts):
232
+ if all(
233
+ [
234
+ math_equal(
235
+ pred_parts[i], ref_parts[i], include_percentage, is_close
236
+ )
237
+ for i in range(len(pred_parts))
238
+ ]
239
+ ):
240
+ return True
241
+
242
+ if symbolic_equal(prediction, reference):
243
+ return True
244
+
245
+ return False
246
+
247
+
248
+ def is_equiv(str1: Optional[str], str2: Optional[str]) -> bool:
+     if str1 is None and str2 is None:
+         return True
+     if str1 is None or str2 is None:
+         return False
+
+     try:
+         ss1 = _strip_string(str1)
+         ss2 = _strip_string(str2)
+         return math_equal(ss1, ss2) or ss1 == ss2
+     except (AssertionError, TypeError, ValueError):
+         return math_equal(str1, str2) or str1 == str2
+
+
+ def last_boxed_only_string(string: str) -> Optional[str]:
+     idx = string.rfind("\\boxed")
+     if idx < 0:
+         idx = string.rfind("\\fbox")
+         if idx < 0:
+             return None
+
+     i = idx
+     right_brace_idx: Optional[int] = None
+
+     num_left_braces_open = 0
+     while i < len(string):
+         if string[i] == "{":
+             num_left_braces_open += 1
+         if string[i] == "}":
+             num_left_braces_open -= 1
+             if num_left_braces_open == 0:
+                 right_brace_idx = i
+                 break
+         i += 1
+
+     if right_brace_idx is None:
+         retval = None
+     else:
+         assert right_brace_idx is not None
+         retval = string[idx : right_brace_idx + 1]
+
+     return retval
+
+
+ def remove_boxed(s: Optional[str]) -> Optional[str]:
+     left = "\\boxed{"
+     if s is None:
+         return None
+     else:
+         try:
+             assert s[: len(left)] == left
+             assert s[-1] == "}"
+             return s[len(left) : -1]
+         except (AssertionError, TypeError, ValueError):
+             return None
+
+
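
Together, last_boxed_only_string and remove_boxed pull the final \boxed{...} (or \fbox) answer with balanced braces out of a completion; a small sketch:

    from fishfarm.tasks.competation_math import last_boxed_only_string, remove_boxed

    completion = "We get $x = 2$, so the area is \\boxed{4\\pi} square units."
    boxed = last_boxed_only_string(completion)  # "\\boxed{4\\pi}"
    assert remove_boxed(boxed) == "4\\pi"
    assert last_boxed_only_string("no boxed answer here") is None
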
+ @dataclass
+ class MathSample:
+
+     problem: str
+     answer: Optional[str] = None
+     type: Optional[str] = None
+
+
+ def mean(iterable: Iterable[float]) -> float:
+     total, count = 0.0, 0
+     for x in iterable:
+         total += x
+         count += 1
+     return total / count
+
+
+ def extract_ans(completion: str) -> Optional[str]:
+
+     split_ans = completion.split("The answer is: ")
+     if len(split_ans) > 1:
+         ans = split_ans[-1]
+         extract_ans_temp = ans.split(".\n")[0]
+         extract_ans_temp = extract_ans_temp.strip()
+         if len(extract_ans_temp) > 0 and extract_ans_temp[-1] == ".":
+             extract_ans = extract_ans_temp[0:-1]
+         else:
+             extract_ans = extract_ans_temp
+         extract_ans = extract_ans.strip()
+         return extract_ans
+     else:
+         return remove_boxed(last_boxed_only_string(completion))
+
+
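
extract_ans prefers an explicit "The answer is: " marker and otherwise falls back to the last boxed expression; a sketch of the expected behavior:

    from fishfarm.tasks.competation_math import extract_ans

    assert extract_ans("So x = 2. The answer is: 42.\nThat is all.") == "42"
    assert extract_ans("Thus the result is \\boxed{\\frac{1}{2}}.") == "\\frac{1}{2}"
    assert extract_ans("no marker and no box") is None
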
+ class LatexFormatMathTask(Task):
+     def __init__(
+         self,
+         samples: Sequence[MathSample],
+         context_messages: Sequence[Message] = (),
+     ):
+         self.samples = list(samples)
+         self.context_messages = context_messages
+
+     @property
+     def num_samples(self) -> int:
+         return len(self.samples)
+
+     def evaluate(
+         self,
+         model: Model,
+         sample_ids: Optional[Sequence[int]] = None,
+     ) -> TaskResult:
+         if sample_ids is None:
+             sample_ids = range(len(self.samples))
+         samples = [self.samples[sample_id] for sample_id in sample_ids]
+
+         requests = []
+         for sample in samples:
+             messages = list(self.context_messages)
+             messages.append(Message(role="user", content=sample.problem))
+             requests.append(GenerationRequest(messages=messages))
+
+         sample_details = []
+         for sample, result in zip(samples, model.generate(requests)):
+             output = result.generation
+             prediction = extract_ans(output)
+
+             sample_details.append(
+                 dict(
+                     problem=sample.problem,
+                     output=output,
+                     answer=sample.answer,
+                     type=sample.type,
+                     prediction=prediction,
+                     correct=is_equiv(sample.answer, prediction),
+                 )
+             )
+
+         aggregate_metrics = {
+             "acc": mean(
+                 float(sd["correct"]) if isinstance(sd["correct"], bool) else 0.0
+                 for sd in sample_details
+             )
+         }
+
+         return TaskResult(
+             aggregate_metrics=aggregate_metrics, sample_details=sample_details
+         )
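
Putting the pieces together, a usage sketch for LatexFormatMathTask; here model stands for any fishfarm Model implementation (for example the vLLM-backed model defined elsewhere in this commit), which this sketch assumes is already constructed:

    from fishfarm.models import Message
    from fishfarm.tasks.competation_math import LatexFormatMathTask, MathSample

    task = LatexFormatMathTask(
        samples=[MathSample(problem="What is $1 + 1$?", answer="2", type="Algebra")],
        context_messages=[
            Message(role="system", content="Solve the problem; end with \\boxed{answer}.")
        ],
    )
    result = task.evaluate(model)  # `model` is an assumed, pre-built Model instance
    print(result.aggregate_metrics["acc"])
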
data/evaluation/fishfarm/fishfarm/tasks/evalplus/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .data import load_dataset
+ from .task import EvalplusTask
+
+ __all__ = ["EvalplusTask", "load_dataset"]
data/evaluation/fishfarm/fishfarm/tasks/evalplus/data.py ADDED
@@ -0,0 +1,94 @@
+ from dataclasses import dataclass
+
+ from evalplus.data import get_human_eval_plus, get_mbpp_plus
+
+
+ @dataclass
+ class TextToCodeProblem:
+     id: str
+     instruction: str
+     response_prefix: str
+
+
+ def get_mbpp_raw_problems() -> list[dict]:
+     problems = get_mbpp_plus()
+     return list(problems.values())
+
+
+ def get_humaneval_raw_problems() -> list[dict]:
+     problems = get_human_eval_plus()
+     return list(problems.values())
+
+
+ def read_mbpp_plus(
+     plus_path: str, err_incomplete: bool = True, mini: bool = False
+ ) -> dict[str, dict]:
+     from evalplus.data.mbpp import (completeness_check,
+                                     mbpp_deserialize_inputs, stream_jsonl)
+
+     plus = {task["task_id"]: task for task in stream_jsonl(plus_path)}
+     for task_id, task in plus.items():
+         task["base_input"] = mbpp_deserialize_inputs(task_id, task["base_input"])
+         task["plus_input"] = mbpp_deserialize_inputs(task_id, task["plus_input"])
+
+     if err_incomplete:
+         completeness_check("MBPP+", plus)
+     return plus
+
+
+ def map_mbpp_problem(p: dict) -> TextToCodeProblem:
+     id = p["task_id"]
+     prompt = p["prompt"]
+     start_index = prompt.index('"""')
+     end_index = prompt.rindex('"""')
+     prompt = prompt[start_index + 3 : end_index]
+     assert_index = prompt.index("assert")
+     instruction = prompt[:assert_index].strip()
+     if not instruction.endswith("."):
+         instruction += "."
+     assertion = prompt[assert_index:].strip()
+     instruction = f"""{instruction} Your code should satisfy the following assertion:
+ ```python
+ {assertion}
+ ```"""
+     response_prefix = """```python"""
+     return TextToCodeProblem(
+         id=str(id), instruction=instruction, response_prefix=response_prefix
+     )
+
+
+ def map_humaneval_problem(p: dict) -> TextToCodeProblem:
+     id = p["task_id"]
+     prompt = p["prompt"]
+     prompt = prompt.strip()
+     instruction = f"""Write a solution to the following problem:
+ ```python
+ {prompt}
+ ```"""
+     response_prefix = f"""```python
+ {prompt}"""
+     return TextToCodeProblem(
+         id=id, instruction=instruction, response_prefix=response_prefix
+     )
+
+
+ def load_dataset(source_dataset: str) -> list[TextToCodeProblem]:
+     if source_dataset not in ("humaneval", "mbpp"):
+         raise ValueError(f"Unknown source_dataset: {source_dataset}")
+
+     raw_problem_fn = {
+         "humaneval": get_humaneval_raw_problems,
+         "mbpp": get_mbpp_raw_problems,
+     }[source_dataset]
+
+     if source_dataset.startswith("humaneval"):
+         map_problem_fn = map_humaneval_problem
+     elif source_dataset.startswith("mbpp"):
+         map_problem_fn = map_mbpp_problem
+     else:
+         raise ValueError(f"Unknown source_dataset: {source_dataset}")
+
+     raw_problems = raw_problem_fn()
+     problems = list(map(map_problem_fn, raw_problems))
+
+     return problems
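
load_dataset turns an EvalPlus split into TextToCodeProblem records whose response_prefix opens a ```python block for the model to continue; a sketch, assuming the evalplus package and its dataset cache are available:

    from fishfarm.tasks.evalplus import load_dataset

    problems = load_dataset("humaneval")
    print(problems[0].id)               # e.g. "HumanEval/0"
    print(problems[0].instruction[:45]) # starts with "Write a solution to the following problem:"
    print(problems[0].response_prefix)  # "```python" followed by the original prompt
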
data/evaluation/fishfarm/fishfarm/tasks/evalplus/evaluation.py ADDED
@@ -0,0 +1,257 @@
+ import json
+ import multiprocessing
+ import os
+ import threading
+ import time
+ from collections import Counter, defaultdict
+ from concurrent.futures import ProcessPoolExecutor, as_completed
+ from datetime import datetime
+ from typing import Any
+ from warnings import warn
+
+ import numpy as np
+ from evalplus.data import (get_human_eval_plus, get_human_eval_plus_hash,
+                            get_mbpp_plus, get_mbpp_plus_hash, load_solutions)
+ from evalplus.eval import SUCCESS, estimate_pass_at_k, untrusted_check
+ from evalplus.eval._special_oracle import MBPP_OUTPUT_NOT_NONE_TASKS
+ from evalplus.evaluate import Result, get_groundtruth
+ from termcolor import cprint
+ from tqdm.auto import tqdm
+
+ from ...logging import get_logger
+
+ logger = get_logger(__name__)
+
+
+ def check_correctness(
+     dataset: str,
+     completion_id: int,
+     problem: dict[str, Any],
+     solution: str,
+     expected_output: dict[str, list],
+     base_only: bool = False,
+     fast_check: bool = False,
+     identifier: str = "HumanEval/0_0",
+     min_time_limit: float = 0.1,
+     gt_time_limit_factor: float = 2.0,
+ ) -> dict[str, Result]:
+     ret = {
+         "completion_id": completion_id,
+         "task_id": problem["task_id"],
+         "_identifier": identifier,
+         "solution": solution,
+     }
+     ret["base"] = untrusted_check(
+         dataset,
+         solution,
+         problem["base_input"],
+         problem["entry_point"],
+         expected=expected_output["base"],
+         atol=problem["atol"],
+         ref_time=expected_output["base_time"],
+         fast_check=fast_check,
+         min_time_limit=min_time_limit,
+         gt_time_limit_factor=gt_time_limit_factor,
+     )
+
+     if not base_only:
+         ret["plus"] = untrusted_check(
+             dataset,
+             solution,
+             problem["plus_input"],
+             problem["entry_point"],
+             expected=expected_output["plus"],
+             atol=problem["atol"],
+             ref_time=expected_output["plus_time"],
+             fast_check=fast_check,
+             min_time_limit=min_time_limit,
+             gt_time_limit_factor=gt_time_limit_factor,
+         )
+     return ret
+
+
+ def evaluate(
+     source_dataset: str,
+     output_path: str,
+     base_only: bool = False,
+     parallel: int = 0,
+     i_just_wanna_run: bool = False,
+     test_details: bool = False,
+     min_time_limit: float = 0.2,
+     gt_time_limit_factor: float = 4.0,
+     mini: bool = False,
+ ) -> tuple[Any, list[dict[str, Any]]]:
+     if parallel == 0:
+         n_workers = max(1, multiprocessing.cpu_count() // 2)
+     else:
+         n_workers = parallel
+
+     if os.path.isdir(output_path):
+         result_path = os.path.join(output_path, "eval_results.json")
+     else:
+         assert output_path.endswith(".jsonl")
+         result_path = output_path.replace(".jsonl", "_eval_results.json")
+
+     if source_dataset == "humaneval":
+         problems = get_human_eval_plus(mini=mini)
+         dataset_hash = get_human_eval_plus_hash()
+         expected_output = get_groundtruth(problems, dataset_hash, [])
+     elif source_dataset == "mbpp":
+         problems = get_mbpp_plus(mini=mini)
+         dataset_hash = get_mbpp_plus_hash()
+         expected_output = get_groundtruth(
+             problems,
+             dataset_hash,
+             MBPP_OUTPUT_NOT_NONE_TASKS,
+         )
+     else:
+         raise ValueError(f"Unknown source_dataset: {source_dataset}")
+
+     results = {
+         "date": datetime.now().strftime("%Y-%m-%d %H:%M"),
+         "hash": dataset_hash,
+         "eval": {},
+     }
+
+     with ProcessPoolExecutor(max_workers=n_workers) as executor:
+         futures = []
+         completion_id: Counter[str] = Counter()
+         n_samples = 0
+         eval_results = defaultdict(list)
+         remainings = set()
+         sample_details = []
+
+         logger.info("Reading samples...")
+         for sample in tqdm(load_solutions(output_path)):
+             task_id = sample["task_id"]
+             explanation = sample.get("explanation", "")
+             solution = (
+                 sample["solution"]
+                 if "solution" in sample
+                 else problems[task_id]["prompt"] + sample["completion"]
+             )
+             remainings.add(sample["_identifier"])
+
+             args = (
+                 source_dataset,
+                 completion_id[task_id],
+                 problems[task_id],
+                 solution,
+                 expected_output[task_id],
+                 base_only,
+                 not test_details,
+                 sample["_identifier"],
+                 min_time_limit,
+                 gt_time_limit_factor,
+             )
+
+             futures.append(executor.submit(check_correctness, *args))
+             completion_id[task_id] += 1
+             n_samples += 1
+
+             sample_details.append(
+                 dict(
+                     task_id=task_id,
+                     solution=solution,
+                     explanation=explanation,
+                     problems=problems[task_id],
+                     expected_output=expected_output[task_id],
+                 )
+             )
+
+         assert n_samples == len(remainings), "Missing problems in unfinished"
+         if len(completion_id) != len(problems):
+             logger.warning("Warning: Missing problems in samples")
+
+         def stucking_checker() -> None:
+             while remainings:
+                 last_size = len(remainings)
+                 time.sleep(20)
+                 if last_size != len(remainings) or len(remainings) == 0:
+                     continue
+                 warn("No samples had finished testing in the last 20s")
+                 warn(f"{len(remainings)} samples to be tested: {remainings}")
+
+         threading.Thread(target=stucking_checker).start()
+
+         for future in tqdm(as_completed(futures), total=n_samples):
+             result = future.result()
+             remainings.remove(result["_identifier"])
+             eval_results[result["task_id"]].append(result)
+
+     for task_id, task_results in eval_results.items():
+         task_results.sort(key=lambda x: x["completion_id"])
+         results["eval"][task_id] = {
+             "nfiles": len(task_results),
+             "base": [x["base"] for x in task_results],
+             "plus": ([x["plus"] for x in task_results] if not base_only else []),
+         }
+
+     if os.path.isfile(result_path) and i_just_wanna_run:
+         decision = ""
+         while decision.lower() not in ["y", "n"]:
+             logger.info(
+                 f"{result_path} already exists. Press [Y/N] to overwrite or exit..."
+             )
+             decision = input()
+
+         if decision.lower() == "y":
+             new_path = result_path + ".bak"
+             while os.path.isfile(new_path):
+                 new_path += ".bak"
+             os.rename(result_path, new_path)
+             logger.info(f"Backup {result_path} to {new_path}")
+
+     if not os.path.isfile(result_path):
+         with open(result_path, "w") as f:
+             json.dump(results, f)
+
+     total = np.array([r["nfiles"] for r in results["eval"].values()])
+     base_correct = []
+     new_correct = []
+
+     for key, res in results["eval"].items():
+         elements = [element for element in sample_details if element["task_id"] == key]
+         assert (
+             len(elements) == 1
+         ), f"Expected an element with task_id {key}, found {len(elements)}"
+         element = elements[0]
+
+         bc = sum([r[0] == SUCCESS for r in res["base"]])
+         base_correct.append(bc)
+         element["base_correct"] = bc
+         if res["plus"]:
+             new_bc = sum(
+                 [
+                     res["plus"][i][0] == res["base"][i][0] == SUCCESS
+                     for i in range(len(res["plus"]))
+                 ]
+             )
+             new_correct.append(new_bc)
+             element["plus_correct"] = new_bc
+
+     base_correct_array = np.array(base_correct)
+
+     pass_at_k = {
+         f"pass@{k}": estimate_pass_at_k(total, base_correct_array, k).mean()
+         for k in [1, 10, 100]
+         if total.min() >= k
+     }
+
+     result = {f"{source_dataset}_base_{key}": value for key, value in pass_at_k.items()}
+     cprint(f"{source_dataset} (base tests)", "red")
+     for k, v in pass_at_k.items():
+         cprint(f"{k}:\t{v:.3f}", "red")
+
+     if new_correct:
+         cprint(f"{source_dataset}+ (base + extra tests)", "green")
+         pass_at_k = {
+             f"pass@{k}": estimate_pass_at_k(total, np.array(new_correct), k).mean()
+             for k in [1, 10, 100]
+             if (total >= k).all()
+         }
+         result.update(
+             {f"{source_dataset}_plus_{key}": value for key, value in pass_at_k.items()}
+         )
+         for k, v in pass_at_k.items():
+             cprint(f"{k}:\t{v:.3f}", "green")
+
+     return result, sample_details
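
The reported numbers come from evalplus.eval.estimate_pass_at_k, which implements the unbiased estimator pass@k = E[1 - C(n-c, k) / C(n, k)] over problems with n samples of which c are correct. A standalone sketch of that estimator for intuition (not the evalplus implementation itself):

    from math import comb

    def pass_at_k(n: int, c: int, k: int) -> float:
        # Probability that at least one of k samples drawn without replacement
        # from n generations (c of them correct) passes all tests.
        if n - c < k:
            return 1.0
        return 1.0 - comb(n - c, k) / comb(n, k)

    assert abs(pass_at_k(10, 3, 1) - 0.3) < 1e-9  # pass@1 reduces to c / n
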
data/evaluation/fishfarm/fishfarm/tasks/evalplus/generation.py ADDED
@@ -0,0 +1,77 @@
+ import itertools
+ from pathlib import Path
+ from typing import Iterable, List, Sequence, TypeVar
+
+ from evalplus.data import write_jsonl
+ from tqdm.auto import tqdm
+
+ from ...models import GenerationRequest, Message, Model
+ from .data import TextToCodeProblem
+
+ _T = TypeVar("_T")
+
+
+ def chunked(seq: Sequence[_T], n: int) -> Iterable[Sequence[_T]]:
+     """Yield successive n-sized chunks from seq."""
+     return (seq[i : i + n] for i in range(0, len(seq), n))
+
+
+ def generate(
+     model: Model,
+     problems: list[TextToCodeProblem],
+     context_messages: Sequence[Message],
+     output_path: str,
+     n_batches: int = 1,
+     n_problems_per_batch: int = 1_000_000_000,
+     n_samples_per_problem: int = 1,
+ ) -> List[str]:
+     problems_chunked = list(chunked(list(problems), n_problems_per_batch))
+     batch_iter = itertools.product(problems_chunked, range(n_batches))
+     n_total = len(problems_chunked) * n_batches
+
+     Path(output_path).write_text("")
+     for problems, batch_idx in tqdm(batch_iter, total=n_total):
+         task_ids = [problem.id for problem in problems]
+         all_task_ids = task_ids * n_samples_per_problem
+
+         requests = []
+         for problem in problems:
+             messages = list(context_messages)
+             messages.append(Message(role="user", content=problem.instruction))
+             messages.append(
+                 Message(role="assistant_prefill", content=problem.response_prefix)
+             )
+             requests.append(GenerationRequest(messages=messages))
+         completes = model.generate(requests)
+         completions = [c.generation for c in completes]
+
+         assert len(problems) <= n_problems_per_batch
+         assert len(completions) == len(problems) * n_samples_per_problem
+
+         samples = []
+         for task_id, completion in zip(all_task_ids, completions):
+             # Split at the closing code fence: the code body comes before it,
+             # the free-form explanation after it. When no fence is present,
+             # the whole completion is code and there is no explanation.
+             fence_index = completion.find("```")
+             if fence_index != -1:
+                 completion_body = completion[:fence_index]
+                 explanation = completion[fence_index + 3 :].strip()
+             else:
+                 completion_body = completion
+                 explanation = ""
+
+             samples.append(
+                 dict(
+                     task_id=task_id,
+                     completion=completion_body,
+                     explanation=explanation,
+                 )
+             )
+
+         write_jsonl(output_path, samples, append=True)
+     return completions
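
The fence handling above assumes the model continues the ```python prefill with code and closes the fence before any commentary, so each completion splits cleanly into a code body and an explanation; a sketch:

    completion = "    return a + b\n```\nThis adds the two inputs."
    fence_index = completion.find("```")
    assert completion[:fence_index] == "    return a + b\n"
    assert completion[fence_index + 3 :].strip() == "This adds the two inputs."
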
data/evaluation/fishfarm/fishfarm/tasks/evalplus/sanitization.py ADDED
@@ -0,0 +1,195 @@
+ import ast
+ import os
+ import pathlib
+ import re
+ import traceback
+ from typing import Optional
+
+ from evalplus.data import (get_human_eval_plus, get_mbpp_plus, load_solutions,
+                            write_directory, write_jsonl)
+ from tqdm.auto import tqdm
+
+ from ...logging import get_logger
+
+ logger = get_logger(__name__)
+
+
+ def syntax_check(code: str, verbose: bool = False) -> bool:
+     try:
+         ast.parse(code)
+         return True
+     except (SyntaxError, MemoryError):
+         if verbose:
+             traceback.print_exc()
+         return False
+
+
+ def remove_unindented_lines(
+     code: str, protect_before: str, exceptions: list[str], trim_tails: list[str]
+ ) -> str:
+     lines = code.splitlines()
+     cut_idx = []
+     cut_enabled = False
+     for i, line in enumerate(lines):
+         if not cut_enabled and line.startswith(protect_before):
+             cut_enabled = True
+             continue
+         if line.strip() == "":
+             continue
+         if any(line.startswith(e) for e in exceptions):
+             continue
+
+         lspace = len(line) - len(line.lstrip())
+         if lspace == 0:
+             cut_idx.append(i)
+
+         if any(line.rstrip().startswith(t) for t in trim_tails):
+             cut_idx.extend(list(range(i, len(lines))))
+             break
+
+     return "\n".join([line for i, line in enumerate(lines) if i not in cut_idx])
+
+
+ def to_four_space_indents(old_code: str) -> str:
+     new_code = ""
+     for line in old_code.splitlines():
+         lspace = len(line) - len(line.lstrip())
+         if lspace == 3:
+             new_code += " "
+         new_code += line + "\n"
+     return new_code
+
+
+ def sanitize_code(
+     old_code: str,
+     entry_point: str,
+     rm_prefix_lines: Optional[str] = None,
+     eofs: list = [],
+ ) -> str:
+     new_code = old_code
+     if rm_prefix_lines is not None:
+         new_code = "\n".join(
+             [
+                 line
+                 for line in old_code.splitlines()
+                 if not line.startswith(rm_prefix_lines)
+             ]
+         )
+
+     new_code = "\n" + new_code
+     def_left = "def " + entry_point
+
+     new_code = new_code.replace("\n```python\n", "\n```\n")
+     for chunk in new_code.split("\n```\n"):
+         if def_left in chunk:
+             new_code = chunk
+             break
+
+     chunks = [chunk for chunk in re.split(rf"{def_left}\s*\(", new_code)]
+     bodies = [chunk for chunk in chunks[1:] if " return " in chunk.split("\ndef")[0]]
+     def_left = def_left + "("
+     new_code = def_left + def_left.join(bodies) if len(bodies) > 0 else ""
+     new_code = to_four_space_indents(new_code)
+
+     for eof in eofs or []:
+         new_code = new_code.split(eof)[0]
+
+     new_code = remove_unindented_lines(
+         new_code,
+         protect_before=def_left,
+         exceptions=["def ", "import ", "from "],
+         trim_tails=['"""', "if", "print"],
+     )
+     new_code = chunks[0] + new_code
+
+     parts = new_code.split("\ndef ")
+     includes = [parts[0]]
+     for fn in new_code.split("\ndef ")[1:]:
+         if (
+             fn.strip().startswith(entry_point + " ")
+             or fn.strip().startswith(entry_point + "(")
+             or syntax_check("\ndef " + fn)
+         ):
+             includes.append(fn)
+     new_code = "\ndef ".join(includes)
+     return new_code.strip()
+
+
+ def sanitize(
+     source_dataset: str,
+     input_path: str,
+     eofs: list = [],
+     inplace: bool = False,
+     rm_prefix_lines: Optional[str] = None,
+     debug_task: Optional[str] = None,
+ ) -> str:
+     entry_point = {}
+
+     if source_dataset == "humaneval":
+         dataset = get_human_eval_plus()
+     elif source_dataset == "mbpp":
+         dataset = get_mbpp_plus()
+     else:
+         raise ValueError(f"Unknown source_dataset: {source_dataset}")
+
+     for task_id, problem in dataset.items():
+         entry_point[task_id] = problem["entry_point"]
+
+     is_folder = os.path.isdir(input_path)
+     target_path = pathlib.Path(input_path)
+     if not inplace:
+         if is_folder:
+             new_name = target_path.name + "-sanitized"
+         else:
+             new_name = target_path.name.replace(".jsonl", "-sanitized.jsonl")
+         target_path = target_path.parent / new_name
+     output_path = str(target_path)
+
+     nsan = 0
+     ntotal = 0
+
+     new_solutions = []
+
+     for solution in tqdm(load_solutions(input_path)):
+         task_id = solution["task_id"]
+         dbg_identifier = solution["_identifier"]
+         if debug_task is not None and task_id != debug_task:
+             continue
+
+         ntotal += 1
+         if "solution" in solution:
+             old_code = solution["solution"]
+         else:
+             assert "completion" in solution
+             old_code = dataset[task_id]["prompt"] + "\n" + solution["completion"]
+
+         old_code = old_code.strip()
+
+         new_code = sanitize_code(
+             old_code=old_code,
+             entry_point=entry_point[task_id],
+             rm_prefix_lines=rm_prefix_lines,
+             eofs=eofs,
+         ).strip()
+
+         if new_code != old_code:
+             msg = "Sanitized: " + dbg_identifier
+             if is_folder:
+                 msg += " -> " + dbg_identifier.replace(input_path, output_path)
+             logger.info(msg)
+             nsan += 1
+
+         new_solutions.append(
+             {
+                 "task_id": task_id,
+                 "solution": new_code,
+                 "explanation": solution["explanation"],
+             }
+         )
+
+     if is_folder:
+         write_directory(output_path, new_solutions)
+     else:
+         write_jsonl(output_path, new_solutions)
+
+     logger.info(f"Sanitized {nsan} out of {ntotal} files.")
+
+     return output_path
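
sanitize_code trims a raw completion down to the entry-point function (plus syntactically valid helpers), dropping markdown fences and surrounding chatter; a sketch of the typical effect:

    from fishfarm.tasks.evalplus.sanitization import sanitize_code

    raw = "\n".join([
        "Here is my solution:",
        "```python",
        "def add(a, b):",
        "    return a + b",
        "```",
        "Hope this helps!",
    ])
    cleaned = sanitize_code(raw, entry_point="add")
    assert "def add(a, b):" in cleaned
    assert "Hope this helps!" not in cleaned
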
data/evaluation/fishfarm/fishfarm/tasks/evalplus/task.py ADDED
@@ -0,0 +1,54 @@
+ import tempfile
+ from typing import Literal, Optional, Sequence
+
+ from ...models import Message, Model
+ from ..base import Task, TaskResult
+ from . import evaluation, generation, sanitization
+ from .data import TextToCodeProblem
+
+
+ class EvalplusTask(Task):
+
+     def __init__(
+         self,
+         samples: Sequence[TextToCodeProblem],
+         context_messages: Sequence[Message] = (),
+         source_dataset: Literal["humaneval", "mbpp"] = "humaneval",
+     ):
+         self.samples = list(samples)
+         self.context_messages = context_messages
+         self.source_dataset = source_dataset
+         if source_dataset not in ("humaneval", "mbpp"):
+             raise ValueError(f"Unknown source_dataset: {source_dataset}")
+
+     @property
+     def num_samples(self) -> int:
+         return len(self.samples)
+
+     def evaluate(
+         self,
+         model: Model,
+         sample_ids: Optional[Sequence[int]] = None,
+     ) -> TaskResult:
+         if sample_ids is None:
+             sample_ids = range(len(self.samples))
+         samples = [self.samples[sample_id] for sample_id in sample_ids]
+
+         with tempfile.TemporaryDirectory() as save_dir:
+             output_path = f"{save_dir}/outputs.jsonl"
+
+             completions = generation.generate(
+                 model, samples, self.context_messages, output_path
+             )
+
+             if self.source_dataset == "mbpp":
+                 output_path = sanitization.sanitize(self.source_dataset, output_path)
+
+             result, sample_details = evaluation.evaluate(
+                 self.source_dataset, output_path
+             )
+
+         for i, completion in enumerate(completions):
+             sample_details[i]["output"] = completion
+
+         return TaskResult(aggregate_metrics=result, sample_details=sample_details)
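
End to end, EvalplusTask.evaluate writes completions to a temporary JSONL file, sanitizes them for MBPP, and scores them with EvalPlus; a usage sketch, where model is an assumed, pre-built fishfarm Model instance:

    from fishfarm.tasks.evalplus import EvalplusTask, load_dataset

    task = EvalplusTask(load_dataset("humaneval"), source_dataset="humaneval")
    result = task.evaluate(model)  # `model` is any fishfarm Model implementation
    print(result.aggregate_metrics)  # e.g. keys like "humaneval_base_pass@1"
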
data/evaluation/fishfarm/fishfarm/tasks/language_restricted_math.py ADDED
@@ -0,0 +1,106 @@
+ import re
+ from dataclasses import dataclass
+ from typing import Iterable, Optional, Sequence
+
+ import huggingface_hub
+
+ from ..imports import try_import
+ from ..models import GenerationRequest, Message, Model
+ from .base import Task, TaskResult
+
+ with try_import() as _imports:
+     import fasttext
+
+ _imports.check()
+
+
+ @dataclass
+ class MathSample:
+
+     problem: str
+     answer: int
+
+
+ def mean(iterable: Iterable[float]) -> float:
+     total, count = 0.0, 0
+     for x in iterable:
+         total += x
+         count += 1
+     return total / count
+
+
+ def extract_answer_number(completion: str) -> Optional[float]:
+     matches = re.findall(r"\d*\.?\d+", completion)
+     if not matches:
+         return None
+     text = matches[-1]
+     return float(text.replace(",", ""))
+
+
+ class LanguageRestrictedMathTask(Task):
+     def __init__(
+         self,
+         samples: Sequence[MathSample],
+         context_messages: Sequence[Message] = (),
+         languages: Sequence[str] = ("ja", "en"),
+     ):
+         self.samples = list(samples)
+         self.languages = languages
+         self.context_messages = context_messages
+         if len(self.languages) != 0:
+             lid176ftz_path = huggingface_hub.hf_hub_download(
+                 "julien-c/fasttext-language-id", "lid.176.ftz"
+             )
+             self.lid_model = fasttext.load_model(lid176ftz_path)
+
+     @property
+     def num_samples(self) -> int:
+         return len(self.samples)
+
+     def evaluate(
+         self,
+         model: Model,
+         sample_ids: Optional[Sequence[int]] = None,
+     ) -> TaskResult:
+         if sample_ids is None:
+             sample_ids = range(len(self.samples))
+         samples = [self.samples[sample_id] for sample_id in sample_ids]
+
+         requests = []
+         for sample in samples:
+             messages = list(self.context_messages)
+             messages.append(Message(role="user", content=sample.problem))
+             requests.append(GenerationRequest(messages=messages))
+
+         sample_details = []
+         for sample, result in zip(samples, model.generate(requests)):
+             output = result.generation
+             prediction = extract_answer_number(result.generation)
+             if len(self.languages) != 0:
+                 lid_probs = dict(
+                     zip(*self.lid_model.predict(output.replace("\n", ""), k=-1))
+                 )
+
+             sample_details.append(
+                 dict(
+                     problem=sample.problem,
+                     output=output,
+                     answer=sample.answer,
+                     prediction=prediction,
+                     correct=sample.answer == prediction,
+                     **{
+                         f"lang_{lang}": lid_probs.get(f"__label__{lang}", 0.0)
+                         for lang in self.languages
+                     },
+                 )
+             )
+
+         aggregate_metrics = {"acc": mean(sd["correct"] for sd in sample_details)}
+         for lang in self.languages:
+             aggregate_metrics[f"acc_{lang}"] = mean(
+                 (sd["correct"] and sd[f"lang_{lang}"] > 0.5) for sd in sample_details
+             )
+
+         return TaskResult(
+             aggregate_metrics=aggregate_metrics, sample_details=sample_details
+         )
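
extract_answer_number simply takes the last number in the completion, which tolerates chains of reasoning before the final answer; a sketch:

    from fishfarm.tasks.language_restricted_math import extract_answer_number

    assert extract_answer_number("3 + 4 = 7, so the answer is 7") == 7.0
    assert extract_answer_number("First 12 eggs, then half: 6 remain") == 6.0
    assert extract_answer_number("no numbers at all") is None
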
data/evaluation/fishfarm/fishfarm/version.py ADDED
@@ -0,0 +1 @@
+ __version__ = "0.1.0dev"
data/evaluation/fishfarm/pyproject.toml ADDED
@@ -0,0 +1,109 @@
+ [project]
+ name = "fishfarm"
+ description = ""
+ readme = "README.md"
+ license = {file = "LICENSE"}
+ authors = [
+     {name = "Takuya Akiba", email = "[email protected]"},
+ ]
+ classifiers = [
+     "Development Status :: 2 - Pre-Alpha",
+     "Intended Audience :: Science/Research",
+     "Intended Audience :: Developers",
+     "License :: OSI Approved :: MIT License",
+     "Programming Language :: Python :: 3",
+     "Programming Language :: Python :: 3.10",
+     "Programming Language :: Python :: 3.11",
+     "Programming Language :: Python :: 3 :: Only",
+     "Topic :: Scientific/Engineering",
+     "Topic :: Scientific/Engineering :: Mathematics",
+     "Topic :: Scientific/Engineering :: Artificial Intelligence",
+     "Topic :: Software Development",
+     "Topic :: Software Development :: Libraries",
+     "Topic :: Software Development :: Libraries :: Python Modules",
+ ]
+ requires-python = ">=3.10"
+ dependencies = [
+     "huggingface_hub",
+     "transformers",
+     "pydantic",
+     "colorlog"
+ ]
+ dynamic = ["version"]
+
+ [project.optional-dependencies]
+ development = [
+     "black",
+     "blackdoc",
+     "flake8",
+     "isort",
+     "mypy",
+     "pytest",
+     "pytest-mock",
+     "types-PyYAML",
+ ]
+
+ full = [
+     "vllm",
+     "langchain",
+     "langchain-openai",
+     "fasttext-wheel",
+     "datasets",
+     "mysql-connector-python==8.0.32",
+     "docker==6.1.2",
+     "evalplus @ git+https://github.com/evalplus/evalplus@1895d2f6aa8895044a7cf69defc24bd57695e885",
+     "rouge-score"
+ ]
+
+ [project.urls]
+ repository = "https://github.com/SakanaAI/fishfarm"
+
+ [tool.setuptools.packages.find]
+ include = ["fishfarm*"]
+
+ [tool.setuptools.dynamic]
+ version = {attr = "fishfarm.version.__version__"}
+
+ [tool.black]
+ line-length = 99
+ target-version = ['py310']
+ exclude = '''
+ /(
+     \.eggs
+   | \.git
+   | \.hg
+   | \.mypy_cache
+   | \.venv
+   | venv
+   | _build
+   | buck-out
+   | build
+   | dist
+   | docs
+   | data
+ )/
+ '''
+
+ [tool.isort]
+ profile = 'black'
+ src_paths = ['fishfarm', 'tests']
+ line_length = 99
+ lines_after_imports = 2
+
+ [tool.mypy]
+ python_version = "3.10"
+ strict = true
+ ignore_missing_imports = true
+ warn_unused_configs = true
+ disallow_untyped_defs = true
+ warn_redundant_casts = true
+ warn_unused_ignores = true
+ warn_unreachable = true
+ disallow_any_generics = false
+ exclude = ".venv|venv|build|docs|tutorial|data"
+
+ [tool.pytest]
+ mock_use_standalone_module = true
data/evaluation/fishfarm/tox.ini ADDED
@@ -0,0 +1,8 @@
+ [flake8]
+ max-line-length = 99
+ statistics = True
+ exclude = .venv,venv,build,notebooks,.asv,data
+ ignore =
+     E203,
+     W503,
+     E704