meg-huggingface committed on
Commit
1702f66
·
1 Parent(s): f1e6565
src/backend/run_eval_suite_harness.py CHANGED
@@ -55,11 +55,25 @@ def run_evaluation(eval_request: EvalRequest, task_names: list, num_fewshot: int
55
 
56
  logger.info(f"Selected Tasks: {task_names}")
57
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  # no_cache=no_cache,
59
  # output_base_path="logs"
60
  results = evaluator.simple_evaluate(
61
  model="hf-auto", #= "hf-causal-experimental", # "hf-causal"
62
- model_args=eval_request.get_model_args(),
63
  tasks=task_names,
64
  num_fewshot=num_fewshot,
65
  batch_size=batch_size,
 
55
 
56
  logger.info(f"Selected Tasks: {task_names}")
57
 
58
+ print("Here are the deets we are putting in:")
59
+ print("model args:")
60
+ model_args = eval_request.get_model_args()
61
+ print(model_args)
62
+ print("Tasks")
63
+ print(task_names)
64
+ print("Num fewshot")
65
+ print(num_fewshot)
66
+ print("Batch size")
67
+ print(batch_size)
68
+ print("Device")
69
+ print(device)
70
+ print("Limit")
71
+ print(limit)
72
  # no_cache=no_cache,
73
  # output_base_path="logs"
74
  results = evaluator.simple_evaluate(
75
  model="hf-auto", #= "hf-causal-experimental", # "hf-causal"
76
+ model_args=model_args,
77
  tasks=task_names,
78
  num_fewshot=num_fewshot,
79
  batch_size=batch_size,