Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -48,7 +48,7 @@ unimath4 = """Goal:
 ============================
 (x β y β (β i : approximating_family CX x, approximating_family CX x i β y))"""
 
-additional_info_prompt = "/-Explain using mathematics
+additional_info_prompt = "/-Explain using mathematics-/\n"
 
 examples = [
     [unimath1, additional_info_prompt, 2500],
@@ -66,6 +66,9 @@ model.generation_config = GenerationConfig.from_pretrained(model_name)
 model.generation_config.pad_token_id = model.generation_config.eos_token_id
 model.generation_config.bos_token_id = 100000
 model.generation_config.eos_token_id = 100001
+model.generation_config.do_sample = True
+model.generation_config.temperature = 1.0
+model.generation_config.top_p = 0.95
 
 @spaces.GPU
 def solve_math_problem(question, informal_prefix, max_tokens):
@@ -73,13 +76,15 @@ def solve_math_problem(question, informal_prefix, max_tokens):
     prompt = format_prompt(question, informal_prefix)
 
     input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
+    attention_mask = torch.ones_like(input_ids)
+
     outputs = model.generate(
         input_ids,
+        attention_mask=attention_mask,
         max_length=max_tokens + input_ids.shape[1],
         pad_token_id=model.generation_config.pad_token_id,
         temperature=1.0,
         top_p=0.95,
-        do_sample=True
     )
 
     result = tokenizer.decode(outputs[0], skip_special_tokens=True)
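
In sum, the commit closes the previously unterminated Lean comment in additional_info_prompt ("/-Explain using mathematics-/\n"), moves the sampling switches (do_sample, temperature, top_p) onto model.generation_config so the sampling setup lives in one place rather than only in the generate() call, and passes an explicit attention_mask to generate(). The mask matters here because pad_token_id is set equal to eos_token_id, in which case transformers cannot reliably infer the mask from the input and emits a warning; for a single unpadded prompt an all-ones mask is the correct choice.

Below is a minimal, self-contained sketch of the resulting generation path, assuming the usual transformers API; the checkpoint name and prompt are placeholders, since app.py defines its own model_name and builds the prompt with format_prompt:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

model_name = "..."  # placeholder; the Space sets this earlier in app.py
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)

model.generation_config = GenerationConfig.from_pretrained(model_name)
model.generation_config.pad_token_id = model.generation_config.eos_token_id
model.generation_config.do_sample = True   # temperature/top_p only apply when sampling
model.generation_config.temperature = 1.0
model.generation_config.top_p = 0.95

prompt = "..."  # placeholder; built by format_prompt in app.py
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)

# Single unpadded sequence: every position is a real token, so the mask is
# all ones. Passing it explicitly avoids the warning triggered when the pad
# token equals the eos token.
attention_mask = torch.ones_like(input_ids)

outputs = model.generate(
    input_ids,
    attention_mask=attention_mask,
    max_length=2500 + input_ids.shape[1],
    pad_token_id=model.generation_config.pad_token_id,
    temperature=1.0,
    top_p=0.95,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

Note that the updated generate() call still passes temperature and top_p explicitly; this is redundant with the values stored on generation_config, but harmless, since per-call arguments simply override the config.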