Hellisotherpeople committed · Commit 53a8589 · 1 Parent: ffaaaf6

Update pages/Text-to-Text.py

pages/Text-to-Text.py CHANGED (+3, -3)
@@ -51,7 +51,7 @@ else:
 
 
     length = form.number_input("Select how long you want the generated text to be", value = 100)
-    number_of_tokens_to_sample = form.number_input("Select how many tokens we want to search through when we do the filtering", value =
+    number_of_tokens_to_sample = form.number_input("Select how many tokens we want to search through when we do the filtering", value = 25000)
     form.caption("Settings this to higher numbers will improve the experience but will cause generating to slow. Low numbers may cause lots of blank or failed generations")
     temperature = form.number_input("How spicy/interesting do we want our models output to be", value = 0.10, min_value = 0.0)
     form.caption("Setting this higher decreases the likelihood of high probability words and increases the likelihood of low probability (and presumably more interesting) words")
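
For context on the line 54 change: Streamlit's st.number_input returns a value of the same type as its value argument (an int default gives an int back, a float default a float), so downstream code cannot always rely on receiving an int; that is presumably what the int() casts added in the next hunk guard against. A minimal standalone sketch of the settings form, assuming import streamlit as st; the form key and submit-button label are made up for the demo:

import streamlit as st

# Labels and defaults mirror the diff; the key and button are hypothetical.
form = st.form("generation_settings")
length = form.number_input(
    "Select how long you want the generated text to be", value=100
)
number_of_tokens_to_sample = form.number_input(
    "Select how many tokens we want to search through when we do the filtering",
    value=25000,  # the default this commit introduces
)
temperature = form.number_input(
    "How spicy/interesting do we want our models output to be",
    value=0.10, min_value=0.0,  # float default, so a float comes back
)
form.form_submit_button("Generate")
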
@@ -90,10 +90,10 @@ def get_next_word_without_e():
     if temperature != 1.0:
         next_token_candidates_logits = next_token_candidates_logits / temperature
     # filter
-    filtered_next_token_candidates_logits = top_k_top_p_filtering(next_token_candidates_logits, top_k=number_of_tokens_to_sample, top_p=number_of_tokens_to_sample)
+    filtered_next_token_candidates_logits = top_k_top_p_filtering(next_token_candidates_logits, top_k=int(number_of_tokens_to_sample), top_p=int(number_of_tokens_to_sample))
     # sample and get a probability distribution
     probs = F.softmax(filtered_next_token_candidates_logits, dim=-1)
-    next_token_candidates = torch.multinomial(probs, num_samples=number_of_tokens_to_sample) ## 10000 random samples
+    next_token_candidates = torch.multinomial(probs, num_samples=int(number_of_tokens_to_sample)) ## 10000 random samples
     word_list = []
     for candidate_string in next_token_candidates:
         for candidate in candidate_string:
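
On this hunk: top_k_top_p_filtering keeps the top_k highest logits and sets the rest to -inf; its nucleus (top_p) step only runs for top_p < 1.0 in the older transformers helper this appears to be, so passing the 25000 token count as top_p makes that step a no-op. Because num_samples equals top_k, torch.multinomial (which samples without replacement by default) effectively returns all surviving tokens in probability-weighted order. A minimal self-contained sketch of the same pipeline, with an inlined top-k mask since recent transformers releases no longer export top_k_top_p_filtering; the toy logits and small k are stand-ins:

import torch
import torch.nn.functional as F

def filter_and_sample(logits, temperature=0.10, k=5):
    # Temperature-scale the logits, as in lines 90-91 of the patched file.
    if temperature != 1.0:
        logits = logits / temperature
    # Inline top-k filter: everything below the k-th largest logit -> -inf.
    kth_largest = torch.topk(logits, k).values[..., -1, None]
    filtered = logits.masked_fill(logits < kth_largest, float("-inf"))
    # Softmax assigns exactly zero probability to the -inf entries.
    probs = F.softmax(filtered, dim=-1)
    # num_samples == k mirrors the app's num_samples == top_k choice:
    # without replacement, this yields every survivor in weighted order.
    return torch.multinomial(probs, num_samples=k)

toy_logits = torch.randn(1, 50257)  # GPT-2-sized vocabulary, batch of one
print(filter_and_sample(toy_logits))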