import os

import gradio as gr
from gpt4all import GPT4All

""" |
|
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co./docs/huggingface_hub/v0.22.2/en/guides/inference |
|
""" |
|
# Load the Strela GGUF model; the weights file is expected in the current working directory.
model = GPT4All(model_name='strela-q4_k_m.gguf', model_path=os.getcwd())


def stop_on_token_callback(token_id, token_string):
    """Stop generation once the model starts emitting a new '###' turn marker."""
    print(token_string, end='')  # echo streamed tokens to the console for debugging
    # Returning False tells GPT4All to stop generating further tokens.
    return '#' not in token_string


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
chat = f"""### System: |
|
{system_message} |
|
""" |
|
for group in history: |
|
chat += f"""### Human: |
|
{group[0]} |
|
### Assistant: |
|
{group[1]}""" |
|
chat += f"""### Human: |
|
{message} |
|
### Assistant: |
|
""" |

    # Stream the response, passing the UI sampling settings through to GPT4All.
    tokens = ""
    for token in model.generate(chat, max_tokens=max_tokens, temp=temperature, top_p=top_p,
                                streaming=True, callback=stop_on_token_callback):
        tokens += token
        yield tokens


""" |
|
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface |
|
""" |
|
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
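
# Note: streaming generators such as `respond` are served through Gradio's queue. Queueing is
# enabled by default in recent Gradio releases; on older ones you may need to enable it
# explicitly:
#
#   demo.queue()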


if __name__ == "__main__":
    demo.launch()
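
# A minimal sketch of running this demo locally (the `app.py` filename is an assumption; the
# strela-q4_k_m.gguf weights must already be present in the working directory):
#
#   pip install gradio gpt4all
#   python app.py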