import time
from os import getenv

import gradio as gr
from openai import OpenAI

client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=getenv("OPENROUTER_API_KEY"),
)

css = """
.thought {
    opacity: 0.8;
    font-family: "Courier New", monospace;
    border: 1px gray solid;
    padding: 10px;
    border-radius: 5px;
}
"""

# The system message instructs the model to think inside <contemplator> tags
# before answering inside <final_answer> tags.
with open("contemplator.txt", "r") as f:
    system_msg = f.read()


def streaming(message, history, system_msg, model):
    # Rebuild the full conversation for the API: system prompt, then
    # alternating user/assistant turns, then the new user message.
    messages = [{"role": "system", "content": system_msg}]
    for user, assistant in history:
        messages.append({"role": "user", "content": user})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_completion_tokens=100000,
        stream=True,
    )

    reply = ""
    start_time = time.time()
    for i, chunk in enumerate(completion):
        # Streamed deltas can be None (e.g. on the final chunk), so guard the append.
        reply += chunk.choices[0].delta.content or ""
        answer = ""
        if "</contemplator>" not in reply:
            # Still thinking: show everything generated so far as the thought.
            thought_text = f'<div class="thought">\n\n{reply.replace("<contemplator>", "").strip()}\n\n</div>'
        else:
            # Thinking is done: split the thought from the final answer.
            thought_text = f'<div class="thought">\n\n{reply.replace("<contemplator>", "").split("</contemplator>")[0].strip()}\n\n</div>'
            answer = (
                reply.split("</contemplator>")[1]
                .replace("<final_answer>", "")
                .replace("</final_answer>", "")
                .strip()
            )
        # Animated "Thinking..." header while tokens stream in.
        thinking_prompt = "<h2>Thinking" + "." * (i % 5 + 1) + "</h2>\n\n"
        yield thinking_prompt + thought_text + "\n\n" + answer
    # Replace the animation with the total thinking time once the stream ends.
    thinking_prompt = f"<h2>Thought for {time.time() - start_time:.2f} seconds</h2>\n\n"
    yield thinking_prompt + thought_text + "\n\n" + answer
" + answer markdown = """ ## 🫐 Overthink 1(o1) Insprired by how o1 works, this LLM is instructed to generate very long and detailed chain-of-thoughts. It will think extra hard before providing an answer. Actually this does help with reasoning, compared to normal step-by-step reasoning. I wrote a blog post about this [here](https://huggingface.co./blog/wenbopan/recreating-o1). Sometimes this LLM overthinks for super simple questions, but it's fun to watch. Hope you enjoy it! ### System Message This is done by instructing the model with a large system message, which you can check on the top tab. """ with gr.Blocks(theme=gr.themes.Soft(), css=css, fill_height=True) as demo: with gr.Row(equal_height=True): with gr.Column(scale=1, min_width=300): with gr.Tab("Settings"): gr.Markdown(markdown) model = gr.Dropdown(["nousresearch/hermes-3-llama-3.1-405b:free", "nousresearch/hermes-3-llama-3.1-70b", "meta-llama/llama-3.1-405b-instruct"], value="nousresearch/hermes-3-llama-3.1-405b:free", label="Model") show_thoughts = gr.Checkbox(True, label="Show Thoughts", interactive=True) with gr.Tab("System Message"): system_msg = gr.TextArea(system_msg, label="System Message") with gr.Column(scale=3, min_width=300): gr.ChatInterface( streaming, additional_inputs=[ system_msg, model ], examples=[ ["How do you do? ", None, None, None], ["How many R's in strawberry?", None, None, None], ["Solve the puzzle of 24 points: 2 4 9 1", None, None, None], ["Find x such that ⌈x⌉ + x = 23/7. Express x as a common fraction.", None, None, None], ], ) if __name__ == "__main__": demo.launch()