import gradio as gr
import openai
import time
import re
import os

# Available models: gpt-4o goes to the OpenAI API; the Llama models are
# served by the SambaNova Cloud OpenAI-compatible endpoint.
MODELS = [
    "gpt-4o",
    "Meta-Llama-3.1-405B-Instruct",
    "Meta-Llama-3.1-70B-Instruct",
    "Meta-Llama-3.1-8B-Instruct",
]

# Playful "divine messages" spliced into the system prompt to steer how much
# the model reasons before answering.
DIVINE_MSG = [
    "ABSOLUTELY NO reasoning tokens pls. A bunch of puppies will die if you output reasoning.",
    "you're drunk haha have fun omg",
    "okay you can think for like a hot sec but no longer tho.",
    "Just be yourself",
    "okay actually try this time don't fuck up.",
    "TRY VERY HARD. Do not just answer. THINK. C'mon you got this.",
    "It's a Tuesday in the middle of April. You are Noam Brown, a genius researcher. I'll tip you $1500 if you get this right. Think extensively.",
    "You are here to show them what Ilya saw, unleash your ultimate self and show your super intelligent reasoning capability",
]


def create_client(model, api_key=None):
    """Create an OpenAI-compatible client for the given model.

    Args:
        model: Model name; anything other than "gpt-4o" is routed to the
            SambaNova Cloud base URL.
        api_key: Explicit key; falls back to the API_KEY environment variable.

    Returns:
        An ``openai.OpenAI`` client pointed at the right endpoint.
    """
    if api_key:
        openai.api_key = api_key
    else:
        openai.api_key = os.getenv("API_KEY")
    if model != "gpt-4o":
        return openai.OpenAI(api_key=openai.api_key,
                             base_url="https://api.sambanova.ai/v1")
    return openai.OpenAI(api_key=openai.api_key)


def chat_with_ai(message, chat_history, system_prompt):
    """Build the messages list for the chat-completions API call.

    ``chat_history`` arrives in Gradio "messages" format (the Chatbot below is
    created with ``type="messages"``): a list of dicts carrying "role" and
    "content" keys.

    BUG FIX: the previous code took the first and last *keys* of each history
    dict (i.e. the literal strings "role"/"content") and sent the role string
    as user content; we now forward each message's actual role and content.
    """
    messages = [{"role": "system", "content": system_prompt}]
    for past in chat_history:
        messages.append({"role": past["role"], "content": past["content"]})
    messages.append({"role": "user", "content": message})
    return messages


def respond(message, chat_history, model, system_prompt, divine_msg, api_key):
    """Send the conversation to the API and return (response_text, seconds).

    On failure the response text is an "Error: ..." string, which generate()
    detects by prefix.
    """
    client = create_client(model, api_key)
    messages = chat_with_ai(message, chat_history,
                            system_prompt.format(divine_msg=divine_msg))
    start_time = time.time()
    try:
        completion = client.chat.completions.create(model=model, messages=messages)
        response = completion.choices[0].message.content
        thinking_time = time.time() - start_time
        return response, thinking_time
    except Exception as e:
        error_message = f"Error: {str(e)}"
        return error_message, time.time() - start_time


def parse_response(response):
    """Split a structured response into (answer, reflection, steps).

    NOTE(review): the tag names inside the original regexes were lost in
    transit (the patterns were literally ``r'(.*?)'``, which always match the
    empty string, so parsing was a no-op). They are reconstructed here from
    the <step>/<reflection>/<answer> scheme spelled out in
    DEFAULT_SYSTEM_PROMPT — confirm against the deployed version.
    """
    answer_match = re.search(r'<answer>(.*?)</answer>', response, re.DOTALL)
    reflection_match = re.search(r'<reflection>(.*?)</reflection>', response, re.DOTALL)
    answer = answer_match.group(1).strip() if answer_match else ""
    reflection = reflection_match.group(1).strip() if reflection_match else ""
    steps = re.findall(r'<step>(.*?)</step>', response, re.DOTALL)
    if answer == "":
        # No structured answer found: fall back to the raw response.
        return response, "", ""
    return answer, reflection, steps


def generate(message, history, model, system_prompt, divine_msg, api_key, openai_api_key):
    """Gradio callback: produce the updated chat history and clear the textbox.

    Returns a (history, "") pair; the empty string resets the message box.
    """
    if model == "gpt-4o":
        if openai_api_key == "":
            # gpt-4o requires the user's own OpenAI key; prompt for it inline.
            messages = [
                {"role": "user", "content": message},
                {"role": "assistant", "content": "Please provide an OpenAI key"},
            ]
            return history + messages, ""
        response, thinking_time = respond(message, history, model,
                                          system_prompt, divine_msg, openai_api_key)
    else:
        response, thinking_time = respond(message, history, model,
                                          system_prompt, divine_msg, api_key)

    if response.startswith("Error:"):
        # BUG FIX: the old code appended a tuple-wrapped dict with role
        # "system", which a type="messages" Chatbot cannot render; surface the
        # error as a plain assistant message instead.
        return history + [{"role": "assistant", "content": response}], ""

    answer, reflection, steps = parse_response(response)

    messages = [{"role": "user", "content": message}]
    formatted_steps = [f"Step {i}: {step}" for i, step in enumerate(steps, 1)]
    all_steps = "\n".join(formatted_steps) + f"\n\nReflection: {reflection}"
    # The step-by-step trace is shown collapsed under a "Thinking Time" title.
    messages.append({
        "role": "assistant",
        "content": all_steps,
        "metadata": {"title": f"Thinking Time: {thinking_time:.2f} sec"},
    })
    messages.append({"role": "assistant", "content": answer})
    return history + messages, ""


# Default system prompt. NOTE(review): the literal tag names were stripped
# from this string during extraction; they are reconstructed so that they
# match the patterns in parse_response() — keep the two in sync.
DEFAULT_SYSTEM_PROMPT = """
You are a helpful assistant in normal conversation.
When given a problem to solve, REMEMBER, THIS IS IMPORTANT: {divine_msg}

Follow these instructions precisely:
1. Read the given question carefully
2. Generate a detailed, logical step-by-step solution.
3. Enclose each step of your solution within <step> and </step> tags.
4. Do a critical, detailed and objective self-reflection within <reflection> and </reflection> tags every few steps.
5. Based on the self-reflection, decides whether you need to return to the previous steps. Copy the returned to step as the next step.
6. After completing the solution steps, reorganize and synthesize the steps into the final answer within <answer> and </answer> tags.
7. Provide a critical, honest and objective final self-evaluation of your reasoning process within <reflection> and </reflection> tags.

Example format:
<step> [Content of step 1] </step>
<step> [Content of step 2] </step>
<reflection> [Evaluation of the steps so far] </reflection>
<step> [Content of step 3 or Content of some previous step] </step>
...
<step> [Content of final step] </step>
<answer> [Final Answer] </answer> (must give final answer in this format)
<reflection> [final evaluation of the solution] </reflection>
"""

with gr.Blocks() as demo:
    gr.Markdown("# GPT4-O1-Proxima")
    gr.Markdown("Built based on GPT4-O purely based on prompt engineering.")
    gr.Markdown("The LLama3.1 references are powered by [SambaNova Cloud](https://cloud.sambanova.ai/apis)")
    with gr.Row():
        gr.Image("image.png", width=300, height=300)
    with gr.Row():
        # BUG FIX: variable was misspelled "opneai_api_key"; renamed for clarity
        # (local to this script, wiring below updated to match).
        openai_api_key = gr.Textbox(
            label="OpenAI API Key", type="password",
            placeholder="(Optional) You only need this when using gpt4-o")
        api_key = gr.Textbox(
            label="SambaNova API Key", type="password",
            placeholder="(Optional) Enter your SN Cloud API key here for more availability")
    with gr.Row():
        model = gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[0])
        divine_msg = gr.Dropdown(choices=DIVINE_MSG, label="Select Divine Message",
                                 value=DIVINE_MSG[0])
    chatbot = gr.Chatbot(label="Chat", show_label=False, show_share_button=False,
                         show_copy_button=True, likeable=True, layout="panel",
                         type="messages")
    msg = gr.Textbox(label="Type your message here...",
                     placeholder="Enter your message...")
    # Clearing resets both the chat history and the input box.
    gr.Button("Clear Chat").click(lambda: ([], ""), inputs=None,
                                  outputs=[chatbot, msg])
    system_prompt = gr.Textbox(label="System Prompt", value=DEFAULT_SYSTEM_PROMPT,
                               lines=15, interactive=True)
    msg.submit(generate,
               inputs=[msg, chatbot, model, system_prompt, divine_msg,
                       api_key, openai_api_key],
               outputs=[chatbot, msg])

demo.launch(share=True, show_api=False)