Spaces:
Runtime error
Runtime error
File size: 2,733 Bytes
05f31cc 9dd4f46 874a573 05f31cc 9dd4f46 874a573 05f31cc 9dd4f46 874a573 05f31cc 9dd4f46 05f31cc 9dd4f46 874a573 9dd4f46 05f31cc 9dd4f46 05f31cc 9dd4f46 05f31cc 9dd4f46 05f31cc 9dd4f46 05f31cc 9dd4f46 05f31cc 9dd4f46 05f31cc 9dd4f46 05f31cc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 |
import discord
from discord.ext import commands
from discord.ext.commands import Context
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse
import gradio as gr
# Initialize FastAPI app
app = FastAPI()

# Initialize the Llama model.
# NOTE(review): gradio exposes no `Llama` class — this constructor signature
# (model_path / n_ctx / n_threads / chat_format) matches llama_cpp.Llama from
# llama-cpp-python. As written this line raises AttributeError at import time;
# confirm the intended library and import `Llama` from it.
llm = gr.Llama(model_path="model.gguf", n_ctx=4000, n_threads=2, chat_format="chatml")

# Initialize Discord bot. discord.py 2.x makes `intents` a required keyword
# argument; message_content must be enabled for prefix commands ('&...') to
# read message text.
intents = discord.Intents.default()
intents.message_content = True
bot = commands.Bot(command_prefix='&', intents=intents)  # Define the command prefix

# Global variable to store the channel where chats will be sent
chat_channel = None
# Command: designate which Discord channel receives generated chat output.
@bot.command()
async def set_channel(ctx: Context, channel: discord.TextChannel):
    """Record *channel* as the global chat destination and confirm to the invoker."""
    global chat_channel
    chat_channel = channel
    await ctx.send(f"Chat channel set to {channel.mention}")
# Define the function to handle the chat endpoint
@app.post("/api/v1/chat")
async def chat_post(request: Request):
    """Generate a model reply for the posted message and relay it to Discord.

    Expects a JSON body with:
      message     (str, required)  -- the user's prompt
      history     (list of [user, assistant] pairs, optional)
      temperature (float, optional, default 0.3)
      max_tokens  (int, optional, default 512)

    Raises HTTP 400 when no chat channel has been set or 'message' is missing.
    """
    global chat_channel
    if chat_channel is None:
        raise HTTPException(status_code=400, detail="Chat channel is not set")
    data = await request.json()
    message = data.get("message")
    if message is None:
        # Reject bodies without a prompt instead of sending an empty turn to the model.
        raise HTTPException(status_code=400, detail="'message' field is required")
    history = data.get("history", [])
    temperature = data.get("temperature", 0.3)
    max_tokens = data.get("max_tokens", 512)

    # Build a ChatML-style message list: system prompt, prior turns, new message.
    system_prompt = "You are OpenChat, a useful AI assistant."
    formatted_prompt = [{"role": "system", "content": system_prompt}]
    for user_prompt, bot_response in history:
        formatted_prompt.append({"role": "user", "content": user_prompt})
        formatted_prompt.append({"role": "assistant", "content": bot_response})
    formatted_prompt.append({"role": "user", "content": message})

    stream_response = llm.create_chat_completion(messages=formatted_prompt, temperature=temperature,
                                                 max_tokens=max_tokens, stream=True)
    # BUG FIX: the original yielded the *cumulative* text after every streamed
    # chunk and sent each snapshot to the channel, flooding Discord with
    # near-duplicate messages. Accumulate first, send once at the end.
    response = ""
    for chunk in stream_response:
        delta = chunk['choices'][0]["delta"]
        # "content" membership alone suffices; a non-empty dict check is redundant.
        if "content" in delta:
            response += delta["content"]

    # Send the generated response to the chat channel, split into <=2000-char
    # pieces (Discord's per-message length limit). An empty response sends nothing.
    for start in range(0, len(response), 2000):
        await chat_channel.send(response[start:start + 2000])
    return JSONResponse(content={"response": "Message sent to chat channel"})
# Informational GET handler: the chat API only accepts POST requests.
@app.get("/api/v1/chat")
async def chat_get():
    """Point callers at the POST variant of this endpoint."""
    return {"message": "Send a POST request to this endpoint to chat."}
# Run the API server and the Discord bot together.
if __name__ == "__main__":
    import threading

    import uvicorn

    # BUG FIX: uvicorn.run() blocks the main thread forever, so the original
    # code never reached bot.run() and the Discord bot never started. Serve
    # FastAPI on a daemon thread and keep the bot (which installs its own
    # signal handlers, so it must own the main thread) in the foreground.
    api_thread = threading.Thread(
        target=uvicorn.run,
        kwargs={"app": app, "host": "0.0.0.0", "port": 8000},
        daemon=True,  # die with the bot; don't keep the process alive alone
    )
    api_thread.start()
    bot.run('YOUR_DISCORD_BOT_TOKEN')  # Replace 'YOUR_DISCORD_BOT_TOKEN' with your actual bot token
|