Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,10 +1,9 @@
|
|
1 |
import os

import gradio as gr
from openai import OpenAI

# Initialize the API client for NVIDIA's OpenAI-compatible endpoint.
# SECURITY: the API key was previously hardcoded in this file and has been
# published — that key is compromised and MUST be revoked/rotated.
# Read the replacement key from the environment (e.g. a Space secret)
# instead of embedding it in source.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ["NVIDIA_API_KEY"],  # raises KeyError early if unset
)
|
9 |
|
10 |
def respond(
|
@@ -17,19 +16,16 @@ def respond(
|
|
17 |
):
|
18 |
messages = [{"role": "system", "content": system_message}]
|
19 |
|
20 |
-
# Geçmiş mesajları ekliyoruz
|
21 |
for val in history:
|
22 |
if val[0]:
|
23 |
messages.append({"role": "user", "content": val[0]})
|
24 |
if val[1]:
|
25 |
messages.append({"role": "assistant", "content": val[1]})
|
26 |
|
27 |
-
# Kullanıcı mesajını ekliyoruz
|
28 |
messages.append({"role": "user", "content": message})
|
29 |
|
30 |
response = ""
|
31 |
|
32 |
-
# API'den gelen yanıtı işliyoruz
|
33 |
completion = client.chat.completions.create(
|
34 |
model="nvidia/nemotron-4-340b-instruct",
|
35 |
messages=messages,
|
@@ -45,7 +41,6 @@ def respond(
|
|
45 |
response += token
|
46 |
yield response
|
47 |
|
48 |
-
# Gradio arayüzünü tanımlıyoruz
|
49 |
demo = gr.ChatInterface(
|
50 |
respond,
|
51 |
additional_inputs=[
|
|
|
1 |
import os

import gradio as gr
from openai import OpenAI

# Initialize the API client for NVIDIA's OpenAI-compatible endpoint.
# SECURITY: the API key was previously hardcoded in this file and has been
# published — that key is compromised and MUST be revoked/rotated.
# Read the replacement key from the environment (e.g. a Space secret)
# instead of embedding it in source.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ["NVIDIA_API_KEY"],  # raises KeyError early if unset
)
|
8 |
|
9 |
def respond(
|
|
|
16 |
):
|
17 |
messages = [{"role": "system", "content": system_message}]
|
18 |
|
|
|
19 |
for val in history:
|
20 |
if val[0]:
|
21 |
messages.append({"role": "user", "content": val[0]})
|
22 |
if val[1]:
|
23 |
messages.append({"role": "assistant", "content": val[1]})
|
24 |
|
|
|
25 |
messages.append({"role": "user", "content": message})
|
26 |
|
27 |
response = ""
|
28 |
|
|
|
29 |
completion = client.chat.completions.create(
|
30 |
model="nvidia/nemotron-4-340b-instruct",
|
31 |
messages=messages,
|
|
|
41 |
response += token
|
42 |
yield response
|
43 |
|
|
|
44 |
demo = gr.ChatInterface(
|
45 |
respond,
|
46 |
additional_inputs=[
|