Update app.py
app.py (CHANGED):
@@ -16,7 +16,8 @@ list_llm = [
     "mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mixtral-8x7B-Instruct-v0.1",
     "google/gemma-7b-it", "google/gemma-2b-it",
     "HuggingFaceH4/zephyr-7b-beta", "HuggingFaceH4/zephyr-7b-gemma-v0.1",
-    "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "tiiuae/falcon-7b-instruct"
+    "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "tiiuae/falcon-7b-instruct",
+    "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
 ]
 list_llm_simple = [os.path.basename(llm) for llm in list_llm]
 
@@ -124,7 +125,7 @@ def demo():
             db_btn = gr.Button("Genera database vettoriale")
 
         with gr.Tab("Passo 3 - Inizializza catena QA"):
-            llm_btn = gr.Radio(list_llm_simple, label="Modelli LLM", value=list_llm_simple[
+            llm_btn = gr.Radio(list_llm_simple, label="Modelli LLM", value=list_llm_simple[5], type="index")
             with gr.Accordion("Opzioni avanzate - Modello LLM", open=False):
                 slider_temperature = gr.Slider(0.01, 1.0, 0.3, step=0.1, label="Temperatura")
                 slider_maxtokens = gr.Slider(224, 4096, 1024, step=32, label="Token massimi")