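"""Gradio entry point for this Space: builds the leaderboard, model-comparison, and
(currently disabled) data-exploration tabs, then wires their initial data loads."""
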
import gradio as gr
import promptquality as pq
from dotenv import load_dotenv
load_dotenv()
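
# Authenticate with the Galileo console; promptquality picks up credentials
# (e.g. an API key) from the environment populated by load_dotenv() above.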
pq.login("https://console.demo.rungalileo.io")
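
# Shared dataset and static page content consumed by the tabs.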
from data_loader import (
    load_data,
    CATEGORIES,
    METHODOLOGY,
    HEADER_CONTENT,
    CARDS,
    DATASETS,
    SCORES,
)
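
# Tab constructors and the callbacks reused for the initial loads below.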
from tabs.leaderboard import create_leaderboard_tab, filter_leaderboard
from tabs.model_comparison import create_model_comparison_tab, compare_models
from tabs.data_exploration import create_exploration_tab
from chat import filter_and_update_display
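# create_exploration_tab and filter_and_update_display are only referenced by the
# commented-out exploration tab below; the imports are kept so it can be re-enabled.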


def create_app():
    df = load_data()
    MODELS = [x.strip() for x in df["Model"].unique().tolist()]

    with gr.Blocks(theme=gr.themes.Soft()) as app:
        with gr.Tabs():
            # Create tabs
            lb_output, lb_plot1, lb_plot2 = create_leaderboard_tab(
                df, CATEGORIES, METHODOLOGY, HEADER_CONTENT, CARDS
            )
            mc_info, mc_plot = create_model_comparison_tab(df, HEADER_CONTENT, CARDS)
            # exp_outputs = create_exploration_tab(
            #     df, MODELS, DATASETS, SCORES, HEADER_CONTENT
            # )

        # Initial loads: populate the leaderboard and comparison views on page load.
        app.load(
            fn=lambda: filter_leaderboard(
                df, "All", list(CATEGORIES.keys())[0], "Performance"
            ),
            outputs=[lb_output, lb_plot1, lb_plot2],
        )
        # Default the comparison view to the top-ranked model by "Model Avg".
        app.load(
            fn=lambda: compare_models(
                df, [df.sort_values("Model Avg", ascending=False).iloc[0]["Model"]]
            ),
            outputs=[mc_info, mc_plot],
        )
        # app.load(
        #     fn=lambda: filter_and_update_display(MODELS[0], DATASETS[0], [], 0),
        #     outputs=exp_outputs,
        # )

    return app


demo = create_app()

if __name__ == "__main__":
    demo.launch()