agh123 committed on
Commit
37d5f61
·
1 Parent(s): 3c06493
.gitignore ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ *.egg-info/
20
+ .installed.cfg
21
+ *.egg
22
+
23
+ # Virtual Environment
24
+ venv/
25
+ ENV/
26
+
27
+ # Environment Variables
28
+ .env
29
+ .env.local
30
+
31
+ # IDE
32
+ .idea/
33
+ .vscode/
34
+ *.swp
35
+ *.swo
36
+
37
+ # Streamlit
38
+ .streamlit/
39
+
40
+ # Logs
41
+ *.log
Makefile ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Phony targets are command names, not files. `venv` is intentionally NOT
# listed: the venv is a real directory target ($(VENV_NAME) == "venv"),
# so marking it phony would recreate it on every run.
.PHONY: check-python install-dev install-prod run test clean lint format help setup-dev setup-prod

# Variables (user-tunable knobs)
PYTHON_VERSION := 3.12
PYTHON := python$(PYTHON_VERSION)
PIP := pip3
VENV_NAME := venv
VENV_BIN := $(VENV_NAME)/bin
VENV_PYTHON := $(VENV_BIN)/python
VENV_PIP := $(VENV_BIN)/pip
PORT := 8501

# Default target
.DEFAULT_GOAL := help

# Remove a half-written target when its recipe fails, so it is not
# mistaken for up-to-date on the next run.
.DELETE_ON_ERROR:

help: ## Show this help message
	@echo 'Usage:'
	@echo '  make <target>'
	@echo ''
	@echo 'Targets:'
	@awk -F ':|##' '/^[a-zA-Z_-]+:.*##/ { printf "  %-20s %s\n", $$1, $$NF }' $(MAKEFILE_LIST)

check-python: ## Verify Python version
	@command -v $(PYTHON) > /dev/null || (echo "Python $(PYTHON_VERSION) not found. Please install it first." && exit 1)

# Real directory target: only rebuilt when the venv directory is missing.
$(VENV_NAME): check-python ## Create virtual environment if it doesn't exist
	$(PYTHON) -m venv $(VENV_NAME)
	$(VENV_PIP) install --upgrade pip

install-dev: $(VENV_NAME) ## Install development dependencies
	$(VENV_PIP) install -r requirements/dev.txt

install-prod: $(VENV_NAME) ## Install production dependencies
	$(VENV_PIP) install -r requirements/prod.txt

run: ## Run Streamlit application
	PYTHONPATH=. $(VENV_BIN)/streamlit run main.py --server.port $(PORT)

clean: ## Clean cache files and remove virtual environment
	find . -type d -name "__pycache__" -exec rm -rf {} +
	find . -type d -name ".pytest_cache" -exec rm -rf {} +
	find . -type f -name "*.pyc" -delete
	rm -rf $(VENV_NAME)

lint: ## Run linter
	$(VENV_PIP) install pylint
	$(VENV_BIN)/pylint src

format: ## Format code using black
	$(VENV_PIP) install black
	$(VENV_BIN)/black src

test: ## Run tests
	$(VENV_BIN)/pytest tests

# NOTE: these rely on left-to-right prerequisite processing (clean first,
# then venv, then install) — not safe under `make -j`.
setup-dev: clean $(VENV_NAME) install-dev ## Clean existing setup and create fresh development installation

setup-prod: clean $(VENV_NAME) install-prod ## Clean existing setup and create fresh production installation
README.md CHANGED
@@ -1,14 +1,91 @@
1
  ---
2
- title: Ai Phone Leaderboard
3
- emoji: 🌍
4
- colorFrom: purple
5
- colorTo: blue
6
  sdk: streamlit
7
  sdk_version: 1.41.0
8
- app_file: app.py
9
  pinned: false
10
  license: mit
11
  short_description: AI Phone Leaderboard
12
  ---
13
 
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: AI Phone Leaderboard
3
+ emoji: 📱
4
+ colorFrom: blue
5
+ colorTo: purple
6
  sdk: streamlit
7
  sdk_version: 1.41.0
8
+ app_file: main.py
9
  pinned: false
10
  license: mit
11
  short_description: AI Phone Leaderboard
12
  ---
13
 
14
+ # AI Phone Benchmark Leaderboard
15
+
16
+ Streamlit frontend for displaying AI model performance benchmarks across different mobile devices.
17
+
18
+ ## Features
19
+
20
+ - Interactive data filtering
21
+ - Performance comparison charts
22
+ - Detailed benchmark leaderboard
23
+ - Real-time data updates
24
+ - Responsive design
25
+
26
+ ## Local Development
27
+
28
+ 1. Create a virtual environment:
29
+ ```bash
30
+ make venv
31
+ ```
32
+
33
+ 2. Install dependencies:
34
+ ```bash
35
+ make setup-dev
36
+ ```
37
+
38
+ 3. Set up environment variables:
39
+ Create a `.env` file:
40
+ ```env
41
+ HF_TOKEN=your_hugging_face_token_here # Required for accessing the API
42
+ ```
43
+
44
+ 4. Run the application:
45
+ ```bash
46
+ make run
47
+ ```
48
+
49
+ ## Deployment
50
+
51
+ This application is configured for deployment on Hugging Face Spaces:
52
+
53
+ 1. Create a new Space:
54
+ - Go to huggingface.co/spaces
55
+ - Click "Create new Space"
56
+ - Select "Streamlit" as the SDK
57
+ - Choose a name for your space
58
+
59
+ 2. Add required secret:
60
+ - Go to Space Settings
61
+ - Under "Repository Secrets"
62
+ - Add `HF_TOKEN` with your Hugging Face token
63
+
64
+ 3. The application will automatically deploy when you push to the repository.
65
+
66
+ ## Configuration
67
+
68
+ The application can be configured through environment variables:
69
+ - `HF_TOKEN`: Hugging Face access token (required)
70
+ - `API_URL`: Backend API URL (defaults to production)
71
+
72
+ ## API Integration
73
+
74
+ The application integrates with the AI Phone Benchmark API for data retrieval:
75
+ - Leaderboard data
76
+ - Performance metrics
77
+ - Device information
78
+
79
+ ## Development
80
+
81
+ Available make commands:
82
+ ```bash
83
+ make help # Show available commands
84
+ make setup-dev # Setup development environment
85
+ make setup-prod # Setup production environment
86
+ make run # Run Streamlit application
87
+ make lint # Run code linter
88
+ make format # Format code using black
89
+ make test # Run tests
90
+ make clean # Clean cache files
91
+ ```
main.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
import asyncio
from src.core.styles import CUSTOM_CSS
from src.components.header import render_header
from src.components.filters import render_table_filters, render_plot_filters
from src.components.visualizations import (
    render_performance_plots,
    render_leaderboard_table,
)
from src.services.api import fetch_leaderboard_data

# Page-wide configuration; Streamlit requires this to be the first st.* call.
st.set_page_config(
    page_title="AI-Phone Leaderboard",
    page_icon="src/static/images/favicon.png",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Inject the shared CSS theme once per page load.
st.markdown(CUSTOM_CSS, unsafe_allow_html=True)
async def main():
    """Entry point: render the header, leaderboard table, and comparison plots."""
    render_header()

    # Fetch the complete benchmark dataset once; every view derives from it.
    full_df = await fetch_leaderboard_data()
    if full_df.empty:
        st.info("No benchmark data available yet!")
        return

    # Distinct column values drive the filter widgets.
    models = sorted(full_df["Model"].unique())
    benchmarks = sorted(full_df["Benchmark"].unique())
    platforms = sorted(full_df["Platform"].unique())
    devices = sorted(full_df["Normalized Device ID"].unique())

    # Table filters: each element is "All" or a concrete value, in the order
    # (model, benchmark, platform, device).
    table_selection = render_table_filters(models, benchmarks, platforms, devices)

    # Apply every non-"All" selection to the table view.
    table_df = full_df.copy()
    filter_columns = ("Model", "Benchmark", "Platform", "Normalized Device ID")
    for column, chosen in zip(filter_columns, table_selection):
        if chosen != "All":
            table_df = table_df[table_df[column] == chosen]

    render_leaderboard_table(table_df)

    # Plot section: always narrowed to a single model/benchmark pair.
    st.subheader("Performance Comparison")
    model_for_plot, benchmark_for_plot = render_plot_filters(models, benchmarks)
    plot_df = full_df[
        (full_df["Model"] == model_for_plot)
        & (full_df["Benchmark"] == benchmark_for_plot)
    ]
    render_performance_plots(plot_df, model_for_plot)

if __name__ == "__main__":
    # Streamlit re-runs this script top to bottom; drive the async main().
    asyncio.run(main())
requirements/base.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ streamlit>=1.28.0
2
+ requests>=2.31.0
3
+ python-dotenv>=1.0.0
4
+ pandas>=2.1.3
5
+ plotly>=5.18.0
6
+ httpx>=0.25.1
7
+ pydantic-settings>=2.0.3
requirements/dev.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ -r base.txt
2
+ pytest>=7.4.3
3
+ black>=23.10.1
4
+ pylint>=3.0.2
5
+ pytest-cov>=4.1.0
requirements/prod.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ -r base.txt
src/__init__.py ADDED
File without changes
src/app.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import asyncio
from typing import Optional
import pandas as pd

async def fetch_and_filter_data(
    model_name: Optional[str] = None,
    benchmark_label: Optional[str] = None
) -> pd.DataFrame:
    """Fetch leaderboard data, optionally filtered by model and benchmark.

    Thin wrapper around the API service. The import is deferred to call
    time — presumably to avoid an import cycle at module load; confirm.
    """
    from .services.api import fetch_leaderboard_data

    return await fetch_leaderboard_data(
        model_name=model_name,
        benchmark_label=benchmark_label,
    )
src/components/__init__.py ADDED
File without changes
src/components/filters.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from typing import List, Tuple

def render_table_filters(
    models: List[str],
    benchmarks: List[str],
    platforms: List[str],
    devices: List[str]
) -> Tuple[str, str, str, str]:
    """Render the four leaderboard-table filter selectboxes.

    Each selectbox is prefixed with an "All" option. Returns the current
    selections as (model, benchmark, platform, device).
    """
    with st.container():
        col_model, col_benchmark, col_platform, col_device = st.columns(4)
        with col_model:
            model_choice = st.selectbox(
                "Model", ["All"] + list(models), key="table_model"
            )
        with col_benchmark:
            benchmark_choice = st.selectbox(
                "Benchmark", ["All"] + list(benchmarks), key="table_benchmark"
            )
        with col_platform:
            platform_choice = st.selectbox(
                "Platform", ["All"] + list(platforms), key="table_platform"
            )
        with col_device:
            device_choice = st.selectbox(
                "Device", ["All"] + list(devices), key="table_device"
            )

    return model_choice, benchmark_choice, platform_choice, device_choice
def render_plot_filters(
    models: List[str],
    benchmarks: List[str]
) -> Tuple[str, str]:
    """Render the plot filter selectboxes and return (model, benchmark).

    Unlike the table filters there is no "All" option — the comparison
    plots always show exactly one model/benchmark pair.
    """
    with st.container():
        model_col, benchmark_col = st.columns(2)
        with model_col:
            model_choice = st.selectbox(
                "Model for Comparison", models, key="plot_model"
            )
        with benchmark_col:
            benchmark_choice = st.selectbox(
                "Benchmark for Comparison", benchmarks, key="plot_benchmark"
            )

    return model_choice, benchmark_choice
src/components/header.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import base64
3
+ from pathlib import Path
4
+
def get_image_base64(image_path: Path) -> str:
    """Return the file at ``image_path`` encoded as a base64 string.

    On a missing file, surfaces a Streamlit error and returns "" so the
    caller can still embed the (empty) value in an <img> data URI.
    """
    if not image_path.exists():
        st.error(f"Logo not found at {image_path}")
        return ""

    raw_bytes = image_path.read_bytes()
    return base64.b64encode(raw_bytes).decode("utf-8")
def render_header():
    """Render the app header: HF and PocketPal logos plus title/subtitle.

    Logos are inlined as base64 data URIs so no static file server is
    required inside the Streamlit Space.
    """
    hf_logo = Path("src/static/images/hf-logo.png")
    pocketpal_logo = Path("src/static/images/pocketpal-ai-logo.png")

    header_html = f"""
    <div class="header-container">
        <div class="logos-container">
            <img src="data:image/png;base64,{get_image_base64(hf_logo)}" class="logo" alt="Hugging Face Logo">
            <img src="data:image/png;base64,{get_image_base64(pocketpal_logo)}" class="logo pocketpal" alt="PocketPal AI Logo">
        </div>
        <h1 class="header-title">AI Phone Benchmark Leaderboard</h1>
        <p class="header-subtitle">Comparing Large Language Models performance across AI Phones. Powered by PocketPal AI.</p>
    </div>
    """
    st.markdown(header_html, unsafe_allow_html=True)
src/components/visualizations.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import plotly.express as px
3
+ import pandas as pd
4
+ from typing import Optional
5
+
def create_performance_plot(df: pd.DataFrame, metric: str, title: str):
    """Build a grouped bar chart of ``metric`` per device, colored by platform.

    Returns a plotly Figure, or None when ``df`` is empty.
    """
    if df.empty:
        return None

    figure = px.bar(
        df,
        x="Device",
        y=metric,
        color="Platform",
        title=title,
        template="plotly_white",
        barmode="group",
        hover_data=["CPU Cores", "Memory Usage (GB)", "Model Size"],
    )
    # NOTE(review): the axis label says "Time (ms)" while the leaderboard
    # table labels the same metrics "(s)" — confirm the unit the API returns.
    figure.update_layout(
        xaxis_title="Device",
        yaxis_title="Time (ms)",
        legend_title="Platform",
        plot_bgcolor="white",
        height=400,
    )
    return figure
def render_performance_plots(plot_df: pd.DataFrame, model_name: str):
    """Render the prompt-processing and token-generation charts side by side.

    Shows a warning instead when ``plot_df`` has no rows for the selection.
    """
    if plot_df.empty:
        st.warning(
            "No data available for the selected model and benchmark combination."
        )
        return

    # One chart per metric, laid out in two equal columns.
    columns = st.columns(2)
    metrics = ("Prompt Processing", "Token Generation")
    for column, metric in zip(columns, metrics):
        with column:
            figure = create_performance_plot(
                plot_df,
                metric,
                f"{metric} Time - {model_name}",
            )
            if figure:
                st.plotly_chart(figure, use_container_width=True)
def render_leaderboard_table(df: pd.DataFrame):
    """Aggregate benchmark runs and display them as the leaderboard table.

    Rows are grouped per (model, benchmark, device, platform, size, cores)
    and the timing metrics are averaged over repeated runs; the std and the
    number of runs are kept alongside the means.
    """
    group_keys = [
        "Model ID",
        "Benchmark",
        "Normalized Device ID",
        "Platform",
        "Device",
        "Model Size",
        "CPU Cores",
    ]
    aggregations = {
        "Prompt Processing": ["mean", "count", "std"],
        "Token Generation": ["mean", "std"],
    }
    grouped_df = df.groupby(group_keys).agg(aggregations).reset_index()

    # Collapse the MultiIndex columns:
    # ("Prompt Processing", "mean") -> "Prompt Processing (mean)";
    # plain group keys keep their original name.
    grouped_df.columns = [
        name if stat == "" else f"{name} ({stat})"
        for name, stat in grouped_df.columns
    ]

    # Round the timing statistics for display.
    stat_columns = [
        "Prompt Processing (mean)",
        "Prompt Processing (std)",
        "Token Generation (mean)",
        "Token Generation (std)",
    ]
    grouped_df[stat_columns] = grouped_df[stat_columns].round(2)

    # Shorter display names. NOTE(review): "(s)" here vs "Time (ms)" on the
    # plots — confirm which unit the API actually returns.
    grouped_df = grouped_df.rename(
        columns={
            "Prompt Processing (mean)": "PP Avg (s)",
            "Prompt Processing (std)": "PP Std",
            "Prompt Processing (count)": "Runs",
            "Token Generation (mean)": "TG Avg (s)",
            "Token Generation (std)": "TG Std",
        }
    )

    # Column order shown to the user.
    display_cols = [
        "Platform",
        "Device",
        "Model ID",
        "Model Size",
        "Benchmark",
        "TG Avg (s)",
        "TG Std",
        "PP Avg (s)",
        "PP Std",
    ]

    # Largest models first, then benchmark name, then fastest generation.
    st.dataframe(
        grouped_df[display_cols].sort_values(
            ["Model Size", "Benchmark", "TG Avg (s)"],
            ascending=[False, True, True],
        ),
        use_container_width=True,
        height=400,
    )
src/core/__init__.py ADDED
File without changes
src/core/config.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from pydantic_settings import BaseSettings
from functools import lru_cache

class Settings(BaseSettings):
    """Application configuration, loaded from the environment / .env file."""

    # Backend benchmark API; override via the API_URL environment variable.
    API_URL: str = "https://a-ghorbani-ai-phone-benchmark-api.hf.space"
    # Hugging Face token; deliberately no default so startup fails loudly
    # when it is missing.
    HF_TOKEN: str

    class Config:
        case_sensitive = True
        env_file = ".env"

@lru_cache()
def get_settings():
    """Return the cached Settings instance (constructed once per process)."""
    return Settings()

# Module-level singleton used throughout the app.
settings = get_settings()
src/core/styles.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Global stylesheet injected once per page load via
# st.markdown(CUSTOM_CSS, unsafe_allow_html=True).
CUSTOM_CSS = """
<style>
    /* Constrain the app to a centered, readable width. */
    .stApp {
        max-width: 1200px;
        margin: 0 auto;
    }
    .plot-container {
        background-color: white;
        border-radius: 10px;
        padding: 20px;
        margin: 10px 0;
        box-shadow: 0 2px 4px rgba(0,0,0,0.1);
    }
    /* Give each Streamlit column a light card look. */
    div[data-testid="stHorizontalBlock"] > div[data-testid="column"] {
        background-color: #f8f9fa;
        padding: 10px;
        border-radius: 5px;
        margin: 0 5px;
    }
    .header-container {
        display: flex;
        flex-direction: column;
        align-items: center;
        padding: 1rem 0 2rem 0;
    }
    .logos-container {
        display: flex;
        align-items: center;
        justify-content: center;
        gap: 2rem;
        margin-bottom: 1rem;
        height: 100px;
    }
    .logo {
        width: 100px;
        height: 100px;
        object-fit: contain;
        display: block;
    }
    /* The PocketPal logo is slightly smaller and rounded. */
    .logo.pocketpal {
        width: 80px;
        height: 80px;
        border-radius: 20px;
    }
    .header-title {
        font-size: 2.5rem;
        font-weight: 600;
        text-align: center;
        color: #1a1a1a;
        margin: 0;
        padding: 10px 0;
    }
    .header-subtitle {
        font-size: 1.2rem;
        text-align: center;
        color: #666;
        margin: 0;
        padding-bottom: 1rem;
        max-width: 800px;
    }
</style>
"""
src/main.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ """
2
+ Main module for the frontend application.
3
+ This file serves as a module init file.
4
+ """
src/services/__init__.py ADDED
File without changes
src/services/api.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import httpx
2
+ import pandas as pd
3
+ from typing import Optional, Dict
4
+ import streamlit as st
5
+ from src.core.config import settings
6
+
async def fetch_leaderboard_data(
    model_name: Optional[str] = None,
    benchmark_label: Optional[str] = None
) -> pd.DataFrame:
    """Fetch leaderboard rows from the benchmark API as a DataFrame.

    ``None`` or ``"All"`` for either filter means "do not filter on it".
    Any failure is surfaced in the UI via st.error and an empty DataFrame
    is returned so callers can render a fallback instead of crashing.
    """
    query: Dict[str, str] = {}
    if model_name and model_name != "All":
        query["model_name"] = model_name
    if benchmark_label and benchmark_label != "All":
        query["benchmark_label"] = benchmark_label

    request_headers = {
        "Authorization": f"Bearer {settings.HF_TOKEN}",
        "Accept": "application/json"
    }

    try:
        async with httpx.AsyncClient() as client:
            response = await client.get(
                f"{settings.API_URL}/api/v1/leaderboard",
                params=query,
                headers=request_headers,
                follow_redirects=True
            )
            response.raise_for_status()
            payload = response.json()
        return pd.DataFrame(payload)
    except Exception as e:
        st.error(f"Error fetching data: {str(e)}")
        # httpx.HTTPStatusError carries the server response; include its body
        # when available to make debugging API failures easier.
        if hasattr(e, 'response'):
            st.error(f"Response: {e.response.text}")
        return pd.DataFrame()
src/static/.DS_Store ADDED
Binary file (6.15 kB). View file
 
src/static/images/favicon.png ADDED
src/static/images/hf-logo.png ADDED
src/static/images/pocketpal-ai-logo.png ADDED