name
stringlengths 8
58
| hf_id
stringlengths 12
49
| author
stringlengths 3
21
| providers
listlengths 1
16
| median_input_cost
float64 0
75
| median_output_cost
float64 0
150
| low_input_cost
float64 0
75
| low_output_cost
float64 0
150
| high_input_cost
float64 0
75
| high_output_cost
float64 0
150
| is_open_weights
bool 2
classes |
---|---|---|---|---|---|---|---|---|---|---|
Qwen: QwQ 32B | Qwen/QwQ-32B | Qwen | [
{
"name": "Nebius AI Studio",
"context": 131000,
"max_output": 131000,
"input": 0.15,
"output": 0.45,
"latency": 0.38,
"throughput": 40.22
},
{
"name": "NovitaAI",
"context": 33000,
"max_output": 33000,
"input": 0.18,
"output": 0.2,
"latency": 1.4,
"throughput": 20.63
},
{
"name": "Groq",
"context": 131000,
"max_output": 131000,
"input": 0.29,
"output": 0.39,
"latency": 0.38,
"throughput": 438.3
},
{
"name": "Hyperbolic",
"context": 131000,
"max_output": 131000,
"input": 0.4,
"output": 0.4,
"latency": 0.73,
"throughput": 26.72
},
{
"name": "Parasail",
"context": 131000,
"max_output": 131000,
"input": 0.5,
"output": 0.5,
"latency": 3.22,
"throughput": 32.44
},
{
"name": "Nebius AI Studio (Fast)",
"context": 131000,
"max_output": 131000,
"input": 0.5,
"output": 1.5,
"latency": 0.32,
"throughput": 83.05
},
{
"name": "Fireworks",
"context": 131000,
"max_output": 131000,
"input": 0.9,
"output": 0.9,
"latency": 0.82,
"throughput": 124.8
}
] | 0.4 | 0.4 | 0.18 | 0.2 | 0.5 | 1.5 | true |
Perplexity: R1 1776 | perplexity-ai/r1-1776 | perplexity-ai | [
{
"name": "Perplexity",
"context": 128000,
"max_output": 128000,
"input": 2,
"output": 8,
"latency": 6.35,
"throughput": 85.07
}
] | 2 | 8 | 2 | 8 | 2 | 8 | true |
Llama Guard 3 8B | meta-llama/Llama-Guard-3-8B | meta-llama | [
{
"name": "Groq",
"context": 8000,
"max_output": 8000,
"input": 0.2,
"output": 0.2,
"latency": null,
"throughput": null
},
{
"name": "SambaNova",
"context": 16000,
"max_output": 16000,
"input": 0.3,
"output": 0.3,
"latency": 0.57,
"throughput": 897.3
}
] | 0.3 | 0.3 | 0.2 | 0.2 | 0.3 | 0.3 | true |
Llama 3.1 Tulu 3 405B | allenai/Llama-3.1-Tulu-3-405B | allenai | [
{
"name": "SambaNova",
"context": 16000,
"max_output": 16000,
"input": 5,
"output": 10,
"latency": 4.53,
"throughput": 48.59
}
] | 5 | 10 | 5 | 10 | 5 | 10 | true |
DeepSeek: R1 Distill Llama 8B | meta-llama/Llama-3.1-8B | meta-llama | [
{
"name": "NovitaAI",
"context": 32000,
"max_output": 32000,
"input": 0.04,
"output": 0.04,
"latency": 1.11,
"throughput": 54.37
}
] | 0.04 | 0.04 | 0.04 | 0.04 | 0.04 | 0.04 | true |
AionLabs: Aion-1.0-Mini | FuseAI/FuseO1-DeepSeekR1-QwQ-SkyT1-32B-Preview | FuseAI | [
{
"name": "AionLabs",
"context": 33000,
"max_output": 33000,
"input": 0.7,
"output": 1.4,
"latency": 0.42,
"throughput": 161.4
}
] | 0.7 | 1.4 | 0.7 | 1.4 | 0.7 | 1.4 | true |
Qwen: Qwen2.5 VL 72B Instruct | Qwen/Qwen2.5-VL-72B-Instruct | Qwen | [
{
"name": "Parasail",
"context": 32000,
"max_output": 32000,
"input": 0.7,
"output": 0.7,
"latency": 1.78,
"throughput": 15.24
}
] | 0.7 | 0.7 | 0.7 | 0.7 | 0.7 | 0.7 | true |
DeepSeek: R1 Distill Qwen 1.5B | Qwen/Qwen2.5-Math-1.5B | Qwen | [
{
"name": "Together",
"context": 131000,
"max_output": 33000,
"input": 0.18,
"output": 0.18,
"latency": 0.26,
"throughput": 394.9
}
] | 0.18 | 0.18 | 0.18 | 0.18 | 0.18 | 0.18 | true |
Mistral: Mistral Small 3 | mistralai/Mistral-Small-24B-Instruct-2501 | mistralai | [
{
"name": "DeepInfra",
"context": 33000,
"max_output": 8000,
"input": 0.07,
"output": 0.14,
"latency": 0.7,
"throughput": 67.14
},
{
"name": "Mistral",
"context": 32000,
"max_output": 32000,
"input": 0.1,
"output": 0.3,
"latency": 0.33,
"throughput": 100.8
},
{
"name": "Ubicloud",
"context": 33000,
"max_output": 33000,
"input": 0.3,
"output": 0.3,
"latency": 1.73,
"throughput": 30.95
},
{
"name": "Together",
"context": 33000,
"max_output": 2000,
"input": 0.8,
"output": 0.8,
"latency": 0.47,
"throughput": 80.69
},
{
"name": "Fireworks",
"context": 33000,
"max_output": 33000,
"input": 0.9,
"output": 0.9,
"latency": 1.14,
"throughput": 33.32
}
] | 0.3 | 0.3 | 0.07 | 0.14 | 0.9 | 0.9 | true |
DeepSeek: R1 Distill Qwen 32B | Qwen/Qwen2.5-32B | Qwen | [
{
"name": "DeepInfra",
"context": 131000,
"max_output": 8000,
"input": 0.12,
"output": 0.18,
"latency": 0.32,
"throughput": 43.83
},
{
"name": "Cloudflare",
"context": 80000,
"max_output": 80000,
"input": 0.5,
"output": 4.88,
"latency": 0.42,
"throughput": 36
},
{
"name": "Groq",
"context": 131000,
"max_output": 131000,
"input": 0.79,
"output": 0.79,
"latency": 0.13,
"throughput": 137.2
},
{
"name": "NovitaAI",
"context": 64000,
"max_output": 64000,
"input": 0.3,
"output": 0.3,
"latency": 8.06,
"throughput": 32.05
}
] | 0.79 | 0.79 | 0.12 | 0.18 | 0.5 | 4.88 | true |
DeepSeek: R1 Distill Qwen 14B | deepseek-ai/DeepSeek-R1-Distill-Qwen-14B | deepseek-ai | [
{
"name": "Together",
"context": 131000,
"max_output": 33000,
"input": 1.6,
"output": 1.6,
"latency": 0.32,
"throughput": 163.4
},
{
"name": "NovitaAI",
"context": 64000,
"max_output": 64000,
"input": 0.15,
"output": 0.15,
"latency": 12.9,
"throughput": 71.57
}
] | 1.6 | 1.6 | 0.15 | 0.15 | 1.6 | 1.6 | true |
DeepSeek: R1 Distill Llama 70B | deepseek-ai/DeepSeek-R1-Distill-Llama-70B | deepseek-ai | [
{
"name": "DeepInfra",
"context": 131000,
"max_output": 8000,
"input": 0.23,
"output": 0.69,
"latency": 10.86,
"throughput": 84.41
},
{
"name": "Nebius AI Studio",
"context": 128000,
"max_output": 128000,
"input": 0.25,
"output": 0.75,
"latency": 9.23,
"throughput": 64.97
},
{
"name": "inference.net",
"context": 16000,
"max_output": 16000,
"input": 0.4,
"output": 0.4,
"latency": 19.48,
"throughput": 33.32
},
{
"name": "SambaNova",
"context": 16000,
"max_output": 16000,
"input": 0.7,
"output": 1.4,
"latency": 3.8,
"throughput": 470.4
},
{
"name": "Groq",
"context": 131000,
"max_output": 131000,
"input": 0.75,
"output": 0.99,
"latency": 1.55,
"throughput": 596.1
},
{
"name": "Together",
"context": 131000,
"max_output": 16000,
"input": 2,
"output": 2,
"latency": 5.23,
"throughput": 192.7
},
{
"name": "NovitaAI",
"context": 32000,
"max_output": 32000,
"input": 0.8,
"output": 0.8,
"latency": 36.28,
"throughput": 33.42
}
] | 0.8 | 0.8 | 0.4 | 0.4 | 2 | 2 | true |
DeepSeek: R1 | deepseek-ai/DeepSeek-R1 | deepseek-ai | [
{
"name": "Minimax",
"context": 64000,
"max_output": 64000,
"input": 0.55,
"output": 2.19,
"latency": 2.41,
"throughput": 20.27
},
{
"name": "Nebius AI Studio",
"context": 128000,
"max_output": 128000,
"input": 0.8,
"output": 2.4,
"latency": 0.43,
"throughput": 12.73
},
{
"name": "DeepInfra Turbo",
"context": 33000,
"max_output": 33000,
"input": 2,
"output": 6,
"latency": 6.53,
"throughput": 59.47
},
{
"name": "inference.net",
"context": 131000,
"max_output": 33000,
"input": 3,
"output": 3,
"latency": 0.76,
"throughput": 38.14
},
{
"name": "Parasail",
"context": 128000,
"max_output": 128000,
"input": 3,
"output": 3,
"latency": 1.35,
"throughput": 55.54
},
{
"name": "Together",
"context": 164000,
"max_output": 8000,
"input": 3,
"output": 7,
"latency": 0.87,
"throughput": 58.88
},
{
"name": "Friendli",
"context": 164000,
"max_output": 164000,
"input": 3,
"output": 7,
"latency": 1.21,
"throughput": 58.67
},
{
"name": "Fireworks",
"context": 164000,
"max_output": 164000,
"input": 3,
"output": 8,
"latency": 0.81,
"throughput": 69.68
},
{
"name": "SambaNova",
"context": 8000,
"max_output": 8000,
"input": 5,
"output": 7,
"latency": 4.69,
"throughput": 158.3
},
{
"name": "kluster.ai",
"context": 128000,
"max_output": 128000,
"input": 7,
"output": 7,
"latency": 10.41,
"throughput": 34.77
},
{
"name": "DeepSeek",
"context": 64000,
"max_output": 8000,
"input": 0.55,
"output": 2.19,
"latency": 0.69,
"throughput": 27.75
},
{
"name": "DeepInfra",
"context": 66000,
"max_output": 8000,
"input": 0.75,
"output": 2.4,
"latency": 2.38,
"throughput": 10.05
},
{
"name": "NovitaAI",
"context": 64000,
"max_output": 8000,
"input": 4,
"output": 4,
"latency": 23.74,
"throughput": 18.53
},
{
"name": "Featherless",
"context": 33000,
"max_output": 4000,
"input": 6.5,
"output": 8,
"latency": 52.12,
"throughput": 19.52
},
{
"name": "Avian.io",
"context": 164000,
"max_output": 164000,
"input": 6.9,
"output": 6.9,
"latency": null,
"throughput": null
}
] | 4 | 4 | 0.55 | 2.19 | 6.5 | 8 | true |
MiniMax: MiniMax-01 | MiniMaxAI/MiniMax-Text-01 | MiniMaxAI | [
{
"name": "Minimax",
"context": 1000000,
"max_output": 1000000,
"input": 0.2,
"output": 1.1,
"latency": 1.45,
"throughput": 27.07
}
] | 0.2 | 1.1 | 0.2 | 1.1 | 0.2 | 1.1 | true |
Microsoft: Phi 4 | microsoft/phi-4 | microsoft | [
{
"name": "DeepInfra",
"context": 16000,
"max_output": 8000,
"input": 0.07,
"output": 0.14,
"latency": 0.44,
"throughput": 37.26
},
{
"name": "Nebius AI Studio",
"context": 16000,
"max_output": 16000,
"input": 0.1,
"output": 0.3,
"latency": 0.14,
"throughput": 106
}
] | 0.1 | 0.3 | 0.07 | 0.14 | 0.1 | 0.3 | true |
Sao10K: Llama 3.1 70B Hanami x1 | Sao10K/L3.1-70B-Hanami-x1 | Sao10K | [
{
"name": "Infermatic",
"context": 16000,
"max_output": 16000,
"input": 3,
"output": 3,
"latency": 1.36,
"throughput": 30.45
}
] | 3 | 3 | 3 | 3 | 3 | 3 | true |
DeepSeek: DeepSeek V3 | deepseek-ai/DeepSeek-V3 | deepseek-ai | [
{
"name": "inference.net",
"context": 131000,
"max_output": 66000,
"input": 1.2,
"output": 1.2,
"latency": 0.89,
"throughput": 12.28
},
{
"name": "Together",
"context": 131000,
"max_output": 12000,
"input": 1.25,
"output": 1.25,
"latency": 0.75,
"throughput": 34.84
},
{
"name": "DeepSeek",
"context": 64000,
"max_output": 8000,
"input": 0.27,
"output": 1.1,
"latency": 0.48,
"throughput": 18.92
},
{
"name": "NovitaAI",
"context": 64000,
"max_output": 16000,
"input": 0.4,
"output": 1.3,
"latency": 1.43,
"throughput": 34.49
},
{
"name": "DeepInfra",
"context": 66000,
"max_output": 8000,
"input": 0.49,
"output": 0.89,
"latency": 1.15,
"throughput": 7.59
},
{
"name": "Nebius AI Studio",
"context": 131000,
"max_output": 131000,
"input": 0.5,
"output": 1.5,
"latency": 0.3,
"throughput": 18.73
},
{
"name": "Fireworks",
"context": 131000,
"max_output": 131000,
"input": 0.9,
"output": 0.9,
"latency": 1.11,
"throughput": 49.07
}
] | 0.9 | 0.9 | 0.27 | 1.1 | 1.25 | 1.25 | true |
Sao10K: Llama 3.3 Euryale 70B | Sao10K/L3.3-70B-Euryale-v2.3 | Sao10K | [
{
"name": "DeepInfra",
"context": 131000,
"max_output": 8000,
"input": 0.7,
"output": 0.8,
"latency": 0.21,
"throughput": 39.36
},
{
"name": "Infermatic",
"context": 16000,
"max_output": 16000,
"input": 1.5,
"output": 1.5,
"latency": 0.73,
"throughput": 43.27
}
] | 1.5 | 1.5 | 0.7 | 0.8 | 1.5 | 1.5 | true |
EVA Llama 3.33 70B | EVA-UNIT-01/EVA-LLaMA-3.33-70B-v0.1 | EVA-UNIT-01 | [
{
"name": "Featherless",
"context": 16000,
"max_output": 4000,
"input": 4,
"output": 6,
"latency": 3.18,
"throughput": 11.5
}
] | 4 | 6 | 4 | 6 | 4 | 6 | true |
Meta: Llama 3.3 70B Instruct | meta-llama/Llama-3.3-70B-Instruct | meta-llama | [
{
"name": "Lambda",
"context": 131000,
"max_output": 131000,
"input": 0.12,
"output": 0.3,
"latency": 0.88,
"throughput": 14.05
},
{
"name": "DeepInfra",
"context": 131000,
"max_output": 8000,
"input": 0.12,
"output": 0.3,
"latency": 0.39,
"throughput": 34.73
},
{
"name": "Nebius AI Studio",
"context": 131000,
"max_output": 131000,
"input": 0.13,
"output": 0.4,
"latency": 0.42,
"throughput": 24.18
},
{
"name": "inference.net",
"context": 16000,
"max_output": 16000,
"input": 0.4,
"output": 0.4,
"latency": 0.56,
"throughput": 15.03
},
{
"name": "Hyperbolic",
"context": 131000,
"max_output": 131000,
"input": 0.4,
"output": 0.4,
"latency": 1.37,
"throughput": 70.7
},
{
"name": "Groq",
"context": 33000,
"max_output": 33000,
"input": 0.59,
"output": 0.79,
"latency": 0.24,
"throughput": 310.7
},
{
"name": "Friendli",
"context": 131000,
"max_output": 131000,
"input": 0.6,
"output": 0.6,
"latency": 0.41,
"throughput": 103.5
},
{
"name": "SambaNova",
"context": 128000,
"max_output": 128000,
"input": 0.6,
"output": 1.2,
"latency": 0.9,
"throughput": 131.6
},
{
"name": "kluster.ai",
"context": 131000,
"max_output": 131000,
"input": 0.7,
"output": 0.7,
"latency": 0.61,
"throughput": 21.99
},
{
"name": "Parasail",
"context": 131000,
"max_output": 131000,
"input": 0.7,
"output": 0.7,
"latency": 0.64,
"throughput": 66.03
},
{
"name": "Cloudflare",
"context": 131000,
"max_output": 131000,
"input": 0.75,
"output": 0.75,
"latency": 0.8,
"throughput": 28.37
},
{
"name": "Lepton",
"context": 131000,
"max_output": 131000,
"input": 0.8,
"output": 0.8,
"latency": 0.39,
"throughput": 47.81
},
{
"name": "Together",
"context": 131000,
"max_output": 2000,
"input": 0.88,
"output": 0.88,
"latency": 0.51,
"throughput": 114
},
{
"name": "Avian.io",
"context": 131000,
"max_output": 131000,
"input": 0.9,
"output": 0.9,
"latency": 0.24,
"throughput": 122.4
},
{
"name": "Fireworks",
"context": 131000,
"max_output": 131000,
"input": 0.9,
"output": 0.9,
"latency": 0.39,
"throughput": 117.1
},
{
"name": "NovitaAI",
"context": 131000,
"max_output": 131000,
"input": 0.39,
"output": 0.39,
"latency": 0.59,
"throughput": 44.5
}
] | 0.7 | 0.7 | 0.12 | 0.3 | 0.9 | 0.9 | true |
Qwen: QwQ 32B Preview | Qwen/QwQ-32B-Preview | Qwen | [
{
"name": "Hyperbolic",
"context": 33000,
"max_output": 33000,
"input": 0.2,
"output": 0.2,
"latency": 11.74,
"throughput": 35.7
},
{
"name": "Fireworks",
"context": 33000,
"max_output": 33000,
"input": 0.9,
"output": 0.9,
"latency": 5.12,
"throughput": 25.3
},
{
"name": "Together",
"context": 33000,
"max_output": 33000,
"input": 1.2,
"output": 1.2,
"latency": 15.19,
"throughput": 36.67
},
{
"name": "SambaNova",
"context": 16000,
"max_output": 16000,
"input": 1.5,
"output": 3,
"latency": 0.92,
"throughput": 267.7
},
{
"name": "NovitaAI",
"context": 33000,
"max_output": 33000,
"input": 0.18,
"output": 0.6,
"latency": null,
"throughput": null
}
] | 0.9 | 0.9 | 0.2 | 0.2 | 1.5 | 3 | true |
Infermatic: Mistral Nemo Inferor 12B | Infermatic/MN-12B-Inferor-v0.0 | Infermatic | [
{
"name": "Featherless",
"context": 16000,
"max_output": 4000,
"input": 0.8,
"output": 1.2,
"latency": 2.69,
"throughput": 14.62
}
] | 0.8 | 1.2 | 0.8 | 1.2 | 0.8 | 1.2 | true |
Qwen2.5 Coder 32B Instruct | Qwen/Qwen2.5-Coder-32B-Instruct | Qwen | [
{
"name": "Lambda",
"context": 33000,
"max_output": 3000,
"input": 0.07,
"output": 0.16,
"latency": 0.5,
"throughput": 75.8
},
{
"name": "DeepInfra",
"context": 33000,
"max_output": 8000,
"input": 0.07,
"output": 0.16,
"latency": 0.26,
"throughput": 50.78
},
{
"name": "Hyperbolic",
"context": 128000,
"max_output": 8000,
"input": 0.2,
"output": 0.2,
"latency": 1.14,
"throughput": 54.07
},
{
"name": "Parasail",
"context": 128000,
"max_output": 128000,
"input": 0.5,
"output": 0.5,
"latency": 0.56,
"throughput": 52.8
},
{
"name": "Groq",
"context": 131000,
"max_output": 131000,
"input": 0.79,
"output": 0.79,
"latency": 0.42,
"throughput": 333.3
},
{
"name": "Together",
"context": 16000,
"max_output": 2000,
"input": 0.8,
"output": 0.8,
"latency": 0.5,
"throughput": 80.49
},
{
"name": "Fireworks",
"context": 131000,
"max_output": 4000,
"input": 0.9,
"output": 0.9,
"latency": 0.45,
"throughput": 55.73
},
{
"name": "Mancer",
"context": 33000,
"max_output": 2000,
"input": 1.5,
"output": 2.813,
"latency": 0.93,
"throughput": 19.21
},
{
"name": "SambaNova",
"context": 16000,
"max_output": 16000,
"input": 1.5,
"output": 3,
"latency": 0.78,
"throughput": 414.9
},
{
"name": "Mancer (private)",
"context": 33000,
"max_output": 2000,
"input": 2,
"output": 3.75,
"latency": null,
"throughput": null
},
{
"name": "Featherless",
"context": 16000,
"max_output": 4000,
"input": 2.6,
"output": 3.4,
"latency": 2.06,
"throughput": 13.11
}
] | 0.8 | 0.8 | 0.07 | 0.16 | 2.6 | 3.4 | true |
SorcererLM 8x22B | rAIfle/SorcererLM-8x22b-bf16 | rAIfle | [
{
"name": "Infermatic",
"context": 16000,
"max_output": 16000,
"input": 4.5,
"output": 4.5,
"latency": 2.83,
"throughput": 8.29
}
] | 4.5 | 4.5 | 4.5 | 4.5 | 4.5 | 4.5 | true |
EVA Qwen2.5 32B | EVA-UNIT-01/EVA-Qwen2.5-32B-v0.2 | EVA-UNIT-01 | [
{
"name": "Featherless",
"context": 16000,
"max_output": 4000,
"input": 2.6,
"output": 3.4,
"latency": 1.42,
"throughput": 11.58
}
] | 2.6 | 3.4 | 2.6 | 3.4 | 2.6 | 3.4 | true |
Unslopnemo 12B | TheDrummer/UnslopNemo-12B-v4.1 | TheDrummer | [
{
"name": "Infermatic",
"context": 32000,
"max_output": 32000,
"input": 0.5,
"output": 0.5,
"latency": 0.51,
"throughput": 80.43
}
] | 0.5 | 0.5 | 0.5 | 0.5 | 0.5 | 0.5 | true |
NeverSleep: Lumimaid v0.2 70B | NeverSleep/Lumimaid-v0.2-70B | NeverSleep | [
{
"name": "Mancer",
"context": 16000,
"max_output": 2000,
"input": 3.375,
"output": 4.5,
"latency": 3.25,
"throughput": 12.05
},
{
"name": "Featherless",
"context": 16000,
"max_output": 4000,
"input": 4,
"output": 6,
"latency": 2.84,
"throughput": 11.9
},
{
"name": "Mancer (private)",
"context": 16000,
"max_output": 2000,
"input": 4.5,
"output": 6,
"latency": 1.77,
"throughput": 12.8
}
] | 4 | 6 | 3.375 | 4.5 | 4.5 | 6 | true |
Magnum v4 72B | anthracite-org/magnum-v4-72b | anthracite-org | [
{
"name": "Mancer",
"context": 16000,
"max_output": 1000,
"input": 1.875,
"output": 2.25,
"latency": 0.87,
"throughput": 13.91
},
{
"name": "Mancer (private)",
"context": 16000,
"max_output": 1000,
"input": 2.5,
"output": 3,
"latency": 0.89,
"throughput": 13.84
},
{
"name": "Infermatic",
"context": 33000,
"max_output": 33000,
"input": 3,
"output": 3,
"latency": 0.77,
"throughput": 22.54
},
{
"name": "Featherless",
"context": 16000,
"max_output": 4000,
"input": 4,
"output": 6,
"latency": 2.81,
"throughput": 11.74
}
] | 3 | 3 | 1.875 | 2.25 | 4 | 6 | true |
Qwen2.5 7B Instruct | Qwen/Qwen2.5-7B-Instruct | Qwen | [
{
"name": "DeepInfra",
"context": 33000,
"max_output": 8000,
"input": 0.025,
"output": 0.05,
"latency": 0.19,
"throughput": 47.01
},
{
"name": "Together",
"context": 33000,
"max_output": 2000,
"input": 0.3,
"output": 0.3,
"latency": 0.4,
"throughput": 123
}
] | 0.3 | 0.3 | 0.025 | 0.05 | 0.3 | 0.3 | true |
NVIDIA: Llama 3.1 Nemotron 70B Instruct | nvidia/Llama-3.1-Nemotron-70B-Instruct-HF | nvidia | [
{
"name": "Lambda",
"context": 131000,
"max_output": 131000,
"input": 0.12,
"output": 0.3,
"latency": 0.93,
"throughput": 33.31
},
{
"name": "DeepInfra",
"context": 131000,
"max_output": 8000,
"input": 0.12,
"output": 0.3,
"latency": 0.59,
"throughput": 28.13
},
{
"name": "Together",
"context": 33000,
"max_output": 33000,
"input": 0.88,
"output": 0.88,
"latency": 0.54,
"throughput": 70.39
},
{
"name": "Infermatic",
"context": 32000,
"max_output": 32000,
"input": 1,
"output": 1,
"latency": 2.63,
"throughput": 13.9
}
] | 0.88 | 0.88 | 0.12 | 0.3 | 1 | 1 | true |
Magnum v2 72B | anthracite-org/magnum-v2-72b | anthracite-org | [
{
"name": "Infermatic",
"context": 33000,
"max_output": 33000,
"input": 3,
"output": 3,
"latency": 2.98,
"throughput": 42.32
},
{
"name": "Featherless",
"context": 16000,
"max_output": 4000,
"input": 4,
"output": 6,
"latency": 6.41,
"throughput": 12.61
}
] | 4 | 6 | 3 | 3 | 4 | 6 | true |
Rocinante 12B | TheDrummer/Rocinante-12B-v1.1 | TheDrummer | [
{
"name": "Infermatic",
"context": 33000,
"max_output": 33000,
"input": 0.25,
"output": 0.5,
"latency": 0.65,
"throughput": 24.05
},
{
"name": "Featherless",
"context": 16000,
"max_output": 4000,
"input": 0.8,
"output": 1.2,
"latency": 2.45,
"throughput": 11.45
}
] | 0.8 | 1.2 | 0.25 | 0.5 | 0.8 | 1.2 | true |
Meta: Llama 3.2 3B Instruct | meta-llama/Llama-3.2-3B-Instruct | meta-llama | [
{
"name": "Lambda",
"context": 131000,
"max_output": 131000,
"input": 0.015,
"output": 0.025,
"latency": 0.47,
"throughput": 94.12
},
{
"name": "DeepInfra",
"context": 131000,
"max_output": 8000,
"input": 0.015,
"output": 0.025,
"latency": 0.08,
"throughput": 92.2
},
{
"name": "inference.net",
"context": 16000,
"max_output": 16000,
"input": 0.02,
"output": 0.02,
"latency": 0.87,
"throughput": 85.37
},
{
"name": "Lepton",
"context": 131000,
"max_output": 131000,
"input": 0.03,
"output": 0.03,
"latency": 0.34,
"throughput": 201
},
{
"name": "Together",
"context": 131000,
"max_output": 16000,
"input": 0.06,
"output": 0.06,
"latency": 1.08,
"throughput": 58.03
},
{
"name": "SambaNova",
"context": 4000,
"max_output": 2000,
"input": 0.08,
"output": 0.16,
"latency": 0.25,
"throughput": 1353
},
{
"name": "Fireworks",
"context": 131000,
"max_output": 131000,
"input": 0.1,
"output": 0.1,
"latency": 0.55,
"throughput": 156.5
},
{
"name": "Hyperbolic",
"context": 131000,
"max_output": 131000,
"input": 0.1,
"output": 0.1,
"latency": 1.26,
"throughput": 187.4
},
{
"name": "Cloudflare",
"context": 131000,
"max_output": 131000,
"input": 0.1,
"output": 0.1,
"latency": 0.89,
"throughput": 159.5
},
{
"name": "NovitaAI",
"context": 33000,
"max_output": 33000,
"input": 0.03,
"output": 0.05,
"latency": 1.26,
"throughput": 79.98
}
] | 0.06 | 0.06 | 0.015 | 0.025 | 0.08 | 0.16 | true |
Meta: Llama 3.2 1B Instruct | meta-llama/Llama-3.2-1B-Instruct | meta-llama | [
{
"name": "Lepton",
"context": 131000,
"max_output": 131000,
"input": 0.01,
"output": 0.01,
"latency": 0.33,
"throughput": 421.3
},
{
"name": "inference.net",
"context": 16000,
"max_output": 16000,
"input": 0.01,
"output": 0.01,
"latency": 0.53,
"throughput": 133.9
},
{
"name": "DeepInfra",
"context": 131000,
"max_output": 8000,
"input": 0.01,
"output": 0.01,
"latency": 0.18,
"throughput": 154.1
},
{
"name": "SambaNova",
"context": 4000,
"max_output": 2000,
"input": 0.04,
"output": 0.08,
"latency": 0.46,
"throughput": 2093
},
{
"name": "Cloudflare",
"context": 131000,
"max_output": 131000,
"input": 0.1,
"output": 0.1,
"latency": 0.87,
"throughput": 225.2
},
{
"name": "NovitaAI",
"context": 131000,
"max_output": 131000,
"input": 0.02,
"output": 0.02,
"latency": null,
"throughput": null
}
] | 0.02 | 0.02 | 0.01 | 0.01 | 0.1 | 0.1 | true |
Meta: Llama 3.2 90B Vision Instruct | meta-llama/Llama-3.2-90B-Vision-Instruct | meta-llama | [
{
"name": "SambaNova",
"context": 4000,
"max_output": 2000,
"input": 0.8,
"output": 1.6,
"latency": 0.59,
"throughput": 272.8
},
{
"name": "Fireworks",
"context": 131000,
"max_output": 131000,
"input": 0.9,
"output": 0.9,
"latency": 0.86,
"throughput": 34.05
},
{
"name": "Together",
"context": 131000,
"max_output": 131000,
"input": 1.2,
"output": 1.2,
"latency": 0.63,
"throughput": 35.98
},
{
"name": "DeepInfra",
"context": 33000,
"max_output": 8000,
"input": 0.35,
"output": 0.4,
"latency": 0.46,
"throughput": 24.82
}
] | 1.2 | 1.2 | 0.35 | 0.4 | 0.8 | 1.6 | true |
Meta: Llama 3.2 11B Vision Instruct | meta-llama/Llama-3.2-11B-Vision-Instruct | meta-llama | [
{
"name": "inference.net",
"context": 16000,
"max_output": 16000,
"input": 0.055,
"output": 0.055,
"latency": 0.98,
"throughput": 39.34
},
{
"name": "DeepInfra",
"context": 131000,
"max_output": 8000,
"input": 0.055,
"output": 0.055,
"latency": 0.14,
"throughput": 52.31
},
{
"name": "SambaNova",
"context": 4000,
"max_output": 2000,
"input": 0.15,
"output": 0.3,
"latency": 0.62,
"throughput": 869.8
},
{
"name": "Together",
"context": 131000,
"max_output": 131000,
"input": 0.18,
"output": 0.18,
"latency": 0.34,
"throughput": 162.1
},
{
"name": "Cloudflare",
"context": 131000,
"max_output": 131000,
"input": 0.2,
"output": 0.2,
"latency": 1.49,
"throughput": 35.49
},
{
"name": "Fireworks",
"context": 131000,
"max_output": 131000,
"input": 0.2,
"output": 0.2,
"latency": 5.15,
"throughput": 35.08
},
{
"name": "NovitaAI",
"context": 33000,
"max_output": 33000,
"input": 0.06,
"output": 0.06,
"latency": null,
"throughput": null
}
] | 0.18 | 0.18 | 0.055 | 0.055 | 0.15 | 0.3 | true |
Qwen2.5 72B Instruct | Qwen/Qwen2.5-72B-Instruct | Qwen | [
{
"name": "Nebius AI Studio",
"context": 128000,
"max_output": 128000,
"input": 0.13,
"output": 0.4,
"latency": 0.21,
"throughput": 29.47
},
{
"name": "DeepInfra",
"context": 33000,
"max_output": 8000,
"input": 0.13,
"output": 0.4,
"latency": 0.44,
"throughput": 36.79
},
{
"name": "Hyperbolic",
"context": 33000,
"max_output": 33000,
"input": 0.4,
"output": 0.4,
"latency": 1.34,
"throughput": 44.11
},
{
"name": "Fireworks",
"context": 33000,
"max_output": 33000,
"input": 0.9,
"output": 0.9,
"latency": 0.39,
"throughput": 38.62
},
{
"name": "Together",
"context": 131000,
"max_output": 2000,
"input": 1.2,
"output": 1.2,
"latency": 0.43,
"throughput": 96.85
},
{
"name": "SambaNova",
"context": 16000,
"max_output": 16000,
"input": 2,
"output": 4,
"latency": 0.41,
"throughput": 359.3
},
{
"name": "NovitaAI",
"context": 32000,
"max_output": 4000,
"input": 0.38,
"output": 0.4,
"latency": 0.83,
"throughput": 20.55
}
] | 0.4 | 0.4 | 0.13 | 0.4 | 2 | 4 | true |
Qwen: Qwen2-VL 72B Instruct | Qwen/Qwen2-VL-72B-Instruct | Qwen | [
{
"name": "Hyperbolic",
"context": 4000,
"max_output": 4000,
"input": 0.4,
"output": 0.4,
"latency": 1.21,
"throughput": 37.83
}
] | 0.4 | 0.4 | 0.4 | 0.4 | 0.4 | 0.4 | true |
NeverSleep: Lumimaid v0.2 8B | NeverSleep/Lumimaid-v0.2-8B | NeverSleep | [
{
"name": "Mancer",
"context": 33000,
"max_output": 2000,
"input": 0.1875,
"output": 1.125,
"latency": 0.95,
"throughput": 36.34
},
{
"name": "Mancer (private)",
"context": 33000,
"max_output": 2000,
"input": 0.25,
"output": 1.5,
"latency": 0.69,
"throughput": 36.51
},
{
"name": "Featherless",
"context": 16000,
"max_output": 4000,
"input": 0.8,
"output": 1.2,
"latency": 1.6,
"throughput": 26.34
}
] | 0.25 | 1.5 | 0.1875 | 1.125 | 0.8 | 1.2 | true |
Mistral: Pixtral 12B | mistralai/Pixtral-12B-2409 | mistralai | [
{
"name": "Hyperbolic",
"context": 4000,
"max_output": 4000,
"input": 0.1,
"output": 0.1,
"latency": 1.4,
"throughput": 52.28
},
{
"name": "Mistral",
"context": 4000,
"max_output": 4000,
"input": 0.15,
"output": 0.15,
"latency": 1.13,
"throughput": 90.11
}
] | 0.15 | 0.15 | 0.1 | 0.1 | 0.15 | 0.15 | true |
Sao10K: Llama 3.1 Euryale 70B v2.2 | Sao10K/L3.1-70B-Euryale-v2.2 | Sao10K | [
{
"name": "DeepInfra",
"context": 131000,
"max_output": 8000,
"input": 0.7,
"output": 0.8,
"latency": 0.32,
"throughput": 37.38
},
{
"name": "Infermatic",
"context": 16000,
"max_output": 16000,
"input": 1.5,
"output": 1.5,
"latency": 0.93,
"throughput": 19.76
},
{
"name": "NovitaAI",
"context": 16000,
"max_output": 16000,
"input": 1.48,
"output": 1.48,
"latency": 1.51,
"throughput": 20.91
}
] | 1.48 | 1.48 | 0.7 | 0.8 | 1.5 | 1.5 | true |
Qwen: Qwen2-VL 7B Instruct | Qwen/Qwen2-VL-7B-Instruct | Qwen | [
{
"name": "Hyperbolic",
"context": 4000,
"max_output": 4000,
"input": 0.1,
"output": 0.1,
"latency": 1,
"throughput": 93.31
}
] | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | true |
Nous: Hermes 3 70B Instruct | NousResearch/Hermes-3-Llama-3.1-70B | NousResearch | [
{
"name": "Lambda",
"context": 131000,
"max_output": 131000,
"input": 0.12,
"output": 0.3,
"latency": 0.74,
"throughput": 32.01
},
{
"name": "Hyperbolic",
"context": 12000,
"max_output": 12000,
"input": 0.4,
"output": 0.4,
"latency": 0.95,
"throughput": 31.07
}
] | 0.4 | 0.4 | 0.12 | 0.3 | 0.4 | 0.4 | true |
Nous: Hermes 3 405B Instruct | NousResearch/Hermes-3-Llama-3.1-405B | NousResearch | [
{
"name": "Lambda",
"context": 131000,
"max_output": 131000,
"input": 0.8,
"output": 0.8,
"latency": 1.1,
"throughput": 26.23
},
{
"name": "inference.net",
"context": 33000,
"max_output": 33000,
"input": 0.8,
"output": 0.8,
"latency": 0.92,
"throughput": 25.74
},
{
"name": "DeepInfra",
"context": 131000,
"max_output": 8000,
"input": 0.8,
"output": 0.8,
"latency": 1.01,
"throughput": 11.46
},
{
"name": "Nebius AI Studio",
"context": 131000,
"max_output": 131000,
"input": 1,
"output": 3,
"latency": 0.31,
"throughput": 26.61
}
] | 0.8 | 0.8 | 0.8 | 0.8 | 1 | 3 | true |
Sao10K: Llama 3 8B Lunaris | Sao10K/L3-8B-Lunaris-v1 | Sao10K | [
{
"name": "DeepInfra",
"context": 8000,
"max_output": 8000,
"input": 0.03,
"output": 0.06,
"latency": 0.38,
"throughput": 73.74
},
{
"name": "NovitaAI",
"context": 8000,
"max_output": 8000,
"input": 0.05,
"output": 0.05,
"latency": 0.88,
"throughput": 59.26
}
] | 0.05 | 0.05 | 0.03 | 0.06 | 0.05 | 0.05 | true |
Aetherwiing: Starcannon 12B | intervitens/mini-magnum-12b-v1.1 | intervitens | [
{
"name": "Featherless",
"context": 16000,
"max_output": 4000,
"input": 0.8,
"output": 1.2,
"latency": 3.92,
"throughput": 15.83
}
] | 0.8 | 1.2 | 0.8 | 1.2 | 0.8 | 1.2 | true |
Meta: Llama 3.1 405B (base) | meta-llama/llama-3.1-405B | meta-llama | [
{
"name": "Hyperbolic (quantized)",
"context": 33000,
"max_output": 33000,
"input": 2,
"output": 2,
"latency": 0.74,
"throughput": 23.31
},
{
"name": "Hyperbolic",
"context": 33000,
"max_output": 33000,
"input": 4,
"output": 4,
"latency": 1.19,
"throughput": 16.15
}
] | 4 | 4 | 2 | 2 | 4 | 4 | true |
Mistral Nemo 12B Celeste | nothingiisreal/MN-12B-Celeste-V1.9 | nothingiisreal | [
{
"name": "Featherless",
"context": 16000,
"max_output": 4000,
"input": 0.8,
"output": 1.2,
"latency": 2.54,
"throughput": 14.35
}
] | 0.8 | 1.2 | 0.8 | 1.2 | 0.8 | 1.2 | true |
Meta: Llama 3.1 405B Instruct | meta-llama/Meta-Llama-3.1-405B-Instruct | meta-llama | [
{
"name": "DeepInfra",
"context": 33000,
"max_output": 8000,
"input": 0.8,
"output": 0.8,
"latency": 0.15,
"throughput": 17.38
},
{
"name": "Lambda",
"context": 131000,
"max_output": 131000,
"input": 0.8,
"output": 0.8,
"latency": 0.64,
"throughput": 34.89
},
{
"name": "Nebius AI Studio",
"context": 128000,
"max_output": 128000,
"input": 1,
"output": 3,
"latency": 0.15,
"throughput": 27.72
},
{
"name": "Fireworks",
"context": 131000,
"max_output": 131000,
"input": 3,
"output": 3,
"latency": 1.12,
"throughput": 44.58
},
{
"name": "Together",
"context": 131000,
"max_output": 131000,
"input": 3.5,
"output": 3.5,
"latency": 1.08,
"throughput": 49.02
},
{
"name": "kluster.ai",
"context": 131000,
"max_output": 131000,
"input": 3.5,
"output": 3.5,
"latency": 1.41,
"throughput": 17.25
},
{
"name": "Hyperbolic",
"context": 33000,
"max_output": 33000,
"input": 4,
"output": 4,
"latency": 1.02,
"throughput": 8.78
},
{
"name": "SambaNova",
"context": 8000,
"max_output": 4000,
"input": 5,
"output": 10,
"latency": 1.53,
"throughput": 81.81
},
{
"name": "NovitaAI",
"context": 33000,
"max_output": 33000,
"input": 2.75,
"output": 2.75,
"latency": null,
"throughput": null
}
] | 3 | 3 | 0.8 | 0.8 | 5 | 10 | true |
Meta: Llama 3.1 8B Instruct | meta-llama/Meta-Llama-3.1-8B-Instruct | meta-llama | [
{
"name": "DeepInfra",
"context": 131000,
"max_output": 8000,
"input": 0.02,
"output": 0.05,
"latency": 0.08,
"throughput": 74.22
},
{
"name": "Lambda",
"context": 131000,
"max_output": 131000,
"input": 0.025,
"output": 0.04,
"latency": 0.43,
"throughput": 220.6
},
{
"name": "inference.net",
"context": 16000,
"max_output": 16000,
"input": 0.03,
"output": 0.03,
"latency": 0.63,
"throughput": 59.25
},
{
"name": "Groq",
"context": 131000,
"max_output": 8000,
"input": 0.05,
"output": 0.08,
"latency": 0.3,
"throughput": 552.2
},
{
"name": "Lepton",
"context": 131000,
"max_output": 131000,
"input": 0.07,
"output": 0.07,
"latency": 0.21,
"throughput": 100.1
},
{
"name": "Friendli",
"context": 131000,
"max_output": 131000,
"input": 0.1,
"output": 0.1,
"latency": 0.42,
"throughput": 245.1
},
{
"name": "Hyperbolic",
"context": 33000,
"max_output": 33000,
"input": 0.1,
"output": 0.1,
"latency": 1.19,
"throughput": 101
},
{
"name": "SambaNova",
"context": 8000,
"max_output": 4000,
"input": 0.1,
"output": 0.2,
"latency": 0.28,
"throughput": 803.3
},
{
"name": "Cloudflare",
"context": 131000,
"max_output": 131000,
"input": 0.15,
"output": 0.15,
"latency": 0.31,
"throughput": 126.3
},
{
"name": "kluster.ai",
"context": 131000,
"max_output": 131000,
"input": 0.18,
"output": 0.18,
"latency": 0.32,
"throughput": 13.41
},
{
"name": "Together",
"context": 131000,
"max_output": 131000,
"input": 0.18,
"output": 0.18,
"latency": 0.25,
"throughput": 313.2
},
{
"name": "Fireworks",
"context": 131000,
"max_output": 131000,
"input": 0.2,
"output": 0.2,
"latency": 0.36,
"throughput": 223.2
},
{
"name": "Avian.io",
"context": 131000,
"max_output": 131000,
"input": 0.2,
"output": 0.2,
"latency": 0.29,
"throughput": 308
},
{
"name": "NovitaAI",
"context": 16000,
"max_output": 8000,
"input": 0.05,
"output": 0.05,
"latency": 1.84,
"throughput": 66.66
}
] | 0.1 | 0.1 | 0.03 | 0.03 | 0.2 | 0.2 | true |
Meta: Llama 3.1 70B Instruct | meta-llama/Meta-Llama-3.1-70B-Instruct | meta-llama | [
{
"name": "DeepInfra",
"context": 131000,
"max_output": 8000,
"input": 0.12,
"output": 0.3,
"latency": 0.54,
"throughput": 33.04
},
{
"name": "Lambda",
"context": 131000,
"max_output": 131000,
"input": 0.12,
"output": 0.3,
"latency": 0.58,
"throughput": 37.2
},
{
"name": "Nebius AI Studio",
"context": 128000,
"max_output": 128000,
"input": 0.13,
"output": 0.4,
"latency": 0.15,
"throughput": 29.24
},
{
"name": "inference.net",
"context": 16000,
"max_output": 16000,
"input": 0.4,
"output": 0.4,
"latency": 1.38,
"throughput": 16.81
},
{
"name": "Hyperbolic",
"context": 33000,
"max_output": 33000,
"input": 0.4,
"output": 0.4,
"latency": 0.88,
"throughput": 106.8
},
{
"name": "Friendli",
"context": 131000,
"max_output": 131000,
"input": 0.6,
"output": 0.6,
"latency": 0.2,
"throughput": 113.5
},
{
"name": "SambaNova",
"context": 8000,
"max_output": 4000,
"input": 0.6,
"output": 1.2,
"latency": 0.53,
"throughput": 269.4
},
{
"name": "Cloudflare",
"context": 131000,
"max_output": 131000,
"input": 0.75,
"output": 0.75,
"latency": 0.78,
"throughput": 27.26
},
{
"name": "Lepton",
"context": 131000,
"max_output": 131000,
"input": 0.8,
"output": 0.8,
"latency": 0.3,
"throughput": 45.24
},
{
"name": "Together",
"context": 131000,
"max_output": 131000,
"input": 0.88,
"output": 0.88,
"latency": 0.33,
"throughput": 115.5
},
{
"name": "Fireworks",
"context": 131000,
"max_output": 131000,
"input": 0.9,
"output": 0.9,
"latency": 0.44,
"throughput": 94.56
},
{
"name": "NovitaAI",
"context": 33000,
"max_output": 33000,
"input": 0.34,
"output": 0.39,
"latency": 1.72,
"throughput": 52.16
}
] | 0.6 | 0.6 | 0.12 | 0.3 | 0.9 | 0.9 | true |
Mistral: Mistral Nemo | mistralai/Mistral-Nemo-Instruct-2407 | mistralai | [
{
"name": "DeepInfra",
"context": 131000,
"max_output": 8000,
"input": 0.035,
"output": 0.08,
"latency": 0.25,
"throughput": 52.49
},
{
"name": "Nebius AI Studio",
"context": 128000,
"max_output": 128000,
"input": 0.04,
"output": 0.12,
"latency": 0.64,
"throughput": 22.83
},
{
"name": "inference.net",
"context": 16000,
"max_output": 16000,
"input": 0.1,
"output": 0.1,
"latency": 0.69,
"throughput": 63.6
},
{
"name": "Parasail",
"context": 128000,
"max_output": 128000,
"input": 0.11,
"output": 0.11,
"latency": 0.75,
"throughput": 134.1
},
{
"name": "Mistral",
"context": 128000,
"max_output": 128000,
"input": 0.15,
"output": 0.15,
"latency": 0.29,
"throughput": 128.1
},
{
"name": "Lepton",
"context": 128000,
"max_output": 128000,
"input": 0.18,
"output": 0.18,
"latency": 0.28,
"throughput": 50.18
},
{
"name": "Azure",
"context": 128000,
"max_output": 128000,
"input": 0.3,
"output": 0.3,
"latency": 0.64,
"throughput": 90.2
},
{
"name": "NovitaAI",
"context": 131000,
"max_output": 131000,
"input": 0.17,
"output": 0.17,
"latency": 1.61,
"throughput": 39.52
}
] | 0.15 | 0.15 | 0.035 | 0.08 | 0.3 | 0.3 | true |
Mistral: Codestral Mamba | mistralai/mamba-codestral-7B-v0.1 | mistralai | [
{
"name": "Mistral",
"context": 256000,
"max_output": 256000,
"input": 0.25,
"output": 0.25,
"latency": 0.34,
"throughput": 97.73
}
] | 0.25 | 0.25 | 0.25 | 0.25 | 0.25 | 0.25 | true |
Qwen 2 7B Instruct | Qwen/Qwen2-7B-Instruct | Qwen | [
{
"name": "NovitaAI",
"context": 33000,
"max_output": 33000,
"input": 0.054,
"output": 0.054,
"latency": 0.65,
"throughput": 82.28
}
] | 0.054 | 0.054 | 0.054 | 0.054 | 0.054 | 0.054 | true |
Google: Gemma 2 27B | google/gemma-2-27b-it | google | [
{
"name": "DeepInfra",
"context": 8000,
"max_output": 8000,
"input": 0.27,
"output": 0.27,
"latency": 0.49,
"throughput": 38.62
},
{
"name": "Together",
"context": 8000,
"max_output": 2000,
"input": 0.8,
"output": 0.8,
"latency": 0.41,
"throughput": 69.52
}
] | 0.8 | 0.8 | 0.27 | 0.27 | 0.8 | 0.8 | true |
Magnum 72B | alpindale/magnum-72b-v1 | alpindale | [
{
"name": "Mancer",
"context": 16000,
"max_output": 1000,
"input": 1.875,
"output": 2.25,
"latency": 1.21,
"throughput": 22.84
},
{
"name": "Mancer (private)",
"context": 16000,
"max_output": 1000,
"input": 2.5,
"output": 3,
"latency": 0.92,
"throughput": 22.6
},
{
"name": "Featherless",
"context": 16000,
"max_output": 4000,
"input": 4,
"output": 6,
"latency": 8.11,
"throughput": 11.31
}
] | 2.5 | 3 | 1.875 | 2.25 | 4 | 6 | true |
Google: Gemma 2 9B | google/gemma-2-9b-it | google | [
{
"name": "DeepInfra",
"context": 8000,
"max_output": 8000,
"input": 0.03,
"output": 0.06,
"latency": 0.09,
"throughput": 35.94
},
{
"name": "Lepton",
"context": 8000,
"max_output": 8000,
"input": 0.07,
"output": 0.07,
"latency": 0.27,
"throughput": 103.4
},
{
"name": "Groq",
"context": 8000,
"max_output": 8000,
"input": 0.2,
"output": 0.2,
"latency": 0.22,
"throughput": 560.1
},
{
"name": "Together",
"context": 8000,
"max_output": 8000,
"input": 0.3,
"output": 0.3,
"latency": 0.22,
"throughput": 109
},
{
"name": "NovitaAI",
"context": 8000,
"max_output": 8000,
"input": 0.08,
"output": 0.08,
"latency": 4.21,
"throughput": 29.98
}
] | 0.08 | 0.08 | 0.03 | 0.06 | 0.3 | 0.3 | true |
Sao10k: Llama 3 Euryale 70B v2.1 | Sao10K/L3-70B-Euryale-v2.1 | Sao10K | [
{
"name": "DeepInfra",
"context": 8000,
"max_output": 8000,
"input": 0.7,
"output": 0.8,
"latency": 0.2,
"throughput": 40.32
},
{
"name": "NovitaAI",
"context": 16000,
"max_output": 16000,
"input": 1.48,
"output": 1.48,
"latency": 3.58,
"throughput": 19.49
}
] | 1.48 | 1.48 | 0.7 | 0.8 | 1.48 | 1.48 | true |
Dolphin 2.9.2 Mixtral 8x22B 🐬 | cognitivecomputations/dolphin-2.9.2-mixtral-8x22b | cognitivecomputations | [
{
"name": "NovitaAI",
"context": 16000,
"max_output": 16000,
"input": 0.9,
"output": 0.9,
"latency": 6.55,
"throughput": 9.32
}
] | 0.9 | 0.9 | 0.9 | 0.9 | 0.9 | 0.9 | true |
Qwen 2 72B Instruct | Qwen/Qwen2-72B-Instruct | Qwen | [
{
"name": "Together",
"context": 33000,
"max_output": 4000,
"input": 0.9,
"output": 0.9,
"latency": 0.45,
"throughput": 67.28
}
] | 0.9 | 0.9 | 0.9 | 0.9 | 0.9 | 0.9 | true |
Mistral: Mistral 7B Instruct | mistralai/Mistral-7B-Instruct-v0.3 | mistralai | [
{
"name": "DeepInfra",
"context": 33000,
"max_output": 8000,
"input": 0.03,
"output": 0.055,
"latency": 0.29,
"throughput": 82.05
},
{
"name": "Parasail",
"context": 33000,
"max_output": 33000,
"input": 0.11,
"output": 0.11,
"latency": 0.63,
"throughput": 128.1
},
{
"name": "Together",
"context": 33000,
"max_output": 4000,
"input": 0.2,
"output": 0.2,
"latency": 0.37,
"throughput": 163.2
},
{
"name": "NovitaAI",
"context": 33000,
"max_output": 33000,
"input": 0.059,
"output": 0.059,
"latency": 1.1,
"throughput": 121.5
},
{
"name": "Lepton",
"context": 33000,
"max_output": 33000,
"input": 0.07,
"output": 0.07,
"latency": 0.54,
"throughput": 105.4
}
] | 0.07 | 0.07 | 0.03 | 0.055 | 0.2 | 0.2 | true |
Mistral: Mistral 7B Instruct v0.3 | mistralai/Mistral-7B-Instruct-v0.3 | mistralai | [
{
"name": "DeepInfra",
"context": 33000,
"max_output": 8000,
"input": 0.03,
"output": 0.055,
"latency": 0.2,
"throughput": 91.8
},
{
"name": "Lepton",
"context": 33000,
"max_output": 33000,
"input": 0.07,
"output": 0.07,
"latency": 0.28,
"throughput": 107.9
},
{
"name": "Together",
"context": 33000,
"max_output": 4000,
"input": 0.2,
"output": 0.2,
"latency": 0.46,
"throughput": 136.8
},
{
"name": "NovitaAI",
"context": 33000,
"max_output": 33000,
"input": 0.059,
"output": 0.059,
"latency": null,
"throughput": null
}
] | 0.07 | 0.07 | 0.03 | 0.055 | 0.2 | 0.2 | true |
NousResearch: Hermes 2 Pro - Llama-3 8B | NousResearch/Hermes-2-Pro-Llama-3-8B | NousResearch | [
{
"name": "Lambda",
"context": 131000,
"max_output": 131000,
"input": 0.025,
"output": 0.04,
"latency": 0.5,
"throughput": 271.2
},
{
"name": "NovitaAI",
"context": 8000,
"max_output": 8000,
"input": 0.14,
"output": 0.14,
"latency": 1.23,
"throughput": 126.7
}
] | 0.14 | 0.14 | 0.025 | 0.04 | 0.14 | 0.14 | true |
Microsoft: Phi-3 Mini 128K Instruct | microsoft/Phi-3-mini-128k-instruct | microsoft | [
{
"name": "Azure",
"context": 128000,
"max_output": 128000,
"input": 0.1,
"output": 0.1,
"latency": 0.48,
"throughput": 91.92
}
] | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | true |
Microsoft: Phi-3 Medium 128K Instruct | microsoft/Phi-3-medium-128k-instruct | microsoft | [
{
"name": "Azure",
"context": 128000,
"max_output": 128000,
"input": 1,
"output": 1,
"latency": 0.56,
"throughput": 58.03
}
] | 1 | 1 | 1 | 1 | 1 | 1 | true |
NeverSleep: Llama 3 Lumimaid 70B | NeverSleep/Llama-3-Lumimaid-70B-v0.1 | NeverSleep | [
{
"name": "Mancer",
"context": 8000,
"max_output": 2000,
"input": 3.375,
"output": 4.5,
"latency": 0.91,
"throughput": 13.09
},
{
"name": "Featherless",
"context": 8000,
"max_output": 4000,
"input": 4,
"output": 6,
"latency": 2.25,
"throughput": 9.73
},
{
"name": "Mancer (private)",
"context": 8000,
"max_output": 2000,
"input": 4.5,
"output": 6,
"latency": 0.82,
"throughput": 13.63
}
] | 4 | 6 | 3.375 | 4.5 | 4.5 | 6 | true |
Meta: LlamaGuard 2 8B | meta-llama/Meta-Llama-Guard-2-8B | meta-llama | [
{
"name": "Together",
"context": 8000,
"max_output": 8000,
"input": 0.2,
"output": 0.2,
"latency": 0.98,
"throughput": 76.25
}
] | 0.2 | 0.2 | 0.2 | 0.2 | 0.2 | 0.2 | true |
NeverSleep: Llama 3 Lumimaid 8B (extended) | NeverSleep/Llama-3-Lumimaid-8B-v0.1 | NeverSleep | [
{
"name": "Mancer",
"context": 25000,
"max_output": 2000,
"input": 0.1875,
"output": 1.125,
"latency": 0.77,
"throughput": 50.08
},
{
"name": "Mancer (private)",
"context": 25000,
"max_output": 2000,
"input": 0.25,
"output": 1.5,
"latency": 0.54,
"throughput": 53.36
}
] | 0.25 | 1.5 | 0.1875 | 1.125 | 0.25 | 1.5 | true |
NeverSleep: Llama 3 Lumimaid 8B | NeverSleep/Llama-3-Lumimaid-8B-v0.1 | NeverSleep | [
{
"name": "Mancer",
"context": 25000,
"max_output": 2000,
"input": 0.1875,
"output": 1.125,
"latency": 0.76,
"throughput": 50.01
},
{
"name": "Mancer (private)",
"context": 25000,
"max_output": 2000,
"input": 0.25,
"output": 1.5,
"latency": 0.54,
"throughput": 53.36
},
{
"name": "Featherless",
"context": 8000,
"max_output": 4000,
"input": 0.8,
"output": 1.2,
"latency": 1.3,
"throughput": 25.6
}
] | 0.25 | 1.5 | 0.1875 | 1.125 | 0.8 | 1.2 | true |
Fimbulvetr 11B v2 | Sao10K/Fimbulvetr-11B-v2 | Sao10K | [
{
"name": "Featherless",
"context": 4000,
"max_output": 4000,
"input": 0.8,
"output": 1.2,
"latency": 1.74,
"throughput": 24.72
}
] | 0.8 | 1.2 | 0.8 | 1.2 | 0.8 | 1.2 | true |
Meta: Llama 3 8B Instruct | meta-llama/Meta-Llama-3-8B-Instruct | meta-llama | [
{
"name": "DeepInfra",
"context": 8000,
"max_output": 8000,
"input": 0.03,
"output": 0.06,
"latency": 0.17,
"throughput": 115.6
},
{
"name": "Groq",
"context": 8000,
"max_output": 8000,
"input": 0.05,
"output": 0.08,
"latency": 0.29,
"throughput": 478.4
},
{
"name": "Cloudflare",
"context": 8000,
"max_output": 8000,
"input": 0.15,
"output": 0.15,
"latency": 0.72,
"throughput": 15.63
},
{
"name": "Mancer",
"context": 16000,
"max_output": 2000,
"input": 0.1875,
"output": 1.125,
"latency": 0.56,
"throughput": 51.09
},
{
"name": "Fireworks",
"context": 8000,
"max_output": 8000,
"input": 0.2,
"output": 0.2,
"latency": 0.48,
"throughput": 146.5
},
{
"name": "Mancer (private)",
"context": 16000,
"max_output": 2000,
"input": 0.25,
"output": 1.5,
"latency": 0.6,
"throughput": 51.02
},
{
"name": "NovitaAI",
"context": 8000,
"max_output": 8000,
"input": 0.04,
"output": 0.04,
"latency": null,
"throughput": null
}
] | 0.15 | 0.15 | 0.04 | 0.04 | 0.25 | 1.5 | true |
Meta: Llama 3 70B Instruct | meta-llama/Meta-Llama-3-70B-Instruct | meta-llama | [
{
"name": "DeepInfra",
"context": 8000,
"max_output": 8000,
"input": 0.23,
"output": 0.4,
"latency": 0.41,
"throughput": 24.94
},
{
"name": "Groq",
"context": 8000,
"max_output": 8000,
"input": 0.59,
"output": 0.79,
"latency": 0.22,
"throughput": 265
},
{
"name": "Together",
"context": 8000,
"max_output": 8000,
"input": 0.88,
"output": 0.88,
"latency": 0.42,
"throughput": 20.15
},
{
"name": "Fireworks",
"context": 8000,
"max_output": 8000,
"input": 0.9,
"output": 0.9,
"latency": 0.59,
"throughput": 162.2
},
{
"name": "NovitaAI",
"context": 8000,
"max_output": 8000,
"input": 0.51,
"output": 0.74,
"latency": 4.84,
"throughput": 20.38
}
] | 0.59 | 0.79 | 0.23 | 0.4 | 0.9 | 0.9 | true |
Mistral: Mixtral 8x22B Instruct | mistralai/Mixtral-8x22B-Instruct-v0.1 | mistralai | [
{
"name": "Fireworks",
"context": 66000,
"max_output": 66000,
"input": 0.9,
"output": 0.9,
"latency": 0.67,
"throughput": 77.48
},
{
"name": "Together",
"context": 66000,
"max_output": 2000,
"input": 1.2,
"output": 1.2,
"latency": 0.78,
"throughput": 104.2
},
{
"name": "Mistral",
"context": 66000,
"max_output": 66000,
"input": 2,
"output": 6,
"latency": 0.26,
"throughput": 97.77
}
] | 1.2 | 1.2 | 0.9 | 0.9 | 2 | 6 | true |
WizardLM-2 8x22B | microsoft/WizardLM-2-8x22B | microsoft | [
{
"name": "DeepInfra",
"context": 66000,
"max_output": 8000,
"input": 0.5,
"output": 0.5,
"latency": 0.1,
"throughput": 29.57
},
{
"name": "Lepton",
"context": 66000,
"max_output": 66000,
"input": 1,
"output": 1,
"latency": 0.39,
"throughput": 33.75
},
{
"name": "NovitaAI",
"context": 66000,
"max_output": 66000,
"input": 0.62,
"output": 0.62,
"latency": 1.06,
"throughput": 17.49
},
{
"name": "Together",
"context": 66000,
"max_output": 66000,
"input": 1.2,
"output": 1.2,
"latency": 0.56,
"throughput": 64.35
}
] | 1 | 1 | 0.5 | 0.5 | 1.2 | 1.2 | true |
WizardLM-2 7B | microsoft/WizardLM-2-7B | microsoft | [
{
"name": "Lepton",
"context": 32000,
"max_output": 32000,
"input": 0.07,
"output": 0.07,
"latency": 0.36,
"throughput": 102.4
}
] | 0.07 | 0.07 | 0.07 | 0.07 | 0.07 | 0.07 | true |
Databricks: DBRX 132B Instruct | databricks/dbrx-instruct | databricks | [
{
"name": "Together",
"context": 33000,
"max_output": 2000,
"input": 1.2,
"output": 1.2,
"latency": 0.42,
"throughput": 83.57
}
] | 1.2 | 1.2 | 1.2 | 1.2 | 1.2 | 1.2 | true |
Midnight Rose 70B | sophosympatheia/Midnight-Rose-70B-v2.0.3 | sophosympatheia | [
{
"name": "NovitaAI",
"context": 4000,
"max_output": 4000,
"input": 0.8,
"output": 0.8,
"latency": 1.84,
"throughput": 14.85
}
] | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 | true |
Google: Gemma 7B | google/gemma-1.1-7b-it | google | [
{
"name": "Cloudflare",
"context": 8000,
"max_output": 8000,
"input": 0.15,
"output": 0.15,
"latency": 1.33,
"throughput": 13.07
}
] | 0.15 | 0.15 | 0.15 | 0.15 | 0.15 | 0.15 | true |
Nous: Hermes 2 Mixtral 8x7B DPO | NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO | NousResearch | [
{
"name": "Together",
"context": 33000,
"max_output": 2000,
"input": 0.6,
"output": 0.6,
"latency": 0.45,
"throughput": 105.8
}
] | 0.6 | 0.6 | 0.6 | 0.6 | 0.6 | 0.6 | true |
Dolphin 2.6 Mixtral 8x7B 🐬 | cognitivecomputations/dolphin-2.6-mixtral-8x7b | cognitivecomputations | [
{
"name": "Lepton",
"context": 33000,
"max_output": 33000,
"input": 0.5,
"output": 0.5,
"latency": 0.11,
"throughput": 84.13
}
] | 0.5 | 0.5 | 0.5 | 0.5 | 0.5 | 0.5 | true |
Mistral: Mixtral 8x7B (base) | mistralai/Mixtral-8x7B-v0.1 | mistralai | [
{
"name": "Together",
"context": 33000,
"max_output": 2000,
"input": 0.6,
"output": 0.6,
"latency": 0.38,
"throughput": 128
}
] | 0.6 | 0.6 | 0.6 | 0.6 | 0.6 | 0.6 | true |
Mistral: Mixtral 8x7B Instruct | mistralai/Mixtral-8x7B-Instruct-v0.1 | mistralai | [
{
"name": "DeepInfra",
"context": 33000,
"max_output": 8000,
"input": 0.24,
"output": 0.24,
"latency": 0.31,
"throughput": 101.4
},
{
"name": "Groq",
"context": 33000,
"max_output": 33000,
"input": 0.24,
"output": 0.24,
"latency": 0.34,
"throughput": 676.8
},
{
"name": "Fireworks",
"context": 33000,
"max_output": 33000,
"input": 0.5,
"output": 0.5,
"latency": 0.62,
"throughput": 155.2
},
{
"name": "Lepton",
"context": 33000,
"max_output": 33000,
"input": 0.5,
"output": 0.5,
"latency": 0.31,
"throughput": 73.77
},
{
"name": "Together",
"context": 33000,
"max_output": 2000,
"input": 0.6,
"output": 0.6,
"latency": 0.51,
"throughput": 92.37
}
] | 0.5 | 0.5 | 0.24 | 0.24 | 0.6 | 0.6 | true |
OpenChat 3.5 7B | openchat/openchat-3.5-0106 | openchat | [
{
"name": "DeepInfra",
"context": 8000,
"max_output": 8000,
"input": 0.055,
"output": 0.055,
"latency": 0.23,
"throughput": 98.01
},
{
"name": "Lepton",
"context": 8000,
"max_output": 8000,
"input": 0.07,
"output": 0.07,
"latency": 0.41,
"throughput": 106.5
},
{
"name": "NovitaAI",
"context": 4000,
"max_output": 4000,
"input": 0.06,
"output": 0.06,
"latency": 0.8,
"throughput": 58.78
}
] | 0.06 | 0.06 | 0.055 | 0.055 | 0.07 | 0.07 | true |
Noromaid 20B | NeverSleep/Noromaid-20b-v0.1.1 | NeverSleep | [
{
"name": "Mancer",
"context": 8000,
"max_output": 2000,
"input": 1.5,
"output": 2.25,
"latency": 0.55,
"throughput": 26.36
},
{
"name": "Mancer (private)",
"context": 8000,
"max_output": 2000,
"input": 2,
"output": 3,
"latency": 1.05,
"throughput": 25.55
}
] | 2 | 3 | 1.5 | 2.25 | 2 | 3 | true |
OpenHermes 2.5 Mistral 7B | teknium/OpenHermes-2.5-Mistral-7B | teknium | [
{
"name": "NovitaAI",
"context": 4000,
"max_output": 4000,
"input": 0.17,
"output": 0.17,
"latency": 0.66,
"throughput": 148.9
}
] | 0.17 | 0.17 | 0.17 | 0.17 | 0.17 | 0.17 | true |
Toppy M 7B | Undi95/Toppy-M-7B | Undi95 | [
{
"name": "Lepton",
"context": 4000,
"max_output": 4000,
"input": 0.07,
"output": 0.07,
"latency": 0.41,
"throughput": 107.1
}
] | 0.07 | 0.07 | 0.07 | 0.07 | 0.07 | 0.07 | true |
Goliath 120B | alpindale/goliath-120b | alpindale | [
{
"name": "Mancer",
"context": 6000,
"max_output": 512,
"input": 9.375,
"output": 9.375,
"latency": 1.44,
"throughput": 17.83
},
{
"name": "Mancer (private)",
"context": 6000,
"max_output": 512,
"input": 12.5,
"output": 12.5,
"latency": 1.29,
"throughput": 15.68
}
] | 12.5 | 12.5 | 9.375 | 9.375 | 12.5 | 12.5 | true |
Airoboros 70B | jondurbin/airoboros-l2-70b-2.2.1 | jondurbin | [
{
"name": "NovitaAI",
"context": 4000,
"max_output": 4000,
"input": 0.5,
"output": 0.5,
"latency": 2.14,
"throughput": 56.12
}
] | 0.5 | 0.5 | 0.5 | 0.5 | 0.5 | 0.5 | true |
Xwin 70B | Xwin-LM/Xwin-LM-70B-V0.1 | Xwin-LM | [
{
"name": "Mancer",
"context": 8000,
"max_output": 512,
"input": 3.75,
"output": 3.75,
"latency": 2.52,
"throughput": 18.03
},
{
"name": "Mancer (private)",
"context": 8000,
"max_output": 512,
"input": 5,
"output": 5,
"latency": 2.09,
"throughput": 18.19
}
] | 5 | 5 | 3.75 | 3.75 | 5 | 5 | true |
Mistral: Mistral 7B Instruct v0.1 | mistralai/Mistral-7B-Instruct-v0.1 | mistralai | [
{
"name": "Together",
"context": 33000,
"max_output": 2000,
"input": 0.2,
"output": 0.2,
"latency": 0.31,
"throughput": 178.2
}
] | 0.2 | 0.2 | 0.2 | 0.2 | 0.2 | 0.2 | true |
Pygmalion: Mythalion 13B | PygmalionAI/mythalion-13b | PygmalionAI | [
{
"name": "Featherless",
"context": 4000,
"max_output": 4000,
"input": 0.8,
"output": 1.2,
"latency": 1.86,
"throughput": 12.32
},
{
"name": "Mancer",
"context": 8000,
"max_output": 512,
"input": 1.125,
"output": 1.125,
"latency": 0.92,
"throughput": 22.86
},
{
"name": "Mancer (private)",
"context": 8000,
"max_output": 512,
"input": 1.5,
"output": 1.5,
"latency": 0.74,
"throughput": 22.35
}
] | 1.125 | 1.125 | 0.8 | 1.2 | 1.5 | 1.5 | true |
Nous: Hermes 13B | NousResearch/Nous-Hermes-Llama2-13b | NousResearch | [
{
"name": "Lepton",
"context": 4000,
"max_output": 4000,
"input": 0.18,
"output": 0.18,
"latency": 0.26,
"throughput": 81.6
},
{
"name": "NovitaAI",
"context": 4000,
"max_output": 4000,
"input": 0.17,
"output": 0.17,
"latency": 0.83,
"throughput": 82.68
}
] | 0.18 | 0.18 | 0.17 | 0.17 | 0.18 | 0.18 | true |
ReMM SLERP 13B | Undi95/ReMM-SLERP-L2-13B | Undi95 | [
{
"name": "Featherless",
"context": 4000,
"max_output": 4000,
"input": 0.8,
"output": 1.2,
"latency": null,
"throughput": null
},
{
"name": "Mancer",
"context": 6000,
"max_output": 512,
"input": 1.125,
"output": 1.125,
"latency": 0.6,
"throughput": 43.76
},
{
"name": "Mancer (private)",
"context": 6000,
"max_output": 512,
"input": 1.5,
"output": 1.5,
"latency": 0.57,
"throughput": 43.3
}
] | 1.125 | 1.125 | 0.8 | 1.2 | 1.5 | 1.5 | true |
MythoMax 13B | Gryphe/MythoMax-L2-13b | Gryphe | [
{
"name": "DeepInfra",
"context": 4000,
"max_output": 4000,
"input": 0.065,
"output": 0.065,
"latency": 0.2,
"throughput": 108.8
},
{
"name": "Parasail",
"context": 4000,
"max_output": 4000,
"input": 0.11,
"output": 0.11,
"latency": 0.56,
"throughput": 91.29
},
{
"name": "Lepton",
"context": 4000,
"max_output": 4000,
"input": 0.18,
"output": 0.18,
"latency": 0.21,
"throughput": 104.1
},
{
"name": "Fireworks",
"context": 4000,
"max_output": 4000,
"input": 0.2,
"output": 0.2,
"latency": 2.02,
"throughput": 7.59
},
{
"name": "Together",
"context": 4000,
"max_output": 4000,
"input": 0.3,
"output": 0.3,
"latency": 0.45,
"throughput": 123.7
},
{
"name": "Mancer",
"context": 8000,
"max_output": 512,
"input": 1.125,
"output": 1.125,
"latency": 0.59,
"throughput": 41.95
},
{
"name": "Mancer (private)",
"context": 8000,
"max_output": 512,
"input": 1.5,
"output": 1.5,
"latency": 0.92,
"throughput": 41.69
},
{
"name": "NovitaAI",
"context": 4000,
"max_output": 4000,
"input": 0.09,
"output": 0.09,
"latency": 1.9,
"throughput": 79.17
}
] | 0.2 | 0.2 | 0.065 | 0.065 | 1.5 | 1.5 | true |
Meta: Llama 2 13B Chat | meta-llama/Llama-2-13b-chat-hf | meta-llama | [
{
"name": "Together",
"context": 4000,
"max_output": 2000,
"input": 0.22,
"output": 0.22,
"latency": 0.76,
"throughput": 63.12
},
{
"name": "Lepton",
"context": 4000,
"max_output": 4000,
"input": 0.3,
"output": 0.3,
"latency": 0.44,
"throughput": 98.15
}
] | 0.3 | 0.3 | 0.22 | 0.22 | 0.3 | 0.3 | true |
Meta: Llama 2 70B Chat | meta-llama/Llama-2-70b-chat-hf | meta-llama | [
{
"name": "Together",
"context": 4000,
"max_output": 4000,
"input": 0.9,
"output": 0.9,
"latency": 0.73,
"throughput": 45.19
}
] | 0.9 | 0.9 | 0.9 | 0.9 | 0.9 | 0.9 | true |
Perplexity: Sonar Reasoning Pro | null | perplexity-ai | [
{
"name": "Perplexity",
"context": 128000,
"max_output": 128000,
"input": 2,
"output": 8,
"latency": 11.05,
"throughput": 91.82
}
] | 2 | 8 | 2 | 8 | 2 | 8 | false |
Perplexity: Sonar Pro | null | perplexity-ai | [
{
"name": "Perplexity",
"context": 200000,
"max_output": 8000,
"input": 3,
"output": 15,
"latency": 2.49,
"throughput": 51.06
}
] | 3 | 15 | 3 | 15 | 3 | 15 | false |
Perplexity: Sonar Deep Research | null | perplexity-ai | [
{
"name": "Perplexity",
"context": 200000,
"max_output": 200000,
"input": 2,
"output": 8,
"latency": 24.87,
"throughput": 51.64
}
] | 2 | 8 | 2 | 8 | 2 | 8 | false |
OpenAI: GPT-4.5 (Preview) | null | OpenAI | [
{
"name": "OpenAI",
"context": 128000,
"max_output": 16000,
"input": 75,
"output": 150,
"latency": 1.64,
"throughput": 14.34
}
] | 75 | 150 | 75 | 150 | 75 | 150 | false |
End of preview. Expand in Data Studio.
- Downloads last month
- 24