Upload 2 files
#1 by dhuynh95 - opened
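This upload rewrites app.py (388 lines down to 25) and moves all solution-specific logic into the new models.py. Each pricing option is now a models.BaseTCOModel subclass that renders its own Gradio components and computes its own cost per token, and models.ModelPage shows or hides each model's components from a dropdown selection, replacing the two hand-wired columns of duplicated widgets and change handlers.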
app.py CHANGED
@@ -1,388 +1,25 @@
 import gradio as gr
-import pandas as pd
-import matplotlib.pyplot as plt
-import io
-import base64
+import models
 
-
-
-
-text3 = "<h1 style='text-align: center; color: blue; font-size: 25px;'>Comparison"
-text4 = "<h1 style='text-align: center; color: blue; font-size: 25px;'>Results"
-
-diy_value = 0
-saas_value = 0
-
-def calculate_tco(maxed_out, used, tokens_per_second_inp, vm_cost_per_hour, vm_rental_choice, out_diy):
-    tokens_per_request = 64
-    maxed_out = maxed_out / 100
-    used = used / 100
-
-    if vm_rental_choice == "pay as you go":
-        reduction = 0
-
-    elif vm_rental_choice == "1 year reserved":
-        reduction = 0.34
-
-    elif vm_rental_choice == "3 years reserved":
-        reduction = 0.62
-
-    homemade_cost_per_token = vm_cost_per_hour * (1 - reduction) / (tokens_per_second_inp * 3600 * maxed_out * used)
-    homemade_cost_per_request = tokens_per_request * homemade_cost_per_token
-    out_diy = homemade_cost_per_token
-    return out_diy
-
-def calculate_tco_2(model_provider, context, out_saas):
-    tokens_per_request = 64
-
-    if model_provider == "OpenAI":
-        if context == "4K context":
-            saas_cost_per_token = 0.00035
-            saas_cost_per_request = saas_cost_per_token * tokens_per_request
-        elif context == "16K context":
-            saas_cost_per_token = 0.0007
-            saas_cost_per_request = saas_cost_per_token * tokens_per_request
-    out_saas = saas_cost_per_token
-    return out_saas
-
-def update_tco(maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy):
-    if maxed_out!=None and used!=None and tokens_per_second_inp!=None and vm_cost_per_hour_inp!=None and rental_plan_inp!=None:
-        return calculate_tco(maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy)
-    return None
-
-def update_tco2(model_provider_inp, context_inp, out_saas):
-    if model_provider_inp!=None and context_inp!=None:
-        return calculate_tco_2(model_provider_inp, context_inp, out_saas)
-    return None
-
-def extract_cost_from_text(text):
-    try:
-        cost = float(text)
-        return cost
-    except ValueError as e:
-        raise ValueError("Invalid cost text format")
-
-def compare(cost_text1, cost_text2):
-    try:
-        # Extract the costs from the input strings
-        cost1 = extract_cost_from_text(cost_text1)
-        cost2 = extract_cost_from_text(cost_text2)
-
-        r = cost1 / cost2
-
-        if r < 1:
-            comparison_result = f"First solution is cheaper, with a ratio of {r:.2f}."
-        elif r > 1:
-            comparison_result = f"Second solution is cheaper, with a ratio of {r:.2f}."
-        else:
-            comparison_result = "Both solutions will cost the same."
-
-        return comparison_result
-
-    except ValueError as e:
-        return f"Error: {str(e)}"
-
-def update_plot(diy_value, saas_value):
-    # if maxed_out and used and tokens_per_second_inp and vm_cost_per_hour:
-    #     diy_value = calculate_tco(maxed_out.value, used.value, tokens_per_second_inp, vm_cost_per_hour, vm_rental_choice, out_diy)
-    # else :
-    #     diy_value = 0
-    # if model_provider_inp2 and context_inp2:
-    #     saas_value = calculate_tco_2(model_provider_inp2, context_inp2, out_saas2)
-    # else:
-    #     saas_value = 0
-    data = pd.DataFrame(
-        {
-            "Solution": ["Open-source", "SaaS"],
-            "Cost/token ($)": [diy_value, saas_value],
-        }
-    )
-    return gr.BarPlot.update(data, x="Solution", y="Cost/token ($)")
-
-def update_plot2(diy_value, saas_value):
-    # if maxed_out2!=None and used2!=None and tokens_per_second_inp2!=None and vm_cost_per_hour2!=None and vm_rental_choice!=None:
-    #     diy_value = calculate_tco(maxed_out2.value, used2.value, tokens_per_second_inp2, vm_cost_per_hour2, vm_rental_choice, out_diy2)
-    # else:
-    #     diy_value = 0
-    # if model_provider_inp2 and context_inp2:
-    #     saas_value = calculate_tco_2(model_provider_inp, context_inp, out_saas)
-    # else:
-    #     saas_value = 0
-    data = pd.DataFrame(
-        {
-            "Solution": ["Open-source", "SaaS"],
-            "Cost/token ($)": [diy_value, saas_value],
-        }
-    )
-    return gr.BarPlot.update(data, x="Solution", y="Cost/token ($)")
-
-def render_latex(latex_str):
-    fig, ax = plt.subplots(figsize=(1, 1))
-    ax.text(0.5, 0.5, f"${latex_str}$", size=12, usetex=True, va="center", ha="center")
-    ax.axis("off")
-
-    buf = io.BytesIO()
-    plt.savefig(buf, format="png")
-    plt.close(fig)
-
-    base64_str = base64.b64encode(buf.getvalue()).decode("utf-8")
-    return f"<img src='data:image/png;base64,{base64_str}'>"
-
-def update_vm_choice(model_inp):
-    if model_inp == "Llama-2-7B" or "Llama-2-13B" or "Llama-2-70B":
-        new_options = ["A100 40GB"]
-        return gr.Dropdown.update(choices=new_options)
-
-def token_per_s_and_cost(vm_inp):
-    if vm_inp == "A100 40GB":
-        return [694.38, 3.6730, 694.38, 3.6730]
-
-def submit_diy(rental_plan):
-    calculate_tco(maxed_out.value, used.value, tokens_per_second_inp.value, vm_cost_per_hour_inp.value, rental_plan, out_diy)
-
-def submit_saas(context_inp):
-    calculate_tco_2(model_provider_inp, context_inp, out_saas)
-
-description=f"""
-<p>In this demo application, we help you compare different solutions for your AI incorporation plans, such as open-source or SaaS.</p>
-<p>First, you'll have to choose the two solutions you'd like to compare. Then, follow the instructions to select your configurations for each solution and we will compute the cost/request accordingly to them. Eventually, you can compare both solutions to evaluate which one best suits your needs, in the short or long term.</p>
-"""
-description1="This interface provides you with the cost per token you get using the open-source solution, based on the model you choose to use and how long you're planning to use it. The selected prices for a Virtual Machine rental come from Azure's VM rental plans, which can offer reductions for long-term reserved usage."
-description2="This interface provides you with the cost per token resulting from the AI model provider you choose and the number of tokens you select for context, which the model will take into account when processing input texts."
-description3="This interface compares the cost per request for the two solutions you selected and gives you an insight of whether a solution is more valuable in the long term."
-
-test_list = []
-models = ["Llama-2-7B", "Llama-2-13B", "Llama-2-70B"]
-vm_rental_choice = ["pay as you go", "1 year reserved", "3 years reserved"]
-vm_choice = ["A100 40GB"]
-model_provider = ["OpenAI"]
-context = ["4K context", "16K context"]
-error_box = gr.Textbox(label="Error", visible=False)
-
-with gr.Blocks(theme=gr.themes.Soft()) as demo:
-    gr.Markdown(value=text)
-    gr.Markdown(value=description)
-
-    out_diy = gr.State(value=0)
-    out_saas = gr.State(value=0)
-    out_diy2 = gr.State(value=0)
-    out_saas2 = gr.State(value=0)
-    tokens_per_second_inp = gr.State()
-    vm_cost_per_hour_inp = gr.State()
-    tokens_per_second_inp2 = gr.State()
-    vm_cost_per_hour_inp2 = gr.State()
-
+with gr.Blocks() as demo:
+    Models: list[models.BaseTCOModel] = [models.OpenAIModel, models.OpenSourceLlama2Model]
+    model_names = [Model().get_name() for Model in Models]
     with gr.Row():
         with gr.Column():
-
-
-
-            with gr.Row(visible=False) as title_column:
-                gr.Markdown(value=text1)
-
-            with gr.Row(visible=False) as text_diy_column:
-                gr.Markdown(description1)
-
-            with gr.Accordion("Open to see the formula", visible=False, open=False) as formula_diy:
-                gr.Markdown(
-                    r"$ opensource\_cost\_per\_request = \frac{tokens\_per\_request \times VM\_cost\_per\_hour \times (1 - reduction)}{tokens\_per\_second \times 3600 \times maxed\_out \times used}$"
-                )
-
-            with gr.Row(visible=False) as input_diy_column:
-                with gr.Column():
-
-                    with gr.Row():
-                        model_inp = gr.Dropdown(models, label="Select an AI Model", info="Open-source AI model used for your application")
-
-                    with gr.Row() as vm:
-                        with gr.Column():
-                            with gr.Row():
-                                vm_inp = gr.Dropdown(vm_choice, label="Select a Virtual Machine", info="Your options for this choice depend on the model you previously chose")
-                            with gr.Row(visible=False) as vm_info:
-                                token_per_seconds = gr.Textbox(interactive=False, label="Token/s", info="To compute this value based on your model and VM choice, we chose an input length of 233 tokens.")
-                                vm_cost_per_hour = gr.Textbox(interactive=False, label="Cost/h ($) for the VM")
-
-                    with gr.Row() as use_case:
-                        maxed_out = gr.Slider(minimum=0.01, value=80, label="% maxed out", info="percentage of how much your machine is maxed out")
-                        used = gr.Slider(minimum=0.01, value=50, label="% used", info="percentage of time your machine is used")
-                        rental_plan_inp = gr.Dropdown(vm_rental_choice, label="Select a VM Rental Plan", info="These options are from Azure's VM rental plans. By default, the cost taken into account are from the pay as you go plan.")
-
-                    model_inp.change(fn=update_vm_choice, inputs=model_inp, outputs=vm_inp)
-                    vm_inp.change(fn=token_per_s_and_cost, inputs=vm_inp, outputs=[tokens_per_second_inp, vm_cost_per_hour_inp, token_per_seconds, vm_cost_per_hour])
-
-                    maxed_out.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
-                    used.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
-                    model_inp.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
-                    vm_inp.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
-                    rental_plan_inp.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
-
-            with gr.Row(visible=False) as text_saas_column:
-                gr.Markdown(description2)
-
-            with gr.Accordion("Open to see the formula", visible=False, open=False) as formula_saas:
-                gr.Markdown(
-                    r"$ saas\_cost\_per\_request = saas\_cost\_per\_token \times tokens\_per\_request$"
-                )
-
-            with gr.Row(visible=False) as input_saas_column:
-                model_provider_inp = gr.Dropdown(model_provider, label="Model Provider", value="OpenAI", info="Choose an AI model provider you want to work with")
-                context_inp = gr.Dropdown(context, label="Context", info="Number of tokens the model considers when processing text")
-
-                model_provider_inp.change(fn=update_tco2, inputs=[model_provider_inp, context_inp, out_saas], outputs=out_saas)
-                context_inp.change(fn=update_tco2, inputs=[model_provider_inp, context_inp, out_saas], outputs=out_saas)
-
-            def show_vm_info():
-                return {
-                    vm_info: gr.update(visible=True),
-                }
+            page1 = models.ModelPage(Models)
+            dropdown = gr.Dropdown(model_names, interactive=True)
+            page1.render()
 
-            vm_inp.change(show_vm_info, outputs=vm_info)
-
-            def submit(solution_selection):
-                if solution_selection == "Open-source":
-                    return {
-                        formula_diy: gr.update(visible=True),
-                        title_column: gr.update(visible=True),
-                        text_diy_column: gr.update(visible=True),
-                        input_diy_column: gr.update(visible=True),
-                        formula_saas: gr.update(visible=False),
-                        text_saas_column: gr.update(visible=False),
-                        input_saas_column: gr.update(visible=False),
-                    }
-                else:
-                    return {
-                        formula_saas: gr.update(visible=True),
-                        formula_diy: gr.update(visible=False),
-                        text_diy_column: gr.update(visible=False),
-                        input_diy_column: gr.update(visible=False),
-                        title_column: gr.update(visible=True),
-                        text_saas_column: gr.update(visible=True),
-                        input_saas_column: gr.update(visible=True),
-                    }
-
-            solution_selection.change(
-                submit,
-                solution_selection,
-                [model_inp, vm, vm_info, vm_inp, maxed_out, used, out_saas, text_diy_column, formula_diy, formula_saas, title_column, text_saas_column, model_inp, rental_plan_inp, model_provider_inp, context_inp, input_diy_column, input_saas_column],
-            )
-
-            # gr.Divider(style="vertical", thickness=2, color="blue")
-
         with gr.Column():
-
-
-            with gr.Row(visible=False) as title_column2:
-                gr.Markdown(value=text2)
-
-            with gr.Row(visible=False) as text_diy_column2:
-                gr.Markdown(description1)
-
-            with gr.Accordion("Open to see the formula", visible=False, open=False) as formula_diy2:
-                gr.Markdown(
-                    r"$ homemade\_cost\_per\_request = \frac{tokens\_per\_request \times VM\_cost\_per\_hour \times (1 - reduction)}{tokens\_per\_second \times 3600 \times maxed\_out \times used}$"
-                )
-
-            with gr.Row(visible=False) as input_diy_column2:
-                with gr.Column():
-                    with gr.Row():
-                        model_inp2 = gr.Dropdown(models, label="Select an AI Model", info="Open-source AI model used for your application")
-
-                    with gr.Row() as vm2:
-                        with gr.Column():
-                            with gr.Row():
-                                vm_inp2 = gr.Dropdown(vm_choice, label="Select a Virtual Machine", info="Your options for this choice depend on the model you previously chose")
-                            with gr.Row(visible=False) as vm_info2:
-                                tokens_per_second2 = gr.Textbox(interactive=False, label="Token/s", info="To compute this value based on your model and VM choice, we chose an input length of 233 tokens.")
-                                vm_cost_per_hour2 = gr.Textbox(interactive=False, label="Cost/h ($) for the VM")
-
-                    with gr.Row() as use_case2:
-                        maxed_out2 = gr.Slider(minimum=0.01, value=80, label="% maxed out", info="percentage of how much your machine is maxed out")
-                        used2 = gr.Slider(minimum=0.01, value=50, label="% used", info="percentage of time your machine is used")
-                        rental_plan_inp2 = gr.Dropdown(vm_rental_choice, label="Select a VM Rental Plan", info="These options are from Azure's VM rental plans")
-
-                    model_inp2.change(fn=update_vm_choice, inputs=model_inp2, outputs=vm_inp2)
-                    vm_inp2.change(fn=token_per_s_and_cost, inputs=vm_inp2, outputs=[tokens_per_second_inp2, vm_cost_per_hour_inp2, tokens_per_second2, vm_cost_per_hour2])
-
-                    maxed_out2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
-                    used2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
-                    model_inp2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
-                    vm_inp2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
-                    rental_plan_inp2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
-
-            with gr.Row(visible=False) as text_saas_column2:
-                gr.Markdown(description2)
-
-            with gr.Accordion("Open to see the formula", visible=False, open=False) as formula_saas2:
-                gr.Markdown(
-                    r"$ saas\_cost\_per\_request = saas\_cost\_per\_token \times tokens\_per\_request$"
-                )
-
-            with gr.Row(visible=False) as input_saas_column2:
-                model_provider_inp2 = gr.Dropdown(['OpenAI'], label="Model Provider", value="OpenAI", info="Choose an AI model provider you want to work with")
-                context_inp2 = gr.Dropdown(['4K context', '16K context'], label="Context", info="Number of tokens the model considers when processing text")
-
-                model_provider_inp2.change(fn=update_tco2, inputs=[model_provider_inp2, context_inp2, out_saas2], outputs=out_saas2)
-                context_inp2.change(fn=update_tco2, inputs=[model_provider_inp2, context_inp2, out_saas2], outputs=out_saas2)
-
-            def show_vm_info():
-                return {
-                    vm_info2: gr.update(visible=True),
-                }
+            page2 = models.ModelPage(Models)
+            dropdown2 = gr.Dropdown(model_names, interactive=True)
+            page2.render()
 
-
-            def submit2(solution_selection2):
-                if solution_selection2 == "Open-source":
-                    return {
-                        formula_diy2: gr.update(visible=True),
-                        title_column2: gr.update(visible=True),
-                        text_diy_column2: gr.update(visible=True),
-                        input_diy_column2: gr.update(visible=True),
-                        formula_saas2: gr.update(visible=False),
-                        text_saas_column2: gr.update(visible=False),
-                        input_saas_column2: gr.update(visible=False),
-                    }
-                else:
-                    return {
-                        formula_diy2: gr.update(visible=False),
-                        text_diy_column2: gr.update(visible=False),
-                        input_diy_column2: gr.update(visible=False),
-                        title_column2: gr.update(visible=True),
-                        formula_saas2: gr.update(visible=True),
-                        text_saas_column2: gr.update(visible=True),
-                        input_saas_column2: gr.update(visible=True),
-                    }
-
-            solution_selection2.change(
-                submit2,
-                solution_selection2,
-                [vm2, vm_info2, vm_inp2, maxed_out2, used2, out_diy2, out_saas2, formula_diy2, formula_saas2, title_column2, text_diy_column2, text_saas_column2, model_inp2, rental_plan_inp2, model_provider_inp2, context_inp2, input_diy_column2, input_saas_column2],
-            )
-
-    with gr.Row():
-        with gr.Column():
-
-            with gr.Row():
-                gr.Markdown(text3)
-
-            with gr.Row():
-                plot = gr.BarPlot(vertical=False, title="Comparison", y_title="Cost/token ($)", width=500, interactive=True)
-
-            context_inp.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
-            maxed_out2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
-            used2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
-            vm_inp2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
-            model_provider_inp.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
-            rental_plan_inp2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
-            model_inp2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
-
-            context_inp2.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
-            vm_inp.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
-            maxed_out.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
-            used.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
-            model_provider_inp2.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
-            rental_plan_inp.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
-            model_inp.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
-
-demo.launch()
+    dropdown.change(page1.make_model_visible, inputs=dropdown, outputs=page1.get_all_components())
+    dropdown2.change(page2.make_model_visible, inputs=dropdown2, outputs=page2.get_all_components())
+
+    compute_tco_btn = gr.Button("Compute TCO")
+    tco_output = gr.Text("Output: ")
+    compute_tco_btn.click(page1.compute_cost_per_token, inputs=page1.get_all_components_for_cost_computing() + [dropdown], outputs=tco_output)
+
+demo.launch(debug=True)
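A note on the wiring above: compute_tco_btn.click sends every model's cost inputs, concatenated, plus the dropdown value as the last argument, to page1.compute_cost_per_token (defined in models.py below), which slices the flat argument list back out per model. A minimal sketch of that pattern, using hypothetical stand-in classes rather than the real Gradio components:

# Hypothetical stand-ins: each "model" declares how many inputs it consumes
# and a function that turns those inputs into a cost per token.
class FakeModel:
    def __init__(self, name, n_args, fn):
        self.name, self.n_args, self.fn = name, n_args, fn

registered = [
    FakeModel("(SaaS) OpenAI", 3, lambda model, ctx, n_in: 0.03 * n_in / 1000),
    FakeModel("(Open source) Llama 2", 3, lambda cost_h, tps, maxed: cost_h / (tps * 3600 * maxed)),
]

def compute_cost_per_token(*args):
    # The selected model's name rides along as the last argument;
    # everything before it is the concatenation of all models' inputs.
    begin, current = 0, args[-1]
    for m in registered:
        if m.name == current:
            return m.fn(*args[begin:begin + m.n_args])
        begin += m.n_args

# Only the first three values are consumed, because "(SaaS) OpenAI" owns them.
print(compute_cost_per_token("GPT-4", "8K", 350, 3.5, 900, 1.0, "(SaaS) OpenAI"))  # 0.0105

This is also why get_all_components_for_cost_computing must return components in a stable per-model order.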
models.py ADDED
@@ -0,0 +1,177 @@
+from gradio.components import Component
+import gradio as gr
+from abc import ABC, abstractclassmethod
+import inspect
+
+class BaseTCOModel(ABC):
+    # TO DO: Find way to specify which component should be used for computing cost
+    def __setattr__(self, name, value):
+        if isinstance(value, Component):
+            self._components.append(value)
+        self.__dict__[name] = value
+
+    def __init__(self):
+        super(BaseTCOModel, self).__setattr__("_components", [])
+
+    def get_components(self) -> list[Component]:
+        return self._components
+
+    def get_components_for_cost_computing(self):
+        return self.components_for_cost_computing
+
+    def get_name(self):
+        return self.name
+
+    def register_components_for_cost_computing(self):
+        args = inspect.getfullargspec(self.compute_cost_per_token)[0][1:]
+        self.components_for_cost_computing = [self.__getattribute__(arg) for arg in args]
+
+    @abstractclassmethod
+    def compute_cost_per_token(self):
+        pass
+
+    @abstractclassmethod
+    def render(self):
+        pass
+
+    def set_name(self, name):
+        self.name = name
+
+class OpenAIModel(BaseTCOModel):
+
+    def __init__(self):
+        self.set_name("(SaaS) OpenAI")
+        super().__init__()
+
+    def render(self):
+        def on_model_change(model):
+
+            if model == "GPT-4":
+                print("GPT4")
+                return gr.Dropdown.update(choices=["8K", "32K"])
+            else:
+                print("GPT3.5")
+                return gr.Dropdown.update(choices=["4K", "16K"])
+
+        self.model = gr.Dropdown(["GPT-4", "GPT-3.5 Turbo"], value="GPT-4",
+                                 label="OpenAI model",
+                                 interactive=True, visible=False)
+        self.context_length = gr.Dropdown(["8K", "32K"], value="8K", interactive=True,
+                                          label="Context size",
+                                          visible=False)
+        self.model.change(on_model_change, inputs=self.model, outputs=self.context_length)
+        self.input_length = gr.Number(350, label="Average number of input tokens",
+                                      interactive=True, visible=False)
+
+    def compute_cost_per_token(self, model, context_length, input_length):
+        """Cost per token = """
+        model = model[0]
+        context_length = context_length[0]
+
+        if model == "GPT-4" and context_length == "8K":
+            cost_per_1k_input_tokens = 0.03
+        elif model == "GPT-4" and context_length == "32K":
+            cost_per_1k_input_tokens = 0.06
+        elif model == "GPT-3.5" and context_length == "4K":
+            cost_per_1k_input_tokens = 0.0015
+        else:
+            cost_per_1k_input_tokens = 0.003
+
+        cost_per_output_token = cost_per_1k_input_tokens * input_length / 1000
+
+        return cost_per_output_token
+
+class OpenSourceLlama2Model(BaseTCOModel):
+    def __init__(self):
+        self.set_name("(Open source) Llama 2")
+        super().__init__()
+
+    def render(self):
+        vm_choices = ["1x Nvidia A100 (Azure NC24ads A100 v4)",
+                      "2x Nvidia A100 (Azure NC48ads A100 v4)"]
+
+        def on_model_change(model):
+            if model == "Llama 2 7B":
+                return gr.Dropdown.update(choices=vm_choices)
+            else:
+                not_supported_vm = ["1x Nvidia A100 (Azure NC24ads A100 v4)"]
+                choices = [x for x in vm_choices if x not in not_supported_vm]
+                return gr.Dropdown.update(choices=choices)
+
+        def on_vm_change(model, vm):
+            # TO DO: load info from CSV
+            if model == "Llama 2 7B" and vm == "1x Nvidia A100 (Azure NC24ads A100 v4)":
+                return gr.Number.update(value=900)
+            elif model == "Llama 2 7B" and vm == "2x Nvidia A100 (Azure NC48ads A100 v4)":
+                return gr.Number.update(value=1800)
+
+        self.model = gr.Dropdown(["Llama 2 7B", "Llama 2 70B"], value="Llama 2 7B", visible=False)
+        self.vm = gr.Dropdown(vm_choices,
+                              visible=False,
+                              label="Instance of VM with GPU"
+                              )
+        self.vm_cost_per_hour = gr.Number(3.5, label="VM instance cost per hour",
+                                          interactive=True, visible=False)
+        self.tokens_per_second = gr.Number(900, visible=False,
+                                           label="Number of tokens per second for this specific model and VM instance",
+                                           interactive=False
+                                           )
+        self.input_length = gr.Number(350, label="Average number of input tokens",
+                                      interactive=True, visible=False)
+
+        self.model.change(on_model_change, inputs=self.model, outputs=self.vm)
+        self.vm.change(on_vm_change, inputs=[self.model, self.vm], outputs=self.tokens_per_second)
+        self.maxed_out = gr.Slider(minimum=0.01, value=1., step=0.01, label="% maxed out",
+                                   info="How much the GPU is fully used.",
+                                   interactive=True,
+                                   visible=False)
+
+    def compute_cost_per_token(self, vm_cost_per_hour, tokens_per_second, maxed_out):
+        cost_per_token = vm_cost_per_hour / (tokens_per_second * 3600 * maxed_out)
+        return cost_per_token
+
+class ModelPage:
+    def __init__(self, Models: BaseTCOModel):
+        self.models: list[BaseTCOModel] = []
+        for Model in Models:
+            model = Model()
+            self.models.append(model)
+
+    def render(self):
+        for model in self.models:
+            model.render()
+            model.register_components_for_cost_computing()
+
+    def get_all_components(self) -> list[Component]:
+        output = []
+        for model in self.models:
+            output += model.get_components()
+        return output
+
+    def get_all_components_for_cost_computing(self) -> list[Component]:
+        output = []
+        for model in self.models:
+            output += model.get_components_for_cost_computing()
+        return output
+
+    def make_model_visible(self, name: str):
+        # First decide which indexes
+        output = []
+        for model in self.models:
+            if model.get_name() == name:
+                output += [gr.update(visible=True)] * len(model.get_components())
+            else:
+                output += [gr.update(visible=False)] * len(model.get_components())
+        return output
+
+    def compute_cost_per_token(self, *args):
+        begin = 0
+        current_model = args[-1]
+        for model in self.models:
+            model_n_args = len(model.get_components_for_cost_computing())
+            if current_model == model.get_name():
+                model_args = args[begin:begin + model_n_args]
+                print("Model args: ", model_args)
+                model_tco = model.compute_cost_per_token(*model_args)
+                return f"Model {current_model} has TCO {model_tco}"
+            begin = begin + model_n_args
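One contract worth calling out in BaseTCOModel: register_components_for_cost_computing inspects the parameter names of compute_cost_per_token and looks up component attributes with the same names, so a new model's cost function must name its parameters after the widgets it reads. A hypothetical subclass (not part of this PR) sketching what adding a third option would look like:

import gradio as gr
from models import BaseTCOModel

class FlatRateModel(BaseTCOModel):
    # Hypothetical example: a provider charging one flat price per 1K tokens.
    def __init__(self):
        self.set_name("(SaaS) Flat rate")
        super().__init__()  # sets up the _components list that __setattr__ fills

    def render(self):
        # Widgets start hidden; ModelPage.make_model_visible toggles them.
        self.price_per_1k_tokens = gr.Number(0.002, label="Price per 1K tokens",
                                             interactive=True, visible=False)

    def compute_cost_per_token(self, price_per_1k_tokens):
        # The parameter name matches the attribute above, which is how
        # register_components_for_cost_computing finds the right widget.
        return price_per_1k_tokens / 1000

Appending FlatRateModel to the Models list in app.py would be enough to expose it in both dropdowns.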