show ffmpeg command in case of error #6
by Aivo - opened

Files changed:
- app.py (+28 -67)
- requirements.txt (+1 -1)
app.py CHANGED
```diff
@@ -12,28 +12,13 @@ import tempfile
 import shlex
 import shutil
 
-
-MODELS = {
-    "deepseek-chat": {
-        "base_url": "https://api.deepseek.com/v1",
-        "env_key": "DEEPSEEK_API_KEY",
-    },
-    "Qwen/Qwen2.5-Coder-32B-Instruct": {
-        "base_url": "https://api-inference.huggingface.co/v1/",
-        "env_key": "HF_TOKEN",
-    },
-}
-
-# Initialize client with first available model
-client = OpenAI(
-    base_url=next(iter(MODELS.values()))["base_url"],
-    api_key=os.environ[next(iter(MODELS.values()))["env_key"]],
-)
+HF_API_KEY = os.environ["HF_TOKEN"]
+
+client = OpenAI(base_url="https://api-inference.huggingface.co/v1/", api_key=HF_API_KEY)
 
 allowed_medias = [
     ".png",
     ".jpg",
-    ".webp",
    ".jpeg",
     ".tiff",
     ".bmp",
```
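With the `MODELS` registry gone, the client is built once against the Hugging Face inference endpoint instead of being re-pointed per request. A minimal sketch of the resulting setup, assuming the standard `openai` SDK and an `HF_TOKEN` secret in the Space (the smoke-test prompt is purely illustrative, not part of app.py):

```python
import os

from openai import OpenAI

# One fixed endpoint and env var, as in the hunk above.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=os.environ["HF_TOKEN"],  # fails fast at startup if the secret is missing
)

# Illustrative smoke test only.
resp = client.chat.completions.create(
    model="Qwen/Qwen2.5-Coder-32B-Instruct",
    messages=[{"role": "user", "content": "Reply with OK."}],
)
print(resp.choices[0].message.content)
```

A side effect worth noting: the removed code mutated `client.base_url` and `client.api_key` inside `get_completion`, which could race when concurrent users picked different models; a single fixed client avoids that.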
```diff
@@ -99,7 +84,7 @@ def get_files_infos(files):
     return results
 
 
-def get_completion(prompt, files_info, top_p, temperature, model_choice):
+def get_completion(prompt, files_info, top_p, temperature):
     # Create table header
     files_info_string = "| Type | Name | Dimensions | Duration | Audio Channels |\n"
     files_info_string += "|------|------|------------|-----------|--------|\n"
```
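For context, `get_completion` flattens the uploaded assets into a markdown table that goes into the prompt. A rough sketch of what one row looks like; the appended sample row is invented here:

```python
# Header taken from the diff; the appended row is a made-up example of how
# one uploaded video might be described to the model.
files_info_string = "| Type | Name | Dimensions | Duration | Audio Channels |\n"
files_info_string += "|------|------|------------|-----------|--------|\n"
files_info_string += "| video | input.mp4 | 1280x720 | 12.5s | 2 |\n"
```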
```diff
@@ -167,16 +152,8 @@ YOUR FFMPEG COMMAND:
     print(msg["content"])
     print("=====================\n")
 
-    if model_choice not in MODELS:
-        raise ValueError(f"Model {model_choice} is not supported")
-
-    model_config = MODELS[model_choice]
-    client.base_url = model_config["base_url"]
-    client.api_key = os.environ[model_config["env_key"]]
-    model = "deepseek-chat" if "deepseek" in model_choice.lower() else model_choice
-
     completion = client.chat.completions.create(
-        model=model,
+        model="Qwen/Qwen2.5-Coder-32B-Instruct",
         messages=messages,
         temperature=temperature,
         top_p=top_p,
```
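After this hunk there is a single call site with the model pinned. A sketch with the surrounding plumbing elided (`messages` is built earlier in app.py from the system prompt and the files table; the literal sampling values below are placeholders for the slider settings):

```python
messages = [{"role": "user", "content": "..."}]  # placeholder; built earlier in app.py

completion = client.chat.completions.create(
    model="Qwen/Qwen2.5-Coder-32B-Instruct",
    messages=messages,
    temperature=0.1,  # placeholder; the app passes the UI slider value
    top_p=0.7,        # placeholder; the app passes the UI slider value
)
command_string = completion.choices[0].message.content
```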
```diff
@@ -204,13 +181,7 @@ YOUR FFMPEG COMMAND:
         raise Exception("API Error")
 
 
-def update(
-    files,
-    prompt,
-    top_p=1,
-    temperature=1,
-    model_choice="Qwen/Qwen2.5-Coder-32B-Instruct",
-):
+def update(files, prompt, top_p=1, temperature=1):
     if prompt == "":
         raise gr.Error("Please enter a prompt.")
 
```
```diff
@@ -222,16 +193,14 @@ def update(
             raise gr.Error(
                 "Please make sure all videos are less than 2 minute long."
             )
-            if file_info["size"] > 10000000:
-                raise gr.Error("Please make sure all files are less than 10MB in size.")
+        if file_info["size"] > 10000000:
+            raise gr.Error("Please make sure all files are less than 10MB in size.")
 
     attempts = 0
     while attempts < 2:
         print("ATTEMPT", attempts)
         try:
-            command_string = get_completion(
-                prompt, files_info, top_p, temperature, model_choice
-            )
+            command_string = get_completion(prompt, files_info, top_p, temperature)
             print(
                 f"""///PROMTP {prompt} \n\n/// START OF COMMAND ///:\n\n{command_string}\n\n/// END OF COMMAND ///\n\n"""
             )
```
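If the old indentation really nested the size check under the duration branch (as the diff suggests), it sat after a `raise` and could never run; at the new level it applies to every file. The threshold is decimal, so 10000000 bytes is 10 MB, not 10 MiB. A sketch of the validation loop as it reads after this hunk, with the parts not shown in the hunk marked as assumptions:

```python
for file_info in files_info:  # assumed loop header; not shown in the hunk
    if file_info["duration"] > 120:  # assumed condition behind the 2-minute message
        raise gr.Error("Please make sure all videos are less than 2 minute long.")
    if file_info["size"] > 10000000:
        raise gr.Error("Please make sure all files are less than 10MB in size.")
```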
```diff
@@ -283,7 +252,7 @@ with gr.Blocks() as demo:
     gr.Markdown(
         """
         # 🏞 AI Video Composer
-        Compose new videos from your assets using natural language. Add video, image and audio assets and let [Qwen2.5-Coder](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) …
+        Compose new videos from your assets using natural language. Add video, image and audio assets and let [Qwen2.5-Coder](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) generate a new video for you (using FFMPEG).
         """,
         elem_id="header",
     )
```
```diff
@@ -295,16 +264,11 @@ with gr.Blocks() as demo:
             file_types=allowed_medias,
         )
         user_prompt = gr.Textbox(
-            placeholder="…",
+            placeholder="I want to convert to a gif under 15mb",
             label="Instructions",
         )
         btn = gr.Button("Run")
         with gr.Accordion("Parameters", open=False):
-            model_choice = gr.Radio(
-                choices=list(MODELS.keys()),
-                value=list(MODELS.keys())[0],
-                label="Model",
-            )
             top_p = gr.Slider(
                 minimum=-0,
                 maximum=1.0,
```
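Unrelated to this PR but visible in the context lines: `minimum=-0` on the `top_p` slider is just `0` (Python evaluates the literal `-0` to `0`), so a follow-up could drop the stray sign without changing behavior.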
```diff
@@ -329,7 +293,7 @@ with gr.Blocks() as demo:
 
     btn.click(
         fn=update,
-        inputs=[user_files, user_prompt, top_p, temperature, model_choice],
+        inputs=[user_files, user_prompt, top_p, temperature],
         outputs=[generated_video, generated_command],
     )
     with gr.Row():
```
```diff
@@ -340,40 +304,37 @@ with gr.Blocks() as demo:
                 "Use the image as the background with a waveform visualization for the audio positioned in center of the video.",
                 0.7,
                 0.1,
-                (
-                    list(MODELS.keys())[1]
-                    if len(MODELS) > 1
-                    else list(MODELS.keys())[0]
-                ),
-            ],
-            [
-                ["./examples/ai_talk.wav", "./examples/bg-image.png"],
-                "Use the image as the background with a waveform visualization for the audio positioned in center of the video. Make sure the waveform has a max height of 250 pixels.",
-                0.7,
-                0.1,
-                list(MODELS.keys())[0],
             ],
             [
                 [
+                    "./examples/cat8.jpeg",
                     "./examples/cat1.jpeg",
                     "./examples/cat2.jpeg",
                     "./examples/cat3.jpeg",
                     "./examples/cat4.jpeg",
                     "./examples/cat5.jpeg",
                     "./examples/cat6.jpeg",
+                    "./examples/cat7.jpeg",
                     "./examples/heat-wave.mp3",
                 ],
-                "…",
+                "Generate an MP4 slideshow where each photo appears for 2 seconds, using the provided audio as soundtrack.",
+                0.7,
+                0.1,
+            ],
+            [
+                ["./examples/waterfall-overlay.png", "./examples/waterfall.mp4"],
+                "Add the overlay to the video.",
+                0.7,
+                0.1,
+            ],
+            [
+                ["./examples/example.mp4"],
+                "Make this video 10 times faster",
                 0.7,
                 0.1,
-                (
-                    list(MODELS.keys())[1]
-                    if len(MODELS) > 1
-                    else list(MODELS.keys())[0]
-                ),
             ],
         ],
-        inputs=[user_files, user_prompt, top_p, temperature, model_choice],
+        inputs=[user_files, user_prompt, top_p, temperature],
         outputs=[generated_video, generated_command],
         fn=update,
         run_on_click=True,
```
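With the model column removed, every `gr.Examples` row must carry exactly the four values that `inputs` lists: files, prompt, top_p, temperature. A minimal hypothetical row for reference:

```python
# files, prompt, top_p, temperature — order and arity must match `inputs` above.
example_row = [["./examples/example.mp4"], "Make this video 10 times faster", 0.7, 0.1]
```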
requirements.txt CHANGED

```diff
@@ -1,3 +1,3 @@
 openai>=1.55.0
-gradio==5.
+gradio==5.6.0
 moviepy==1
```
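A quick way to confirm the pins resolved inside the Space (both packages expose `__version__`):

```python
import gradio
import openai

print(gradio.__version__)  # expect 5.6.0
print(openai.__version__)  # expect >= 1.55.0
```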