feat: Add optimized model
- app.py +17 -6
- weights/kokoro-quant.onnx +3 -0
app.py
CHANGED
@@ -5,10 +5,18 @@ import soundfile as sf
 from models import Tokenizer, Kokoro
 
 # Function to fetch available style vectors dynamically
+
+
 def get_style_vector_choices(directory="voices"):
     return [file for file in os.listdir(directory) if file.endswith(".pt")]
 
+
+def get_onnx_models(directory="weights"):
+    return [file for file in os.listdir(directory) if file.endswith(".onnx")]
+
 # Function to perform TTS using your local model
+
+
 def local_tts(
     text: str,
     model_path: str,
@@ -20,6 +28,8 @@ def local_tts(
     try:
         tokenizer = Tokenizer()
         style_vector_path = os.path.join("voices", style_vector)
+        model_path = os.path.join("weights", model_path)
+
         inference = Kokoro(model_path, style_vector_path, tokenizer=tokenizer, lang='en-us')
 
         audio, sample_rate = inference.generate_audio(text, speed=speed)
@@ -35,10 +45,12 @@ def local_tts(
     else:
         raise gr.Error("Input text cannot be empty.")
 
+
 # Get the list of available style vectors
 style_vector_choices = get_style_vector_choices()
+onnx_models_choices = get_onnx_models()
 
-# sample texts and their corresponding audio
+# sample texts and their corresponding audio
 sample_outputs = [
     ("Educational Note", "Machine learning models rely on large datasets and complex algorithms to identify patterns and make predictions.", "assets/edu_note.wav"),
     ("Fun Fact", "Did you know that honey never spoils? Archaeologists have found pots of honey in ancient Egyptian tombs that are over 3,000 years old and still edible!", "assets/fun_fact.wav"),
@@ -54,10 +66,10 @@ example_texts = [
 # Gradio Interface
 with gr.Blocks() as demo:
     gr.Markdown("# <center> Kokoro-82m Text-to-Speech with Gradio </center>")
-
+
     # Model-specific inputs
     with gr.Row(variant="panel"):
-        model_path = gr.
+        model_path = gr.Dropdown(choices=onnx_models_choices, label="ONNX Model Path", value=onnx_models_choices[0])
         style_vector = gr.Dropdown(choices=style_vector_choices, label="Style Vector", value=style_vector_choices[0])
         output_file_format = gr.Dropdown(choices=["wav", "mp3"], label="Output Format", value="wav")
         speed = gr.Slider(minimum=0.5, maximum=2.0, value=1.0, step=0.1, label="Speed")
@@ -76,20 +88,19 @@ with gr.Blocks() as demo:
         inputs=[text, model_path, style_vector, output_file_format, speed],
         outputs=output_audio
     )
-
+
     # Add example texts
     gr.Examples(
         examples=example_texts,
         inputs=[text],
         label="Click an example to populate the input text"
     )
-
+
     # Add example texts and audios
     gr.Markdown("### Sample Texts and Audio")
     for topic, sample_text, sample_audio in sample_outputs:
         with gr.Row():
             gr.Textbox(value=sample_text, label=topic, interactive=False)
             gr.Audio(value=sample_audio, label="Example Audio", type="filepath", interactive=False)
-
 
 demo.launch(server_name="0.0.0.0")
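In short, the commit adds a get_onnx_models() helper that lists .onnx files under weights/, exposes them through a new "ONNX Model Path" dropdown, and has local_tts join the selected filename back onto weights/ before constructing Kokoro. As a quick sanity check that a newly committed model actually loads, something like the sketch below would work; it is not part of the commit, and it assumes onnxruntime is available in the Space's environment (onnxruntime is not referenced anywhere in this diff):

import os

import onnxruntime as ort  # assumed dependency; not shown in this diff


def check_onnx_models(directory="weights"):
    """Try to load every .onnx file in `directory` and report its inputs."""
    for name in os.listdir(directory):
        if not name.endswith(".onnx"):
            continue
        path = os.path.join(directory, name)
        # InferenceSession fails loudly on a corrupt or truncated model file.
        session = ort.InferenceSession(path, providers=["CPUExecutionProvider"])
        inputs = [(i.name, i.shape) for i in session.get_inputs()]
        print(f"{path}: loads OK, inputs={inputs}")


if __name__ == "__main__":
    check_onnx_models()

Listing bare filenames and re-joining the directory inside local_tts keeps the dropdown values short while still resolving to real paths, and it means any future .onnx file dropped into weights/ appears in the UI without further code changes.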
weights/kokoro-quant.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d7fe30313cc305d3290aafc748ac02a28f93cabd76702bdb1c5ebea496d4cad
+size 177465355
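The weight file itself is stored with Git LFS, so the repository only carries this small pointer: the oid is the SHA-256 of the actual ~177 MB payload, which git lfs pull materializes on checkout. A minimal sketch for verifying a pulled copy against the pointer, using the oid and size committed above (the path assumes this commit's layout):

import hashlib

# Values copied from the LFS pointer committed above.
EXPECTED_OID = "1d7fe30313cc305d3290aafc748ac02a28f93cabd76702bdb1c5ebea496d4cad"
EXPECTED_SIZE = 177465355


def verify_lfs_object(path="weights/kokoro-quant.onnx"):
    """Return True if the file on disk matches the pointer's oid and size."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks to avoid reading the whole file into memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return size == EXPECTED_SIZE and digest.hexdigest() == EXPECTED_OID


if __name__ == "__main__":
    print("OK" if verify_lfs_object() else "MISMATCH: re-run `git lfs pull`")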