hivecorp committed
Commit 2b5d6f0 · verified · 1 Parent(s): 501e57f

Update app.py

Files changed (1)
  1. app.py +64 -100
app.py CHANGED
@@ -4,14 +4,14 @@ import asyncio
  import tempfile
  import os
  from moviepy.editor import AudioFileClip
- import speech_recognition as sr
+ import re
 
  # Get all available voices
  async def get_voices():
      voices = await edge_tts.list_voices()
      return {f"{v['ShortName']} - {v['Locale']} ({v['Gender']})": v['ShortName'] for v in voices}
 
- # Text to speech functionality
+ # Text to speech function
  async def text_to_speech(text, voice, rate, pitch):
      if not text.strip():
          return None, gr.Warning("Please enter the text to convert.")
@@ -27,120 +27,84 @@ async def text_to_speech(text, voice, rate, pitch):
      await communicate.save(tmp_path)
      return tmp_path, None
 
- # Function to analyze audio and get speech timing
- def analyze_audio(audio_path):
-     recognizer = sr.Recognizer()
-     with sr.AudioFile(audio_path) as source:
-         audio_data = recognizer.record(source)
-     # Recognize speech using Google Web Speech API
-     try:
-         text = recognizer.recognize_google(audio_data)
-         return text
-     except sr.UnknownValueError:
-         return ""
-     except sr.RequestError:
-         return ""
-
- # Generate SRT file based on user preferences
- def generate_srt(words, audio_duration, srt_path, words_per_line, lines_per_paragraph):
-     total_segments = (len(words) // words_per_line) // lines_per_paragraph + 1
-     segment_duration = audio_duration / total_segments  # Calculate duration for each segment
-
-     current_time = 0
-     with open(srt_path, 'w', encoding='utf-8') as srt_file:
-         for i in range(0, len(words), words_per_line):
-             # Gather lines based on the defined words per line
-             lines = words[i:i + words_per_line]
-             line_text = ' '.join(lines)
+ # Generate SRT based on estimated timing
+ def generate_srt(text, speech_rate, max_words_per_line):
+     # Clean up input text
+     text = re.sub(r'\s+', ' ', text.strip())  # Remove excessive whitespace
 
+     # Split into words
+     words = text.split()
+
+     # Calculate timing for each line
+     srt_lines = []
+     current_line = []
+     current_time = 0.0  # Start time in seconds
+     total_words = len(words)
+
+     for i, word in enumerate(words):
+         current_line.append(word)
+
+         # Calculate current line length
+         if len(current_line) >= max_words_per_line or i == total_words - 1:
+             # Create SRT entry
+             line_text = ' '.join(current_line)
+             duration = len(line_text.split()) / speech_rate  # Estimate duration based on speech rate
+
+             # Format timing
              start_time = current_time
-             end_time = min(start_time + segment_duration, audio_duration)  # Ensure it doesn't exceed audio duration
+             end_time = current_time + duration
 
-             start_time_str = format_srt_time(start_time)
-             end_time_str = format_srt_time(end_time)
-             srt_file.write(f"{(i // words_per_line) + 1}\n{start_time_str} --> {end_time_str}\n")
-             srt_file.write(f"{line_text}\n\n")
-
-             current_time += segment_duration  # Update current time for the next segment
-
-     return srt_path
-
- def format_srt_time(seconds):
-     millis = int((seconds - int(seconds)) * 1000)
-     seconds = int(seconds)
-     minutes = seconds // 60
-     hours = seconds // 3600
-     minutes %= 60
-     seconds %= 60
-     return f"{hours:02}:{minutes:02}:{seconds:02},{millis:03}"
+             start_time_str = f"{int(start_time // 3600):02}:{int((start_time % 3600) // 60):02}:{int(start_time % 60):02},{int((start_time % 1) * 1000):03}"
+             end_time_str = f"{int(end_time // 3600):02}:{int((end_time % 3600) // 60):02}:{int(end_time % 60):02},{int((end_time % 1) * 1000):03}"
+
+             srt_lines.append(f"{len(srt_lines) + 1}\n{start_time_str} --> {end_time_str}\n{line_text}\n")
+
+             # Move to the next line
+             current_line = []
+             current_time += duration  # Update current time
 
- # Text to audio and SRT functionality
- async def text_to_audio_and_srt(text, voice, rate, pitch, words_per_line, lines_per_paragraph):
-     # Clean up input text: remove extra spaces and newlines
-     cleaned_text = ' '.join(text.split())
+     return ''.join(srt_lines)
 
-     audio_path, warning = await text_to_speech(cleaned_text, voice, rate, pitch)
+ # Gradio interface function
+ def tts_interface(text, voice, rate, pitch, speech_rate, max_words_per_line):
+     audio_path, warning = asyncio.run(text_to_speech(text, voice, rate, pitch))
      if warning:
          return None, None, warning
-
-     audio_clip = AudioFileClip(audio_path)
-     audio_duration = audio_clip.duration
 
-     # Analyze audio to get the actual spoken text
-     spoken_text = analyze_audio(audio_path)
-
-     # Generate SRT file based on the entire text
-     base_name = os.path.splitext(audio_path)[0]
-     srt_path = f"{base_name}_subtitle.srt"
-
-     # Split input text into words
-     words = cleaned_text.split()
+     # Generate SRT file
+     srt_content = generate_srt(text, speech_rate, max_words_per_line)
+     srt_path = audio_path.replace('.mp3', '_subtitle.srt')
 
-     generate_srt(words, audio_duration, srt_path, words_per_line, lines_per_paragraph)
+     with open(srt_path, 'w') as f:
+         f.write(srt_content)
 
      return audio_path, srt_path, None
 
- # Gradio interface function
- def tts_interface(text, voice, rate, pitch, words_per_line, lines_per_paragraph):
-     audio_path, srt_path, warning = asyncio.run(text_to_audio_and_srt(text, voice, rate, pitch, words_per_line, lines_per_paragraph))
-     return audio_path, srt_path, warning
-
  # Create Gradio app
  async def create_demo():
      voices = await get_voices()
 
-     with gr.Blocks() as demo:
-         gr.Markdown(
-             """
-             <h1 style="text-align: center; color: #333;">Text to Speech with Subtitles</h1>
-             <p style="text-align: center; color: #555;">Convert your text to natural-sounding speech and generate subtitles (SRT) for your audio.</p>
-             """,
-             elem_id="header"
-         )
-
-         with gr.Row():
-             with gr.Column():
-                 text_input = gr.Textbox(label="Input Text", lines=5, placeholder="Enter text here...")
-                 voice_dropdown = gr.Dropdown(choices=[""] + list(voices.keys()), label="Select Voice", value="")
-                 rate_slider = gr.Slider(minimum=-50, maximum=50, value=0, label="Rate Adjustment (%)", step=1)
-                 pitch_slider = gr.Slider(minimum=-20, maximum=20, value=0, label="Pitch Adjustment (Hz)", step=1)
-
-                 words_per_line = gr.Slider(minimum=3, maximum=8, value=5, label="Words per Line", step=1)
-                 lines_per_paragraph = gr.Slider(minimum=1, maximum=5, value=2, label="Lines per Paragraph", step=1)
-
-                 generate_button = gr.Button("Generate Audio and Subtitles", variant="primary")
-
-             with gr.Column():
-                 output_audio = gr.Audio(label="Generated Audio", type="filepath")
-                 output_srt = gr.File(label="Generated SRT", file_count="single")
-                 warning_msg = gr.Markdown(label="Warning", visible=False)
-
-         generate_button.click(
-             fn=tts_interface,
-             inputs=[text_input, voice_dropdown, rate_slider, pitch_slider, words_per_line, lines_per_paragraph],
-             outputs=[output_audio, output_srt, warning_msg]
-         )
-
+     demo = gr.Interface(
+         fn=tts_interface,
+         inputs=[
+             gr.Textbox(label="Input Text", lines=5),
+             gr.Dropdown(choices=[""] + list(voices.keys()), label="Select Voice", value=""),
+             gr.Slider(minimum=-50, maximum=50, value=0, label="Rate Adjustment (%)", step=1),
+             gr.Slider(minimum=-20, maximum=20, value=0, label="Pitch Adjustment (Hz)", step=1),
+             gr.Slider(minimum=100, maximum=300, value=150, label="Speech Rate (words per minute)", step=1),
+             gr.Slider(minimum=3, maximum=8, value=5, label="Max Words per Line", step=1),
+         ],
+         outputs=[
+             gr.Audio(label="Generated Audio", type="filepath"),
+             gr.File(label="Generated Subtitle (.srt)"),
+             gr.Markdown(label="Warning", visible=False)
+         ],
+         title="Edge TTS Text to Speech with SRT",
+         description="Convert text to speech and generate synchronized subtitles based on speech rate.",
+         analytics_enabled=False,
+         allow_flagging=False,
+     )
+
      return demo
 
  # Run the app
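
A note on the timing math introduced above: the Speech Rate slider is labelled in words per minute, but the committed line duration = len(line_text.split()) / speech_rate divides a word count by that per-minute figure, which yields minutes rather than the seconds that SRT timestamps expect. The sketch below is illustrative only; format_timestamp and estimate_srt are not names from app.py. It shows the same per-line estimate with the minutes-to-seconds conversion made explicit.

# Illustrative sketch, not from app.py: estimate per-line subtitle timings
# from a words-per-minute speech rate and format them as SRT timestamps.
def format_timestamp(seconds):
    # SRT timestamps use the form HH:MM:SS,mmm
    millis = int((seconds - int(seconds)) * 1000)
    secs = int(seconds)
    hours, secs = divmod(secs, 3600)
    minutes, secs = divmod(secs, 60)
    return f"{hours:02}:{minutes:02}:{secs:02},{millis:03}"

def estimate_srt(text, words_per_minute=150, max_words_per_line=5):
    words = text.split()
    seconds_per_word = 60.0 / words_per_minute  # convert the per-minute rate to seconds per word
    entries = []
    current_time = 0.0
    for start in range(0, len(words), max_words_per_line):
        line = ' '.join(words[start:start + max_words_per_line])
        duration = len(line.split()) * seconds_per_word
        begin, end = current_time, current_time + duration
        entries.append(f"{len(entries) + 1}\n{format_timestamp(begin)} --> {format_timestamp(end)}\n{line}\n")
        current_time = end
    return '\n'.join(entries)

if __name__ == "__main__":
    print(estimate_srt("This is a short example sentence used to preview subtitle timing."))

At the default 150 words per minute, a five-word line gets roughly two seconds on screen; dividing by the per-minute rate without the factor of 60 gives about 33 milliseconds instead.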
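
The diff ends at the # Run the app comment, so the launch code itself is not shown here. Purely as a hypothetical illustration of how an async create_demo is commonly driven, and not necessarily what app.py actually does:

# Hypothetical launch snippet; the real code after "# Run the app" is outside this diff.
# create_demo is the coroutine defined in app.py above, and asyncio is already imported there.
if __name__ == "__main__":
    demo = asyncio.run(create_demo())  # build the gr.Interface with the fetched voice list
    demo.launch()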