from langfuse import Langfuse
from langfuse.decorators import observe, langfuse_context
from fastapi import WebSocketDisconnect
import asyncio

from config.config import settings
from services.llama_generator import LlamaGenerator
import os

# Initialize Langfuse
os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-04d2302a-aa5c-4870-9703-58ab64c3bcae"
os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-d34ea200-feec-428e-a621-784fce93a5af"
os.environ["LANGFUSE_HOST"] = "https://chris4k-langfuse-template-space.hf.space"  # 🇪🇺 EU region

try:
    langfuse = Langfuse()
except Exception as e:
    print(f"Langfuse offline: {e}")
    
# main.py
from fastapi import FastAPI, WebSocket
from fastapi.staticfiles import StaticFiles
from fastapi.responses import StreamingResponse, HTMLResponse
import asyncio
import json
import webrtcvad
import numpy as np
import wave
import io
from typing import AsyncGenerator

from utils import (
    from_en_translation,
    stt,
    to_en_translation,
    tts,
    tts_to_bytesio,
)

from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, VisitWebpageTool

app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")

# Initialize tools and agent
model = HfApiModel()
search_tool = DuckDuckGoSearchTool()
visit_webpage_tool = VisitWebpageTool()
agent = CodeAgent(
    tools=[search_tool, visit_webpage_tool],
    model=model,
    additional_authorized_imports=['requests', 'bs4', 'pandas', 'concurrent.futures', 'csv', 'json']
)
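
# Hedged usage note: CodeAgent exposes a synchronous run() method
# (e.g. `agent.run("look up the weather in Berlin")`, an illustrative prompt).
# In the async WebSocket handler below it is dispatched via asyncio.to_thread so
# the event loop stays responsive while the agent works.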

# Constants
SAMPLE_RATE = 16000  # webrtcvad supports 8000, 16000, 32000 or 48000 Hz
CHANNELS = 1
CHUNK_SIZE = 480  # samples per 30 ms VAD frame at 16 kHz (960 bytes of 16-bit PCM)
VAD_MODE = 3  # aggressiveness 0-3; 3 filters non-speech most aggressively
desired_language = "de"
max_answer_length = 100
#response_generator_pipe = TextGenerationPipeline(max_length=max_answer_length)

# Initialize VAD
vad = webrtcvad.Vad(VAD_MODE)
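
# Hedged helper sketch (not used by the endpoint below): webrtcvad.is_speech()
# expects 16-bit mono PCM frames of exactly 10, 20 or 30 ms. This generator shows
# how a longer byte buffer could be cut into VAD-sized frames; the name and the
# default frame length are assumptions for illustration.
def vad_frames(pcm: bytes, frame_ms: int = 30):
    frame_bytes = int(SAMPLE_RATE * frame_ms / 1000) * 2  # 2 bytes per 16-bit sample
    for start in range(0, len(pcm) - frame_bytes + 1, frame_bytes):
        yield pcm[start:start + frame_bytes]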

async def detect_wakeword(audio_chunk: bytes) -> bool:
    # TODO: Implement proper wake word detection
    # For now, this is a placeholder that should be replaced with a proper wake word detection model
    # You might want to use libraries like Porcupine or build your own wake word detector
    return True
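
# Hedged alternative sketch: a trivial RMS energy gate, not a real wake word model.
# It assumes 16-bit little-endian PCM and an arbitrary threshold, and is only meant
# as a stand-in until a model such as Porcupine or openWakeWord is wired in.
def simple_energy_gate(audio_chunk: bytes, threshold: float = 500.0) -> bool:
    samples = np.frombuffer(audio_chunk, dtype=np.int16)
    if samples.size == 0:
        return False
    rms = float(np.sqrt(np.mean(samples.astype(np.float32) ** 2)))
    return rms > threshold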


@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    # Create the queue outside the try block
    audio_queue = asyncio.Queue()
    stream_task = None

    await websocket.accept()
    try:
        # Create a task to process the audio stream
        stream_task = asyncio.create_task(process_audio_stream(audio_queue, websocket))
        
        # Main receive loop
        while True:
            try:
                # Try to receive audio data with a timeout
                audio_data = await asyncio.wait_for(websocket.receive_bytes(), timeout=5.0)
                
                # Put audio data into queue
                await audio_queue.put(audio_data)
            
            except asyncio.TimeoutError:
                # Timeout is normal, just continue
                continue
            
            except WebSocketDisconnect:
                # Handle clean disconnection
                print("WebSocket disconnected")
                break
            
            except Exception as e:
                print(f"WebSocket receive error: {e}")
                break
    
    except Exception as e:
        print(f"WebSocket endpoint error: {e}")
    
    finally:
        # Cancel the stream processing task if it exists
        if stream_task:
            stream_task.cancel()
            try:
                await stream_task  # Wait for the task to be fully cancelled
            except asyncio.CancelledError:
                pass
        
        try:
            await websocket.close(code=1000)
        except Exception as close_error:
            print(f"Error closing WebSocket: {close_error}")

async def process_audio_stream(audio_queue: asyncio.Queue, websocket: WebSocket) -> None:
    buffer = []
    is_speaking = False
    silence_frames = 0
    
    try:
        while True:
            # Get audio data from queue with timeout
            try:
                audio_data = await asyncio.wait_for(audio_queue.get(), timeout=5.0)
            except asyncio.TimeoutError:
                # No audio for a while, reset state
                buffer = []
                is_speaking = False
                silence_frames = 0
                continue
            
            # Validate audio data
            if not audio_data or len(audio_data) < CHUNK_SIZE:
                continue
            
            try:
                is_speech = vad.is_speech(audio_data, SAMPLE_RATE)
            except Exception as vad_error:
                print(f"VAD processing error: {vad_error}")
                continue
            
            if is_speech:
                silence_frames = 0
                buffer.append(audio_data)
                is_speaking = True
            elif is_speaking:
                silence_frames += 1
                if silence_frames > 30:  # End of utterance detection
                    # Process complete utterance
                    try:
                        audio_bytes = b''.join(buffer)
                        
                        # Convert to wave file for speech recognition
                        wav_buffer = io.BytesIO()
                        with wave.open(wav_buffer, 'wb') as wav_file:
                            wav_file.setnchannels(CHANNELS)
                            wav_file.setsampwidth(2)  # 16-bit audio
                            wav_file.setframerate(SAMPLE_RATE)
                            wav_file.writeframes(audio_bytes)
                        
                        # Reset state
                        buffer = []
                        is_speaking = False
                        silence_frames = 0
                        
                        # Check for wake word
                        if await detect_wakeword(audio_bytes):
                            # Process the audio and get a response; rewind the
                            # in-memory WAV before handing it to speech recognition
                            wav_buffer.seek(0)
                            user_speech_text = stt(wav_buffer, desired_language)
                            if "computer" in user_speech_text.lower():
                                translated_text = to_en_translation(user_speech_text, desired_language)
                                # CodeAgent.run() is synchronous; run it in a worker
                                # thread so the event loop keeps servicing the socket
                                response = await asyncio.to_thread(agent.run, translated_text)
                                bot_response_de = from_en_translation(response, desired_language)
                                
                                # Stream the response
                                response_data = json.dumps({
                                    "user_text": user_speech_text,
                                    "response_de": bot_response_de,
                                    "response_en": response
                                })
                                await websocket.send_text(response_data)
                                
                                # Generate and stream audio response
                                bot_voice = tts(bot_response_de, desired_language)
                                bot_voice_bytes = tts_to_bytesio(bot_voice)
                                audio_data = json.dumps({
                                    "audio": bot_voice_bytes.decode('latin1')
                                })
                                await websocket.send_text(audio_data)
                    
                    except Exception as processing_error:
                        print(f"Error processing speech utterance: {processing_error}")
    
    except asyncio.CancelledError:
        # Handle task cancellation
        print("Audio stream processing task cancelled")
    except Exception as e:
        print(f"Unexpected error in audio stream processing: {e}")
        
@app.get("/", response_class=HTMLResponse)
async def get_index():
    with open("static/index.html") as f:
        return f.read()

 
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
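
# Hedged example client (assumes the third-party `websockets` package and that the
# server is already running on localhost; the frame of silence is illustrative and
# would normally be filtered out by the VAD rather than produce a reply):
#
#   import asyncio
#   import websockets
#
#   async def send_one_frame():
#       async with websockets.connect("ws://localhost:8000/ws") as ws:
#           await ws.send(b"\x00" * 960)  # one 30 ms frame of 16-bit silence
#
#   asyncio.run(send_one_frame())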