# S3TVR-Demo / Dockerfile
# Author: yalsaffar
# Updated Dockerfile for Hugging Face Spaces deployment (commit b941e8b)
# Use an official CUDA-enabled image from NVIDIA with CUDA 12.1.
# Pinned to an explicit tag for reproducibility; the -devel variant is
# needed because several pip packages below compile native/CUDA code.
FROM nvidia/cuda:12.1.1-cudnn8-devel-ubuntu20.04

# Set the working directory in the container (created automatically).
WORKDIR /app

# Suppress interactive apt prompts during the build only. Declared as a
# build ARG rather than ENV so DEBIAN_FRONTEND does not leak into the
# runtime environment of the final image.
ARG DEBIAN_FRONTEND=noninteractive
# Install OS build/runtime dependencies plus Python 3.9 from the
# deadsnakes PPA (Ubuntu 20.04 ships Python 3.8 by default).
# --no-install-recommends keeps the layers small, and the apt lists are
# removed in the same layer so they never persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    software-properties-common \
    && add-apt-repository ppa:deadsnakes/ppa \
    && apt-get update && apt-get install -y --no-install-recommends \
    alsa-utils \
    build-essential \
    cmake \
    curl \
    ffmpeg \
    git \
    libsndfile1 \
    nodejs \
    npm \
    portaudio19-dev \
    python3.9 \
    python3.9-dev \
    python3.9-distutils \
    python3.9-venv \
    && rm -rf /var/lib/apt/lists/*

# Bootstrap pip for Python 3.9. Download to a file instead of piping the
# script straight into the interpreter: the default /bin/sh has no
# pipefail, so a failed download would otherwise be silently masked.
RUN curl -fsSL -o /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py \
    && python3.9 /tmp/get-pip.py \
    && rm -f /tmp/get-pip.py

# Make "python" and "pip" resolve to the 3.9 toolchain. -f overwrites
# any pre-existing links so the step is idempotent on rebuilds.
RUN ln -sf /usr/bin/python3.9 /usr/bin/python \
    && ln -sf /usr/local/bin/pip /usr/bin/pip
# CUDA toolchain locations: packages that build custom CUDA extensions
# discover the toolkit via CUDA_HOME, and the compiler/runtime libraries
# must be reachable on PATH / LD_LIBRARY_PATH. The path is written out
# literally because ${VAR} set in the same ENV instruction would expand
# to its *previous* (empty) value, not the one being assigned.
ENV CUDA_HOME=/usr/local/cuda \
    PATH=/usr/local/cuda/bin:${PATH} \
    LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH}

# Writable cache directories. Hugging Face Spaces runs containers under
# an arbitrary non-root UID, hence the deliberate world-writable chmod.
RUN mkdir -p /app/cache/huggingface /app/cache/triton /app/cache/torch_extensions \
    && chmod -R 777 /app/cache

# Point the ML libraries at those caches. TRANSFORMERS_CACHE is the
# legacy alias of HF_HOME, kept for older transformers releases.
ENV HF_HOME=/app/cache/huggingface \
    TRANSFORMERS_CACHE=/app/cache/huggingface \
    TRITON_CACHE_DIR=/app/cache/triton \
    TORCH_EXTENSIONS_DIR=/app/cache/torch_extensions

# Numba: redirect its on-disk cache to /tmp and disable JIT compilation
# outright so cache-permission failures cannot occur at runtime.
ENV NUMBA_CACHE_DIR=/tmp/numba_cache \
    NUMBA_DISABLE_JIT=1
# ---- Python dependencies -------------------------------------------------
# Copy only the dependency manifests first so the slow pip layers below
# are cached and survive application-code changes (previously every pip
# install ran AFTER `COPY . /app`, busting the cache on any source edit).
COPY setup.sh requirements.txt /app/
# Make the setup script executable.
RUN chmod +x setup.sh

# Toolchain/helper packages. --no-cache-dir keeps pip's download cache
# out of the image layers. pybind11/Cython go first: fasttext and other
# source builds below need them at build time.
RUN pip install --no-cache-dir --upgrade pip setuptools wheel
RUN pip install --no-cache-dir pybind11 Cython
RUN pip install --no-cache-dir fasttext
RUN pip install --no-cache-dir pyaudio
RUN pip install --no-cache-dir fastapi uvicorn uvloop

# Install the CUDA 12.1 PyTorch/torchaudio wheels. --extra-index-url is
# the officially documented way to reach the cu121 wheel index while
# still resolving pure-Python dependencies from PyPI (the previous
# ".../cu121/torch_stable.html" find-links URL is not a valid page).
RUN pip install --no-cache-dir torch==2.2.2+cu121 torchaudio==2.2.2+cu121 \
    --extra-index-url https://download.pytorch.org/whl/cu121

# Project requirements.
RUN pip install --no-cache-dir -r requirements.txt

# Coqui TTS from source, cloned OUTSIDE /app so the later COPY of the
# build context cannot clobber the editable install.
# NOTE(review): the clone is unpinned — builds are not reproducible;
# consider pinning a tag or commit hash.
RUN git clone https://github.com/coqui-ai/TTS/ /opt/TTS && \
    cd /opt/TTS && \
    make install

# The TTS install may pull a newer librosa; force the version the app
# was tested against.
RUN pip install --no-cache-dir librosa==0.9.2

# ---- Application code ----------------------------------------------------
COPY . /app

# Upload/static directories must be writable by the arbitrary UID that
# Hugging Face Spaces assigns at runtime (hence the deliberate 777).
RUN mkdir -p /app/app/uploads /app/app/public \
    && chmod -R 777 /app/app/uploads /app/app/public

# Health-probe helper used by CMD to delay the Node server until the
# FastAPI backend is up; copied explicitly and made executable.
COPY wait-for-it.sh /app/wait-for-it.sh
RUN chmod +x /app/wait-for-it.sh

# Node.js dependencies for the server under /app/app.
# NOTE(review): prefer `npm ci` once a package-lock.json is committed.
RUN cd /app/app && npm install
# Document the service ports (EXPOSE is metadata only — it does not
# publish anything by itself): 8000 = FastAPI backend, 3000 = Node app.
EXPOSE 8000
EXPOSE 3000
# Flag the application code can read to detect it is running in Docker.
ENV IN_DOCKER=True
# Start the FastAPI backend in the background, then gate the Node.js
# server on the backend's /health endpoint via the project's custom
# wait-for-it.sh (it takes a --url flag, unlike the classic host:port
# script of the same name).
# NOTE(review): two processes under one shell is an anti-pattern — there
# is no PID-1 signal forwarding and no supervision, so if uvicorn dies
# after startup the container keeps running with only Node alive.
# Consider one process per container, or an init/supervisor.
CMD ["sh", "-c", "uvicorn app:app --host 0.0.0.0 --port 8000 & /app/wait-for-it.sh --url http://0.0.0.0:8000/health --strict -- node /app/app/server.js"]