Spaces:
Paused
Paused
init
Browse files- Dockerfile +21 -0
- README.md +1 -0
Dockerfile
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# syntax=docker/dockerfile:1

# Hugging Face TGI (text-generation-inference) serving image, pinned to a
# specific release tag for reproducible builds.
FROM ghcr.io/huggingface/text-generation-inference:3.0.2

# Set working directory (created automatically if missing).
WORKDIR /app

# Create the data and cache directories in a single layer.
# Hugging Face Spaces runs the container as an arbitrary non-root UID, so
# these paths must be world-writable for model downloads to succeed.
# NOTE(review): 777 is broad — a dedicated user plus --chown would be
# tighter, but Spaces does not guarantee a fixed UID; confirm before narrowing.
RUN mkdir -p /data /.cache \
 && chmod 777 /data /.cache

# Expose the model on port 8080. EXPOSE is documentation only; Spaces routes
# traffic via the `port:` field in README.md frontmatter.
EXPOSE 8080

# Hugging Face token used by TGI to download gated models (e.g. Llama).
# SECURITY: passing a secret through ARG/ENV bakes it into the image layers
# and `docker history`. Prefer a runtime secret (Spaces injects repository
# secrets as env vars) or a BuildKit `--mount=type=secret`. Kept here for
# backward compatibility with existing `--build-arg HF_TOKEN=...` builds.
ARG HF_TOKEN
ENV HF_TOKEN=${HF_TOKEN}

# Default arguments appended to the base image's ENTRYPOINT (the TGI
# launcher); exec form so they can be overridden at `docker run`.
CMD ["--model-id", "meta-llama/Llama-3.2-1B-Instruct", "--port", "8080"]
README.md
CHANGED
@@ -4,6 +4,7 @@ emoji: 😻
|
|
4 |
colorFrom: green
|
5 |
colorTo: green
|
6 |
sdk: docker
|
|
|
7 |
pinned: false
|
8 |
short_description: serving llama with tgi in a space
|
9 |
---
|
|
|
4 |
colorFrom: green
|
5 |
colorTo: green
|
6 |
sdk: docker
|
7 |
+
port: 8080
|
8 |
pinned: false
|
9 |
short_description: serving llama with tgi in a space
|
10 |
---
|