Upload folder using huggingface_hub
- .gitattributes +0 -1
- Dockerfile +28 -0
- README.md +4 -5
- app.py +131 -0
- requirements.txt +6 -0
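The commit message indicates the files were pushed with the huggingface_hub client rather than a plain git push. A minimal sketch of such a push, assuming a configured access token; the repo id is inferred from the websocket origin in the Dockerfile below and may differ:

from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path=".",                              # local folder containing app.py, Dockerfile, etc.
    repo_id="sophiamyang/panel-instructpix2pix",  # inferred from the Space hostname; adjust as needed
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)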
.gitattributes
CHANGED
@@ -25,7 +25,6 @@
 *.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
@@ -0,0 +1,28 @@
+# FROM python:3.9
+FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04
+LABEL maintainer="Hugging Face"
+LABEL repository="transformers"
+
+RUN apt update && \
+    apt install -y bash \
+        build-essential \
+        git \
+        curl \
+        ca-certificates \
+        python3 \
+        python3-pip && \
+    rm -rf /var/lib/apt/lists
+
+
+WORKDIR /code
+
+COPY ./requirements.txt /code/requirements.txt
+RUN python3 -m pip install --no-cache-dir --upgrade pip
+RUN python3 -m pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+COPY . .
+
+CMD ["panel", "serve", "/code/app.py", "--address", "0.0.0.0", "--port", "7860", "--allow-websocket-origin", "sophiamyang-panel-instructpix2pix.hf.space", "--allow-websocket-origin", "0.0.0.0:7860"]
+
+RUN mkdir /.cache
+RUN chmod 777 /.cache
README.md
CHANGED
@@ -1,11 +1,10 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Panel InstructPix2Pix
+emoji: 🏃
+colorFrom: purple
+colorTo: purple
 sdk: docker
 pinned: false
-license: mit
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,131 @@
+import io
+
+import numpy as np
+import panel as pn
+import param
+import PIL
+import requests
+import torch
+
+from diffusers import StableDiffusionInstructPix2PixPipeline
+
+pn.extension('texteditor', template="bootstrap", sizing_mode='stretch_width')
+
+pn.state.template.param.update(
+    main_max_width="690px",
+    header_background="#F08080",
+)
+
+model_id = "timbrooks/instruct-pix2pix"
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+if 'pipe' in pn.state.cache:
+    pipe = pn.state.cache['pipe']
+else:
+    pipe = pn.state.cache['pipe'] = StableDiffusionInstructPix2PixPipeline.from_pretrained(
+        model_id, torch_dtype=torch.float16
+    ).to(device)
+    pipe.enable_xformers_memory_efficient_attention()
+    pipe.unet.to(memory_format=torch.channels_last)
+
+def normalize_image(value, width):
+    """
+    normalize image to RBG channels and to the same size
+    """
+    b = io.BytesIO(value)
+    image = PIL.Image.open(b).convert("RGB")
+    aspect = image.size[1] / image.size[0]
+    height = int(aspect * width)
+    return image.resize((width, height), PIL.Image.LANCZOS)
+
+def new_image(prompt, image, img_guidance, guidance, steps, width=600):
+    """
+    create a new image from the StableDiffusionInstructPix2PixPipeline model
+    """
+    edit = pipe(
+        prompt,
+        image=image,
+        image_guidance_scale=img_guidance,
+        guidance_scale=guidance,
+        num_inference_steps=steps,
+    ).images[0]
+    return edit
+
+file_input = pn.widgets.FileInput(width=600)
+
+prompt = pn.widgets.TextEditor(
+    value="", placeholder="Enter image editing instruction here...", height=160, toolbar=False
+)
+img_guidance = pn.widgets.DiscreteSlider(
+    name="Image guidance scale", options=list(np.arange(1, 10.5, 0.5)), value=1.5
+)
+guidance = pn.widgets.DiscreteSlider(
+    name="Guidance scale", options=list(np.arange(1, 10.5, 0.5)), value=7
+)
+steps = pn.widgets.IntSlider(
+    name="Inference Steps", start=1, end=100, step=1, value=20
+)
+run_button = pn.widgets.Button(name="Run!")
+
+widgets = pn.Row(
+    pn.Column(prompt, run_button, margin=5),
+    pn.Card(
+        pn.Column(img_guidance, guidance, steps),
+        title="Advanced settings", margin=10
+    ), width=600
+)
+
+# define global variables to keep track of things
+convos = []  # store all panel objects in a list
+image = None
+filename = None
+
+def get_conversations(_, img, img_guidance, guidance, steps, width=600):
+    """
+    Get all the conversations in a Panel object
+    """
+    global image, filename
+    prompt_text = prompt.value
+    prompt.value = ""
+
+    # if the filename changes, open the image again
+    if filename != file_input.filename:
+        filename = file_input.filename
+        image = normalize_image(file_input.value, width)
+        convos.clear()
+
+    # if there is a prompt run output
+    if prompt_text:
+        image = new_image(prompt_text, image, img_guidance, guidance, steps)
+        convos.extend([
+            pn.Row(
+                pn.panel("\U0001F60A", width=10),
+                prompt_text,
+                width=600
+            ),
+            pn.Row(
+                pn.panel(image, align='end', width=500),
+                pn.panel("\U0001F916", width=10),
+                align='end'
+            )
+        ])
+    return pn.Column(*convos, margin=15, width=575)
+
+# bind widgets to functions
+interactive_upload = pn.panel(pn.bind(pn.panel, file_input, width=575, min_height=400, margin=15))
+
+interactive_conversation = pn.panel(
+    pn.bind(
+        get_conversations, run_button, file_input, img_guidance, guidance, steps
+    ), loading_indicator=True
+)
+
+
+# layout
+pn.Column(
+    "## \U0001F60A Upload an image file and start editing!",
+    file_input,
+    interactive_upload,
+    interactive_conversation,
+    widgets
+).servable(title="Panel Stable Diffusion InstructPix2pix Image Editing Chatbot")
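Note that app.py calls pipe.enable_xformers_memory_efficient_attention() unconditionally, so the Space fails at startup if the xformers wheel does not install cleanly. A sketch of a more defensive variant, assuming the same model id and a CUDA device as in app.py:

import torch
from diffusers import StableDiffusionInstructPix2PixPipeline

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

try:
    # Switch to memory-efficient attention only when xformers is importable.
    import xformers  # noqa: F401
    pipe.enable_xformers_memory_efficient_attention()
except ImportError:
    # Fall back to the default attention implementation.
    pass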
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+panel
+hvplot
+diffusers
+transformers
+accelerate
+xformers
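With these packages installed and a CUDA GPU available, a minimal smoke test of the pipeline outside Panel, mirroring what new_image() in app.py does, might look like the following sketch; the input URL and edit instruction are placeholders:

import PIL.Image
import requests
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

# Placeholder input image; any RGB image works.
image = PIL.Image.open(
    requests.get("https://example.com/input.jpg", stream=True).raw
).convert("RGB")

edited = pipe(
    "turn it into a watercolor painting",  # placeholder instruction
    image=image,
    image_guidance_scale=1.5,
    guidance_scale=7,
    num_inference_steps=20,
).images[0]
edited.save("edited.jpg")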