upload model
- app.py +30 -0
- examples/00015_colors.png +0 -0
- examples/00033_colors.png +0 -0
- examples/00084_colors.png +0 -0
- layers.py +55 -0
- model/model.h5 +3 -0
- utils.py +23 -0
app.py
ADDED
@@ -0,0 +1,30 @@
from layers import BilinearUpSampling2D
from tensorflow.keras.models import load_model
from utils import load_images, predict
import matplotlib.pyplot as plt
import numpy as np
import gradio as gr

# The checkpoint references a custom layer and a training-only loss;
# register the layer and stub the loss so load_model can deserialise it.
custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': None}
print('Loading model...')
model = load_model("model/model.h5", custom_objects=custom_objects, compile=False)
print('Successfully loaded model...')
examples = ['examples/00015_colors.png', 'examples/00084_colors.png', 'examples/00033_colors.png']


def infer(image):
    # Run depth prediction, min-max normalise the map, and render it
    # through the plasma colormap so it displays as an RGB image.
    inputs = load_images([image])
    outputs = predict(model, inputs)
    plasma = plt.get_cmap('plasma')
    rescaled = outputs[0][:, :, 0]
    rescaled = rescaled - np.min(rescaled)
    rescaled = rescaled / np.max(rescaled)
    image_out = plasma(rescaled)[:, :, :3]
    return image_out


iface = gr.Interface(
    fn=infer,
    inputs=[gr.inputs.Image(label="image", type="numpy", shape=(640, 480))],
    outputs="image",
    examples=examples).launch(debug=True)
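For reference, a minimal sketch of the same inference path outside Gradio. It assumes the packages imported above are installed, the LFS-tracked model/model.h5 has been fetched, and that sample.png is a hypothetical 640x480 RGB test image, not part of this commit:

import numpy as np
from PIL import Image
from tensorflow.keras.models import load_model

from layers import BilinearUpSampling2D
from utils import load_images, predict

model = load_model("model/model.h5",
                   custom_objects={'BilinearUpSampling2D': BilinearUpSampling2D,
                                   'depth_loss_function': None},
                   compile=False)

# Emulate what Gradio hands to infer(): a (480, 640, 3) uint8 array.
image = np.array(Image.open("sample.png").convert("RGB").resize((640, 480)))
depth = predict(model, load_images([image]))
print(depth.shape)  # (1, H, W, 1); values lie in [0.01, 1] after predict()'s clip-and-scale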
examples/00015_colors.png
ADDED
examples/00033_colors.png
ADDED
examples/00084_colors.png
ADDED
layers.py
ADDED
@@ -0,0 +1,55 @@
from tensorflow.keras.layers import Layer, InputSpec
import keras.utils.conv_utils as conv_utils
import tensorflow as tf
import tensorflow.keras.backend as K


def normalize_data_format(value):
    if value is None:
        value = K.image_data_format()
    data_format = value.lower()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('The `data_format` argument must be one of '
                         '"channels_first", "channels_last". Received: ' +
                         str(value))
    return data_format


class BilinearUpSampling2D(Layer):
    """Upsample a 4-D tensor by `size` with bilinear interpolation."""

    def __init__(self, size=(2, 2), data_format=None, **kwargs):
        super(BilinearUpSampling2D, self).__init__(**kwargs)
        self.data_format = normalize_data_format(data_format)
        self.size = conv_utils.normalize_tuple(size, 2, 'size')
        self.input_spec = InputSpec(ndim=4)

    def compute_output_shape(self, input_shape):
        if self.data_format == 'channels_first':
            height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
            width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
            return (input_shape[0], input_shape[1], height, width)
        elif self.data_format == 'channels_last':
            height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
            width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
            return (input_shape[0], height, width, input_shape[3])

    def call(self, inputs):
        # K.shape returns a tensor, so its components are never None here;
        # compute the target size directly from the dynamic input shape.
        input_shape = K.shape(inputs)
        if self.data_format == 'channels_first':
            height = self.size[0] * input_shape[2]
            width = self.size[1] * input_shape[3]
        elif self.data_format == 'channels_last':
            height = self.size[0] * input_shape[1]
            width = self.size[1] * input_shape[2]
        return tf.image.resize(inputs, [height, width], method=tf.image.ResizeMethod.BILINEAR)

    def get_config(self):
        # Serialise the constructor arguments so load_model can rebuild
        # the layer from custom_objects.
        config = {'size': self.size, 'data_format': self.data_format}
        base_config = super(BilinearUpSampling2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
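As a quick standalone check of the layer (not part of the Space, and assuming the same TensorFlow/Keras environment), the default size=(2, 2) should double both spatial dimensions of a channels-last tensor:

import numpy as np
import tensorflow as tf
from layers import BilinearUpSampling2D

x = tf.constant(np.random.rand(1, 240, 320, 1), dtype=tf.float32)
y = BilinearUpSampling2D(size=(2, 2))(x)
print(y.shape)  # (1, 480, 640, 1)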
model/model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c23caa0daa6f34c4c4b0beeb2d2d7de0b7d5b07ef8c53ccbd3149ee5ccab595e
size 479691272
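Note: this is a Git LFS pointer file, not the weights themselves. The roughly 480 MB model.h5 lives in LFS storage, so a plain clone needs git lfs pull (or an equivalent fetch) before app.py can load it.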
utils.py
ADDED
@@ -0,0 +1,23 @@
import numpy as np


def depth_norm(x, maxDepth):
    # Map a raw network output x to maxDepth / x.
    return maxDepth / x


def predict(model, images, minDepth=10, maxDepth=1000, batch_size=2):
    # Support multiple RGBs, one RGB image, even grayscale
    if len(images.shape) < 3:
        images = np.stack((images, images, images), axis=2)
    if len(images.shape) < 4:
        images = images.reshape((1, images.shape[0], images.shape[1], images.shape[2]))
    # Compute predictions
    predictions = model.predict(images, batch_size=batch_size)
    # Put in expected range: clip to [minDepth, maxDepth], then divide by maxDepth
    return np.clip(depth_norm(predictions, maxDepth=maxDepth), minDepth, maxDepth) / maxDepth


def load_images(image_files):
    # Scale each 480x640 RGB frame to [0, 1] floats and stack into a batch.
    loaded_images = []
    for file in image_files:
        x = np.clip(file.reshape(480, 640, 3) / 255, 0, 1)
        loaded_images.append(x)
    return np.stack(loaded_images, axis=0)
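As a worked example of the clip-and-scale step in predict(), using hypothetical raw network outputs:

import numpy as np
from utils import depth_norm

raw = np.array([1.0, 2.0, 100.0, 1000.0])  # hypothetical raw outputs
depth = np.clip(depth_norm(raw, maxDepth=1000), 10, 1000) / 1000
print(depth)  # [1.   0.5  0.01 0.01]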