init
- added_tokens.json +59 -0
- config.json +194 -0
- configuration_minicpm.py +209 -0
- image_processing_minicpmv.py +407 -0
- merges.txt +0 -0
- modeling_minicpmo.py +0 -0
- modeling_navit_siglip.py +939 -0
- preprocessor_config.json +24 -0
- processing_minicpmo.py +506 -0
- resampler.py +864 -0
- special_tokens_map.json +264 -0
- tokenization_minicpmo_fast.py +110 -0
- tokenizer.json +0 -0
- tokenizer_config.json +523 -0
- utils.py +154 -0
- vocab.json +0 -0
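
Note (illustrative, not part of the commit): together these files form a trust_remote_code checkpoint; the auto_map in config.json points AutoConfig/AutoModel at the bundled configuration_minicpm.py and modeling_minicpmo.py. A minimal loading sketch, assuming the repo id from _name_or_path in config.json; the dtype choice is an assumption:

# Minimal loading sketch for this remote-code checkpoint.
# Repo id matches `_name_or_path` in config.json; dtype is illustrative.
import torch
from transformers import AutoModel, AutoTokenizer

model = AutoModel.from_pretrained(
    "openbmb/MiniCPM-o-2_6",
    trust_remote_code=True,      # classes live in this repo, not in transformers
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
)
tokenizer = AutoTokenizer.from_pretrained("openbmb/MiniCPM-o-2_6", trust_remote_code=True)
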
added_tokens.json
ADDED
@@ -0,0 +1,59 @@
{
  "</asr>": 151682,
  "</box>": 151670,
  "</image>": 151666,
  "</image_id>": 151678,
  "</point>": 151674,
  "</quad>": 151672,
  "</query>": 151684,
  "</ref>": 151668,
  "</slice>": 151676,
  "</tool_call>": 151658,
  "</unit>": 151680,
  "<asr>": 151681,
  "<box>": 151669,
  "<image>": 151665,
  "<image_id>": 151677,
  "<point>": 151673,
  "<quad>": 151671,
  "<query>": 151683,
  "<ref>": 151667,
  "<reserved_43>": 151698,
  "<reserved_53>": 151699,
  "<slice>": 151675,
  "<tool_call>": 151657,
  "<unit>": 151679,
  "<|audio_end|>": 151687,
  "<|audio_start|>": 151685,
  "<|audio|>": 151686,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|interrupt|>": 151695,
  "<|listen|>": 151693,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|speak|>": 151694,
  "<|spk_bos|>": 151688,
  "<|spk_eos|>": 151690,
  "<|spk|>": 151689,
  "<|tts_bos|>": 151691,
  "<|tts_eos|>": 151692,
  "<|vad_end|>": 151697,
  "<|vad_start|>": 151696,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
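
Note (illustrative, not part of the commit): these added tokens extend the base Qwen2 vocabulary up to the vocab_size of 151700 declared in config.json. A quick sanity-check sketch, assuming the tokenizer loaded above:

# The special tokens should resolve to the IDs listed in added_tokens.json.
ids = tokenizer.convert_tokens_to_ids(["<image>", "</image>", "<|audio|>", "<|tts_bos|>"])
print(ids)  # expected: [151665, 151666, 151686, 151691]
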
config.json
ADDED
@@ -0,0 +1,194 @@
{
  "_name_or_path": "openbmb/MiniCPM-o-2_6",
  "architectures": [
    "MiniCPMO"
  ],

  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "max_position_embeddings": 32768,
  "max_window_layers": 28,
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "rms_norm_eps": 1e-06,
  "rope_theta": 1000000.0,
  "sliding_window": 131072,
  "tie_word_embeddings": false,
  "use_sliding_window": false,
  "vocab_size": 151700,
  "batch_vision_input": true,
  "drop_vision_last_layer": false,
  "image_size": 448,

  "audio_chunk_length": 1.0,
  "audio_config": {
    "_name_or_path": "openai/whisper-medium",
    "architectures": [
      "MiniCPMWhisperEncoder"
    ],
    "begin_suppress_tokens": [
      220,
      50257
    ],
    "bos_token_id": 50257,
    "d_model": 1024,
    "decoder_attention_heads": 16,
    "decoder_ffn_dim": 4096,
    "decoder_layers": 24,
    "decoder_start_token_id": 50258,
    "encoder_attention_heads": 16,
    "encoder_ffn_dim": 4096,
    "encoder_layers": 24,
    "eos_token_id": 50257,
    "forced_decoder_ids": [
      [
        1,
        50259
      ],
      [
        2,
        50359
      ],
      [
        3,
        50363
      ]
    ],
    "max_length": 448,
    "model_type": "whisper",
    "num_hidden_layers": 24,
    "pad_token_id": 50257,
    "suppress_tokens": [
      1,
      2,
      7,
      8,
      9,
      10,
      14,
      25,
      26,
      27,
      28,
      29,
      31,
      58,
      59,
      60,
      61,
      62,
      63,
      90,
      91,
      92,
      93,
      359,
      503,
      522,
      542,
      873,
      893,
      902,
      918,
      922,
      931,
      1350,
      1853,
      1982,
      2460,
      2627,
      3246,
      3253,
      3268,
      3536,
      3846,
      3961,
      4183,
      4667,
      6585,
      6647,
      7273,
      9061,
      9383,
      10428,
      10929,
      11938,
      12033,
      12331,
      12562,
      13793,
      14157,
      14635,
      15265,
      15618,
      16553,
      16604,
      18362,
      18956,
      20075,
      21675,
      22520,
      26130,
      26161,
      26435,
      28279,
      29464,
      31650,
      32302,
      32470,
      36865,
      42863,
      47425,
      49870,
      50254,
      50258,
      50358,
      50359,
      50360,
      50361,
      50362
    ],
    "torch_dtype": "float32"
  },
  "audio_pool_step": 2,
  "auto_map": {
    "AutoConfig": "configuration_minicpm.MiniCPMOConfig",
    "AutoModel": "modeling_minicpmo.MiniCPMO",
    "AutoModelForCausalLM": "modeling_minicpmo.MiniCPMO"
  },
  "chunk_input": true,
  "listen_speak_type": "asr",
  "model_type": "minicpmo",
  "patch_size": 14,
  "query_num": 64,
  "slice_config": {
    "max_slice_nums": 9,
    "model_type": "minicpmv"
  },
  "slice_mode": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.44.2",
  "tts_config": {
    "model_type": "conditional_chattts",
    "llm_dim": 3584
  },
  "use_cache": true,
  "use_image_id": true,
  "version": 2.6,
  "vision_batch_size": 16,
  "vision_config": {
    "hidden_size": 1152,
    "image_size": 980,
    "intermediate_size": 4304,
    "model_type": "siglip_vision_model",
    "num_attention_heads": 16,
    "num_hidden_layers": 27,
    "patch_size": 14
  }
}
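
Note (illustrative, not part of the commit): the auto_map block is what routes AutoConfig to the MiniCPMOConfig class defined in the next file, and the nested vision_config/audio_config/tts_config dicts become sub-config attributes. A small inspection sketch:

from transformers import AutoConfig

config = AutoConfig.from_pretrained("openbmb/MiniCPM-o-2_6", trust_remote_code=True)
print(type(config).__name__)             # MiniCPMOConfig, resolved via auto_map
print(config.vision_config.hidden_size)  # 1152 (SigLIP encoder width)
print(config.audio_config.d_model)       # 1024 (Whisper-medium encoder width)
print(config.tts_config.llm_dim)         # 3584 (matches the LLM hidden_size)
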
configuration_minicpm.py
ADDED
@@ -0,0 +1,209 @@
# coding=utf-8
# Copyright 2025 The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from typing import Union

from transformers import PretrainedConfig
from transformers import Qwen2Config
from transformers import WhisperConfig
from transformers.utils import logging

from .modeling_navit_siglip import SiglipVisionConfig

logger = logging.get_logger(__name__)


class MiniCPMVSliceConfig(PretrainedConfig):
    model_type = "minicpmv"

    def __init__(
        self,
        patch_size=14,
        max_slice_nums=9,
        scale_resolution=448,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.max_slice_nums = max_slice_nums
        self.scale_resolution = scale_resolution

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "minicpmv":
            config_dict = config_dict["slice_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class ConditionalChatTTSConfig(PretrainedConfig):
    model_type = "conditional_chattts"

    def __init__(
        self,
        llm_dim: int = 2560,
        hidden_size: int = 768,
        intermediate_size: int = 3072,
        num_attention_heads: int = 12,
        num_hidden_layers: int = 20,
        max_position_embeddings: int = 4096,
        num_audio_tokens: int = 626,
        num_text_tokens: int = 21178,
        num_mel_bins: int = 100,
        num_vq: int = 4,
        use_speaker_embedding: bool = True,
        use_llm_hidden_state: bool = False,
        spk_emb_token_id: int = 21143,
        num_spk_embs: int = 1,
        audio_bos_token_id: int = 21132,
        text_eos_token_id: int = 21133,
        use_text: bool = True,
        streaming: bool = True,
        streaming_text_chunk_size: int = 10,
        streaming_text_reserved_len: int = 300,
        streaming_audio_chunk_size: int = 50,
        attn_implementation: str = "sdpa",
        use_mlp: bool = True,
        aug_loss_weight: bool = True,
        do_sample: bool = True,
        top_p: float = 0.7,
        top_k: int = 20,
        repetition_penalty: float = 1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.llm_dim = llm_dim
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.max_position_embeddings = max_position_embeddings
        self.num_audio_tokens = num_audio_tokens
        self.num_text_tokens = num_text_tokens
        self.num_mel_bins = num_mel_bins
        self.num_vq = num_vq
        self.use_speaker_embedding = use_speaker_embedding
        self.use_llm_hidden_state = use_llm_hidden_state
        self.spk_emb_token_id = spk_emb_token_id
        self.num_spk_embs = num_spk_embs
        self.audio_bos_token_id = audio_bos_token_id
        self.text_eos_token_id = text_eos_token_id
        self.use_text = use_text
        self.streaming = streaming
        self.streaming_text_chunk_size = streaming_text_chunk_size
        self.streaming_text_reserved_len = streaming_text_reserved_len
        self.streaming_audio_chunk_size = streaming_audio_chunk_size
        self.attn_implementation = attn_implementation
        self.use_mlp = use_mlp
        self.aug_loss_weight = aug_loss_weight
        self.do_sample = do_sample
        self.top_p = top_p
        self.top_k = top_k
        self.repetition_penalty = repetition_penalty


class MiniCPMOConfig(Qwen2Config):
    model_type = "minicpmo"
    keys_to_ignore_at_inference = ["past_key_values"]

    default_vision_config = {
        "hidden_size": 1152,
        "image_size": 980,
        "intermediate_size": 4304,
        "model_type": "siglip",
        "num_attention_heads": 16,
        "num_hidden_layers": 27,
        "patch_size": 14,
    }

    def __init__(
        self,
        use_cache=True,
        query_num=64,
        image_size=448,
        drop_vision_last_layer=True,
        batch_vision_input=True,
        slice_config=None,
        vision_config=None,
        audio_config=None,
        tts_config=None,
        use_image_id=True,
        vision_batch_size=16,
        audio_pool_step=2,
        audio_chunk_length=1.0,
        stream_input=False,
        init_vision=True,
        init_audio=True,
        init_tts=True,
        **kwargs,
    ):
        self.use_cache = use_cache
        self.query_num = query_num
        self.image_size = image_size
        self.drop_vision_last_layer = drop_vision_last_layer
        self.batch_vision_input = batch_vision_input
        self.use_image_id = use_image_id
        self.vision_batch_size = vision_batch_size
        self.audio_pool_step = audio_pool_step
        self.audio_chunk_length = audio_chunk_length
        self.stream_input = stream_input
        self.init_vision = init_vision
        self.init_audio = init_audio
        self.init_tts = init_tts

        if slice_config is None:
            self.slice_config = MiniCPMVSliceConfig(max_slice_nums=1)
        else:
            self.slice_config = MiniCPMVSliceConfig(**slice_config)
        self.slice_mode = True

        # same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit, with tgt_sizes added
        if vision_config is None:
            self.vision_config = SiglipVisionConfig(**self.default_vision_config)
            logger.info("vision_config is None, using default vision config")
        elif isinstance(vision_config, dict):
            self.vision_config = SiglipVisionConfig(**vision_config)
        elif isinstance(vision_config, SiglipVisionConfig):
            self.vision_config = vision_config

        if audio_config is None:
            self.audio_config = WhisperConfig()
        elif isinstance(audio_config, dict):
            self.audio_config = WhisperConfig(**audio_config)
        elif isinstance(audio_config, WhisperConfig):
            self.audio_config = audio_config

        if tts_config is None:
            self.tts_config = ConditionalChatTTSConfig()
        elif isinstance(tts_config, dict):
            self.tts_config = ConditionalChatTTSConfig(**tts_config)
        elif isinstance(tts_config, ConditionalChatTTSConfig):
            self.tts_config = tts_config

        self.patch_size = self.vision_config.patch_size

        super().__init__(**kwargs)
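
Note (illustrative, not part of the commit): constructing MiniCPMOConfig with no arguments exercises the default branches above, synthesizing a MiniCPMVSliceConfig(max_slice_nums=1), the default SigLIP vision config, a stock WhisperConfig, and a ConditionalChatTTSConfig. A sketch, assuming the module is imported within the package so the relative import resolves:

config = MiniCPMOConfig()
print(config.slice_config.max_slice_nums)  # 1, from the `slice_config is None` branch
print(config.vision_config.image_size)     # 980, from default_vision_config
print(config.patch_size)                   # 14, copied from vision_config
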
image_processing_minicpmv.py
ADDED
@@ -0,0 +1,407 @@
# coding=utf-8
# Copyright 2025 The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Union

import numpy as np
import PIL
import PIL.Image
import PIL.ImageSequence
import torch
from PIL import Image
from transformers import AutoImageProcessor
from transformers.image_processing_utils import BaseImageProcessor
from transformers.image_processing_utils import BatchFeature
from transformers.image_transforms import to_channel_dimension_format
from transformers.image_utils import ChannelDimension
from transformers.image_utils import infer_channel_dimension_format
from transformers.image_utils import is_torch_tensor
from transformers.image_utils import to_numpy_array
from transformers.image_utils import valid_images
from transformers.utils import is_torch_device
from transformers.utils import is_torch_dtype
from transformers.utils import requires_backends
from transformers.utils import TensorType


def recursive_converter(converter, value):
    if isinstance(value, list):
        new_value = []
        for v in value:
            new_value += [recursive_converter(converter, v)]
        return new_value
    else:
        return converter(value)


class MiniCPMOBatchFeature(BatchFeature):
    r"""
    Extends `BatchFeature` to support images of varying sizes.
    """

    def __init__(self, data: Optional[Dict[str, Any]] = None, tensor_type: Union[None, str, TensorType] = None):
        super().__init__(data)
        self.convert_to_tensors(tensor_type=tensor_type)

    def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
        if tensor_type is None:
            return self

        is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type)

        def converter(value):
            try:
                if not is_tensor(value):
                    tensor = as_tensor(value)
                    return tensor
                return value  # already a tensor; keep it as-is
            except:  # noqa E722
                # `key` is the loop variable below; `converter` is only called inside that loop.
                if key == "overflowing_values":
                    raise ValueError("Unable to create tensor returning overflowing values of different lengths. ")
                raise ValueError(
                    "Unable to create tensor, you should probably activate padding "
                    "with 'padding=True' to have batched tensors with the same length."
                )

        for key, value in self.items():
            self[key] = recursive_converter(converter, value)
        return self

    def to(self, *args, **kwargs) -> "MiniCPMOBatchFeature":
        requires_backends(self, ["torch"])
        import torch

        def cast_tensor(v):
            # check if v is a floating point tensor
            if torch.is_floating_point(v):
                # cast and send to device
                return v.to(*args, **kwargs)
            elif device is not None:
                return v.to(device=device)
            else:
                return v

        new_data = {}
        device = kwargs.get("device")
        # Check if the args are a device or a dtype
        if device is None and len(args) > 0:
            # device should always be the first argument
            arg = args[0]
            if is_torch_dtype(arg):
                # The first argument is a dtype
                pass
            elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
                device = arg
            else:
                # it's something else
                raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.")
        # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor`
        for k, v in self.items():
            new_data[k] = recursive_converter(cast_tensor, v)
        self.data = new_data
        return self


class MiniCPMVImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, max_slice_nums=9, scale_resolution=448, patch_size=14, **kwargs):
        super().__init__(**kwargs)
        self.max_slice_nums = max_slice_nums
        self.scale_resolution = scale_resolution
        self.patch_size = patch_size
        self.use_image_id = kwargs.pop("use_image_id", False)
        self.image_feature_size = kwargs.pop("image_feature_size", 64)
        self.im_start_token = kwargs.pop("im_start", "<image>")
        self.im_end_token = kwargs.pop("im_end", "</image>")
        self.slice_start_token = kwargs.pop("slice_start", "<slice>")
        self.slice_end_token = kwargs.pop("slice_end", "</slice>")
        self.unk_token = kwargs.pop("unk", "<unk>")
        self.im_id_start = kwargs.pop("im_id_start", "<image_id>")
        self.im_id_end = kwargs.pop("im_id_end", "</image_id>")
        self.slice_mode = kwargs.pop("slice_mode", True)

        self.mean = np.array(kwargs.pop("norm_mean", [0.5, 0.5, 0.5]))
        self.std = np.array(kwargs.pop("norm_std", [0.5, 0.5, 0.5]))
        self.version = kwargs.pop("version", 2.0)

    def ensure_divide(self, length, patch_size):
        return max(round(length / patch_size) * patch_size, patch_size)

    def find_best_resize(self, original_size, scale_resolution, patch_size, allow_upscale=False):
        width, height = original_size
        if (width * height > scale_resolution * scale_resolution) or allow_upscale:
            r = width / height
            height = int(scale_resolution / math.sqrt(r))
            width = int(height * r)
        best_width = self.ensure_divide(width, patch_size)
        best_height = self.ensure_divide(height, patch_size)
        return (best_width, best_height)

    def get_refine_size(self, original_size, grid, scale_resolution, patch_size, allow_upscale=False):
        width, height = original_size
        grid_x, grid_y = grid

        refine_width = self.ensure_divide(width, grid_x)
        refine_height = self.ensure_divide(height, grid_y)

        grid_width = refine_width / grid_x
        grid_height = refine_height / grid_y

        best_grid_size = self.find_best_resize(
            (grid_width, grid_height), scale_resolution, patch_size, allow_upscale=allow_upscale
        )
        refine_size = (best_grid_size[0] * grid_x, best_grid_size[1] * grid_y)
        return refine_size

    def split_to_patches(self, image, grid):
        patches = []
        width, height = image.size
        grid_x = int(width / grid[0])
        grid_y = int(height / grid[1])
        for i in range(0, height, grid_y):
            images = []
            for j in range(0, width, grid_x):
                box = (j, i, j + grid_x, i + grid_y)
                patch = image.crop(box)
                images.append(patch)
            patches.append(images)
        return patches

    def slice_image(self, image, max_slice_nums=9, scale_resolution=448, patch_size=14, never_split=False):
        original_size = image.size
        source_image = None
        best_grid = self.get_sliced_grid(original_size, max_slice_nums, never_split)
        patches = []

        if best_grid is None:
            # no need to slice; upsample instead
            best_size = self.find_best_resize(original_size, scale_resolution, patch_size, allow_upscale=True)
            source_image = image.resize(best_size, resample=Image.Resampling.BICUBIC)
        else:
            # source image: downsample and ensure dimensions are divisible by patch_size
            best_resize = self.find_best_resize(original_size, scale_resolution, patch_size)
            source_image = image.copy().resize(best_resize, resample=Image.Resampling.BICUBIC)
            refine_size = self.get_refine_size(
                original_size, best_grid, scale_resolution, patch_size, allow_upscale=True
            )
            refine_image = image.resize(refine_size, resample=Image.Resampling.BICUBIC)
            patches = self.split_to_patches(refine_image, best_grid)

        return source_image, patches, best_grid

    def get_grid_placeholder(self, grid):
        if grid is None:
            return ""
        slice_image_placeholder = (
            self.slice_start_token + self.unk_token * self.image_feature_size + self.slice_end_token
        )

        cols = grid[0]
        rows = grid[1]
        slices = []
        for i in range(rows):
            lines = []
            for j in range(cols):
                lines.append(slice_image_placeholder)
            slices.append("".join(lines))

        slice_placeholder = "\n".join(slices)
        return slice_placeholder

    def get_image_id_placeholder(self, idx=0):
        return f"{self.im_id_start}{idx}{self.im_id_end}"

    def get_sliced_images(self, image, max_slice_nums=None):
        slice_images = []

        if not self.slice_mode:
            return [image]

        max_slice_nums = self.max_slice_nums if max_slice_nums is None else int(max_slice_nums)
        assert max_slice_nums > 0
        source_image, patches, sliced_grid = self.slice_image(
            image, max_slice_nums, self.scale_resolution, self.patch_size  # defaults: 9, 448, 14
        )

        slice_images.append(source_image)
        if len(patches) > 0:
            for i in range(len(patches)):
                for j in range(len(patches[0])):
                    slice_images.append(patches[i][j])
        return slice_images

    def get_sliced_grid(self, image_size, max_slice_nums, never_split=False):
        original_width, original_height = image_size
        log_ratio = math.log(original_width / original_height)
        ratio = original_width * original_height / (self.scale_resolution * self.scale_resolution)
        multiple = min(math.ceil(ratio), max_slice_nums)
        if multiple <= 1 or never_split:
            return None
        candidate_split_grids_nums = []
        for i in [multiple - 1, multiple, multiple + 1]:
            if i == 1 or i > max_slice_nums:
                continue
            candidate_split_grids_nums.append(i)

        candidate_grids = []
        for split_grids_nums in candidate_split_grids_nums:
            m = 1
            while m <= split_grids_nums:
                if split_grids_nums % m == 0:
                    candidate_grids.append([m, split_grids_nums // m])
                m += 1

        best_grid = [1, 1]
        min_error = float("inf")
        for grid in candidate_grids:
            error = abs(log_ratio - math.log(grid[0] / grid[1]))
            if error < min_error:
                best_grid = grid
                min_error = error

        return best_grid

    def get_slice_image_placeholder(self, image_size, image_idx=0, max_slice_nums=None, use_image_id=None):
        max_slice_nums = self.max_slice_nums if max_slice_nums is None else int(max_slice_nums)
        assert max_slice_nums > 0
        grid = self.get_sliced_grid(image_size=image_size, max_slice_nums=max_slice_nums)

        image_placeholder = self.im_start_token + self.unk_token * self.image_feature_size + self.im_end_token
        use_image_id = self.use_image_id if use_image_id is None else bool(use_image_id)
        if use_image_id:
            final_placeholder = self.get_image_id_placeholder(image_idx) + image_placeholder
        else:
            final_placeholder = image_placeholder

        if self.slice_mode:
            final_placeholder = final_placeholder + self.get_grid_placeholder(grid=grid)
        return final_placeholder

    def to_pil_image(self, image, rescale=None) -> PIL.Image.Image:
        """
        Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis
        if needed.

        Args:
            image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`):
                The image to convert to the PIL Image format.
            rescale (`bool`, *optional*):
                Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will
                default to `True` if the image type is a floating type, `False` otherwise.
        """
        if isinstance(image, PIL.Image.Image):
            return image
        if is_torch_tensor(image):
            image = image.numpy()

        if isinstance(image, np.ndarray):
            if rescale is None:
                # rescale defaults to True for arrays of floating type.
                rescale = isinstance(image.flat[0], np.floating)
            # If the channel has been moved to the first dim, we put it back at the end.
            if image.ndim == 3 and image.shape[0] in [1, 3]:
                image = image.transpose(1, 2, 0)
            if rescale:
                image = image * 255
            image = image.astype(np.uint8)
            return PIL.Image.fromarray(image)
        return image

    def reshape_by_patch(self, image):
        """
        :param image: shape [3, H, W]
        :return: [3, patch_size, H*W/patch_size], where patch_size is `self.patch_size`
        """
        image = torch.from_numpy(image)
        patch_size = self.patch_size
        patches = torch.nn.functional.unfold(image, (patch_size, patch_size), stride=(patch_size, patch_size))

        patches = patches.reshape(image.size(0), patch_size, patch_size, -1)
        patches = patches.permute(0, 1, 3, 2).reshape(image.size(0), patch_size, -1)
        return patches.numpy()

    def preprocess(
        self,
        images: Union[Image.Image, List[Image.Image], List[List[Image.Image]]],
        do_pad: Optional[bool] = True,
        max_slice_nums: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> MiniCPMOBatchFeature:
        if isinstance(images, Image.Image):
            images_list = [[images]]
        elif isinstance(images[0], Image.Image):
            images_list = [images]
        else:
            images_list = images

        new_images_list = []
        image_sizes_list = []
        tgt_sizes_list = []

        for _images in images_list:
            if _images is None or len(_images) == 0:
                new_images_list.append([])
                image_sizes_list.append([])
                tgt_sizes_list.append([])
                continue
            if not valid_images(_images):
                raise ValueError(
                    "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                    "torch.Tensor, tf.Tensor or jax.ndarray."
                )

            _images = [self.to_pil_image(image).convert("RGB") for image in _images]
            input_data_format = infer_channel_dimension_format(np.array(_images[0]))

            new_images = []
            image_sizes = [image.size for image in _images]
            tgt_sizes = []
            for image in _images:
                image_patches = self.get_sliced_images(image, max_slice_nums)
                image_patches = [to_numpy_array(image).astype(np.float32) / 255 for image in image_patches]
                image_patches = [
                    self.normalize(image=image, mean=self.mean, std=self.std, input_data_format=input_data_format)
                    for image in image_patches
                ]
                image_patches = [
                    to_channel_dimension_format(image, ChannelDimension.FIRST, input_channel_dim=input_data_format)
                    for image in image_patches
                ]
                for slice_image in image_patches:
                    new_images.append(self.reshape_by_patch(slice_image))
                    tgt_sizes.append(
                        np.array((slice_image.shape[1] // self.patch_size, slice_image.shape[2] // self.patch_size))
                    )

            if tgt_sizes:
                tgt_sizes = np.vstack(tgt_sizes)

            new_images_list.append(new_images)
            image_sizes_list.append(image_sizes)
            tgt_sizes_list.append(tgt_sizes)
        return MiniCPMOBatchFeature(
            data={"pixel_values": new_images_list, "image_sizes": image_sizes_list, "tgt_sizes": tgt_sizes_list},
            tensor_type=return_tensors,
        )


AutoImageProcessor.register("MiniCPMVImageProcessor", MiniCPMVImageProcessor)
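
Note (illustrative, not part of the commit): a minimal end-to-end sketch of the processor above. Each slice comes out of reshape_by_patch as a [3, patch_size, H*W/patch_size] array, and each tgt_sizes row records the slice's (H/patch_size, W/patch_size) patch grid; the input image size here is arbitrary:

from PIL import Image

processor = MiniCPMVImageProcessor(max_slice_nums=9, scale_resolution=448, patch_size=14)
img = Image.new("RGB", (1024, 768))  # placeholder input image

out = processor.preprocess(img)
source_slice = out["pixel_values"][0][0]           # resized source image, shape [3, 14, H*W/14]
print(source_slice.shape, out["tgt_sizes"][0][0])  # patch grid of the source slice
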
merges.txt
ADDED
The diff for this file is too large to render.
modeling_minicpmo.py
ADDED
The diff for this file is too large to render.
modeling_navit_siglip.py
ADDED
@@ -0,0 +1,939 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2024 Google AI and The HuggingFace Team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
""" PyTorch Siglip model. """
|
16 |
+
# Copied from HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit and add tgt_sizes
|
17 |
+
|
18 |
+
|
19 |
+
import math
|
20 |
+
import os
|
21 |
+
import warnings
|
22 |
+
from dataclasses import dataclass
|
23 |
+
from typing import Optional
|
24 |
+
from typing import Tuple
|
25 |
+
from typing import Union
|
26 |
+
|
27 |
+
import numpy as np
|
28 |
+
import torch
|
29 |
+
import torch.nn.functional as F
|
30 |
+
import torch.utils.checkpoint
|
31 |
+
from torch import nn
|
32 |
+
from torch.nn.init import _calculate_fan_in_and_fan_out
|
33 |
+
from transformers.activations import ACT2FN
|
34 |
+
from transformers.configuration_utils import PretrainedConfig
|
35 |
+
from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
|
36 |
+
from transformers.modeling_outputs import BaseModelOutput
|
37 |
+
from transformers.modeling_outputs import BaseModelOutputWithPooling
|
38 |
+
from transformers.modeling_utils import PreTrainedModel
|
39 |
+
from transformers.utils import add_start_docstrings
|
40 |
+
from transformers.utils import add_start_docstrings_to_model_forward
|
41 |
+
from transformers.utils import is_flash_attn_2_available
|
42 |
+
from transformers.utils import logging
|
43 |
+
from transformers.utils import ModelOutput
|
44 |
+
from transformers.utils import replace_return_docstrings
|
45 |
+
|
46 |
+
logger = logging.get_logger(__name__)
|
47 |
+
|
48 |
+
|
49 |
+
class SiglipVisionConfig(PretrainedConfig):
|
50 |
+
r"""
|
51 |
+
This is the configuration class to store the configuration of a [`SiglipVisionModel`]. It is used to instantiate a
|
52 |
+
Siglip vision encoder according to the specified arguments, defining the model architecture. Instantiating a
|
53 |
+
configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip
|
54 |
+
[google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.
|
55 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
56 |
+
documentation from [`PretrainedConfig`] for more information.
|
57 |
+
Args:
|
58 |
+
hidden_size (`int`, *optional*, defaults to 768):
|
59 |
+
Dimensionality of the encoder layers and the pooler layer.
|
60 |
+
intermediate_size (`int`, *optional*, defaults to 3072):
|
61 |
+
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
|
62 |
+
num_hidden_layers (`int`, *optional*, defaults to 12):
|
63 |
+
Number of hidden layers in the Transformer encoder.
|
64 |
+
num_attention_heads (`int`, *optional*, defaults to 12):
|
65 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
66 |
+
num_channels (`int`, *optional*, defaults to 3):
|
67 |
+
Number of channels in the input images.
|
68 |
+
image_size (`int`, *optional*, defaults to 224):
|
69 |
+
The size (resolution) of each image.
|
70 |
+
patch_size (`int`, *optional*, defaults to 16):
|
71 |
+
The size (resolution) of each patch.
|
72 |
+
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
|
73 |
+
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
|
74 |
+
`"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
|
75 |
+
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
|
76 |
+
The epsilon used by the layer normalization layers.
|
77 |
+
attention_dropout (`float`, *optional*, defaults to 0.0):
|
78 |
+
The dropout ratio for the attention probabilities.
|
79 |
+
Example:
|
80 |
+
```python
|
81 |
+
>>> from transformers import SiglipVisionConfig, SiglipVisionModel
|
82 |
+
>>> # Initializing a SiglipVisionConfig with google/siglip-base-patch16-224 style configuration
|
83 |
+
>>> configuration = SiglipVisionConfig()
|
84 |
+
>>> # Initializing a SiglipVisionModel (with random weights) from the google/siglip-base-patch16-224 style configuration
|
85 |
+
>>> model = SiglipVisionModel(configuration)
|
86 |
+
>>> # Accessing the model configuration
|
87 |
+
>>> configuration = model.config
|
88 |
+
```"""
|
89 |
+
|
90 |
+
model_type = "siglip_vision_model"
|
91 |
+
|
92 |
+
def __init__(
|
93 |
+
self,
|
94 |
+
hidden_size=768,
|
95 |
+
intermediate_size=3072,
|
96 |
+
num_hidden_layers=12,
|
97 |
+
num_attention_heads=12,
|
98 |
+
num_channels=3,
|
99 |
+
image_size=224,
|
100 |
+
patch_size=16,
|
101 |
+
hidden_act="gelu_pytorch_tanh",
|
102 |
+
layer_norm_eps=1e-6,
|
103 |
+
attention_dropout=0.0,
|
104 |
+
**kwargs,
|
105 |
+
):
|
106 |
+
super().__init__(**kwargs)
|
107 |
+
|
108 |
+
self.hidden_size = hidden_size
|
109 |
+
self.intermediate_size = intermediate_size
|
110 |
+
self.num_hidden_layers = num_hidden_layers
|
111 |
+
self.num_attention_heads = num_attention_heads
|
112 |
+
self.num_channels = num_channels
|
113 |
+
self.patch_size = patch_size
|
114 |
+
self.image_size = image_size
|
115 |
+
self.attention_dropout = attention_dropout
|
116 |
+
self.layer_norm_eps = layer_norm_eps
|
117 |
+
self.hidden_act = hidden_act
|
118 |
+
|
119 |
+
@classmethod
|
120 |
+
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
|
121 |
+
cls._set_token_in_kwargs(kwargs)
|
122 |
+
|
123 |
+
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
|
124 |
+
|
125 |
+
# get the vision config dict if we are loading from SiglipConfig
|
126 |
+
if config_dict.get("model_type") == "siglip":
|
127 |
+
config_dict = config_dict["vision_config"]
|
128 |
+
|
129 |
+
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
|
130 |
+
logger.warning(
|
131 |
+
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
|
132 |
+
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
|
133 |
+
)
|
134 |
+
|
135 |
+
return cls.from_dict(config_dict, **kwargs)
|
136 |
+
|
137 |
+
|
138 |
+
_CHECKPOINT_FOR_DOC = "google/siglip-base-patch16-224"
|
139 |
+
|
140 |
+
SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
|
141 |
+
"google/siglip-base-patch16-224",
|
142 |
+
# See all SigLIP models at https://huggingface.co/models?filter=siglip
|
143 |
+
]
|
144 |
+
|
145 |
+
if is_flash_attn_2_available():
|
146 |
+
from flash_attn import flash_attn_func
|
147 |
+
from flash_attn import flash_attn_varlen_func
|
148 |
+
from flash_attn.bert_padding import index_first_axis # noqa
|
149 |
+
from flash_attn.bert_padding import pad_input
|
150 |
+
from flash_attn.bert_padding import unpad_input
|
151 |
+
|
152 |
+
|
153 |
+
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
|
154 |
+
def _get_unpad_data(attention_mask):
|
155 |
+
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
156 |
+
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
|
157 |
+
max_seqlen_in_batch = seqlens_in_batch.max().item()
|
158 |
+
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
|
159 |
+
return (
|
160 |
+
indices,
|
161 |
+
cu_seqlens,
|
162 |
+
max_seqlen_in_batch,
|
163 |
+
)
|
164 |
+
|
165 |
+
|
166 |
+
def _trunc_normal_(tensor, mean, std, a, b):
|
167 |
+
# Cut & paste from PyTorch official master until it's in a few official releases - RW
|
168 |
+
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
|
169 |
+
def norm_cdf(x):
|
170 |
+
# Computes standard normal cumulative distribution function
|
171 |
+
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
|
172 |
+
|
173 |
+
if (mean < a - 2 * std) or (mean > b + 2 * std):
|
174 |
+
warnings.warn(
|
175 |
+
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
|
176 |
+
"The distribution of values may be incorrect.",
|
177 |
+
stacklevel=2,
|
178 |
+
)
|
179 |
+
|
180 |
+
# Values are generated by using a truncated uniform distribution and
|
181 |
+
# then using the inverse CDF for the normal distribution.
|
182 |
+
# Get upper and lower cdf values
|
183 |
+
l = norm_cdf((a - mean) / std)
|
184 |
+
u = norm_cdf((b - mean) / std)
|
185 |
+
|
186 |
+
# Uniformly fill tensor with values from [l, u], then translate to
|
187 |
+
# [2l-1, 2u-1].
|
188 |
+
tensor.uniform_(2 * l - 1, 2 * u - 1)
|
189 |
+
|
190 |
+
# Use inverse cdf transform for normal distribution to get truncated
|
191 |
+
# standard normal
|
192 |
+
if tensor.dtype in [torch.float16, torch.bfloat16]:
|
193 |
+
# The `erfinv_` op is not (yet?) defined in float16+cpu, bfloat16+gpu
|
194 |
+
og_dtype = tensor.dtype
|
195 |
+
tensor = tensor.to(torch.float32)
|
196 |
+
tensor.erfinv_()
|
197 |
+
tensor = tensor.to(og_dtype)
|
198 |
+
else:
|
199 |
+
tensor.erfinv_()
|
200 |
+
|
201 |
+
# Transform to proper mean, std
|
202 |
+
tensor.mul_(std * math.sqrt(2.0))
|
203 |
+
tensor.add_(mean)
|
204 |
+
|
205 |
+
# Clamp to ensure it's in the proper range
|
206 |
+
if tensor.dtype == torch.float16:
|
207 |
+
# The `clamp_` op is not (yet?) defined in float16+cpu
|
208 |
+
tensor = tensor.to(torch.float32)
|
209 |
+
tensor.clamp_(min=a, max=b)
|
210 |
+
tensor = tensor.to(torch.float16)
|
211 |
+
else:
|
212 |
+
tensor.clamp_(min=a, max=b)
|
213 |
+
|
214 |
+
|
215 |
+
def trunc_normal_tf_(
|
216 |
+
tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
|
217 |
+
) -> torch.Tensor:
|
218 |
+
"""Fills the input Tensor with values drawn from a truncated
|
219 |
+
normal distribution. The values are effectively drawn from the
|
220 |
+
normal distribution :math:`\\mathcal{N}(\text{mean}, \text{std}^2)`
|
221 |
+
with values outside :math:`[a, b]` redrawn until they are within
|
222 |
+
the bounds. The method used for generating the random values works
|
223 |
+
best when :math:`a \\leq \text{mean} \\leq b`.
|
224 |
+
NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
|
225 |
+
bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
|
226 |
+
and the result is subsquently scaled and shifted by the mean and std args.
|
227 |
+
Args:
|
228 |
+
tensor: an n-dimensional `torch.Tensor`
|
229 |
+
mean: the mean of the normal distribution
|
230 |
+
std: the standard deviation of the normal distribution
|
231 |
+
a: the minimum cutoff value
|
232 |
+
b: the maximum cutoff value
|
233 |
+
"""
|
234 |
+
with torch.no_grad():
|
235 |
+
_trunc_normal_(tensor, 0, 1.0, a, b)
|
236 |
+
tensor.mul_(std).add_(mean)
|
237 |
+
|
238 |
+
|
239 |
+
def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
|
240 |
+
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
|
241 |
+
if mode == "fan_in":
|
242 |
+
denom = fan_in
|
243 |
+
elif mode == "fan_out":
|
244 |
+
denom = fan_out
|
245 |
+
elif mode == "fan_avg":
|
246 |
+
denom = (fan_in + fan_out) / 2
|
247 |
+
|
248 |
+
variance = scale / denom
|
249 |
+
|
250 |
+
if distribution == "truncated_normal":
|
251 |
+
# constant is stddev of standard normal truncated to (-2, 2)
|
252 |
+
trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
|
253 |
+
elif distribution == "normal":
|
254 |
+
with torch.no_grad():
|
255 |
+
tensor.normal_(std=math.sqrt(variance))
|
256 |
+
elif distribution == "uniform":
|
257 |
+
bound = math.sqrt(3 * variance)
|
258 |
+
with torch.no_grad():
|
259 |
+
tensor.uniform_(-bound, bound)
|
260 |
+
else:
|
261 |
+
raise ValueError(f"invalid distribution {distribution}")
|
262 |
+
|
263 |
+
|
264 |
+
def lecun_normal_(tensor):
|
265 |
+
variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
|
266 |
+
|
267 |
+
|
268 |
+
def default_flax_embed_init(tensor):
|
269 |
+
variance_scaling_(tensor, mode="fan_in", distribution="normal")
|
270 |
+
|
271 |
+
|
272 |
+
@dataclass
|
273 |
+
# Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Siglip
|
274 |
+
class SiglipVisionModelOutput(ModelOutput):
|
275 |
+
"""
|
276 |
+
Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
|
277 |
+
Args:
|
278 |
+
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
|
279 |
+
The image embeddings obtained by applying the projection layer to the pooler_output.
|
280 |
+
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
281 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
282 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
283 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
284 |
+
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
285 |
+
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
286 |
+
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
287 |
+
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
288 |
+
sequence_length)`.
|
289 |
+
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
290 |
+
heads.
|
291 |
+
"""
|
292 |
+
|
293 |
+
image_embeds: Optional[torch.FloatTensor] = None
|
294 |
+
last_hidden_state: torch.FloatTensor = None
|
295 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
296 |
+
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
297 |
+
|
298 |
+
|
299 |
+
class SiglipVisionEmbeddings(nn.Module):
    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            padding="valid",
        )

        self.num_patches_per_side = self.image_size // self.patch_size
        self.num_patches = self.num_patches_per_side**2
        self.num_positions = self.num_patches
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        patch_attention_mask: torch.BoolTensor,
        tgt_sizes: Optional[torch.IntTensor] = None,
    ) -> torch.Tensor:
        batch_size = pixel_values.size(0)

        patch_embeds = self.patch_embedding(pixel_values)
        embeddings = patch_embeds.flatten(2).transpose(1, 2)

        max_im_h, max_im_w = pixel_values.size(2), pixel_values.size(3)
        max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size
        boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side)
        position_ids = torch.full(
            size=(
                batch_size,
                max_nb_patches_h * max_nb_patches_w,
            ),
            fill_value=0,
        )

        for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
            if tgt_sizes is not None:
                nb_patches_h = tgt_sizes[batch_idx][0]
                nb_patches_w = tgt_sizes[batch_idx][1]
            else:
                nb_patches_h = p_attn_mask[:, 0].sum()
                nb_patches_w = p_attn_mask[0].sum()

            fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
            fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)

            bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
            bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)

            pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
            position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids

        position_ids = position_ids.to(self.position_embedding.weight.device)

        embeddings = embeddings + self.position_embedding(position_ids)
        return embeddings


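# Editor's illustration (not part of the original file): the bucketing in
# SiglipVisionEmbeddings.forward maps a variable-sized patch grid onto the fixed
# num_patches_per_side x num_patches_per_side table of learned position embeddings.
# A worked example, assuming num_patches_per_side = 4 and an image that is
# 2 patches high and 4 patches wide:
#
#   boundaries          = [0.25, 0.50, 0.75]
#   fractional_coords_h = [0.00, 0.50]             -> bucket_coords_h = [0, 2]
#   fractional_coords_w = [0.00, 0.25, 0.50, 0.75] -> bucket_coords_w = [0, 1, 2, 3]
#   pos_ids = (h * 4 + w).flatten() = [0, 1, 2, 3, 8, 9, 10, 11]
#
# i.e. the 2x4 image reuses rows 0 and 2 of the 4x4 embedding grid, so arbitrary
# aspect ratios share one embedding table without interpolation.

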
class SiglipAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    # Copied from transformers.models.clip.modeling_clip.CLIPAttention.__init__
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        batch_size, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)

        k_v_seq_len = key_states.shape[-2]
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale

        if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights


class SiglipFlashAttention2(SiglipAttention):
    """
    Siglip flash attention module. This module inherits from `SiglipAttention` as the weights of the module stay
    untouched. The only required change would be on the forward pass, where it needs to correctly call the public API
    of flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_causal = False  # Hack to make sure we don't use a causal mask

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        # cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        # query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        # if past_key_value is not None:
        #     cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
        #     key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
        # to be able to avoid many of these transpose/reshape/view.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        dropout_rate = self.dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)

        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                "The input hidden states seems to be silently casted in float32, this might be related to the fact"
                " you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = self._flash_attention_forward(
            query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
        )

        attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights

    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.
        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """

        # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
        causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            attn_output = flash_attn_func(
                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
            )

        return attn_output

    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )


# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Siglip
class SiglipMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->Siglip
class SiglipEncoderLayer(nn.Module):
    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
        self.self_attn = SiglipAttention(config) if not self._use_flash_attention_2 else SiglipFlashAttention2(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = SiglipMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                Input to the layer of shape `(batch, seq_len, embed_dim)`.
            attention_mask (`torch.FloatTensor`):
                Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class SiglipPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SiglipVisionConfig
    base_model_prefix = "siglip"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""

        if isinstance(module, SiglipVisionEmbeddings):
            width = self.config.hidden_size
            nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
        elif isinstance(module, nn.Embedding):
            default_flax_embed_init(module.weight)
        elif isinstance(module, SiglipAttention):
            nn.init.normal_(module.q_proj.weight)
            nn.init.normal_(module.k_proj.weight)
            nn.init.normal_(module.v_proj.weight)
            nn.init.normal_(module.out_proj.weight)
            nn.init.zeros_(module.q_proj.bias)
            nn.init.zeros_(module.k_proj.bias)
            nn.init.zeros_(module.v_proj.bias)
            nn.init.zeros_(module.out_proj.bias)
        elif isinstance(module, SiglipMLP):
            nn.init.normal_(module.fc1.weight)
            nn.init.normal_(module.fc2.weight)
            nn.init.normal_(module.fc1.bias, std=1e-6)
            nn.init.normal_(module.fc2.bias, std=1e-6)
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            lecun_normal_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


SIGLIP_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)
    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.
    Parameters:
        config ([`SiglipVisionConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


SIGLIP_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->Siglip
class SiglipEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`SiglipEncoderLayer`].
    Args:
        config: SiglipConfig
    """

    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    # Ignore copy
    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)


@add_start_docstrings("""The vision model from SigLIP without any head or projection on top.""", SIGLIP_START_DOCSTRING)
class SiglipVisionTransformer(SiglipPreTrainedModel):
    config_class = SiglipVisionConfig
    main_input_name = "pixel_values"
    _supports_flash_attn_2 = True

    def __init__(self, config: SiglipVisionConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = SiglipVisionEmbeddings(config)
        self.encoder = SiglipEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.embeddings.patch_embedding

    @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipVisionConfig)
    def forward(
        self,
        pixel_values,
        patch_attention_mask: Optional[torch.BoolTensor] = None,
        tgt_sizes: Optional[torch.IntTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        batch_size = pixel_values.size(0)
        if patch_attention_mask is None:
            patch_attention_mask = torch.ones(
                size=(
                    batch_size,
                    pixel_values.size(2) // self.config.patch_size,
                    pixel_values.size(3) // self.config.patch_size,
                ),
                dtype=torch.bool,
                device=pixel_values.device,
            )

        hidden_states = self.embeddings(
            pixel_values=pixel_values, patch_attention_mask=patch_attention_mask, tgt_sizes=tgt_sizes
        )

        patch_attention_mask = patch_attention_mask.view(batch_size, -1)
        # The call to `_upad_input` in `_flash_attention_forward` is expensive
        # So when the `patch_attention_mask` is full of 1s (i.e. attending to the whole sequence),
        # avoiding passing the attention_mask, which is equivalent to attending to the full sequence
        if not torch.any(~patch_attention_mask):
            attention_mask = None
        else:
            attention_mask = (
                _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)
                if not self._use_flash_attention_2
                else patch_attention_mask
            )

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.post_layernorm(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, None) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=None,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
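Before the next file, a minimal sketch of how the `SiglipVisionTransformer` defined above can be exercised on its own. The config values and the import path are illustrative assumptions (the checkpoint's real settings come from this repo's config.json); only the forward signature (`pixel_values`, `patch_attention_mask`, `tgt_sizes`) is taken from the code above.

    import torch
    from modeling_navit_siglip import SiglipVisionConfig, SiglipVisionTransformer

    # Assumed, deliberately small config for illustration; not the checkpoint's values.
    config = SiglipVisionConfig(hidden_size=64, num_attention_heads=4, num_hidden_layers=2,
                                patch_size=14, image_size=448)
    model = SiglipVisionTransformer(config).eval()

    pixel_values = torch.randn(1, 3, 448, 448)  # (batch, channels, height, width)
    # All patches are real here, so the mask is all True; a padded batch would mark
    # padding patches False and pass per-image tgt_sizes (patches high, patches wide).
    patch_attention_mask = torch.ones(1, 448 // 14, 448 // 14, dtype=torch.bool)

    with torch.no_grad():
        out = model(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
    print(out.last_hidden_state.shape)  # torch.Size([1, 1024, 64]) -- 32 x 32 patches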
preprocessor_config.json
ADDED
@@ -0,0 +1,24 @@
{
    "image_processor_type": "MiniCPMVImageProcessor",
    "auto_map": {
        "AutoProcessor": "processing_minicpmo.MiniCPMOProcessor",
        "AutoImageProcessor": "image_processing_minicpmv.MiniCPMVImageProcessor"
    },
    "processor_class": "MiniCPMOProcessor",
    "max_slice_nums": 9,
    "scale_resolution": 448,
    "patch_size": 14,
    "use_image_id": true,
    "image_feature_size": 64,
    "im_start": "<image>",
    "im_end": "</image>",
    "slice_start": "<slice>",
    "slice_end": "</slice>",
    "unk": "<unk>",
    "im_id_start": "<image_id>",
    "im_id_end": "</image_id>",
    "slice_mode": true,
    "norm_mean": [0.5, 0.5, 0.5],
    "norm_std": [0.5, 0.5, 0.5],
    "version": 2.6
}
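The `auto_map` block above is what wires this repository's custom Python files into the stock `transformers` Auto classes. A minimal loading sketch (the repo id shown is inferred from the MiniCPM-o naming in these files and may need to be swapped for the actual repository; `trust_remote_code=True` is required because the processor class lives in the repo, not in `transformers`):

    from transformers import AutoProcessor

    # Resolves to processing_minicpmo.MiniCPMOProcessor via the auto_map above.
    processor = AutoProcessor.from_pretrained("openbmb/MiniCPM-o-2_6", trust_remote_code=True)
    print(type(processor).__name__)  # MiniCPMOProcessor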
processing_minicpmo.py
ADDED
@@ -0,0 +1,506 @@
# coding=utf-8
# Copyright 2025 The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for MiniCPMO.
"""

import math
import re
from typing import List
from typing import Literal
from typing import Optional
from typing import Union

import numpy as np
import torch
import torchaudio
from transformers.image_utils import ImageInput
from transformers.processing_utils import ProcessorMixin
from transformers.tokenization_utils_base import PreTokenizedInput
from transformers.tokenization_utils_base import TextInput
from transformers.utils import TensorType

from .image_processing_minicpmv import MiniCPMOBatchFeature


class MiniCPMOProcessor(ProcessorMixin):
    r"""
    Constructs a MiniCPMO processor which wraps a MiniCPMV image processor, a Whisper feature extractor, and a
    tokenizer into a single processor.

    [`MiniCPMOProcessor`] offers all the functionalities of [`MiniCPMVImageProcessor`] and [`LlamaTokenizerWrapper`]. See the
    [`~MiniCPMOProcessor.__call__`] and [`~MiniCPMOProcessor.decode`] for more information.

    Args:
        image_processor ([`MiniCPMVImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`LlamaTokenizerWrapper`], *optional*):
            The tokenizer is a required input.
    """

    attributes = ["image_processor", "feature_extractor", "tokenizer"]
    feature_extractor_class = "WhisperFeatureExtractor"
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, feature_extractor=None, tokenizer=None):
        super().__init__(image_processor, feature_extractor, tokenizer)
        self.version = image_processor.version

    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
        images: ImageInput = None,
        audios: Union[np.ndarray, List[np.ndarray], List[List[np.ndarray]]] = None,
        audio_parts: Optional[list] = None,
        max_length: Optional[int] = None,
        do_pad: Optional[bool] = True,
        max_slice_nums: int = None,
        use_image_id: bool = True,
        chunk_input: bool = False,
        return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
        sampling_rate: Optional[int] = 16000,
        **kwargs,
    ) -> MiniCPMOBatchFeature:
        if images is not None:
            image_inputs = self.image_processor(
                images, do_pad=do_pad, max_slice_nums=max_slice_nums, return_tensors=return_tensors
            )
        else:
            image_inputs = None

        if audios is not None:
            audio_features, audio_feature_lens, audio_phs = self.audio_feature_extract(
                audios, audio_parts, chunk_input, sampling_rate
            )
        else:
            audio_features, audio_feature_lens, audio_phs = [], [], []

        model_inputs = self._convert_omni_to_inputs(
            image_inputs,
            audio_phs,
            text,
            max_slice_nums=max_slice_nums,
            use_image_id=use_image_id,
            max_length=max_length,
            **kwargs,
        )

        model_inputs["audio_features"] = audio_features
        model_inputs["audio_feature_lens"] = audio_feature_lens

        return MiniCPMOBatchFeature(data={**model_inputs})

    def audio_feature_extract(
        self,
        audios: Union[np.ndarray, List[np.ndarray], List[List[np.ndarray]]],
        audio_parts: Optional[list] = None,
        chunk_input: Optional[bool] = False,
        sampling_rate: Optional[int] = None,
        chunk_length: Optional[int] = 1,
        **kwargs,
    ):
        def get_audio_placeholder(audio_lens, chunk_input):
            pool_step = 2
            feature_lens = math.ceil(audio_lens / self.feature_extractor.hop_length)

            feature_lens = (feature_lens - 1) // 2 + 1
            output_lens = (feature_lens - pool_step) // pool_step + 1

            if chunk_input:
                fbank_feat_in_chunk = int(chunk_length * 100)
                cnn_feat_in_chunk = (fbank_feat_in_chunk - 1) // 2 + 1
                audio_embeds_in_chunk = (cnn_feat_in_chunk - pool_step) // pool_step + 1
                num_audio_chunks = (output_lens + audio_embeds_in_chunk - 1) // audio_embeds_in_chunk

                place_holders = ""
                total_unk_len = 0
                for _ in range(num_audio_chunks):
                    unk_len = min(audio_embeds_in_chunk, output_lens - total_unk_len)
                    place_holders += self.tokenizer.audio_start + "<unk>" * unk_len + self.tokenizer.audio_end
                    total_unk_len += unk_len
                audio_placeholder = place_holders
            else:
                audio_placeholder = self.tokenizer.audio_start + "<unk>" * output_lens + self.tokenizer.audio_end

            return audio_placeholder

        if isinstance(audios, np.ndarray):
            audios_list = [[audios]]
        elif isinstance(audios[0], np.ndarray):
            audios_list = [audios]
        else:
            audios_list = audios

        if audio_parts is not None:
            assert len(audio_parts) == len(audios_list)
            for parts, audios in zip(audio_parts, audios_list):
                assert len(parts) == len(audios)

        audio_feature_lens_list = []
        audio_ph_list = []

        audio_features_all = []

        # audio placeholder not dependent on audio_parts
        for audios in audios_list:
            if audios:
                audio_ph_list.append([get_audio_placeholder(len(a), chunk_input) for a in audios])
            else:
                audio_ph_list.append([])

        for idx, audios in enumerate(audios_list):
            if audio_parts is not None:
                # same audio part merge
                audio_part = audio_parts[idx]
                merge_audio = []
                cur_audio = []
                for aid, (part, audio) in enumerate(zip(audio_part, audios)):
                    if aid == 0 or audio_part[aid] == audio_part[aid - 1]:
                        cur_audio.append(audio)
                    else:
                        merge_audio.append(np.hstack(cur_audio))
                        cur_audio = [audio]
                if cur_audio:
                    merge_audio.append(np.hstack(cur_audio))

            else:
                merge_audio = audios

            audio_feature_lens = []

            # If the audio exceeds 30 seconds, split it into chunks every 30 seconds.
            final_merge_audio = []
            max_audio_inp_len = 30 * sampling_rate
            for audio in merge_audio:
                if len(audio) <= max_audio_inp_len:
                    final_merge_audio.append(audio)
                else:
                    for i in range(math.ceil(len(audio) / max_audio_inp_len)):
                        final_merge_audio.append(audio[i * max_audio_inp_len : (i + 1) * max_audio_inp_len])

            if audios:
                audio_inputs = self.feature_extractor(
                    final_merge_audio,
                    sampling_rate=sampling_rate,
                    return_attention_mask=True,
                    padding="max_length",
                    return_tensors="pt",
                    **kwargs,
                )
                audio_feature = audio_inputs["input_features"]
                actual_lens = audio_inputs["attention_mask"].sum(dim=1)

                for feat, lens in zip(audio_feature, actual_lens):
                    audio_features_all.append(feat[:, :lens])
                    audio_feature_lens.append(lens)

                audio_feature_lens = torch.hstack(audio_feature_lens)
                audio_feature_lens_list.append(audio_feature_lens)
            else:
                audio_feature_lens_list.append([])

        if audio_features_all:
            audio_features = [i.permute(1, 0) for i in audio_features_all]
            audio_features = torch.nn.utils.rnn.pad_sequence(
                audio_features, batch_first=True, padding_value=0.0
            ).permute(0, 2, 1)
        else:
            audio_features = []

        return audio_features, audio_feature_lens_list, audio_ph_list

    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        output_ids = args[0]
        result_text = []
        for result in output_ids:
            result = result[result != 0]
            if result[0] == self.tokenizer.bos_id:
                result = result[1:]
            if result[-1] == self.tokenizer.eos_id:
                result = result[:-1]
            result_text.append(self.tokenizer.decode(result, *args[1:], **kwargs).strip())
        return result_text
        # return self.tokenizer.batch_decode(*args, **kwargs)

    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        result = args[0]
        result = result[result != 0]
        if result[0] == self.tokenizer.bos_id:
            result = result[1:]
        if result[-1] == self.tokenizer.eos_id or (
            hasattr(self.tokenizer, "eot_id") and result[-1] == self.tokenizer.eot_id
        ):
            result = result[:-1]
        return self.tokenizer.decode(result, *args[1:], **kwargs).strip()

    def _convert(self, input_str, max_inp_length: Optional[int] = None, **kwargs):
        input_ids = self.tokenizer.encode(input_str, **kwargs)
        if max_inp_length is not None:
            input_ids = input_ids[:max_inp_length]
        input_ids = torch.tensor(input_ids, dtype=torch.int32)

        ## image bound
        start_cond = (input_ids == self.tokenizer.im_start_id) | (input_ids == self.tokenizer.slice_start_id)
        end_cond = (input_ids == self.tokenizer.im_end_id) | (input_ids == self.tokenizer.slice_end_id)

        image_start_idx = torch.where(start_cond)[0]
        image_start_idx += 1
        image_end_idx = torch.where(end_cond)[0]

        valid_image_nums = max(len(image_start_idx), len(image_end_idx))

        image_bounds = torch.hstack(
            [
                image_start_idx[:valid_image_nums].unsqueeze(-1),
                image_end_idx[:valid_image_nums].unsqueeze(-1),
            ]
        )

        ## audio bound
        audio_start_idx = torch.where(input_ids == self.tokenizer.audio_start_id)[0]
        audio_end_idx = torch.where(input_ids == self.tokenizer.audio_end_id)[0]
        assert len(audio_start_idx) == len(audio_end_idx)
        audio_bounds = torch.hstack([(audio_start_idx + 1).unsqueeze(-1), audio_end_idx.unsqueeze(-1)])

        spk_start_idx = torch.where(input_ids == self.tokenizer.spk_start_id)[0]
        spk_end_idx = torch.where(input_ids == self.tokenizer.spk_end_id)[0]
        assert len(spk_start_idx) == len(spk_end_idx)
        spk_bounds = torch.hstack([(spk_start_idx + 1).unsqueeze(-1), spk_end_idx.unsqueeze(-1)])

        return input_ids, image_bounds, audio_bounds, spk_bounds

    def _convert_omni_to_inputs(
        self,
        images,
        audio_phs,
        texts: Union[str, List[str]],
        truncation=None,
        max_length=None,
        max_slice_nums=None,
        use_image_id=None,
        return_tensors=None,
        **kwargs,
    ):
        if images is None and audio_phs is None:
            model_inputs = self.tokenizer(
                texts, return_tensors=return_tensors, truncation=truncation, max_length=max_length, **kwargs
            )
            return MiniCPMOBatchFeature(data={**model_inputs})

        image_pattern = "<image>./</image>"
        audio_pattern = "<audio>./</audio>"
        split_pattern = f"({image_pattern}|{audio_pattern})"

        if isinstance(texts, str):
            texts = [texts]

        bs = len(texts)
        if images is not None:
            images, image_sizes, tgt_sizes = images["pixel_values"], images["image_sizes"], images["tgt_sizes"]
        else:
            images, image_sizes, tgt_sizes = [[]] * bs, [[]] * bs, [[]] * bs

        input_ids_list = []
        image_bounds_list = []
        audio_bounds_list = []
        spk_bounds_list = []

        for index, text in enumerate(texts):
            text_chunks = re.split(split_pattern, text)

            image_tags = re.findall(image_pattern, text)
            audio_tags = re.findall(audio_pattern, text)

            if image_tags:
                assert images is not None
                assert len(image_tags) == len(image_sizes[index])
            if audio_tags:
                assert audio_phs is not None
                assert len(audio_tags) == len(audio_phs[index])

            image_id = 0
            audio_id = 0
            for i, chunk in enumerate(text_chunks):
                if chunk == image_pattern:
                    image_placeholder = self.image_processor.get_slice_image_placeholder(
                        image_sizes[index][image_id], image_id, max_slice_nums, use_image_id
                    )
                    image_id += 1
                    text_chunks[i] = image_placeholder
                elif chunk == audio_pattern:
                    audio_placeholder = audio_phs[index][audio_id]
                    audio_id += 1
                    text_chunks[i] = audio_placeholder

            final_text = "".join(text_chunks)
            input_ids, image_bounds, audio_bounds, spk_bounds = self._convert(final_text, max_length, **kwargs)

            input_ids_list.append(input_ids)
            image_bounds_list.append(image_bounds)
            audio_bounds_list.append(audio_bounds)
            spk_bounds_list.append(spk_bounds)

        padded_input_ids, padding_lengths = self.pad(input_ids_list, padding_side="left")
        attention_mask = torch.ones_like(padded_input_ids, dtype=torch.bool)
        for i, length in enumerate(padding_lengths):
            image_bounds_list[i] = image_bounds_list[i] + length
            audio_bounds_list[i] = audio_bounds_list[i] + length
            spk_bounds_list[i] = spk_bounds_list[i] + length
            attention_mask[i, :length] = False

        data = {
            "input_ids": padded_input_ids,
            "attention_mask": attention_mask,
            "pixel_values": images,
            "image_sizes": image_sizes,
            "image_bound": image_bounds_list,
            "tgt_sizes": tgt_sizes,
            "audio_bounds": audio_bounds_list,
            "spk_bounds": spk_bounds_list,
        }

        return data

    @property
    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names + feature_extractor_input_names))

    def pad(self, inputs, max_length=None, padding_value=0, padding_side="left"):
        items = []
        if isinstance(inputs[0], list):
            assert isinstance(inputs[0][0], torch.Tensor)
            for it in inputs:
                for tr in it:
                    items.append(tr)
        else:
            assert isinstance(inputs[0], torch.Tensor)
            items = inputs

        batch_size = len(items)
        shape = items[0].shape
        dim = len(shape)
        assert dim <= 2
        if max_length is None:
            max_length = 0
        max_length = max(max_length, max(item.shape[-1] for item in items))
        min_length = min(item.shape[-1] for item in items)
        dtype = items[0].dtype

        if dim == 0:
            return torch.stack([item for item in items], dim=0), [0]
        elif dim == 1:
            if max_length == min_length:
                return torch.stack([item for item in items], dim=0), [0] * batch_size
            tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value
        else:
            tensor = torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype) + padding_value

        padding_length = []
        for i, item in enumerate(items):
            if dim == 1:
                if padding_side == "left":
                    tensor[i, -len(item) :] = item.clone()
                else:
                    tensor[i, : len(item)] = item.clone()
            elif dim == 2:
                if padding_side == "left":
                    tensor[i, -len(item) :, :] = item.clone()
                else:
                    tensor[i, : len(item), :] = item.clone()
            padding_length.append(tensor.shape[-1] - len(item))

        return tensor, padding_length


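# Editor's illustration (not part of the original file): the placeholder length computed in
# get_audio_placeholder mirrors the audio tower's downsampling. Assuming Whisper's usual
# 16 kHz input with hop_length = 160 (10 ms frames), one second of audio yields:
#
#   feature_lens = ceil(16000 / 160)   = 100   # log-mel frames
#   feature_lens = (100 - 1) // 2 + 1  = 50    # a stride-2 layer in the encoder
#   output_lens  = (50 - 2) // 2 + 1   = 25    # pooling with pool_step = 2
#
# so one second of speech is budgeted 25 "<unk>" placeholder tokens between the
# tokenizer's audio_start and audio_end markers.

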
class MelSpectrogramFeatures(torch.nn.Module):
    def __init__(
        self,
        sample_rate=24000,
        n_fft=1024,
        hop_length=256,
        n_mels=100,
        padding: Literal["center", "same"] = "center",
    ):
        super().__init__()
        if padding not in ["center", "same"]:
            raise ValueError("Padding must be 'center' or 'same'.")
        self.padding = padding
        self.mel_spec = torchaudio.transforms.MelSpectrogram(
            sample_rate=sample_rate,
            n_fft=n_fft,
            hop_length=hop_length,
            n_mels=n_mels,
            center=padding == "center",
            power=1,
        )

    def __call__(self, audio: torch.Tensor) -> torch.Tensor:
        """
        audio: Tensor([num_channels, num_samples])
        """
        return super().__call__(audio)

    def forward(self, audio: torch.Tensor) -> torch.Tensor:
        """
        audio: Tensor([num_channels, num_samples])
        """
        mel: torch.Tensor = self.mel_spec(audio)
        features = torch.log(torch.clip(mel, min=1e-5))
        return features


class ChatTTSProcessor:
    def __init__(self, text_tokenizer):
        self.audio_processor = MelSpectrogramFeatures()
        self.text_tokenizer = text_tokenizer

    def __call__(self, text_list, audio_list):
        assert len(text_list) == len(audio_list)
        input_ids_varlen = []
        for text in text_list:
            input_ids_ = self.text_tokenizer.encode(text, return_tensors="pt", add_special_tokens=False)  # [1, seq_len]
            input_ids_ = input_ids_.squeeze(0)  # [seq_len]
            input_ids_varlen.append(input_ids_)

        audio_features_varlen = []
        for audio in audio_list:
            assert audio.shape.__len__() == 1  # [seq_len]
            try:
                mel = self.audio_processor(audio)  # [100(num_mel_bins), seq_len_mel]
            except Exception as e:
                print(
                    "Error computing the mel spectrogram for an audio waveform. When used inside a dataset"
                    " __getitem__, the caller should skip this sample and use the next one instead;"
                    " training will not halt."
                )
                raise e
            audio_features_varlen.append(mel)

        return {
            "tts_input_ids_varlen": input_ids_varlen,  # return List[Tensor]
            "tts_input_features_varlen": audio_features_varlen,  # return List[Tensor]
        }
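Putting `processing_minicpmo.py` together: prompts mark media positions with the literal split patterns from `_convert_omni_to_inputs` (`<image>./</image>` for images, `<audio>./</audio>` for audio), and the processor replaces each tag with expanded placeholder tokens. A hedged, audio-only usage sketch; only the placeholder pattern, the argument names, and the 16 kHz sampling rate come from the code above, the rest is assumption:

    import numpy as np
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("openbmb/MiniCPM-o-2_6", trust_remote_code=True)

    audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    text = "Transcribe the clip: <audio>./</audio>"

    inputs = processor(text=text, audios=[audio], sampling_rate=16000)
    # input_ids now carries the audio start/end markers around a run of <unk> placeholders
    # in place of the tag; audio_features holds the padded Whisper log-mel features.
    print(inputs["input_ids"].shape, inputs["audio_features"].shape)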
resampler.py
ADDED
@@ -0,0 +1,864 @@
# coding=utf-8
# Copyright 2025 The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
from functools import partial
from typing import Optional
from typing import Tuple

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch import Tensor
from torch.nn.functional import *
from torch.nn.init import trunc_normal_
from torch.nn.modules.activation import *
from transformers.integrations import is_deepspeed_zero3_enabled


def get_2d_sincos_pos_embed(embed_dim, image_size):
    """
    image_size: image_size or (image_height, image_width)
    return:
    pos_embed: [image_height, image_width, embed_dim]
    """
    if isinstance(image_size, int):
        grid_h_size, grid_w_size = image_size, image_size
    else:
        grid_h_size, grid_w_size = image_size[0], image_size[1]

    grid_h = np.arange(grid_h_size, dtype=np.float32)
    grid_w = np.arange(grid_w_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    return pos_embed


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of dimensions to encode grid_h
    emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[0])  # (H, W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[1])  # (H, W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=-1)  # (H, W, D)
    return emb


def get_1d_sincos_pos_embed_from_grid_new(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (H, W)
    out: (H, W, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float32)
    omega /= embed_dim / 2.0
    omega = 1.0 / 10000**omega  # (D/2,)

    out = np.einsum("hw,d->hwd", pos, omega)  # (H, W, D/2), outer product

    emb_sin = np.sin(out)  # (H, W, D/2)
    emb_cos = np.cos(out)  # (H, W, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=-1)  # (H, W, D)
    return emb


class Resampler(nn.Module):
|
84 |
+
"""
|
85 |
+
A 2D perceiver-resampler network with one cross attention layers by
|
86 |
+
given learnable queries and 2d sincos pos_emb
|
87 |
+
Outputs:
|
88 |
+
A tensor with the shape of (batch_size, num_queries, embed_dim)
|
89 |
+
"""
|
90 |
+
|
91 |
+
def __init__(
|
92 |
+
self,
|
93 |
+
num_queries,
|
94 |
+
embed_dim,
|
95 |
+
num_heads,
|
96 |
+
kv_dim=None,
|
97 |
+
norm_layer=partial(nn.LayerNorm, eps=1e-6),
|
98 |
+
adaptive=False,
|
99 |
+
max_size=(70, 70),
|
100 |
+
):
|
101 |
+
super().__init__()
|
102 |
+
self.num_queries = num_queries
|
103 |
+
self.embed_dim = embed_dim
|
104 |
+
self.num_heads = num_heads
|
105 |
+
self.adaptive = adaptive
|
106 |
+
self.max_size = max_size
|
107 |
+
|
108 |
+
self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
|
109 |
+
|
110 |
+
if kv_dim is not None and kv_dim != embed_dim:
|
111 |
+
self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
|
112 |
+
else:
|
113 |
+
self.kv_proj = nn.Identity()
|
114 |
+
|
115 |
+
self.attn = MultiheadAttention(embed_dim, num_heads)
|
116 |
+
self.ln_q = norm_layer(embed_dim)
|
117 |
+
self.ln_kv = norm_layer(embed_dim)
|
118 |
+
|
119 |
+
self.ln_post = norm_layer(embed_dim)
|
120 |
+
self.proj = nn.Parameter((embed_dim**-0.5) * torch.randn(embed_dim, embed_dim))
|
121 |
+
|
122 |
+
self._set_2d_pos_cache(self.max_size)
|
123 |
+
|
124 |
+
def _set_2d_pos_cache(self, max_size, device="cpu"):
|
125 |
+
if is_deepspeed_zero3_enabled():
|
126 |
+
device = "cuda"
|
127 |
+
pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.embed_dim, max_size)).float().to(device)
|
128 |
+
self.register_buffer("pos_embed", pos_embed, persistent=False)
|
129 |
+
|
130 |
+
def _adjust_pos_cache(self, tgt_sizes, device):
|
131 |
+
max_h = torch.max(tgt_sizes[:, 0])
|
132 |
+
max_w = torch.max(tgt_sizes[:, 1])
|
133 |
+
if max_h > self.max_size[0] or max_w > self.max_size[1]:
|
134 |
+
self.max_size = [max(max_h, self.max_size[0]), max(max_w, self.max_size[1])]
|
135 |
+
self._set_2d_pos_cache(self.max_size, device)
|
136 |
+
|
137 |
+
def _init_weights(self, m):
|
138 |
+
if isinstance(m, nn.Linear):
|
139 |
+
trunc_normal_(m.weight, std=0.02)
|
140 |
+
if isinstance(m, nn.Linear) and m.bias is not None:
|
141 |
+
nn.init.constant_(m.bias, 0)
|
142 |
+
elif isinstance(m, nn.LayerNorm):
|
143 |
+
nn.init.constant_(m.bias, 0)
|
144 |
+
nn.init.constant_(m.weight, 1.0)
|
145 |
+
|
146 |
+
def forward(self, x, tgt_sizes=None):
|
147 |
+
assert x.shape[0] == tgt_sizes.shape[0]
|
148 |
+
bs = x.shape[0]
|
149 |
+
|
150 |
+
device = x.device
|
151 |
+
dtype = x.dtype
|
152 |
+
|
153 |
+
patch_len = tgt_sizes[:, 0] * tgt_sizes[:, 1]
|
154 |
+
|
155 |
+
self._adjust_pos_cache(tgt_sizes, device=device)
|
156 |
+
|
157 |
+
max_patch_len = torch.max(patch_len)
|
158 |
+
key_padding_mask = torch.zeros((bs, max_patch_len), dtype=torch.bool, device=device)
|
159 |
+
|
160 |
+
pos_embed = []
|
161 |
+
for i in range(bs):
|
162 |
+
tgt_h, tgt_w = tgt_sizes[i]
|
163 |
+
pos_embed.append(self.pos_embed[:tgt_h, :tgt_w, :].reshape((tgt_h * tgt_w, -1)).to(dtype)) # patches * D
|
164 |
+
key_padding_mask[i, patch_len[i] :] = True
|
165 |
+
|
166 |
+
pos_embed = torch.nn.utils.rnn.pad_sequence(pos_embed, batch_first=True, padding_value=0.0).permute(
|
167 |
+
1, 0, 2
|
168 |
+
) # BLD => L * B * D
|
169 |
+
|
170 |
+
x = self.kv_proj(x) # B * L * D
|
171 |
+
x = self.ln_kv(x).permute(1, 0, 2) # L * B * D
|
172 |
+
|
173 |
+
q = self.ln_q(self.query) # Q * D
|
174 |
+
|
175 |
+
out = self.attn(
|
176 |
+
self._repeat(q, bs), # Q * B * D
|
177 |
+
x + pos_embed, # L * B * D + L * B * D
|
178 |
+
x,
|
179 |
+
key_padding_mask=key_padding_mask,
|
180 |
+
)[0]
|
181 |
+
# out: Q * B * D
|
182 |
+
x = out.permute(1, 0, 2) # B * Q * D
|
183 |
+
|
184 |
+
x = self.ln_post(x)
|
185 |
+
x = x @ self.proj
|
186 |
+
return x
|
187 |
+
|
188 |
+
def _repeat(self, query, N: int):
|
189 |
+
return query.unsqueeze(1).repeat(1, N, 1)
|
190 |
+
|
191 |
+
|
192 |
+
class MultiheadAttention(nn.MultiheadAttention):
|
193 |
+
def __init__(
|
194 |
+
self,
|
195 |
+
embed_dim,
|
196 |
+
num_heads,
|
197 |
+
dropout=0.0,
|
198 |
+
bias=True,
|
199 |
+
add_bias_kv=False,
|
200 |
+
add_zero_attn=False,
|
201 |
+
kdim=None,
|
202 |
+
vdim=None,
|
203 |
+
batch_first=False,
|
204 |
+
device=None,
|
205 |
+
dtype=None,
|
206 |
+
):
|
207 |
+
super().__init__(
|
208 |
+
embed_dim, num_heads, dropout, bias, add_bias_kv, add_zero_attn, kdim, vdim, batch_first, device, dtype
|
209 |
+
)
|
210 |
+
|
211 |
+
# rewrite out_proj layer,with nn.Linear
|
212 |
+
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, device=device, dtype=dtype)
|
213 |
+
|
214 |
+
def forward(
|
215 |
+
self,
|
216 |
+
query: Tensor,
|
217 |
+
key: Tensor,
|
218 |
+
value: Tensor,
|
219 |
+
key_padding_mask: Optional[Tensor] = None,
|
220 |
+
need_weights: bool = True,
|
221 |
+
attn_mask: Optional[Tensor] = None,
|
222 |
+
average_attn_weights: bool = True,
|
223 |
+
is_causal: bool = False,
|
224 |
+
) -> Tuple[Tensor, Optional[Tensor]]:
|
225 |
+
why_not_fast_path = ""
|
226 |
+
if (
|
227 |
+
(attn_mask is not None and torch.is_floating_point(attn_mask))
|
228 |
+
or (key_padding_mask is not None)
|
229 |
+
and torch.is_floating_point(key_padding_mask)
|
230 |
+
):
|
231 |
+
why_not_fast_path = "floating-point masks are not supported for fast path."
|
232 |
+
|
233 |
+
is_batched = query.dim() == 3
|
234 |
+
|
235 |
+
key_padding_mask = _canonical_mask(
|
236 |
+
mask=key_padding_mask,
|
237 |
+
mask_name="key_padding_mask",
|
238 |
+
other_type=F._none_or_dtype(attn_mask),
|
239 |
+
other_name="attn_mask",
|
240 |
+
target_type=query.dtype,
|
241 |
+
)
|
242 |
+
|
243 |
+
attn_mask = _canonical_mask(
|
244 |
+
mask=attn_mask,
|
245 |
+
mask_name="attn_mask",
|
246 |
+
other_type=None,
|
247 |
+
other_name="",
|
248 |
+
target_type=query.dtype,
|
249 |
+
check_other=False,
|
250 |
+
)
|
251 |
+
|
252 |
+
if not is_batched:
|
253 |
+
why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
|
254 |
+
elif query is not key or key is not value:
|
255 |
+
# When lifting this restriction, don't forget to either
|
256 |
+
# enforce that the dtypes all match or test cases where
|
257 |
+
# they don't!
|
258 |
+
why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
|
259 |
+
elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
|
260 |
+
why_not_fast_path = (
|
261 |
+
f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
|
262 |
+
)
|
263 |
+
elif self.in_proj_weight is None:
|
264 |
+
why_not_fast_path = "in_proj_weight was None"
|
265 |
+
elif query.dtype != self.in_proj_weight.dtype:
|
266 |
+
# this case will fail anyway, but at least they'll get a useful error message.
|
267 |
+
why_not_fast_path = (
|
268 |
+
f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
|
269 |
+
)
|
270 |
+
elif self.training:
|
271 |
+
why_not_fast_path = "training is enabled"
|
272 |
+
elif (self.num_heads % 2) != 0:
|
273 |
+
why_not_fast_path = "self.num_heads is not even"
|
274 |
+
elif not self.batch_first:
|
275 |
+
why_not_fast_path = "batch_first was not True"
|
276 |
+
elif self.bias_k is not None:
|
277 |
+
why_not_fast_path = "self.bias_k was not None"
|
278 |
+
elif self.bias_v is not None:
|
279 |
+
why_not_fast_path = "self.bias_v was not None"
|
280 |
+
elif self.add_zero_attn:
|
281 |
+
why_not_fast_path = "add_zero_attn was enabled"
|
282 |
+
elif not self._qkv_same_embed_dim:
|
283 |
+
why_not_fast_path = "_qkv_same_embed_dim was not True"
|
284 |
+
elif query.is_nested and (key_padding_mask is not None or attn_mask is not None):
|
285 |
+
why_not_fast_path = "supplying both src_key_padding_mask and src_mask at the same time \
|
286 |
+
is not supported with NestedTensor input"
|
287 |
+
elif torch.is_autocast_enabled():
|
288 |
+
why_not_fast_path = "autocast is enabled"
|
289 |
+
|
290 |
+
if not why_not_fast_path:
|
291 |
+
tensor_args = (
|
292 |
+
query,
|
293 |
+
key,
|
294 |
+
value,
|
295 |
+
self.in_proj_weight,
|
296 |
+
self.in_proj_bias,
|
297 |
+
self.out_proj.weight,
|
298 |
+
self.out_proj.bias,
|
299 |
+
)
|
300 |
+
# We have to use list comprehensions below because TorchScript does not support
|
301 |
+
# generator expressions.
|
302 |
+
if torch.overrides.has_torch_function(tensor_args):
|
303 |
+
why_not_fast_path = "some Tensor argument has_torch_function"
|
304 |
+
elif _is_make_fx_tracing():
|
305 |
+
why_not_fast_path = "we are running make_fx tracing"
|
306 |
+
elif not all(_check_arg_device(x) for x in tensor_args):
|
307 |
+
why_not_fast_path = (
|
308 |
+
"some Tensor argument's device is neither one of "
|
309 |
+
f"cpu, cuda or {torch.utils.backend_registration._privateuse1_backend_name}"
|
310 |
+
)
|
311 |
+
elif torch.is_grad_enabled() and any(_arg_requires_grad(x) for x in tensor_args):
|
312 |
+
why_not_fast_path = (
|
313 |
+
"grad is enabled and at least one of query or the "
|
314 |
+
"input/output projection weights or biases requires_grad"
|
315 |
+
)
|
316 |
+
if not why_not_fast_path:
|
317 |
+
merged_mask, mask_type = self.merge_masks(attn_mask, key_padding_mask, query)
|
318 |
+
|
319 |
+
if self.in_proj_bias is not None and self.in_proj_weight is not None:
|
320 |
+
return torch._native_multi_head_attention(
|
321 |
+
query,
|
322 |
+
key,
|
323 |
+
value,
|
324 |
+
self.embed_dim,
|
325 |
+
self.num_heads,
|
326 |
+
self.in_proj_weight,
|
327 |
+
self.in_proj_bias,
|
328 |
+
self.out_proj.weight,
|
329 |
+
self.out_proj.bias,
|
330 |
+
merged_mask,
|
331 |
+
need_weights,
|
332 |
+
average_attn_weights,
|
333 |
+
mask_type,
|
334 |
+
)
|
335 |
+
|
336 |
+
any_nested = query.is_nested or key.is_nested or value.is_nested
|
337 |
+
assert not any_nested, (
|
338 |
+
"MultiheadAttention does not support NestedTensor outside of its fast path. "
|
339 |
+
+ f"The fast path was not hit because {why_not_fast_path}"
|
340 |
+
)
|
341 |
+
|
342 |
+
if self.batch_first and is_batched:
|
343 |
+
# make sure that the transpose op does not affect the "is" property
|
344 |
+
if key is value:
|
345 |
+
if query is key:
|
346 |
+
query = key = value = query.transpose(1, 0)
|
347 |
+
else:
|
348 |
+
query, key = (x.transpose(1, 0) for x in (query, key))
|
349 |
+
value = key
|
350 |
+
else:
|
351 |
+
query, key, value = (x.transpose(1, 0) for x in (query, key, value))
|
352 |
+
|
353 |
+
if not self._qkv_same_embed_dim:
|
354 |
+
attn_output, attn_output_weights = self.multi_head_attention_forward(
|
355 |
+
query,
|
356 |
+
key,
|
357 |
+
value,
|
358 |
+
self.embed_dim,
|
359 |
+
self.num_heads,
|
360 |
+
self.in_proj_weight,
|
361 |
+
self.in_proj_bias,
|
362 |
+
self.bias_k,
|
363 |
+
self.bias_v,
|
364 |
+
self.add_zero_attn,
|
365 |
+
self.dropout,
|
366 |
+
self.out_proj.weight,
|
367 |
+
self.out_proj.bias,
|
368 |
+
training=self.training,
|
369 |
+
key_padding_mask=key_padding_mask,
|
370 |
+
need_weights=need_weights,
|
371 |
+
attn_mask=attn_mask,
|
372 |
+
use_separate_proj_weight=True,
|
373 |
+
q_proj_weight=self.q_proj_weight,
|
374 |
+
k_proj_weight=self.k_proj_weight,
|
375 |
+
v_proj_weight=self.v_proj_weight,
|
376 |
+
average_attn_weights=average_attn_weights,
|
377 |
+
is_causal=is_causal,
|
378 |
+
)
|
379 |
+
else:
|
380 |
+
attn_output, attn_output_weights = self.multi_head_attention_forward(
|
381 |
+
query,
|
382 |
+
key,
|
383 |
+
value,
|
384 |
+
self.embed_dim,
|
385 |
+
self.num_heads,
|
386 |
+
self.in_proj_weight,
|
387 |
+
self.in_proj_bias,
|
388 |
+
self.bias_k,
|
389 |
+
self.bias_v,
|
390 |
+
self.add_zero_attn,
|
391 |
+
self.dropout,
|
392 |
+
self.out_proj.weight,
|
393 |
+
self.out_proj.bias,
|
394 |
+
training=self.training,
|
395 |
+
key_padding_mask=key_padding_mask,
|
396 |
+
need_weights=need_weights,
|
397 |
+
attn_mask=attn_mask,
|
398 |
+
average_attn_weights=average_attn_weights,
|
399 |
+
is_causal=is_causal,
|
400 |
+
)
|
401 |
+
if self.batch_first and is_batched:
|
402 |
+
return attn_output.transpose(1, 0), attn_output_weights
|
403 |
+
else:
|
404 |
+
return attn_output, attn_output_weights
|
405 |
+
|
406 |
+
def multi_head_attention_forward(
|
407 |
+
self,
|
408 |
+
query: Tensor,
|
409 |
+
key: Tensor,
|
410 |
+
value: Tensor,
|
411 |
+
embed_dim_to_check: int,
|
412 |
+
num_heads: int,
|
413 |
+
in_proj_weight: Optional[Tensor],
|
414 |
+
in_proj_bias: Optional[Tensor],
|
415 |
+
bias_k: Optional[Tensor],
|
416 |
+
bias_v: Optional[Tensor],
|
417 |
+
add_zero_attn: bool,
|
418 |
+
dropout_p: float,
|
419 |
+
out_proj_weight: Tensor,
|
420 |
+
out_proj_bias: Optional[Tensor],
|
421 |
+
training: bool = True,
|
422 |
+
key_padding_mask: Optional[Tensor] = None,
|
423 |
+
need_weights: bool = True,
|
424 |
+
attn_mask: Optional[Tensor] = None,
|
425 |
+
use_separate_proj_weight: bool = False,
|
426 |
+
q_proj_weight: Optional[Tensor] = None,
|
427 |
+
k_proj_weight: Optional[Tensor] = None,
|
428 |
+
v_proj_weight: Optional[Tensor] = None,
|
429 |
+
static_k: Optional[Tensor] = None,
|
430 |
+
static_v: Optional[Tensor] = None,
|
431 |
+
average_attn_weights: bool = True,
|
432 |
+
is_causal: bool = False,
|
433 |
+
) -> Tuple[Tensor, Optional[Tensor]]:
|
434 |
+
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
|
435 |
+
|
436 |
+
is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads)
|
437 |
+
|
438 |
+
# For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
|
439 |
+
# is batched, run the computation and before returning squeeze the
|
440 |
+
# batch dimension so that the output doesn't carry this temporary batch dimension.
|
441 |
+
if not is_batched:
|
442 |
+
# unsqueeze if the input is unbatched
|
443 |
+
query = query.unsqueeze(1)
|
444 |
+
key = key.unsqueeze(1)
|
445 |
+
value = value.unsqueeze(1)
|
446 |
+
if key_padding_mask is not None:
|
447 |
+
key_padding_mask = key_padding_mask.unsqueeze(0)
|
448 |
+
|
449 |
+
# set up shape vars
|
450 |
+
tgt_len, bsz, embed_dim = query.shape
|
451 |
+
src_len, _, _ = key.shape
|
452 |
+
|
453 |
+
key_padding_mask = _canonical_mask(
|
454 |
+
mask=key_padding_mask,
|
455 |
+
mask_name="key_padding_mask",
|
456 |
+
other_type=F._none_or_dtype(attn_mask),
|
457 |
+
other_name="attn_mask",
|
458 |
+
target_type=query.dtype,
|
459 |
+
)
|
460 |
+
|
461 |
+
if is_causal and attn_mask is None:
|
462 |
+
raise RuntimeError(
|
463 |
+
"Need attn_mask if specifying the is_causal hint. "
|
464 |
+
"You may use the Transformer module method "
|
465 |
+
"`generate_square_subsequent_mask` to create this mask."
|
466 |
+
)
|
467 |
+
|
468 |
+
if is_causal and key_padding_mask is None and not need_weights:
|
469 |
+
# when we have a kpm or need weights, we need attn_mask
|
470 |
+
# Otherwise, we use the is_causal hint go as is_causal
|
471 |
+
# indicator to SDPA.
|
472 |
+
attn_mask = None
|
473 |
+
else:
|
474 |
+
attn_mask = _canonical_mask(
|
475 |
+
mask=attn_mask,
|
476 |
+
mask_name="attn_mask",
|
477 |
+
other_type=None,
|
478 |
+
other_name="",
|
479 |
+
target_type=query.dtype,
|
480 |
+
check_other=False,
|
481 |
+
)
|
482 |
+
|
483 |
+
if key_padding_mask is not None:
|
484 |
+
# We have the attn_mask, and use that to merge kpm into it.
|
485 |
+
# Turn off use of is_causal hint, as the merged mask is no
|
486 |
+
# longer causal.
|
487 |
+
is_causal = False
|
488 |
+
|
489 |
+
assert (
|
490 |
+
embed_dim == embed_dim_to_check
|
491 |
+
), f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
|
492 |
+
if isinstance(embed_dim, torch.Tensor):
|
493 |
+
# embed_dim can be a tensor when JIT tracing
|
494 |
+
head_dim = embed_dim.div(num_heads, rounding_mode="trunc")
|
495 |
+
else:
|
496 |
+
head_dim = embed_dim // num_heads
|
497 |
+
assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
|
498 |
+
if use_separate_proj_weight:
|
499 |
+
# allow MHA to have different embedding dimensions when separate projection weights are used
|
500 |
+
assert (
|
501 |
+
key.shape[:2] == value.shape[:2]
|
502 |
+
), f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
|
503 |
+
else:
|
504 |
+
assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
|
505 |
+
|
506 |
+
#
|
507 |
+
# compute in-projection
|
508 |
+
#
|
509 |
+
if not use_separate_proj_weight:
|
510 |
+
assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None"
|
511 |
+
q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
|
512 |
+
else:
|
513 |
+
assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
|
514 |
+
assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
|
515 |
+
assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
|
516 |
+
if in_proj_bias is None:
|
517 |
+
b_q = b_k = b_v = None
|
518 |
+
else:
|
519 |
+
b_q, b_k, b_v = in_proj_bias.chunk(3)
|
520 |
+
q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)
|
521 |
+
|
522 |
+
# prep attention mask
|
523 |
+
|
524 |
+
if attn_mask is not None:
|
525 |
+
# ensure attn_mask's dim is 3
|
526 |
+
if attn_mask.dim() == 2:
|
527 |
+
correct_2d_size = (tgt_len, src_len)
|
528 |
+
if attn_mask.shape != correct_2d_size:
|
529 |
+
raise RuntimeError(
|
530 |
+
f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}."
|
531 |
+
)
|
532 |
+
attn_mask = attn_mask.unsqueeze(0)
|
533 |
+
elif attn_mask.dim() == 3:
|
534 |
+
correct_3d_size = (bsz * num_heads, tgt_len, src_len)
|
535 |
+
if attn_mask.shape != correct_3d_size:
|
536 |
+
raise RuntimeError(
|
537 |
+
f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}."
|
538 |
+
)
|
539 |
+
else:
|
540 |
+
raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
|
541 |
+
|
542 |
+
# add bias along batch dimension (currently second)
|
543 |
+
if bias_k is not None and bias_v is not None:
|
544 |
+
assert static_k is None, "bias cannot be added to static key."
|
545 |
+
assert static_v is None, "bias cannot be added to static value."
|
546 |
+
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
|
547 |
+
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
|
548 |
+
if attn_mask is not None:
|
549 |
+
attn_mask = pad(attn_mask, (0, 1))
|
550 |
+
if key_padding_mask is not None:
|
551 |
+
key_padding_mask = pad(key_padding_mask, (0, 1))
|
552 |
+
else:
|
553 |
+
assert bias_k is None
|
554 |
+
assert bias_v is None
|
555 |
+
|
556 |
+
#
|
557 |
+
# reshape q, k, v for multihead attention and make em batch first
|
558 |
+
#
|
559 |
+
q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
|
560 |
+
if static_k is None:
|
561 |
+
k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
|
562 |
+
else:
|
563 |
+
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
|
564 |
+
assert (
|
565 |
+
static_k.size(0) == bsz * num_heads
|
566 |
+
), f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
|
567 |
+
assert static_k.size(2) == head_dim, f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
|
568 |
+
k = static_k
|
569 |
+
if static_v is None:
|
570 |
+
v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
|
571 |
+
else:
|
572 |
+
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
|
573 |
+
assert (
|
574 |
+
static_v.size(0) == bsz * num_heads
|
575 |
+
), f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
|
576 |
+
assert static_v.size(2) == head_dim, f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
|
577 |
+
v = static_v
|
578 |
+
|
579 |
+
# add zero attention along batch dimension (now first)
|
580 |
+
if add_zero_attn:
|
581 |
+
zero_attn_shape = (bsz * num_heads, 1, head_dim)
|
582 |
+
k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
|
583 |
+
v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
|
584 |
+
if attn_mask is not None:
|
585 |
+
attn_mask = pad(attn_mask, (0, 1))
|
586 |
+
if key_padding_mask is not None:
|
587 |
+
key_padding_mask = pad(key_padding_mask, (0, 1))
|
588 |
+
|
589 |
+
# update source sequence length after adjustments
|
590 |
+
src_len = k.size(1)
|
591 |
+
|
592 |
+
# merge key padding and attention masks
|
593 |
+
if key_padding_mask is not None:
|
594 |
+
assert key_padding_mask.shape == (
|
595 |
+
bsz,
|
596 |
+
src_len,
|
597 |
+
), f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
|
598 |
+
key_padding_mask = (
|
599 |
+
key_padding_mask.view(bsz, 1, 1, src_len)
|
600 |
+
.expand(-1, num_heads, -1, -1)
|
601 |
+
.reshape(bsz * num_heads, 1, src_len)
|
602 |
+
)
|
603 |
+
if attn_mask is None:
|
604 |
+
attn_mask = key_padding_mask
|
605 |
+
else:
|
606 |
+
attn_mask = attn_mask + key_padding_mask
|
607 |
+
|
608 |
+
# adjust dropout probability
|
609 |
+
if not training:
|
610 |
+
dropout_p = 0.0
|
611 |
+
|
612 |
+
#
|
613 |
+
# (deep breath) calculate attention and out projection
|
614 |
+
#
|
615 |
+
|
616 |
+
if need_weights:
|
617 |
+
B, Nt, E = q.shape
|
618 |
+
q_scaled = q / math.sqrt(E)
|
619 |
+
|
620 |
+
assert not (is_causal and attn_mask is None), "FIXME: is_causal not implemented for need_weights"
|
621 |
+
|
622 |
+
if attn_mask is not None:
|
623 |
+
attn_output_weights = torch.baddbmm(attn_mask, q_scaled, k.transpose(-2, -1))
|
624 |
+
else:
|
625 |
+
attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
|
626 |
+
attn_output_weights = softmax(attn_output_weights, dim=-1)
|
627 |
+
if dropout_p > 0.0:
|
628 |
+
attn_output_weights = dropout(attn_output_weights, p=dropout_p)
|
629 |
+
|
630 |
+
attn_output = torch.bmm(attn_output_weights, v)
|
631 |
+
|
632 |
+
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
|
633 |
+
attn_output = self.out_proj(attn_output)
|
634 |
+
attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
|
635 |
+
|
636 |
+
# optionally average attention weights over heads
|
637 |
+
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
|
638 |
+
if average_attn_weights:
|
639 |
+
attn_output_weights = attn_output_weights.mean(dim=1)
|
640 |
+
|
641 |
+
if not is_batched:
|
642 |
+
# squeeze the output if input was unbatched
|
643 |
+
attn_output = attn_output.squeeze(1)
|
644 |
+
attn_output_weights = attn_output_weights.squeeze(0)
|
645 |
+
return attn_output, attn_output_weights
|
646 |
+
else:
|
647 |
+
# attn_mask can be either (L,S) or (N*num_heads, L, S)
|
648 |
+
# if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S)
|
649 |
+
# in order to match the input for SDPA of (N, num_heads, L, S)
|
650 |
+
if attn_mask is not None:
|
651 |
+
if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
|
652 |
+
attn_mask = attn_mask.unsqueeze(0)
|
653 |
+
else:
|
654 |
+
attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)
|
655 |
+
|
656 |
+
q = q.view(bsz, num_heads, tgt_len, head_dim)
|
657 |
+
k = k.view(bsz, num_heads, src_len, head_dim)
|
658 |
+
v = v.view(bsz, num_heads, src_len, head_dim)
|
659 |
+
|
660 |
+
attn_output = F.scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, is_causal)
|
661 |
+
attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
|
662 |
+
|
663 |
+
attn_output = self.out_proj(attn_output)
|
664 |
+
attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
|
665 |
+
if not is_batched:
|
666 |
+
# squeeze the output if input was unbatched
|
667 |
+
attn_output = attn_output.squeeze(1)
|
668 |
+
return attn_output, None
|
669 |
+
|
670 |
+
|
671 |
+
def _mha_shape_check(
|
672 |
+
query: Tensor,
|
673 |
+
key: Tensor,
|
674 |
+
value: Tensor,
|
675 |
+
key_padding_mask: Optional[Tensor],
|
676 |
+
attn_mask: Optional[Tensor],
|
677 |
+
num_heads: int,
|
678 |
+
):
|
679 |
+
# Verifies the expected shape for `query, `key`, `value`, `key_padding_mask` and `attn_mask`
|
680 |
+
# and returns if the input is batched or not.
|
681 |
+
# Raises an error if `query` is not 2-D (unbatched) or 3-D (batched) tensor.
|
682 |
+
|
683 |
+
# Shape check.
|
684 |
+
if query.dim() == 3:
|
685 |
+
# Batched Inputs
|
686 |
+
is_batched = True
|
687 |
+
assert key.dim() == 3 and value.dim() == 3, (
|
688 |
+
"For batched (3-D) `query`, expected `key` and `value` to be 3-D"
|
689 |
+
f" but found {key.dim()}-D and {value.dim()}-D tensors respectively"
|
690 |
+
)
|
691 |
+
if key_padding_mask is not None:
|
692 |
+
assert key_padding_mask.dim() == 2, (
|
693 |
+
"For batched (3-D) `query`, expected `key_padding_mask` to be `None` or 2-D"
|
694 |
+
f" but found {key_padding_mask.dim()}-D tensor instead"
|
695 |
+
)
|
696 |
+
if attn_mask is not None:
|
697 |
+
assert attn_mask.dim() in (2, 3), (
|
698 |
+
"For batched (3-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
|
699 |
+
f" but found {attn_mask.dim()}-D tensor instead"
|
700 |
+
)
|
701 |
+
elif query.dim() == 2:
|
702 |
+
# Unbatched Inputs
|
703 |
+
is_batched = False
|
704 |
+
assert key.dim() == 2 and value.dim() == 2, (
|
705 |
+
"For unbatched (2-D) `query`, expected `key` and `value` to be 2-D"
|
706 |
+
f" but found {key.dim()}-D and {value.dim()}-D tensors respectively"
|
707 |
+
)
|
708 |
+
|
709 |
+
if key_padding_mask is not None:
|
710 |
+
assert key_padding_mask.dim() == 1, (
|
711 |
+
"For unbatched (2-D) `query`, expected `key_padding_mask` to be `None` or 1-D"
|
712 |
+
f" but found {key_padding_mask.dim()}-D tensor instead"
|
713 |
+
)
|
714 |
+
|
715 |
+
if attn_mask is not None:
|
716 |
+
assert attn_mask.dim() in (2, 3), (
|
717 |
+
"For unbatched (2-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
|
718 |
+
f" but found {attn_mask.dim()}-D tensor instead"
|
719 |
+
)
|
720 |
+
if attn_mask.dim() == 3:
|
721 |
+
expected_shape = (num_heads, query.shape[0], key.shape[0])
|
722 |
+
assert (
|
723 |
+
attn_mask.shape == expected_shape
|
724 |
+
), f"Expected `attn_mask` shape to be {expected_shape} but got {attn_mask.shape}"
|
725 |
+
else:
|
726 |
+
raise AssertionError(
|
727 |
+
f"query should be unbatched 2D or batched 3D tensor but received {query.dim()}-D query tensor"
|
728 |
+
)
|
729 |
+
|
730 |
+
return is_batched
|
731 |
+
|
732 |
+
|
733 |
+
def _canonical_mask(
|
734 |
+
mask: Optional[Tensor],
|
735 |
+
mask_name: str,
|
736 |
+
other_type: Optional[DType],
|
737 |
+
other_name: str,
|
738 |
+
target_type: DType,
|
739 |
+
check_other: bool = True,
|
740 |
+
) -> Optional[Tensor]:
|
741 |
+
|
742 |
+
if mask is not None:
|
743 |
+
_mask_dtype = mask.dtype
|
744 |
+
_mask_is_float = torch.is_floating_point(mask)
|
745 |
+
if _mask_dtype != torch.bool and not _mask_is_float:
|
746 |
+
raise AssertionError(f"only bool and floating types of {mask_name} are supported")
|
747 |
+
if check_other and other_type is not None:
|
748 |
+
if _mask_dtype != other_type:
|
749 |
+
warnings.warn(
|
750 |
+
f"Support for mismatched {mask_name} and {other_name} "
|
751 |
+
"is deprecated. Use same type for both instead."
|
752 |
+
)
|
753 |
+
if not _mask_is_float:
|
754 |
+
mask = torch.zeros_like(mask, dtype=target_type).masked_fill_(mask, float("-inf"))
|
755 |
+
return mask
|
756 |
+
|
757 |
+
|
758 |
+
def _in_projection_packed(
|
759 |
+
q: Tensor,
|
760 |
+
k: Tensor,
|
761 |
+
v: Tensor,
|
762 |
+
w: Tensor,
|
763 |
+
b: Optional[Tensor] = None,
|
764 |
+
) -> List[Tensor]:
|
765 |
+
r"""
|
766 |
+
Performs the in-projection step of the attention operation, using packed weights.
|
767 |
+
Output is a triple containing projection tensors for query, key and value.
|
768 |
+
Args:
|
769 |
+
q, k, v: query, key and value tensors to be projected. For self-attention,
|
770 |
+
these are typically the same tensor; for encoder-decoder attention,
|
771 |
+
k and v are typically the same tensor. (We take advantage of these
|
772 |
+
identities for performance if they are present.) Regardless, q, k and v
|
773 |
+
must share a common embedding dimension; otherwise their shapes may vary.
|
774 |
+
w: projection weights for q, k and v, packed into a single tensor. Weights
|
775 |
+
are packed along dimension 0, in q, k, v order.
|
776 |
+
b: optional projection biases for q, k and v, packed into a single tensor
|
777 |
+
in q, k, v order.
|
778 |
+
Shape:
|
779 |
+
Inputs:
|
780 |
+
- q: :math:`(..., E)` where E is the embedding dimension
|
781 |
+
- k: :math:`(..., E)` where E is the embedding dimension
|
782 |
+
- v: :math:`(..., E)` where E is the embedding dimension
|
783 |
+
- w: :math:`(E * 3, E)` where E is the embedding dimension
|
784 |
+
- b: :math:`E * 3` where E is the embedding dimension
|
785 |
+
Output:
|
786 |
+
- in output list :math:`[q', k', v']`, each output tensor will have the
|
787 |
+
same shape as the corresponding input tensor.
|
788 |
+
"""
|
789 |
+
E = q.size(-1)
|
790 |
+
if k is v:
|
791 |
+
if q is k:
|
792 |
+
# self-attention
|
793 |
+
proj = linear(q, w, b)
|
794 |
+
# reshape to 3, E and not E, 3 is deliberate for better memory coalescing and keeping same order as chunk()
|
795 |
+
proj = proj.unflatten(-1, (3, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
|
796 |
+
return proj[0], proj[1], proj[2]
|
797 |
+
else:
|
798 |
+
# encoder-decoder attention
|
799 |
+
w_q, w_kv = w.split([E, E * 2])
|
800 |
+
if b is None:
|
801 |
+
b_q = b_kv = None
|
802 |
+
else:
|
803 |
+
b_q, b_kv = b.split([E, E * 2])
|
804 |
+
q_proj = linear(q, w_q, b_q)
|
805 |
+
kv_proj = linear(k, w_kv, b_kv)
|
806 |
+
# reshape to 2, E and not E, 2 is deliberate for better memory coalescing and keeping same order as chunk()
|
807 |
+
kv_proj = kv_proj.unflatten(-1, (2, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
|
808 |
+
return (q_proj, kv_proj[0], kv_proj[1])
|
809 |
+
else:
|
810 |
+
w_q, w_k, w_v = w.chunk(3)
|
811 |
+
if b is None:
|
812 |
+
b_q = b_k = b_v = None
|
813 |
+
else:
|
814 |
+
b_q, b_k, b_v = b.chunk(3)
|
815 |
+
return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
|
816 |
+
|
817 |
+
|
818 |
+
def _in_projection(
|
819 |
+
q: Tensor,
|
820 |
+
k: Tensor,
|
821 |
+
v: Tensor,
|
822 |
+
w_q: Tensor,
|
823 |
+
w_k: Tensor,
|
824 |
+
w_v: Tensor,
|
825 |
+
b_q: Optional[Tensor] = None,
|
826 |
+
b_k: Optional[Tensor] = None,
|
827 |
+
b_v: Optional[Tensor] = None,
|
828 |
+
) -> Tuple[Tensor, Tensor, Tensor]:
|
829 |
+
r"""
|
830 |
+
Performs the in-projection step of the attention operation. This is simply
|
831 |
+
a triple of linear projections, with shape constraints on the weights which
|
832 |
+
ensure embedding dimension uniformity in the projected outputs.
|
833 |
+
Output is a triple containing projection tensors for query, key and value.
|
834 |
+
Args:
|
835 |
+
q, k, v: query, key and value tensors to be projected.
|
836 |
+
w_q, w_k, w_v: weights for q, k and v, respectively.
|
837 |
+
b_q, b_k, b_v: optional biases for q, k and v, respectively.
|
838 |
+
Shape:
|
839 |
+
Inputs:
|
840 |
+
- q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any
|
841 |
+
number of leading dimensions.
|
842 |
+
- k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any
|
843 |
+
number of leading dimensions.
|
844 |
+
- v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any
|
845 |
+
number of leading dimensions.
|
846 |
+
- w_q: :math:`(Eq, Eq)`
|
847 |
+
- w_k: :math:`(Eq, Ek)`
|
848 |
+
- w_v: :math:`(Eq, Ev)`
|
849 |
+
- b_q: :math:`(Eq)`
|
850 |
+
- b_k: :math:`(Eq)`
|
851 |
+
- b_v: :math:`(Eq)`
|
852 |
+
Output: in output triple :math:`(q', k', v')`,
|
853 |
+
- q': :math:`[Qdims..., Eq]`
|
854 |
+
- k': :math:`[Kdims..., Eq]`
|
855 |
+
- v': :math:`[Vdims..., Eq]`
|
856 |
+
"""
|
857 |
+
Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1)
|
858 |
+
assert w_q.shape == (Eq, Eq), f"expecting query weights shape of {(Eq, Eq)}, but got {w_q.shape}"
|
859 |
+
assert w_k.shape == (Eq, Ek), f"expecting key weights shape of {(Eq, Ek)}, but got {w_k.shape}"
|
860 |
+
assert w_v.shape == (Eq, Ev), f"expecting value weights shape of {(Eq, Ev)}, but got {w_v.shape}"
|
861 |
+
assert b_q is None or b_q.shape == (Eq,), f"expecting query bias shape of {(Eq,)}, but got {b_q.shape}"
|
862 |
+
assert b_k is None or b_k.shape == (Eq,), f"expecting key bias shape of {(Eq,)}, but got {b_k.shape}"
|
863 |
+
assert b_v is None or b_v.shape == (Eq,), f"expecting value bias shape of {(Eq,)}, but got {b_v.shape}"
|
864 |
+
return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
|
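The Resampler above compresses a variable-size grid of vision-encoder patch features into a fixed number of query embeddings through a single cross-attention layer, adding the 2D sin-cos position embedding to the keys and masking padded patches. Below is a minimal usage sketch (not part of the repo); it assumes the file is importable as resampler.py, and the dimensions are illustrative rather than the model's configured values:

# Illustrative only: dims below are placeholders, not the model's config.
import torch
from resampler import Resampler

resampler = Resampler(num_queries=64, embed_dim=1024, num_heads=16, kv_dim=1152)
resampler.eval()

# Two images whose encoders produced 30x32 and 24x24 patch grids; features
# are padded along dim 1 to the longer sequence.
tgt_sizes = torch.tensor([[30, 32], [24, 24]])           # (bs, 2) = (h, w) per image
max_len = int((tgt_sizes[:, 0] * tgt_sizes[:, 1]).max())
x = torch.randn(2, max_len, 1152)                        # (bs, L, kv_dim) padded features

with torch.no_grad():
    out = resampler(x, tgt_sizes=tgt_sizes)

print(out.shape)  # torch.Size([2, 64, 1024]) -- (batch_size, num_queries, embed_dim)

Each image thus contributes exactly num_queries tokens to the language model regardless of its resolution, which is the point of the design.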
special_tokens_map.json
ADDED
@@ -0,0 +1,264 @@
{
  "additional_special_tokens": [
    {
      "content": "<image>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "</image>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<ref>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "</ref>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<box>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "</box>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<quad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "</quad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<point>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "</point>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<slice>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "</slice>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<image_id>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "</image_id>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<unit>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "</unit>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<asr>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "</asr>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<query>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "</query>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|audio_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|audio|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|audio_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|spk_bos|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|spk|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|spk_eos|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|tts_bos|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|tts_eos|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|listen|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|speak|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|interrupt|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|vad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<|vad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<reserved_43>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    },
    {
      "content": "<reserved_53>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    }
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": "<unk>"
}
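A quick illustrative check (not part of the repo; assumes the standard transformers AutoTokenizer API): once the tokenizer is loaded, the special tokens declared above resolve to the fixed ids registered for them, and eos/pad come back as configured here.

# Illustrative sketch; requires the tokenizer files from this repo.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("openbmb/MiniCPM-o-2_6", trust_remote_code=True)
print(tok.convert_tokens_to_ids("<image>"))    # 151665
print(tok.convert_tokens_to_ids("<|audio|>"))  # 151686
print(tok.eos_token)                           # "<|im_end|>"
print(tok.pad_token)                           # "<|endoftext|>"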
tokenization_minicpmo_fast.py
ADDED
@@ -0,0 +1,110 @@
# coding=utf-8
# Copyright 2025 The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from transformers import Qwen2TokenizerFast


class MiniCPMOTokenizerFast(Qwen2TokenizerFast):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # image
        self.im_start = "<image>"
        self.im_end = "</image>"
        self.ref_start = "<ref>"
        self.ref_end = "</ref>"
        self.box_start = "<box>"
        self.box_end = "</box>"
        self.quad_start = "<quad>"
        self.quad_end = "</quad>"
        self.slice_start = "<slice>"
        self.slice_end = "</slice>"
        self.im_id_start = "<image_id>"
        self.im_id_end = "</image_id>"

        # audio
        self.audio_start = "<|audio_start|>"
        self.audio_end = "<|audio_end|>"
        self.spk_start = "<|spk_bos|>"
        self.spk_end = "<|spk_eos|>"
        self.tts_start = "<|tts_bos|>"
        self.tts_end = "<|tts_eos|>"

    @property
    def eos_id(self):
        return self.eos_token_id

    @property
    def bos_id(self):
        return self.bos_token_id

    @property
    def unk_id(self):
        return self.unk_token_id

    @property
    def im_start_id(self):
        return self.convert_tokens_to_ids(self.im_start)

    @property
    def im_end_id(self):
        return self.convert_tokens_to_ids(self.im_end)

    @property
    def slice_start_id(self):
        return self.convert_tokens_to_ids(self.slice_start)

    @property
    def slice_end_id(self):
        return self.convert_tokens_to_ids(self.slice_end)

    @property
    def im_id_start_id(self):
        return self.convert_tokens_to_ids(self.im_id_start)

    @property
    def im_id_end_id(self):
        return self.convert_tokens_to_ids(self.im_id_end)

    @property
    def audio_start_id(self):
        return self.convert_tokens_to_ids(self.audio_start)

    @property
    def audio_end_id(self):
        return self.convert_tokens_to_ids(self.audio_end)

    @property
    def spk_start_id(self):
        return self.convert_tokens_to_ids(self.spk_start)

    @property
    def spk_end_id(self):
        return self.convert_tokens_to_ids(self.spk_end)

    @property
    def tts_start_id(self):
        return self.convert_tokens_to_ids(self.tts_start)

    @property
    def tts_end_id(self):
        return self.convert_tokens_to_ids(self.tts_end)

    @staticmethod
    def escape(text: str) -> str:
        return text

    @staticmethod
    def unescape(text: str) -> str:
        return text
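This subclass only adds convenience attributes: each *_id property wraps convert_tokens_to_ids for the markers the processor inserts around image and audio segments. An illustrative sketch follows (not part of the repo; assumes loading with trust_remote_code=True, which typically resolves to this MiniCPMOTokenizerFast class via the repo's auto_map):

# Illustrative sketch of the convenience accessors defined above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("openbmb/MiniCPM-o-2_6", trust_remote_code=True)

print(tok.im_start, tok.im_start_id)        # "<image>" 151665
print(tok.audio_start, tok.audio_start_id)  # "<|audio_start|>" 151685
print(tok.tts_end, tok.tts_end_id)          # "<|tts_eos|>" 151692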
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,523 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "128244": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151665": {
      "content": "<image>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151666": {
      "content": "</image>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151667": {
      "content": "<ref>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151668": {
      "content": "</ref>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151669": {
      "content": "<box>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151670": {
      "content": "</box>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151671": {
      "content": "<quad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151672": {
      "content": "</quad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151673": {
      "content": "<point>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151674": {
      "content": "</point>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151675": {
      "content": "<slice>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151676": {
      "content": "</slice>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151677": {
      "content": "<image_id>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151678": {
      "content": "</image_id>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151679": {
      "content": "<unit>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151680": {
      "content": "</unit>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151681": {
      "content": "<asr>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151682": {
      "content": "</asr>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151683": {
      "content": "<query>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151684": {
      "content": "</query>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151685": {
      "content": "<|audio_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151686": {
      "content": "<|audio|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151687": {
      "content": "<|audio_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151688": {
      "content": "<|spk_bos|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151689": {
      "content": "<|spk|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151690": {
      "content": "<|spk_eos|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151691": {
      "content": "<|tts_bos|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151692": {
      "content": "<|tts_eos|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151693": {
      "content": "<|listen|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151694": {
      "content": "<|speak|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151695": {
      "content": "<|interrupt|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151696": {
      "content": "<|vad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151697": {
      "content": "<|vad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151698": {
      "content": "<reserved_43>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151699": {
      "content": "<reserved_53>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
|
467 |
+
"special": true
|
468 |
+
}
|
469 |
+
},
|
470 |
+
"additional_special_tokens": [
|
471 |
+
"<image>",
|
472 |
+
"</image>",
|
473 |
+
"<ref>",
|
474 |
+
"</ref>",
|
475 |
+
"<box>",
|
476 |
+
"</box>",
|
477 |
+
"<quad>",
|
478 |
+
"</quad>",
|
479 |
+
"<point>",
|
480 |
+
"</point>",
|
481 |
+
"<slice>",
|
482 |
+
"</slice>",
|
483 |
+
"<image_id>",
|
484 |
+
"</image_id>",
|
485 |
+
"<unit>",
|
486 |
+
"</unit>",
|
487 |
+
"<asr>",
|
488 |
+
"</asr>",
|
489 |
+
"<query>",
|
490 |
+
"</query>",
|
491 |
+
"<|audio_start|>",
|
492 |
+
"<|audio|>",
|
493 |
+
"<|audio_end|>",
|
494 |
+
"<|spk_bos|>",
|
495 |
+
"<|spk|>",
|
496 |
+
"<|spk_eos|>",
|
497 |
+
"<|tts_bos|>",
|
498 |
+
"<|tts_eos|>",
|
499 |
+
"<|listen|>",
|
500 |
+
"<|speak|>",
|
501 |
+
"<|interrupt|>",
|
502 |
+
"<|vad_start|>",
|
503 |
+
"<|vad_end|>",
|
504 |
+
"<reserved_43>",
|
505 |
+
"<reserved_53>"
|
506 |
+
],
|
507 |
+
"bos_token": "<|im_start|>",
|
508 |
+
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
|
509 |
+
"clean_up_tokenization_spaces": false,
|
510 |
+
"eos_token": "<|im_end|>",
|
511 |
+
"errors": "replace",
|
512 |
+
"model_max_length": 131072,
|
513 |
+
"pad_token": "<|endoftext|>",
|
514 |
+
"split_special_tokens": false,
|
515 |
+
"auto_map": {
|
516 |
+
"AutoTokenizer": [
|
517 |
+
"tokenization_minicpmo_fast.MiniCPMOTokenizerFast",
|
518 |
+
null
|
519 |
+
]
|
520 |
+
},
|
521 |
+
"tokenizer_class": "MiniCPMOTokenizerFast",
|
522 |
+
"unk_token": "<unk>"
|
523 |
+
}
|
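Since "auto_map" routes AutoTokenizer to the custom MiniCPMOTokenizerFast class shipped in this repo, the tokenizer must be loaded with trust_remote_code=True. Below is a minimal sketch of loading it and checking the wiring above; the repo id and the installed transformers version are assumptions, not something this diff pins down:

from transformers import AutoTokenizer

# Hypothetical repo id; a local checkout of these files works the same way.
tok = AutoTokenizer.from_pretrained("openbmb/MiniCPM-o-2_6", trust_remote_code=True)

# The added tokens resolve to the fixed ids registered above.
assert tok.convert_tokens_to_ids("<ref>") == 151667
assert tok.convert_tokens_to_ids("<|audio|>") == 151686

# The chat_template is ChatML-style: <|im_start|>role ... <|im_end|>.
messages = [{"role": "user", "content": "Hi"}]
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))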
utils.py
ADDED
@@ -0,0 +1,154 @@
+# coding=utf-8
+# Copyright 2025 The OpenBMB Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import logging
+
+import librosa
+import numpy as np
+
+logger = logging.getLogger(__name__)
+
+
+def is_silent(data):
+    if np.abs(data).max() < 3e-3:
+        return True
+    else:
+        return False
+
+
+
def sentence_end(txt):
|
33 |
+
for c in [".", "。", "!", "?", "!", "?"]:
|
34 |
+
if c in txt:
|
35 |
+
if c == ".": # check not number before it like 1.
|
36 |
+
idx = txt.find(c)
|
37 |
+
if idx > 0:
|
38 |
+
if txt[idx - 1].isdigit():
|
39 |
+
continue
|
40 |
+
return c
|
41 |
+
return ""
|
42 |
+
|
43 |
+
|
44 |
+
class NumberToTextConverter:
|
45 |
+
def __init__(self):
|
46 |
+
self.num_to_chinese = {
|
47 |
+
"0": "零",
|
48 |
+
"1": "一",
|
49 |
+
"2": "二",
|
50 |
+
"3": "三",
|
51 |
+
"4": "四",
|
52 |
+
"5": "五",
|
53 |
+
"6": "六",
|
54 |
+
"7": "七",
|
55 |
+
"8": "八",
|
56 |
+
"9": "九",
|
57 |
+
}
|
58 |
+
self.num_to_english = {
|
59 |
+
"0": "zero",
|
60 |
+
"1": "one",
|
61 |
+
"2": "two",
|
62 |
+
"3": "three",
|
63 |
+
"4": "four",
|
64 |
+
"5": "five",
|
65 |
+
"6": "six",
|
66 |
+
"7": "seven",
|
67 |
+
"8": "eight",
|
68 |
+
"9": "nine",
|
69 |
+
}
|
70 |
+
|
71 |
+
def number_to_chinese_digit_by_digit(self, num_str):
|
72 |
+
result = ""
|
73 |
+
for char in num_str:
|
74 |
+
if char in self.num_to_chinese:
|
75 |
+
result += self.num_to_chinese[char]
|
76 |
+
return result
|
77 |
+
|
78 |
+
def number_to_english_digit_by_digit(self, num_str):
|
79 |
+
result = []
|
80 |
+
for char in num_str:
|
81 |
+
if char in self.num_to_english:
|
82 |
+
result.append(self.num_to_english[char])
|
83 |
+
return " ".join(result)
|
84 |
+
|
85 |
+
def detect_language(self, text):
|
86 |
+
chinese_count = len(re.findall(r"[\u4e00-\u9fff]", text))
|
87 |
+
english_count = len(re.findall(r"[a-zA-Z]", text))
|
88 |
+
return "chinese" if chinese_count >= english_count else "english"
|
89 |
+
|
90 |
+
def replace_numbers_with_text(self, text, language=None):
|
91 |
+
if language is None:
|
92 |
+
language = self.detect_language(text)
|
93 |
+
numbers = re.findall(r"\d+", text)
|
94 |
+
|
95 |
+
for num in numbers:
|
96 |
+
if language == "chinese":
|
97 |
+
replacement = self.number_to_chinese_digit_by_digit(num)
|
98 |
+
else:
|
99 |
+
replacement = self.number_to_english_digit_by_digit(num)
|
100 |
+
text = text.replace(num, replacement, 1)
|
101 |
+
|
102 |
+
return text
|
103 |
+
|
104 |
+
|
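Usage is straightforward: digits are always spelled out one at a time rather than read as whole numbers, which is what a TTS front end typically wants for ids and phone numbers. An illustrative call (not part of the file):

converter = NumberToTextConverter()
# Language is auto-detected from the surrounding characters.
assert converter.replace_numbers_with_text("房间号是1024") == "房间号是一零二四"
assert converter.replace_numbers_with_text("call 911 now") == "call nine one one now"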
+class VoiceChecker:
+    def __init__(self):
+        self.previous_mel = None
+        self.consecutive_zeros = 0
+        self.consecutive_low_distance = 0
+
+    def compute_distance(self, audio_chunk, mel_spec):
+        if is_silent(audio_chunk):
+            return 0.0  # silent chunk: report zero distance
+
+        mel_db = librosa.power_to_db(mel_spec)
+        if self.previous_mel is None:
+            self.previous_mel = mel_db
+            return -1.0
+
+        distance = np.linalg.norm(np.mean(mel_db, axis=1) - np.mean(self.previous_mel, axis=1))
+        self.previous_mel = mel_db
+        return distance
+
+    def is_bad(self, audio_wav, mel_spec, chunk_size=2560, thresh=100.0):
+        num_chunks = len(audio_wav) // chunk_size
+        mel_chunk_size = mel_spec.shape[-1] // num_chunks
+        for i in range(num_chunks):
+            audio_chunk = audio_wav[i * chunk_size : (i + 1) * chunk_size]
+            mel_spec_chunk = mel_spec[:, i * mel_chunk_size : (i + 1) * mel_chunk_size]
+
+            distance = self.compute_distance(audio_chunk, mel_spec_chunk)
+            logger.warning(f"mel dist: {distance:.1f}, zero: {self.consecutive_zeros}, low: {self.consecutive_low_distance}")
+            if distance == 0:
+                self.consecutive_low_distance = 0  # reset
+                self.consecutive_zeros += 1
+                if self.consecutive_zeros >= 12:
+                    logger.warning("VoiceChecker detected 1.2 s of silence. Marking as failed.")
+                    return True
+            elif distance < thresh:
+                self.consecutive_zeros = 0
+                self.consecutive_low_distance += 1
+                if self.consecutive_low_distance >= 5:
+                    logger.warning("VoiceChecker detected 5 consecutive low-distance chunks. Marking as failed.")
+                    return True
+            else:
+                self.consecutive_low_distance = 0
+                self.consecutive_zeros = 0
+
+        return False
+
+    def reset(self):
+        self.previous_mel = None
+        self.consecutive_zeros = 0
+        self.consecutive_low_distance = 0
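VoiceChecker flags degenerate generated audio: roughly 1.2 s of near-silence (12 consecutive zero-distance chunks) or 5 consecutive chunks whose mean mel-frame change stays under thresh. A sketch of driving it offline follows; the sample rate and mel parameters are assumptions chosen so that one 2560-sample chunk is about 0.1 s, consistent with the "1.2 s" log message, and may differ from the model's actual feature settings:

import librosa

# Assumed parameters: 24 kHz mono audio, 80 mel bands (hypothetical file path).
wav, sr = librosa.load("reply.wav", sr=24000, mono=True)
mel = librosa.feature.melspectrogram(y=wav, sr=sr, n_fft=1024, hop_length=256, n_mels=80)

checker = VoiceChecker()
if checker.is_bad(wav, mel, chunk_size=2560, thresh=100.0):
    print("audio looks silent or stuck; regenerate")
checker.reset()  # clear state before scoring the next utterance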
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff