Praveen Malla
commited on
Commit
•
c680bdc
1
Parent(s):
a748914
initial push
Browse files- .DS_Store +0 -0
- README.md +105 -3
- assets/got_logo.png +0 -0
- assets/got_support.jpg +0 -0
- assets/train_sample.jpg +0 -0
- config.json +38 -0
- generation_config.json +6 -0
- got_vision_b.py +468 -0
- model.safetensors +3 -0
- modeling_GOT.py +1008 -0
- qwen.tiktoken +0 -0
- render_tools.py +96 -0
- special_tokens_map.json +9 -0
- tokenization_qwen.py +264 -0
- tokenizer_config.json +14 -0
.DS_Store
ADDED
Binary file (6.15 kB). View file
|
|
README.md
CHANGED
@@ -1,3 +1,105 @@
|
|
1 |
-
---
|
2 |
-
|
3 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
pipeline_tag: image-text-to-text
|
3 |
+
library_name: transformers
|
4 |
+
language:
|
5 |
+
- multilingual
|
6 |
+
tags:
|
7 |
+
- got
|
8 |
+
- vision-language
|
9 |
+
- ocr2.0
|
10 |
+
- custom_code
|
11 |
+
license: apache-2.0
|
12 |
+
---
|
13 |
+
|
14 |
+
<h1>General OCR Theory: Towards OCR-2.0 via a Unified End-to-end Model
|
15 |
+
</h1>
|
16 |
+
|
17 |
+
[Online Demo](https://huggingface.co/spaces/ucaslcl/GOT_online) | [GitHub](https://github.com/Ucas-HaoranWei/GOT-OCR2.0/) | [Paper](https://arxiv.org/abs/2409.01704)</a>
|
18 |
+
|
19 |
+
|
20 |
+
[Haoran Wei*](https://scholar.google.com/citations?user=J4naK0MAAAAJ&hl=en), Chenglong Liu*, Jinyue Chen, Jia Wang, Lingyu Kong, Yanming Xu, [Zheng Ge](https://joker316701882.github.io/), Liang Zhao, [Jianjian Sun](https://scholar.google.com/citations?user=MVZrGkYAAAAJ&hl=en), [Yuang Peng](https://scholar.google.com.hk/citations?user=J0ko04IAAAAJ&hl=zh-CN&oi=ao), Chunrui Han, [Xiangyu Zhang](https://scholar.google.com/citations?user=yuB-cfoAAAAJ&hl=en)
|
21 |
+
|
22 |
+
|
23 |
+
|
24 |
+
![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/6653eee7a2d7a882a805ab95/QCEFY-M_YG3Bp5fn1GQ8X.jpeg)
|
25 |
+
|
26 |
+
|
27 |
+
|
28 |
+
## Usage
|
29 |
+
Inference using Huggingface transformers on NVIDIA GPUs. Requirements tested on python 3.10:
|
30 |
+
```
|
31 |
+
torch==2.0.1
|
32 |
+
torchvision==0.15.2
|
33 |
+
transformers==4.37.2
|
34 |
+
```
|
35 |
+
|
36 |
+
|
37 |
+
```python
|
38 |
+
from transformers import AutoModel, AutoTokenizer
|
39 |
+
|
40 |
+
tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
|
41 |
+
model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, low_cpu_mem_usage=True, device_map='cuda', use_safetensors=True, pad_token_id=tokenizer.eos_token_id)
|
42 |
+
model = model.eval().cuda()
|
43 |
+
|
44 |
+
|
45 |
+
# input your test image
|
46 |
+
image_file = 'xxx.jpg'
|
47 |
+
|
48 |
+
# plain texts OCR
|
49 |
+
res = model.chat(tokenizer, image_file, ocr_type='ocr')
|
50 |
+
|
51 |
+
# format texts OCR:
|
52 |
+
# res = model.chat(tokenizer, image_file, ocr_type='format')
|
53 |
+
|
54 |
+
# fine-grained OCR:
|
55 |
+
# res = model.chat(tokenizer, image_file, ocr_type='ocr', ocr_box='')
|
56 |
+
# res = model.chat(tokenizer, image_file, ocr_type='format', ocr_box='')
|
57 |
+
# res = model.chat(tokenizer, image_file, ocr_type='ocr', ocr_color='')
|
58 |
+
# res = model.chat(tokenizer, image_file, ocr_type='format', ocr_color='')
|
59 |
+
|
60 |
+
# multi-crop OCR:
|
61 |
+
# res = model.chat_crop(tokenizer, image_file, ocr_type='ocr')
|
62 |
+
# res = model.chat_crop(tokenizer, image_file, ocr_type='format')
|
63 |
+
|
64 |
+
# render the formatted OCR results:
|
65 |
+
# res = model.chat(tokenizer, image_file, ocr_type='format', render=True, save_render_file = './demo.html')
|
66 |
+
|
67 |
+
print(res)
|
68 |
+
|
69 |
+
|
70 |
+
```
|
71 |
+
More details about 'ocr_type', 'ocr_box', 'ocr_color', and 'render' can be found at our GitHub.
|
72 |
+
Our training codes are available at our [GitHub](https://github.com/Ucas-HaoranWei/GOT-OCR2.0/).
|
73 |
+
|
74 |
+
|
75 |
+
|
76 |
+
## More Multimodal Projects
|
77 |
+
|
78 |
+
👏 Welcome to explore more multimodal projects of our team:
|
79 |
+
|
80 |
+
[Vary](https://github.com/Ucas-HaoranWei/Vary) | [Fox](https://github.com/ucaslcl/Fox) | [OneChart](https://github.com/LingyvKong/OneChart)
|
81 |
+
|
82 |
+
## Citation
|
83 |
+
|
84 |
+
If you find our work helpful, please consider citing our papers 📝 and liking this project ❤️!
|
85 |
+
|
86 |
+
```bib
|
87 |
+
@article{wei2024general,
|
88 |
+
title={General OCR Theory: Towards OCR-2.0 via a Unified End-to-end Model},
|
89 |
+
author={Wei, Haoran and Liu, Chenglong and Chen, Jinyue and Wang, Jia and Kong, Lingyu and Xu, Yanming and Ge, Zheng and Zhao, Liang and Sun, Jianjian and Peng, Yuang and others},
|
90 |
+
journal={arXiv preprint arXiv:2409.01704},
|
91 |
+
year={2024}
|
92 |
+
}
|
93 |
+
@article{liu2024focus,
|
94 |
+
title={Focus Anywhere for Fine-grained Multi-page Document Understanding},
|
95 |
+
author={Liu, Chenglong and Wei, Haoran and Chen, Jinyue and Kong, Lingyu and Ge, Zheng and Zhu, Zining and Zhao, Liang and Sun, Jianjian and Han, Chunrui and Zhang, Xiangyu},
|
96 |
+
journal={arXiv preprint arXiv:2405.14295},
|
97 |
+
year={2024}
|
98 |
+
}
|
99 |
+
@article{wei2023vary,
|
100 |
+
title={Vary: Scaling up the Vision Vocabulary for Large Vision-Language Models},
|
101 |
+
author={Wei, Haoran and Kong, Lingyu and Chen, Jinyue and Zhao, Liang and Ge, Zheng and Yang, Jinrong and Sun, Jianjian and Han, Chunrui and Zhang, Xiangyu},
|
102 |
+
journal={arXiv preprint arXiv:2312.06109},
|
103 |
+
year={2023}
|
104 |
+
}
|
105 |
+
```
|
assets/got_logo.png
ADDED
assets/got_support.jpg
ADDED
assets/train_sample.jpg
ADDED
config.json
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_name_or_path": "ucaslcl/GOT-OCR2_0",
|
3 |
+
"architectures": [
|
4 |
+
"GOTQwenForCausalLM"
|
5 |
+
],
|
6 |
+
"auto_map": {
|
7 |
+
"AutoConfig": "modeling_GOT.GOTConfig",
|
8 |
+
"AutoModel": "modeling_GOT.GOTQwenForCausalLM"
|
9 |
+
},
|
10 |
+
"attention_dropout": 0.0,
|
11 |
+
"bos_token_id": 151643,
|
12 |
+
"eos_token_id": 151643,
|
13 |
+
"freeze_vision_tower": false,
|
14 |
+
"hidden_act": "silu",
|
15 |
+
"hidden_size": 1024,
|
16 |
+
"im_end_token": 151858,
|
17 |
+
"im_patch_token": 151859,
|
18 |
+
"im_start_token": 151857,
|
19 |
+
"image_token_len": 256,
|
20 |
+
"initializer_range": 0.02,
|
21 |
+
"intermediate_size": 2816,
|
22 |
+
"max_position_embeddings": 32768,
|
23 |
+
"max_window_layers": 21,
|
24 |
+
"model_type": "GOT",
|
25 |
+
"num_attention_heads": 16,
|
26 |
+
"num_hidden_layers": 24,
|
27 |
+
"num_key_value_heads": 16,
|
28 |
+
"rms_norm_eps": 1e-06,
|
29 |
+
"rope_theta": 1000000.0,
|
30 |
+
"sliding_window": 32768,
|
31 |
+
"tie_word_embeddings": true,
|
32 |
+
"torch_dtype": "bfloat16",
|
33 |
+
"transformers_version": "4.37.2",
|
34 |
+
"use_cache": true,
|
35 |
+
"use_im_start_end": true,
|
36 |
+
"use_sliding_window": false,
|
37 |
+
"vocab_size": 151860
|
38 |
+
}
|
generation_config.json
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"bos_token_id": 151643,
|
3 |
+
"eos_token_id": 151643,
|
4 |
+
"max_new_tokens": 2048,
|
5 |
+
"transformers_version": "4.37.2"
|
6 |
+
}
|
got_vision_b.py
ADDED
@@ -0,0 +1,468 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
import torch.nn.functional as F
|
3 |
+
from typing import Optional, Tuple, Type
|
4 |
+
from functools import partial
|
5 |
+
import torch.nn as nn
|
6 |
+
from typing import Type
|
7 |
+
|
8 |
+
|
9 |
+
|
10 |
+
class MLPBlock(nn.Module):
|
11 |
+
def __init__(
|
12 |
+
self,
|
13 |
+
embedding_dim: int,
|
14 |
+
mlp_dim: int,
|
15 |
+
act: Type[nn.Module] = nn.GELU,
|
16 |
+
) -> None:
|
17 |
+
super().__init__()
|
18 |
+
self.lin1 = nn.Linear(embedding_dim, mlp_dim)
|
19 |
+
self.lin2 = nn.Linear(mlp_dim, embedding_dim)
|
20 |
+
self.act = act()
|
21 |
+
|
22 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
23 |
+
return self.lin2(self.act(self.lin1(x)))
|
24 |
+
|
25 |
+
|
26 |
+
|
27 |
+
class LayerNorm2d(nn.Module):
|
28 |
+
def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
|
29 |
+
super().__init__()
|
30 |
+
self.weight = nn.Parameter(torch.ones(num_channels))
|
31 |
+
self.bias = nn.Parameter(torch.zeros(num_channels))
|
32 |
+
self.eps = eps
|
33 |
+
|
34 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
35 |
+
u = x.mean(1, keepdim=True)
|
36 |
+
s = (x - u).pow(2).mean(1, keepdim=True)
|
37 |
+
x = (x - u) / torch.sqrt(s + self.eps)
|
38 |
+
x = self.weight[:, None, None] * x + self.bias[:, None, None]
|
39 |
+
return x
|
40 |
+
|
41 |
+
|
42 |
+
|
43 |
+
class ImageEncoderViT(nn.Module):
|
44 |
+
def __init__(
|
45 |
+
self,
|
46 |
+
img_size: int = 1024,
|
47 |
+
patch_size: int = 16,
|
48 |
+
in_chans: int = 3,
|
49 |
+
embed_dim: int = 768,
|
50 |
+
depth: int = 12,
|
51 |
+
num_heads: int = 12,
|
52 |
+
mlp_ratio: float = 4.0,
|
53 |
+
out_chans: int = 256,
|
54 |
+
qkv_bias: bool = True,
|
55 |
+
norm_layer: Type[nn.Module] = nn.LayerNorm,
|
56 |
+
act_layer: Type[nn.Module] = nn.GELU,
|
57 |
+
use_abs_pos: bool = True,
|
58 |
+
use_rel_pos: bool = False,
|
59 |
+
rel_pos_zero_init: bool = True,
|
60 |
+
window_size: int = 0,
|
61 |
+
global_attn_indexes: Tuple[int, ...] = (),
|
62 |
+
) -> None:
|
63 |
+
"""
|
64 |
+
Args:
|
65 |
+
img_size (int): Input image size.
|
66 |
+
patch_size (int): Patch size.
|
67 |
+
in_chans (int): Number of input image channels.
|
68 |
+
embed_dim (int): Patch embedding dimension.
|
69 |
+
depth (int): Depth of ViT.
|
70 |
+
num_heads (int): Number of attention heads in each ViT block.
|
71 |
+
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
72 |
+
qkv_bias (bool): If True, add a learnable bias to query, key, value.
|
73 |
+
norm_layer (nn.Module): Normalization layer.
|
74 |
+
act_layer (nn.Module): Activation layer.
|
75 |
+
use_abs_pos (bool): If True, use absolute positional embeddings.
|
76 |
+
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
|
77 |
+
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
|
78 |
+
window_size (int): Window size for window attention blocks.
|
79 |
+
global_attn_indexes (list): Indexes for blocks using global attention.
|
80 |
+
"""
|
81 |
+
super().__init__()
|
82 |
+
self.img_size = img_size
|
83 |
+
|
84 |
+
self.patch_embed = PatchEmbed(
|
85 |
+
kernel_size=(patch_size, patch_size),
|
86 |
+
stride=(patch_size, patch_size),
|
87 |
+
in_chans=in_chans,
|
88 |
+
embed_dim=embed_dim,
|
89 |
+
)
|
90 |
+
|
91 |
+
self.pos_embed: Optional[nn.Parameter] = None
|
92 |
+
if use_abs_pos:
|
93 |
+
# Initialize absolute positional embedding with pretrain image size.
|
94 |
+
self.pos_embed = nn.Parameter(
|
95 |
+
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
|
96 |
+
)
|
97 |
+
|
98 |
+
self.blocks = nn.ModuleList()
|
99 |
+
for i in range(depth):
|
100 |
+
block = Block(
|
101 |
+
dim=embed_dim,
|
102 |
+
num_heads=num_heads,
|
103 |
+
mlp_ratio=mlp_ratio,
|
104 |
+
qkv_bias=qkv_bias,
|
105 |
+
norm_layer=norm_layer,
|
106 |
+
act_layer=act_layer,
|
107 |
+
use_rel_pos=use_rel_pos,
|
108 |
+
rel_pos_zero_init=rel_pos_zero_init,
|
109 |
+
window_size=window_size if i not in global_attn_indexes else 0,
|
110 |
+
input_size=(img_size // patch_size, img_size // patch_size),
|
111 |
+
)
|
112 |
+
self.blocks.append(block)
|
113 |
+
|
114 |
+
self.neck = nn.Sequential(
|
115 |
+
nn.Conv2d(
|
116 |
+
embed_dim,
|
117 |
+
out_chans,
|
118 |
+
kernel_size=1,
|
119 |
+
bias=False,
|
120 |
+
),
|
121 |
+
LayerNorm2d(out_chans),
|
122 |
+
nn.Conv2d(
|
123 |
+
out_chans,
|
124 |
+
out_chans,
|
125 |
+
kernel_size=3,
|
126 |
+
padding=1,
|
127 |
+
bias=False,
|
128 |
+
),
|
129 |
+
LayerNorm2d(out_chans),
|
130 |
+
)
|
131 |
+
|
132 |
+
|
133 |
+
self.net_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False)
|
134 |
+
self.net_3 = nn.Conv2d(512, 1024, kernel_size=3, stride=2, padding=1, bias=False)
|
135 |
+
|
136 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
137 |
+
x = self.patch_embed(x)
|
138 |
+
if self.pos_embed is not None:
|
139 |
+
x = x + self.pos_embed
|
140 |
+
|
141 |
+
for blk in self.blocks:
|
142 |
+
x = blk(x)
|
143 |
+
|
144 |
+
x = self.neck(x.permute(0, 3, 1, 2))
|
145 |
+
x = self.net_2(x)
|
146 |
+
x = self.net_3(x)
|
147 |
+
|
148 |
+
|
149 |
+
return x
|
150 |
+
|
151 |
+
|
152 |
+
class Block(nn.Module):
|
153 |
+
"""Transformer blocks with support of window attention and residual propagation blocks"""
|
154 |
+
|
155 |
+
def __init__(
|
156 |
+
self,
|
157 |
+
dim: int,
|
158 |
+
num_heads: int,
|
159 |
+
mlp_ratio: float = 4.0,
|
160 |
+
qkv_bias: bool = True,
|
161 |
+
norm_layer: Type[nn.Module] = nn.LayerNorm,
|
162 |
+
act_layer: Type[nn.Module] = nn.GELU,
|
163 |
+
use_rel_pos: bool = False,
|
164 |
+
rel_pos_zero_init: bool = True,
|
165 |
+
window_size: int = 0,
|
166 |
+
input_size: Optional[Tuple[int, int]] = None,
|
167 |
+
) -> None:
|
168 |
+
"""
|
169 |
+
Args:
|
170 |
+
dim (int): Number of input channels.
|
171 |
+
num_heads (int): Number of attention heads in each ViT block.
|
172 |
+
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
173 |
+
qkv_bias (bool): If True, add a learnable bias to query, key, value.
|
174 |
+
norm_layer (nn.Module): Normalization layer.
|
175 |
+
act_layer (nn.Module): Activation layer.
|
176 |
+
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
|
177 |
+
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
|
178 |
+
window_size (int): Window size for window attention blocks. If it equals 0, then
|
179 |
+
use global attention.
|
180 |
+
input_size (tuple(int, int) or None): Input resolution for calculating the relative
|
181 |
+
positional parameter size.
|
182 |
+
"""
|
183 |
+
super().__init__()
|
184 |
+
self.norm1 = norm_layer(dim)
|
185 |
+
self.attn = Attention(
|
186 |
+
dim,
|
187 |
+
num_heads=num_heads,
|
188 |
+
qkv_bias=qkv_bias,
|
189 |
+
use_rel_pos=use_rel_pos,
|
190 |
+
rel_pos_zero_init=rel_pos_zero_init,
|
191 |
+
input_size=input_size if window_size == 0 else (window_size, window_size),
|
192 |
+
)
|
193 |
+
|
194 |
+
self.norm2 = norm_layer(dim)
|
195 |
+
self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
|
196 |
+
|
197 |
+
self.window_size = window_size
|
198 |
+
|
199 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
200 |
+
shortcut = x
|
201 |
+
x = self.norm1(x)
|
202 |
+
# Window partition
|
203 |
+
if self.window_size > 0:
|
204 |
+
H, W = x.shape[1], x.shape[2]
|
205 |
+
x, pad_hw = window_partition(x, self.window_size)
|
206 |
+
|
207 |
+
x = self.attn(x)
|
208 |
+
# Reverse window partition
|
209 |
+
if self.window_size > 0:
|
210 |
+
x = window_unpartition(x, self.window_size, pad_hw, (H, W))
|
211 |
+
|
212 |
+
x = shortcut + x
|
213 |
+
x = x + self.mlp(self.norm2(x))
|
214 |
+
|
215 |
+
return x
|
216 |
+
|
217 |
+
|
218 |
+
class Attention(nn.Module):
|
219 |
+
"""Multi-head Attention block with relative position embeddings."""
|
220 |
+
|
221 |
+
def __init__(
|
222 |
+
self,
|
223 |
+
dim: int,
|
224 |
+
num_heads: int = 8,
|
225 |
+
qkv_bias: bool = True,
|
226 |
+
use_rel_pos: bool = False,
|
227 |
+
rel_pos_zero_init: bool = True,
|
228 |
+
input_size: Optional[Tuple[int, int]] = None,
|
229 |
+
) -> None:
|
230 |
+
"""
|
231 |
+
Args:
|
232 |
+
dim (int): Number of input channels.
|
233 |
+
num_heads (int): Number of attention heads.
|
234 |
+
qkv_bias (bool): If True, add a learnable bias to query, key, value.
|
235 |
+
rel_pos (bool): If True, add relative positional embeddings to the attention map.
|
236 |
+
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
|
237 |
+
input_size (tuple(int, int) or None): Input resolution for calculating the relative
|
238 |
+
positional parameter size.
|
239 |
+
"""
|
240 |
+
super().__init__()
|
241 |
+
self.num_heads = num_heads
|
242 |
+
head_dim = dim // num_heads
|
243 |
+
self.scale = head_dim**-0.5
|
244 |
+
|
245 |
+
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
|
246 |
+
self.proj = nn.Linear(dim, dim)
|
247 |
+
|
248 |
+
self.use_rel_pos = use_rel_pos
|
249 |
+
if self.use_rel_pos:
|
250 |
+
assert (
|
251 |
+
input_size is not None
|
252 |
+
), "Input size must be provided if using relative positional encoding."
|
253 |
+
# initialize relative positional embeddings
|
254 |
+
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
|
255 |
+
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
|
256 |
+
|
257 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
258 |
+
B, H, W, _ = x.shape
|
259 |
+
# qkv with shape (3, B, nHead, H * W, C)
|
260 |
+
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
|
261 |
+
# q, k, v with shape (B * nHead, H * W, C)
|
262 |
+
q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
|
263 |
+
|
264 |
+
attn = (q * self.scale) @ k.transpose(-2, -1)
|
265 |
+
|
266 |
+
if self.use_rel_pos:
|
267 |
+
attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
|
268 |
+
|
269 |
+
attn = attn.softmax(dim=-1)
|
270 |
+
x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
|
271 |
+
x = self.proj(x)
|
272 |
+
|
273 |
+
return x
|
274 |
+
|
275 |
+
|
276 |
+
def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
|
277 |
+
"""
|
278 |
+
Partition into non-overlapping windows with padding if needed.
|
279 |
+
Args:
|
280 |
+
x (tensor): input tokens with [B, H, W, C].
|
281 |
+
window_size (int): window size.
|
282 |
+
|
283 |
+
Returns:
|
284 |
+
windows: windows after partition with [B * num_windows, window_size, window_size, C].
|
285 |
+
(Hp, Wp): padded height and width before partition
|
286 |
+
"""
|
287 |
+
B, H, W, C = x.shape
|
288 |
+
|
289 |
+
pad_h = (window_size - H % window_size) % window_size
|
290 |
+
pad_w = (window_size - W % window_size) % window_size
|
291 |
+
if pad_h > 0 or pad_w > 0:
|
292 |
+
x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
|
293 |
+
Hp, Wp = H + pad_h, W + pad_w
|
294 |
+
|
295 |
+
x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
|
296 |
+
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
|
297 |
+
return windows, (Hp, Wp)
|
298 |
+
|
299 |
+
|
300 |
+
def window_unpartition(
|
301 |
+
windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
|
302 |
+
) -> torch.Tensor:
|
303 |
+
"""
|
304 |
+
Window unpartition into original sequences and removing padding.
|
305 |
+
Args:
|
306 |
+
windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
|
307 |
+
window_size (int): window size.
|
308 |
+
pad_hw (Tuple): padded height and width (Hp, Wp).
|
309 |
+
hw (Tuple): original height and width (H, W) before padding.
|
310 |
+
|
311 |
+
Returns:
|
312 |
+
x: unpartitioned sequences with [B, H, W, C].
|
313 |
+
"""
|
314 |
+
Hp, Wp = pad_hw
|
315 |
+
H, W = hw
|
316 |
+
B = windows.shape[0] // (Hp * Wp // window_size // window_size)
|
317 |
+
x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
|
318 |
+
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
|
319 |
+
|
320 |
+
if Hp > H or Wp > W:
|
321 |
+
x = x[:, :H, :W, :].contiguous()
|
322 |
+
return x
|
323 |
+
|
324 |
+
|
325 |
+
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
|
326 |
+
"""
|
327 |
+
Get relative positional embeddings according to the relative positions of
|
328 |
+
query and key sizes.
|
329 |
+
Args:
|
330 |
+
q_size (int): size of query q.
|
331 |
+
k_size (int): size of key k.
|
332 |
+
rel_pos (Tensor): relative position embeddings (L, C).
|
333 |
+
|
334 |
+
Returns:
|
335 |
+
Extracted positional embeddings according to relative positions.
|
336 |
+
"""
|
337 |
+
max_rel_dist = int(2 * max(q_size, k_size) - 1)
|
338 |
+
# Interpolate rel pos if needed.
|
339 |
+
if rel_pos.shape[0] != max_rel_dist:
|
340 |
+
# Interpolate rel pos.
|
341 |
+
rel_pos_resized = F.interpolate(
|
342 |
+
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
|
343 |
+
size=max_rel_dist,
|
344 |
+
mode="linear",
|
345 |
+
)
|
346 |
+
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
|
347 |
+
else:
|
348 |
+
rel_pos_resized = rel_pos
|
349 |
+
|
350 |
+
# Scale the coords with short length if shapes for q and k are different.
|
351 |
+
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
|
352 |
+
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
|
353 |
+
relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
|
354 |
+
|
355 |
+
return rel_pos_resized[relative_coords.long()]
|
356 |
+
|
357 |
+
|
358 |
+
def add_decomposed_rel_pos(
|
359 |
+
attn: torch.Tensor,
|
360 |
+
q: torch.Tensor,
|
361 |
+
rel_pos_h: torch.Tensor,
|
362 |
+
rel_pos_w: torch.Tensor,
|
363 |
+
q_size: Tuple[int, int],
|
364 |
+
k_size: Tuple[int, int],
|
365 |
+
) -> torch.Tensor:
|
366 |
+
"""
|
367 |
+
Args:
|
368 |
+
attn (Tensor): attention map.
|
369 |
+
q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
|
370 |
+
rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
|
371 |
+
rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
|
372 |
+
q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
|
373 |
+
k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
|
374 |
+
|
375 |
+
Returns:
|
376 |
+
attn (Tensor): attention map with added relative positional embeddings.
|
377 |
+
"""
|
378 |
+
q_h, q_w = q_size
|
379 |
+
k_h, k_w = k_size
|
380 |
+
Rh = get_rel_pos(q_h, k_h, rel_pos_h)
|
381 |
+
Rw = get_rel_pos(q_w, k_w, rel_pos_w)
|
382 |
+
|
383 |
+
B, _, dim = q.shape
|
384 |
+
r_q = q.reshape(B, q_h, q_w, dim)
|
385 |
+
rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
|
386 |
+
rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
|
387 |
+
|
388 |
+
attn = (
|
389 |
+
attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
|
390 |
+
).view(B, q_h * q_w, k_h * k_w)
|
391 |
+
|
392 |
+
return attn
|
393 |
+
|
394 |
+
|
395 |
+
class PatchEmbed(nn.Module):
|
396 |
+
"""
|
397 |
+
Image to Patch Embedding.
|
398 |
+
"""
|
399 |
+
|
400 |
+
def __init__(
|
401 |
+
self,
|
402 |
+
kernel_size: Tuple[int, int] = (16, 16),
|
403 |
+
stride: Tuple[int, int] = (16, 16),
|
404 |
+
padding: Tuple[int, int] = (0, 0),
|
405 |
+
in_chans: int = 3,
|
406 |
+
embed_dim: int = 768,
|
407 |
+
) -> None:
|
408 |
+
"""
|
409 |
+
Args:
|
410 |
+
kernel_size (Tuple): kernel size of the projection layer.
|
411 |
+
stride (Tuple): stride of the projection layer.
|
412 |
+
padding (Tuple): padding size of the projection layer.
|
413 |
+
in_chans (int): Number of input image channels.
|
414 |
+
embed_dim (int): Patch embedding dimension.
|
415 |
+
"""
|
416 |
+
super().__init__()
|
417 |
+
|
418 |
+
self.proj = nn.Conv2d(
|
419 |
+
in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
|
420 |
+
)
|
421 |
+
|
422 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
423 |
+
x = self.proj(x)
|
424 |
+
# B C H W -> B H W C
|
425 |
+
x = x.permute(0, 2, 3, 1)
|
426 |
+
return x
|
427 |
+
|
428 |
+
|
429 |
+
|
430 |
+
def build_GOT_vit_b(checkpoint=None):
|
431 |
+
return _build_GOT_vision(
|
432 |
+
encoder_embed_dim=768,
|
433 |
+
encoder_depth=12,
|
434 |
+
encoder_num_heads=12,
|
435 |
+
encoder_global_attn_indexes=[2, 5, 8, 11],
|
436 |
+
checkpoint=checkpoint,
|
437 |
+
)
|
438 |
+
|
439 |
+
|
440 |
+
def _build_GOT_vision(
|
441 |
+
encoder_embed_dim,
|
442 |
+
encoder_depth,
|
443 |
+
encoder_num_heads,
|
444 |
+
encoder_global_attn_indexes,
|
445 |
+
checkpoint=None,
|
446 |
+
):
|
447 |
+
prompt_embed_dim = 256
|
448 |
+
image_size = 1024
|
449 |
+
vit_patch_size = 16
|
450 |
+
image_embedding_size = image_size // vit_patch_size
|
451 |
+
image_encoder=ImageEncoderViT(
|
452 |
+
depth=encoder_depth,
|
453 |
+
embed_dim=encoder_embed_dim,
|
454 |
+
img_size=image_size,
|
455 |
+
mlp_ratio=4,
|
456 |
+
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
|
457 |
+
num_heads=encoder_num_heads,
|
458 |
+
patch_size=vit_patch_size,
|
459 |
+
qkv_bias=True,
|
460 |
+
use_rel_pos=True,
|
461 |
+
global_attn_indexes=encoder_global_attn_indexes,
|
462 |
+
window_size=14,
|
463 |
+
out_chans=prompt_embed_dim,
|
464 |
+
)
|
465 |
+
|
466 |
+
|
467 |
+
return image_encoder
|
468 |
+
|
model.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:77d6144039548b14253176b6eb264896bc39eba532f8894700f210a7fd2a5956
|
3 |
+
size 1432121416
|
modeling_GOT.py
ADDED
@@ -0,0 +1,1008 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import (
|
2 |
+
Qwen2Config,
|
3 |
+
Qwen2Model,
|
4 |
+
Qwen2ForCausalLM,
|
5 |
+
StoppingCriteria,
|
6 |
+
TextStreamer,
|
7 |
+
)
|
8 |
+
from transformers.modeling_outputs import (
|
9 |
+
BaseModelOutputWithPast,
|
10 |
+
CausalLMOutputWithPast,
|
11 |
+
)
|
12 |
+
from typing import List, Optional, Tuple, Union
|
13 |
+
from transformers.cache_utils import Cache
|
14 |
+
import requests
|
15 |
+
from PIL import Image
|
16 |
+
from io import BytesIO
|
17 |
+
import torch
|
18 |
+
import torch.nn as nn
|
19 |
+
from torch.nn import CrossEntropyLoss
|
20 |
+
from .got_vision_b import build_GOT_vit_b
|
21 |
+
from torchvision import transforms
|
22 |
+
from torchvision.transforms.functional import InterpolationMode
|
23 |
+
import dataclasses
|
24 |
+
|
25 |
+
|
26 |
+
DEFAULT_IMAGE_TOKEN = "<image>"
|
27 |
+
DEFAULT_IMAGE_PATCH_TOKEN = "<imgpad>"
|
28 |
+
DEFAULT_IM_START_TOKEN = "<img>"
|
29 |
+
DEFAULT_IM_END_TOKEN = "</img>"
|
30 |
+
|
31 |
+
from enum import auto, Enum
|
32 |
+
|
33 |
+
|
34 |
+
class SeparatorStyle(Enum):
|
35 |
+
"""Different separator style."""
|
36 |
+
|
37 |
+
SINGLE = auto()
|
38 |
+
TWO = auto()
|
39 |
+
MPT = auto()
|
40 |
+
|
41 |
+
|
42 |
+
@dataclasses.dataclass
|
43 |
+
class Conversation:
|
44 |
+
"""A class that keeps all conversation history."""
|
45 |
+
|
46 |
+
system: str
|
47 |
+
roles: List[str]
|
48 |
+
messages: List[List[str]]
|
49 |
+
offset: int
|
50 |
+
sep_style: SeparatorStyle = SeparatorStyle.SINGLE
|
51 |
+
sep: str = "<|im_end|>"
|
52 |
+
sep2: str = None
|
53 |
+
version: str = "Unknown"
|
54 |
+
|
55 |
+
skip_next: bool = False
|
56 |
+
|
57 |
+
def get_prompt(self):
|
58 |
+
if self.sep_style == SeparatorStyle.SINGLE:
|
59 |
+
ret = self.system + self.sep + "\n"
|
60 |
+
for role, message in self.messages:
|
61 |
+
if message:
|
62 |
+
if type(message) is tuple:
|
63 |
+
message, _, _ = message
|
64 |
+
ret += role + ": " + message + self.sep
|
65 |
+
else:
|
66 |
+
ret += role + ":"
|
67 |
+
return ret
|
68 |
+
elif self.sep_style == SeparatorStyle.TWO:
|
69 |
+
seps = [self.sep, self.sep2]
|
70 |
+
ret = self.system + seps[0]
|
71 |
+
for i, (role, message) in enumerate(self.messages):
|
72 |
+
if message:
|
73 |
+
if type(message) is tuple:
|
74 |
+
message, _, _ = message
|
75 |
+
ret += role + ": " + message + seps[i % 2]
|
76 |
+
else:
|
77 |
+
ret += role + ":"
|
78 |
+
return ret
|
79 |
+
if self.sep_style == SeparatorStyle.MPT:
|
80 |
+
if self.system:
|
81 |
+
ret = self.system + self.sep
|
82 |
+
else:
|
83 |
+
ret = ""
|
84 |
+
for role, message in self.messages:
|
85 |
+
if message:
|
86 |
+
if type(message) is tuple:
|
87 |
+
message, _, _ = message
|
88 |
+
ret += role + message + self.sep
|
89 |
+
else:
|
90 |
+
ret += role
|
91 |
+
return ret
|
92 |
+
else:
|
93 |
+
raise ValueError(f"Invalid style: {self.sep_style}")
|
94 |
+
|
95 |
+
def append_message(self, role, message):
|
96 |
+
self.messages.append([role, message])
|
97 |
+
|
98 |
+
def copy(self):
|
99 |
+
return Conversation(
|
100 |
+
system=self.system,
|
101 |
+
roles=self.roles,
|
102 |
+
messages=[[x, y] for x, y in self.messages],
|
103 |
+
offset=self.offset,
|
104 |
+
sep_style=self.sep_style,
|
105 |
+
sep=self.sep,
|
106 |
+
sep2=self.sep2,
|
107 |
+
)
|
108 |
+
|
109 |
+
|
110 |
+
class KeywordsStoppingCriteria(StoppingCriteria):
|
111 |
+
def __init__(self, keywords, tokenizer, input_ids):
|
112 |
+
self.keywords = keywords
|
113 |
+
self.keyword_ids = [tokenizer(keyword).input_ids for keyword in keywords]
|
114 |
+
self.keyword_ids = [
|
115 |
+
keyword_id[0]
|
116 |
+
for keyword_id in self.keyword_ids
|
117 |
+
if type(keyword_id) is list and len(keyword_id) == 1
|
118 |
+
]
|
119 |
+
self.tokenizer = tokenizer
|
120 |
+
self.start_len = None
|
121 |
+
self.input_ids = input_ids
|
122 |
+
|
123 |
+
def __call__(
|
124 |
+
self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs
|
125 |
+
) -> bool:
|
126 |
+
if self.start_len is None:
|
127 |
+
self.start_len = self.input_ids.shape[1]
|
128 |
+
else:
|
129 |
+
for keyword_id in self.keyword_ids:
|
130 |
+
if output_ids[0, -1] == keyword_id:
|
131 |
+
return True
|
132 |
+
outputs = self.tokenizer.batch_decode(
|
133 |
+
output_ids[:, self.start_len :], skip_special_tokens=True
|
134 |
+
)[0]
|
135 |
+
for keyword in self.keywords:
|
136 |
+
if keyword in outputs:
|
137 |
+
return True
|
138 |
+
return False
|
139 |
+
|
140 |
+
|
141 |
+
class GOTImageEvalProcessor:
|
142 |
+
def __init__(self, image_size=384, mean=None, std=None):
|
143 |
+
if mean is None:
|
144 |
+
mean = (0.48145466, 0.4578275, 0.40821073)
|
145 |
+
if std is None:
|
146 |
+
std = (0.26862954, 0.26130258, 0.27577711)
|
147 |
+
|
148 |
+
self.normalize = transforms.Normalize(mean, std)
|
149 |
+
|
150 |
+
self.transform = transforms.Compose(
|
151 |
+
[
|
152 |
+
transforms.Resize(
|
153 |
+
(image_size, image_size), interpolation=InterpolationMode.BICUBIC
|
154 |
+
),
|
155 |
+
transforms.ToTensor(),
|
156 |
+
self.normalize,
|
157 |
+
]
|
158 |
+
)
|
159 |
+
|
160 |
+
def __call__(self, item):
|
161 |
+
return self.transform(item)
|
162 |
+
|
163 |
+
|
164 |
+
class GOTConfig(Qwen2Config):
|
165 |
+
model_type = "GOT"
|
166 |
+
|
167 |
+
|
168 |
+
class GOTQwenModel(Qwen2Model):
|
169 |
+
config_class = GOTConfig
|
170 |
+
|
171 |
+
def __init__(self, config: Qwen2Config):
|
172 |
+
super(GOTQwenModel, self).__init__(config)
|
173 |
+
|
174 |
+
self.vision_tower_high = build_GOT_vit_b()
|
175 |
+
|
176 |
+
self.mm_projector_vary = nn.Linear(1024, 1024)
|
177 |
+
|
178 |
+
def initialize_vision_modules(
|
179 |
+
self,
|
180 |
+
vision_tower,
|
181 |
+
pretrained_stage1_model=None,
|
182 |
+
freeze_vision_tower=False,
|
183 |
+
use_im_start_end=False,
|
184 |
+
vision_select_layer=-1,
|
185 |
+
dtype=torch.float16,
|
186 |
+
device="cpu",
|
187 |
+
):
|
188 |
+
|
189 |
+
image_processor_high = GOTImageEvalProcessor(image_size=1024)
|
190 |
+
|
191 |
+
self.vision_tower_high = self.vision_tower_high.to(dtype=dtype, device=device)
|
192 |
+
|
193 |
+
self.mm_projector_vary = self.mm_projector_vary.to(dtype=dtype, device=device)
|
194 |
+
|
195 |
+
image_token_len = 256
|
196 |
+
|
197 |
+
self.config.vision_tower = vision_tower
|
198 |
+
self.config.image_token_len = image_token_len
|
199 |
+
|
200 |
+
self.config.use_im_start_end = True
|
201 |
+
|
202 |
+
self.config.vision_select_layer = vision_select_layer
|
203 |
+
self.config.freeze_vision_tower = freeze_vision_tower
|
204 |
+
|
205 |
+
return dict(
|
206 |
+
image_processor_high=image_processor_high,
|
207 |
+
image_token_len=image_token_len,
|
208 |
+
)
|
209 |
+
|
210 |
+
def forward(
|
211 |
+
self,
|
212 |
+
input_ids: torch.LongTensor = None,
|
213 |
+
attention_mask: Optional[torch.Tensor] = None,
|
214 |
+
position_ids: Optional[torch.LongTensor] = None,
|
215 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
216 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
217 |
+
use_cache: Optional[bool] = None,
|
218 |
+
output_attentions: Optional[bool] = None,
|
219 |
+
output_hidden_states: Optional[bool] = None,
|
220 |
+
images: Optional[torch.FloatTensor] = None,
|
221 |
+
return_dict: Optional[bool] = None,
|
222 |
+
) -> Union[Tuple, BaseModelOutputWithPast]:
|
223 |
+
|
224 |
+
# HACK: replace back original embeddings for LLaVA pretraining
|
225 |
+
orig_embeds_params = getattr(self, "orig_embeds_params", None)
|
226 |
+
if orig_embeds_params is not None:
|
227 |
+
with torch.no_grad():
|
228 |
+
self.get_input_embeddings().weight[: -self.num_new_tokens] = (
|
229 |
+
orig_embeds_params[: -self.num_new_tokens].data
|
230 |
+
)
|
231 |
+
|
232 |
+
if inputs_embeds is None:
|
233 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
234 |
+
|
235 |
+
vision_tower_high = getattr(self, "vision_tower_high", None)
|
236 |
+
|
237 |
+
if (
|
238 |
+
vision_tower_high is not None
|
239 |
+
and (input_ids.shape[1] != 1 or self.training)
|
240 |
+
and images is not None
|
241 |
+
):
|
242 |
+
use_im_start_end = getattr(self.config, "use_im_start_end", -1)
|
243 |
+
|
244 |
+
vision_select_layer = getattr(self.config, "vision_select_layer", -1)
|
245 |
+
im_patch_token = getattr(self.config, "im_patch_token", -1)
|
246 |
+
im_start_token = getattr(self.config, "im_start_token", -1)
|
247 |
+
im_end_token = getattr(self.config, "im_end_token", -1)
|
248 |
+
freeze_vision_tower = getattr(self.config, "freeze_vision_tower", False)
|
249 |
+
|
250 |
+
im_patch_token = 151859
|
251 |
+
|
252 |
+
im_start_token = 151857
|
253 |
+
|
254 |
+
im_end_token = 151858
|
255 |
+
|
256 |
+
image_features = []
|
257 |
+
|
258 |
+
for image in images:
|
259 |
+
P, C, H, W = image.shape
|
260 |
+
if P == 1:
|
261 |
+
with torch.set_grad_enabled(False):
|
262 |
+
cnn_feature = vision_tower_high(image)
|
263 |
+
cnn_feature = cnn_feature.flatten(2).permute(
|
264 |
+
0, 2, 1
|
265 |
+
) # 256*1024
|
266 |
+
image_feature = self.mm_projector_vary(cnn_feature)
|
267 |
+
image_features.append(image_feature)
|
268 |
+
|
269 |
+
else:
|
270 |
+
image_patches = torch.unbind(image)
|
271 |
+
image_patches_features = []
|
272 |
+
for image_patch in image_patches:
|
273 |
+
image_p = torch.stack([image_patch])
|
274 |
+
|
275 |
+
with torch.set_grad_enabled(False):
|
276 |
+
cnn_feature_p = vision_tower_high(image_p)
|
277 |
+
cnn_feature_p = cnn_feature_p.flatten(2).permute(0, 2, 1)
|
278 |
+
image_feature_p = self.mm_projector_vary(cnn_feature_p)
|
279 |
+
image_patches_features.append(image_feature_p)
|
280 |
+
image_feature = torch.cat(image_patches_features, dim=1)
|
281 |
+
image_features.append(image_feature)
|
282 |
+
|
283 |
+
dummy_image_features_2 = torch.zeros(
|
284 |
+
256, 1024, device=inputs_embeds.device, dtype=inputs_embeds.dtype
|
285 |
+
)
|
286 |
+
dummy_image_features = dummy_image_features_2
|
287 |
+
use_im_start_end = True
|
288 |
+
new_input_embeds = []
|
289 |
+
for cur_input_ids, cur_input_embeds, cur_image_features in zip(
|
290 |
+
input_ids, inputs_embeds, image_features
|
291 |
+
):
|
292 |
+
if (cur_input_ids == im_patch_token).sum() == 0:
|
293 |
+
cur_input_embeds = (
|
294 |
+
cur_input_embeds + (0.0 * dummy_image_features).sum()
|
295 |
+
)
|
296 |
+
new_input_embeds.append(cur_input_embeds)
|
297 |
+
continue
|
298 |
+
|
299 |
+
if use_im_start_end:
|
300 |
+
if (cur_input_ids == im_start_token).sum() != (
|
301 |
+
cur_input_ids == im_end_token
|
302 |
+
).sum():
|
303 |
+
raise ValueError(
|
304 |
+
"The number of image start tokens and image end tokens should be the same."
|
305 |
+
)
|
306 |
+
|
307 |
+
image_start_tokens = torch.where(cur_input_ids == im_start_token)[0]
|
308 |
+
for image_start_token_pos, per_cur_image_features in zip(
|
309 |
+
image_start_tokens, cur_image_features
|
310 |
+
):
|
311 |
+
per_cur_image_features = per_cur_image_features.to(
|
312 |
+
device=cur_input_embeds.device
|
313 |
+
)
|
314 |
+
num_patches = per_cur_image_features.shape[0]
|
315 |
+
|
316 |
+
if (
|
317 |
+
cur_input_ids[image_start_token_pos + num_patches + 1]
|
318 |
+
!= im_end_token
|
319 |
+
):
|
320 |
+
raise ValueError(
|
321 |
+
"The image end token should follow the image start token."
|
322 |
+
)
|
323 |
+
|
324 |
+
cur_input_embeds = torch.cat(
|
325 |
+
(
|
326 |
+
cur_input_embeds[: image_start_token_pos + 1],
|
327 |
+
per_cur_image_features,
|
328 |
+
cur_input_embeds[
|
329 |
+
image_start_token_pos + num_patches + 1 :
|
330 |
+
],
|
331 |
+
),
|
332 |
+
dim=0,
|
333 |
+
)
|
334 |
+
|
335 |
+
new_input_embeds.append(cur_input_embeds)
|
336 |
+
else:
|
337 |
+
raise NotImplementedError
|
338 |
+
|
339 |
+
inputs_embeds = torch.stack(new_input_embeds, dim=0)
|
340 |
+
|
341 |
+
return super(GOTQwenModel, self).forward(
|
342 |
+
input_ids=None,
|
343 |
+
attention_mask=attention_mask,
|
344 |
+
past_key_values=past_key_values,
|
345 |
+
inputs_embeds=inputs_embeds,
|
346 |
+
use_cache=use_cache,
|
347 |
+
position_ids=position_ids,
|
348 |
+
output_attentions=output_attentions,
|
349 |
+
output_hidden_states=output_hidden_states,
|
350 |
+
return_dict=return_dict,
|
351 |
+
)
|
352 |
+
|
353 |
+
|
354 |
+
class GOTQwenForCausalLM(Qwen2ForCausalLM):
|
355 |
+
config_class = GOTConfig
|
356 |
+
# supports_gradient_checkpointing = True
|
357 |
+
|
358 |
+
def __init__(self, config):
|
359 |
+
super(Qwen2ForCausalLM, self).__init__(config)
|
360 |
+
self.model = GOTQwenModel(config)
|
361 |
+
|
362 |
+
self.vocab_size = config.vocab_size
|
363 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
364 |
+
|
365 |
+
# Initialize weights and apply final processing
|
366 |
+
self.post_init()
|
367 |
+
|
368 |
+
def get_model(self):
|
369 |
+
return self.model
|
370 |
+
|
371 |
+
def forward(
|
372 |
+
self,
|
373 |
+
input_ids: torch.LongTensor = None,
|
374 |
+
attention_mask: Optional[torch.Tensor] = None,
|
375 |
+
position_ids: Optional[torch.LongTensor] = None,
|
376 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
377 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
378 |
+
labels: Optional[torch.LongTensor] = None,
|
379 |
+
use_cache: Optional[bool] = None,
|
380 |
+
output_attentions: Optional[bool] = None,
|
381 |
+
output_hidden_states: Optional[bool] = None,
|
382 |
+
images: Optional[torch.FloatTensor] = None,
|
383 |
+
return_dict: Optional[bool] = None,
|
384 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
385 |
+
output_attentions = (
|
386 |
+
output_attentions
|
387 |
+
if output_attentions is not None
|
388 |
+
else self.config.output_attentions
|
389 |
+
)
|
390 |
+
output_hidden_states = (
|
391 |
+
output_hidden_states
|
392 |
+
if output_hidden_states is not None
|
393 |
+
else self.config.output_hidden_states
|
394 |
+
)
|
395 |
+
return_dict = (
|
396 |
+
return_dict if return_dict is not None else self.config.use_return_dict
|
397 |
+
)
|
398 |
+
|
399 |
+
outputs = self.model(
|
400 |
+
input_ids=input_ids,
|
401 |
+
past_key_values=past_key_values,
|
402 |
+
attention_mask=attention_mask,
|
403 |
+
position_ids=position_ids,
|
404 |
+
inputs_embeds=inputs_embeds,
|
405 |
+
use_cache=use_cache,
|
406 |
+
output_attentions=output_attentions,
|
407 |
+
output_hidden_states=output_hidden_states,
|
408 |
+
images=images,
|
409 |
+
return_dict=return_dict,
|
410 |
+
)
|
411 |
+
|
412 |
+
hidden_states = outputs[0]
|
413 |
+
logits = self.lm_head(hidden_states)
|
414 |
+
logits = logits.float()
|
415 |
+
|
416 |
+
# logits
|
417 |
+
|
418 |
+
loss = None
|
419 |
+
if labels is not None:
|
420 |
+
# Shift so that tokens < n predict n
|
421 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
422 |
+
shift_labels = labels[..., 1:].contiguous()
|
423 |
+
# Flatten the tokens
|
424 |
+
loss_fct = CrossEntropyLoss()
|
425 |
+
shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
426 |
+
shift_labels = shift_labels.view(-1)
|
427 |
+
# Enable model parallelism
|
428 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
429 |
+
loss = loss_fct(shift_logits, shift_labels)
|
430 |
+
|
431 |
+
if not return_dict:
|
432 |
+
output = (logits,) + outputs[1:]
|
433 |
+
return (loss,) + output if loss is not None else output
|
434 |
+
|
435 |
+
return CausalLMOutputWithPast(
|
436 |
+
loss=loss,
|
437 |
+
logits=logits,
|
438 |
+
past_key_values=outputs.past_key_values,
|
439 |
+
hidden_states=outputs.hidden_states,
|
440 |
+
attentions=outputs.attentions,
|
441 |
+
)
|
442 |
+
|
443 |
+
def prepare_inputs_for_generation(
|
444 |
+
self,
|
445 |
+
input_ids,
|
446 |
+
past_key_values=None,
|
447 |
+
attention_mask=None,
|
448 |
+
inputs_embeds=None,
|
449 |
+
**kwargs,
|
450 |
+
):
|
451 |
+
# Omit tokens covered by past_key_values
|
452 |
+
if past_key_values is not None:
|
453 |
+
if isinstance(past_key_values, Cache):
|
454 |
+
cache_length = past_key_values.get_seq_length()
|
455 |
+
past_length = past_key_values.seen_tokens
|
456 |
+
max_cache_length = past_key_values.get_max_length()
|
457 |
+
else:
|
458 |
+
cache_length = past_length = past_key_values[0][0].shape[2]
|
459 |
+
max_cache_length = None
|
460 |
+
|
461 |
+
# Keep only the unprocessed tokens:
|
462 |
+
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
|
463 |
+
# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
|
464 |
+
# input)
|
465 |
+
if (
|
466 |
+
attention_mask is not None
|
467 |
+
and attention_mask.shape[1] > input_ids.shape[1]
|
468 |
+
):
|
469 |
+
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
|
470 |
+
# 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
|
471 |
+
# input_ids based on the past_length.
|
472 |
+
elif past_length < input_ids.shape[1]:
|
473 |
+
input_ids = input_ids[:, past_length:]
|
474 |
+
# 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
|
475 |
+
|
476 |
+
# If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
|
477 |
+
if (
|
478 |
+
max_cache_length is not None
|
479 |
+
and attention_mask is not None
|
480 |
+
and cache_length + input_ids.shape[1] > max_cache_length
|
481 |
+
):
|
482 |
+
attention_mask = attention_mask[:, -max_cache_length:]
|
483 |
+
|
484 |
+
position_ids = kwargs.get("position_ids", None)
|
485 |
+
if attention_mask is not None and position_ids is None:
|
486 |
+
# create position_ids on the fly for batch generation
|
487 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
488 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
489 |
+
if past_key_values:
|
490 |
+
position_ids = position_ids[:, -input_ids.shape[1] :]
|
491 |
+
|
492 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
493 |
+
if inputs_embeds is not None and past_key_values is None:
|
494 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
495 |
+
else:
|
496 |
+
model_inputs = {"input_ids": input_ids}
|
497 |
+
|
498 |
+
model_inputs.update(
|
499 |
+
{
|
500 |
+
"position_ids": position_ids,
|
501 |
+
"past_key_values": past_key_values,
|
502 |
+
"use_cache": kwargs.get("use_cache"),
|
503 |
+
"attention_mask": attention_mask,
|
504 |
+
"images": kwargs.get("images", None),
|
505 |
+
}
|
506 |
+
)
|
507 |
+
return model_inputs
|
508 |
+
|
509 |
+
def initialize_vision_tokenizer(
|
510 |
+
self,
|
511 |
+
tokenizer,
|
512 |
+
freeze_lm_model=False,
|
513 |
+
pretrained_stage1_model=None,
|
514 |
+
device="cpu",
|
515 |
+
):
|
516 |
+
config = self.get_model().config
|
517 |
+
|
518 |
+
self.resize_token_embeddings(len(tokenizer))
|
519 |
+
|
520 |
+
config.im_patch_token = 151859
|
521 |
+
|
522 |
+
config.use_im_start_end = True
|
523 |
+
|
524 |
+
if config.use_im_start_end:
|
525 |
+
self.resize_token_embeddings(len(tokenizer))
|
526 |
+
config.im_start_token, config.im_end_token = 151857, 151858
|
527 |
+
|
528 |
+
def load_image(self, image_file):
|
529 |
+
if image_file.startswith("http") or image_file.startswith("https"):
|
530 |
+
response = requests.get(image_file)
|
531 |
+
image = Image.open(BytesIO(response.content)).convert("RGB")
|
532 |
+
else:
|
533 |
+
image = Image.open(image_file).convert("RGB")
|
534 |
+
return image
|
535 |
+
|
536 |
+
def disable_torch_init(self):
|
537 |
+
"""
|
538 |
+
Disable the redundant torch default initialization to accelerate model creation.
|
539 |
+
"""
|
540 |
+
import torch
|
541 |
+
|
542 |
+
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
|
543 |
+
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
|
544 |
+
|
545 |
+
def chat(
|
546 |
+
self,
|
547 |
+
tokenizer,
|
548 |
+
image_file,
|
549 |
+
ocr_type,
|
550 |
+
ocr_box="",
|
551 |
+
ocr_color="",
|
552 |
+
render=False,
|
553 |
+
save_render_file=None,
|
554 |
+
print_prompt=False,
|
555 |
+
gradio_input=False,
|
556 |
+
stream_flag=False,
|
557 |
+
):
|
558 |
+
|
559 |
+
self.disable_torch_init()
|
560 |
+
|
561 |
+
image_processor_high = GOTImageEvalProcessor(image_size=1024)
|
562 |
+
|
563 |
+
use_im_start_end = True
|
564 |
+
|
565 |
+
image_token_len = 256
|
566 |
+
|
567 |
+
if gradio_input:
|
568 |
+
image = image_file.copy()
|
569 |
+
else:
|
570 |
+
image = self.load_image(image_file)
|
571 |
+
|
572 |
+
w, h = image.size
|
573 |
+
|
574 |
+
if ocr_type == "format":
|
575 |
+
qs = "OCR with format: "
|
576 |
+
else:
|
577 |
+
qs = "OCR: "
|
578 |
+
|
579 |
+
if ocr_box:
|
580 |
+
bbox = eval(ocr_box)
|
581 |
+
if len(bbox) == 2:
|
582 |
+
bbox[0] = int(bbox[0] / w * 1000)
|
583 |
+
bbox[1] = int(bbox[1] / h * 1000)
|
584 |
+
if len(bbox) == 4:
|
585 |
+
bbox[0] = int(bbox[0] / w * 1000)
|
586 |
+
bbox[1] = int(bbox[1] / h * 1000)
|
587 |
+
bbox[2] = int(bbox[2] / w * 1000)
|
588 |
+
bbox[3] = int(bbox[3] / h * 1000)
|
589 |
+
if ocr_type == "format":
|
590 |
+
qs = str(bbox) + " " + "OCR with format: "
|
591 |
+
else:
|
592 |
+
qs = str(bbox) + " " + "OCR: "
|
593 |
+
|
594 |
+
if ocr_color:
|
595 |
+
if ocr_type == "format":
|
596 |
+
qs = "[" + ocr_color + "]" + " " + "OCR with format: "
|
597 |
+
else:
|
598 |
+
qs = "[" + ocr_color + "]" + " " + "OCR: "
|
599 |
+
|
600 |
+
if use_im_start_end:
|
601 |
+
qs = (
|
602 |
+
DEFAULT_IM_START_TOKEN
|
603 |
+
+ DEFAULT_IMAGE_PATCH_TOKEN * image_token_len
|
604 |
+
+ DEFAULT_IM_END_TOKEN
|
605 |
+
+ "\n"
|
606 |
+
+ qs
|
607 |
+
)
|
608 |
+
else:
|
609 |
+
qs = DEFAULT_IMAGE_TOKEN + "\n" + qs
|
610 |
+
|
611 |
+
conv_mpt = Conversation(
|
612 |
+
system="""<|im_start|>system
|
613 |
+
You should follow the instructions carefully and explain your answers in detail.""",
|
614 |
+
# system = None,
|
615 |
+
roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
|
616 |
+
version="mpt",
|
617 |
+
messages=(),
|
618 |
+
offset=0,
|
619 |
+
sep_style=SeparatorStyle.MPT,
|
620 |
+
sep="<|im_end|>",
|
621 |
+
)
|
622 |
+
|
623 |
+
conv = conv_mpt.copy()
|
624 |
+
conv.append_message(conv.roles[0], qs)
|
625 |
+
conv.append_message(conv.roles[1], None)
|
626 |
+
prompt = conv.get_prompt()
|
627 |
+
|
628 |
+
if print_prompt:
|
```python
# modeling_GOT.py (continued) — lines 629-769: end of chat(): generation,
# decoding, and the optional HTML rendering of formatted output.
            print(prompt)

        inputs = tokenizer([prompt])

        image_tensor_1 = image_processor_high(image)

        input_ids = torch.as_tensor(inputs.input_ids).cpu()

        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
        streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

        if stream_flag:
            with torch.autocast("cpu", dtype=torch.bfloat16):
                output_ids = self.generate(
                    input_ids,
                    images=[image_tensor_1.unsqueeze(0).half().cpu()],
                    do_sample=False,
                    num_beams=1,
                    no_repeat_ngram_size=20,
                    streamer=streamer,
                    max_new_tokens=4096,
                    stopping_criteria=[stopping_criteria],
                )
        else:
            with torch.autocast("cpu", dtype=torch.bfloat16):
                output_ids = self.generate(
                    input_ids,
                    images=[image_tensor_1.unsqueeze(0).half().cpu()],
                    do_sample=False,
                    num_beams=1,
                    no_repeat_ngram_size=20,
                    # streamer=streamer,
                    max_new_tokens=4096,
                    stopping_criteria=[stopping_criteria],
                )

        outputs = tokenizer.decode(output_ids[0, input_ids.shape[1] :]).strip()

        if outputs.endswith(stop_str):
            outputs = outputs[: -len(stop_str)]
        outputs = outputs.strip()
        response_str = outputs

        if render:
            print("==============rendering===============")
            from .render_tools import (
                svg_to_html,
                content_mmd_to_html,
                tik_html,
                translation_table,
            )

            if "**kern" in outputs:
                import verovio

                tk = verovio.toolkit()
                tk.loadData(outputs)
                tk.setOptions(
                    {
                        "pageWidth": 2100,
                        "footer": "none",
                        "barLineWidth": 0.5,
                        "beamMaxSlope": 15,
                        "staffLineWidth": 0.2,
                        "spacingStaff": 6,
                    }
                )
                tk.getPageCount()
                svg = tk.renderToSVG()
                svg = svg.replace('overflow="inherit"', 'overflow="visible"')

                svg_to_html(svg, save_render_file)

            if ocr_type == "format" and "**kern" not in outputs:

                if "\\begin{tikzpicture}" not in outputs:
                    html_path_2 = save_render_file
                    right_num = outputs.count("\\right")
                    left_num = outputs.count("\left")

                    if right_num != left_num:
                        outputs = (
                            outputs.replace("\left(", "(")
                            .replace("\\right)", ")")
                            .replace("\left[", "[")
                            .replace("\\right]", "]")
                            .replace("\left{", "{")
                            .replace("\\right}", "}")
                            .replace("\left|", "|")
                            .replace("\\right|", "|")
                            .replace("\left.", ".")
                            .replace("\\right.", ".")
                        )

                    outputs = outputs.replace('"', "``").replace("$", "")

                    outputs_list = outputs.split("\n")
                    gt = ""
                    for out in outputs_list:
                        gt += '"' + out.replace("\\", "\\\\") + r"\n" + '"' + "+" + "\n"

                    gt = gt[:-2]

                    lines = content_mmd_to_html
                    lines = lines.split("const text =")
                    new_web = lines[0] + "const text =" + gt + lines[1]

                else:
                    html_path_2 = save_render_file
                    outputs = outputs.translate(translation_table)
                    outputs_list = outputs.split("\n")
                    gt = ""
                    for out in outputs_list:
                        if out:
                            if (
                                "\\begin{tikzpicture}" not in out
                                and "\\end{tikzpicture}" not in out
                            ):
                                while out[-1] == " ":
                                    out = out[:-1]
                                    if out is None:
                                        break

                                if out:
                                    if out[-1] != ";":
                                        gt += out[:-1] + ";\n"
                                    else:
                                        gt += out + "\n"
                            else:
                                gt += out + "\n"

                    lines = tik_html
                    lines = lines.split("const text =")
                    new_web = lines[0] + gt + lines[1]

                with open(html_path_2, "w") as web_f_new:
                    web_f_new.write(new_web)
        return response_str
```
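For context, a minimal usage sketch (not part of the file) of the rendering path above, assuming `model` and `tokenizer` have been loaded as shown in the README; the image and output paths are placeholders:

```python
# Hypothetical example: formatted OCR plus HTML rendering of the result.
# 'formula.jpg' and 'formula.html' are placeholder paths.
res = model.chat(
    tokenizer,
    'formula.jpg',
    ocr_type='format',
    render=True,                      # triggers the render branch above
    save_render_file='formula.html',  # kern -> verovio SVG; otherwise Mathpix-markdown / TikZ HTML
)
```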
```python
# modeling_GOT.py (continued) — lines 770-835: dynamic_preprocess(): pick the
# tiling grid closest to the input aspect ratio and split the image into
# image_size x image_size crops, plus an optional thumbnail.
    def dynamic_preprocess(
        self, image, min_num=1, max_num=6, image_size=1024, use_thumbnail=True
    ):

        def find_closest_aspect_ratio(
            aspect_ratio, target_ratios, width, height, image_size
        ):
            best_ratio_diff = float("inf")
            best_ratio = (1, 1)
            area = width * height
            for ratio in target_ratios:
                target_aspect_ratio = ratio[0] / ratio[1]
                ratio_diff = abs(aspect_ratio - target_aspect_ratio)
                if ratio_diff < best_ratio_diff:
                    best_ratio_diff = ratio_diff
                    best_ratio = ratio
                elif ratio_diff == best_ratio_diff:
                    if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                        best_ratio = ratio
            # print(f'width: {width}, height: {height}, best_ratio: {best_ratio}')
            return best_ratio

        orig_width, orig_height = image.size
        aspect_ratio = orig_width / orig_height

        # calculate the existing image aspect ratio
        target_ratios = set(
            (i, j)
            for n in range(min_num, max_num + 1)
            for i in range(1, n + 1)
            for j in range(1, n + 1)
            if i * j <= max_num and i * j >= min_num
        )
        # print(target_ratios)
        target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

        # find the closest aspect ratio to the target
        target_aspect_ratio = find_closest_aspect_ratio(
            aspect_ratio, target_ratios, orig_width, orig_height, image_size
        )

        # print(target_aspect_ratio)
        # calculate the target width and height
        target_width = image_size * target_aspect_ratio[0]
        target_height = image_size * target_aspect_ratio[1]
        blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

        # resize the image
        resized_img = image.resize((target_width, target_height))
        processed_images = []
        for i in range(blocks):
            box = (
                (i % (target_width // image_size)) * image_size,
                (i // (target_width // image_size)) * image_size,
                ((i % (target_width // image_size)) + 1) * image_size,
                ((i // (target_width // image_size)) + 1) * image_size,
            )
            # split the image
            split_img = resized_img.crop(box)
            processed_images.append(split_img)
        assert len(processed_images) == blocks
        if use_thumbnail and len(processed_images) != 1:
            thumbnail_img = image.resize((image_size, image_size))
            processed_images.append(thumbnail_img)
        return processed_images
```
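A small sketch (not part of the file) of what `dynamic_preprocess` produces on its own; the file name is a placeholder:

```python
# Hypothetical example: tiling a wide page into 1024x1024 crops plus a thumbnail.
from PIL import Image

img = Image.open('wide_page.jpg').convert('RGB')            # placeholder path
patches = model.dynamic_preprocess(img, min_num=1, max_num=6, image_size=1024)
# e.g. a roughly 2:1 landscape page gives two 1024x1024 crops plus one thumbnail
print(len(patches), [p.size for p in patches])
```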
```python
# modeling_GOT.py (continued) — lines 836-1008: chat_crop(): multi-patch OCR
# built on dynamic_preprocess, with the same generation and rendering logic.
    def chat_crop(
        self,
        tokenizer,
        image_file,
        ocr_type,
        render=False,
        save_render_file=None,
        print_prompt=False,
        gradio_input=False,
        stream_flag=False,
    ):
        # Model
        self.disable_torch_init()
        multi_page = False

        image_processor_high = GOTImageEvalProcessor(image_size=1024)

        use_im_start_end = True

        image_token_len = 256

        image_list = []

        # if len(image_file_list)>1:
        #     multi_page = True

        if multi_page:
            qs = "OCR with format across multi pages: "
            # only for png files
            # import glob
            # from natsort import natsorted
            # patches = glob.glob(image_file + '/*png')
            patches = image_file
            # patches = natsorted(patches)
            sub_images = []
            for sub_image in patches:
                sub_images.append(self.load_image(sub_image))

            ll = len(patches)
            # print(patches)
            # print("len ll: ", ll)

        else:
            if ocr_type == "format":
                qs = "OCR with format upon the patch reference: "
            else:
                qs = "OCR upon the patch reference: "
            if gradio_input:
                img = image_file.copy()
            else:
                img = self.load_image(image_file)
            sub_images = self.dynamic_preprocess(img)
            ll = len(sub_images)

        for image in sub_images:
            image_tensor_1 = image_processor_high(image)
            image_list.append(image_tensor_1)

        image_list = torch.stack(image_list)

        print("====new images batch size======: \n", image_list.shape)

        if use_im_start_end:
            qs = (
                DEFAULT_IM_START_TOKEN
                + DEFAULT_IMAGE_PATCH_TOKEN * image_token_len * ll
                + DEFAULT_IM_END_TOKEN
                + "\n"
                + qs
            )
        else:
            qs = DEFAULT_IMAGE_TOKEN + "\n" + qs

        conv_mpt = Conversation(
            system="""<|im_start|>system
        You should follow the instructions carefully and explain your answers in detail.""",
            # system = None,
            roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
            version="mpt",
            messages=(),
            offset=0,
            sep_style=SeparatorStyle.MPT,
            sep="<|im_end|>",
        )

        conv = conv_mpt.copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        if print_prompt:
            print(prompt)

        inputs = tokenizer([prompt])

        input_ids = torch.as_tensor(inputs.input_ids).cpu()

        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
        streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

        if stream_flag:
            with torch.autocast("cpu", dtype=torch.bfloat16):
                output_ids = self.generate(
                    input_ids,
                    images=[image_list.half().cpu()],
                    do_sample=False,
                    num_beams=1,
                    # no_repeat_ngram_size = 20,
                    streamer=streamer,
                    max_new_tokens=4096,
                    stopping_criteria=[stopping_criteria],
                )
        else:
            with torch.autocast("cpu", dtype=torch.bfloat16):
                output_ids = self.generate(
                    input_ids,
                    images=[image_list.half().cpu()],
                    do_sample=False,
                    num_beams=1,
                    # no_repeat_ngram_size = 20,
                    # streamer=streamer,
                    max_new_tokens=4096,
                    stopping_criteria=[stopping_criteria],
                )

        outputs = tokenizer.decode(output_ids[0, input_ids.shape[1] :]).strip()

        if outputs.endswith(stop_str):
            outputs = outputs[: -len(stop_str)]
        outputs = outputs.strip()
        response_str = outputs

        if render:
            print("==============rendering===============")
            from .render_tools import content_mmd_to_html

            html_path_2 = save_render_file
            right_num = outputs.count("\\right")
            left_num = outputs.count("\left")

            if right_num != left_num:
                outputs = (
                    outputs.replace("\left(", "(")
                    .replace("\\right)", ")")
                    .replace("\left[", "[")
                    .replace("\\right]", "]")
                    .replace("\left{", "{")
                    .replace("\\right}", "}")
                    .replace("\left|", "|")
                    .replace("\\right|", "|")
                    .replace("\left.", ".")
                    .replace("\\right.", ".")
                )

            outputs = outputs.replace('"', "``").replace("$", "")

            outputs_list = outputs.split("\n")
            gt = ""
            for out in outputs_list:
                gt += '"' + out.replace("\\", "\\\\") + r"\n" + '"' + "+" + "\n"

            gt = gt[:-2]

            lines = content_mmd_to_html
            lines = lines.split("const text =")
            new_web = lines[0] + "const text =" + gt + lines[1]

            with open(html_path_2, "w") as web_f_new:
                web_f_new.write(new_web)

        return response_str
```
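And a corresponding sketch (again not part of the file) for the multi-crop entry point, intended for large or dense pages; the paths are placeholders:

```python
# Hypothetical example: crop-based OCR on a dense page, with HTML rendering.
res = model.chat_crop(
    tokenizer,
    'dense_page.jpg',                    # placeholder path
    ocr_type='format',
    render=True,
    save_render_file='dense_page.html',  # placeholder output file
)
print(res)
```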
qwen.tiktoken
ADDED
The diff for this file is too large to render.
See raw diff
render_tools.py
ADDED
@@ -0,0 +1,96 @@
```python
punctuation_dict = {
    ",": ",",
    "。": ".",
}
translation_table = str.maketrans(punctuation_dict)


def svg_to_html(svg_content, output_filename):

    html_content = f"""
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>SVG Embedded in HTML</title>
    </head>
    <body>
        <svg width="2100" height="15000" xmlns="http://www.w3.org/2000/svg">
        {svg_content}
        </svg>
    </body>
    </html>
    """

    with open(output_filename, 'w') as file:
        file.write(html_content)


content_mmd_to_html = """<!DOCTYPE html>
<html lang="en" data-lt-installed="true"><head>
    <meta charset="UTF-8">
    <title>Title</title>
    <script>
        const text =
    </script>
    <style>
        #content {
            max-width: 800px;
            margin: auto;
        }
    </style>
    <script>
        let script = document.createElement('script');
        script.src = "https://cdn.jsdelivr.net/npm/[email protected]/es5/bundle.js";
        document.head.append(script);

        script.onload = function() {
            const isLoaded = window.loadMathJax();
            if (isLoaded) {
                console.log('Styles loaded!')
            }

            const el = window.document.getElementById('content-text');
            if (el) {
                const options = {
                    htmlTags: true
                };
                const html = window.render(text, options);
                el.outerHTML = html;
            }
        };
    </script>
</head>
<body>
    <div id="content"><div id="content-text"></div></div>
</body>
</html>
"""


tik_html = """
<!DOCTYPE html>

<html>

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Document</title>
    <link rel="stylesheet" type="text/css" href="https://tikzjax.com/v1/fonts.css">
    <script src="https://tikzjax.com/v1/tikzjax.js"></script>
</head>
<body>
    <script type="text/tikz">
        const text =
    </script>
</body>
</html>"""


# print(tik_html)
```
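The helpers above are consumed by `modeling_GOT.py` when `render=True`; a rough stand-alone sketch with hypothetical file names and a toy SVG fragment:

```python
# Hypothetical example: using the render_tools helpers directly.
from render_tools import svg_to_html, content_mmd_to_html, translation_table

# Wrap an already-rendered SVG fragment in a standalone HTML page.
svg_to_html("<circle cx='50' cy='50' r='40'/>", "sheet.html")   # placeholder output file

# Map full-width punctuation to ASCII before TikZ post-processing.
print("坐标,原点。".translate(translation_table))   # -> 坐标,原点.

# The model code splits the HTML template on "const text =" and splices
# the OCR output between the two halves.
head, tail = content_mmd_to_html.split("const text =")
```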
special_tokens_map.json
ADDED
@@ -0,0 +1,9 @@
```json
{
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
```
tokenization_qwen.py
ADDED
@@ -0,0 +1,264 @@
```python
# Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""Tokenization classes for QWen."""

import base64
import logging
import os
import unicodedata
from typing import Collection, Dict, List, Set, Tuple, Union

import tiktoken
from transformers import PreTrainedTokenizer, AddedToken

logger = logging.getLogger(__name__)


VOCAB_FILES_NAMES = {"vocab_file": "qwen.tiktoken"}

PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
ENDOFTEXT = "<|endoftext|>"
IMSTART = "<|im_start|>"
IMEND = "<|im_end|>"
# as the default behavior is changed to allow special tokens in
# regular texts, the surface forms of special tokens need to be
# as different as possible to minimize the impact
EXTRAS = tuple((f"<|extra_{i}|>" for i in range(205)))
SPECIAL_TOKENS = (
    ENDOFTEXT,
    IMSTART,
    IMEND,
) + EXTRAS


def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
    with open(tiktoken_bpe_file, "rb") as f:
        contents = f.read()
    return {
        base64.b64decode(token): int(rank)
        for token, rank in (line.split() for line in contents.splitlines() if line)
    }


class QWenTokenizer(PreTrainedTokenizer):
    """QWen tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        errors="replace",
        image_start_tag='<img>',
        image_end_tag='</img>',
        image_pad_tag='<imgpad>',
        ref_start_tag='<ref>',
        ref_end_tag='</ref>',
        box_start_tag='<box>',
        box_end_tag='</box>',
        quad_start_tag='<quad>',
        quad_end_tag='</quad>',
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_start_tag = image_start_tag
        self.image_end_tag = image_end_tag
        self.image_pad_tag = image_pad_tag
        self.ref_start_tag = ref_start_tag
        self.ref_end_tag = ref_end_tag
        self.box_start_tag = box_start_tag
        self.box_end_tag = box_end_tag
        self.quad_start_tag = quad_start_tag
        self.quad_end_tag = quad_end_tag
        self.IMAGE_ST = (
            ref_start_tag, ref_end_tag,
            box_start_tag, box_end_tag,
            quad_start_tag, quad_end_tag,
            image_start_tag, image_end_tag,
            image_pad_tag
        )

        self.errors = errors  # how to handle errors in decoding

        self.mergeable_ranks = _load_tiktoken_bpe(vocab_file)  # type: dict[bytes, int]
        self.special_tokens = {
            token: index
            for index, token in enumerate(
                SPECIAL_TOKENS + self.IMAGE_ST, start=len(self.mergeable_ranks)
            )
        }

        self.img_start_id = self.special_tokens[self.image_start_tag]
        self.img_end_id = self.special_tokens[self.image_end_tag]
        self.img_pad_id = self.special_tokens[self.image_pad_tag]
        self.ref_start_id = self.special_tokens[self.ref_start_tag]
        self.ref_end_id = self.special_tokens[self.ref_end_tag]
        self.box_start_id = self.special_tokens[self.box_start_tag]
        self.box_end_id = self.special_tokens[self.box_end_tag]
        self.quad_start_id = self.special_tokens[self.quad_start_tag]
        self.quad_end_id = self.special_tokens[self.quad_end_tag]

        enc = tiktoken.Encoding(
            "Qwen",
            pat_str=PAT_STR,
            mergeable_ranks=self.mergeable_ranks,
            special_tokens=self.special_tokens,
        )
        assert (
            len(self.mergeable_ranks) + len(self.special_tokens) == enc.n_vocab
        ), f"{len(self.mergeable_ranks) + len(self.special_tokens)} != {enc.n_vocab} in encoding"

        self.decoder = {
            v: k for k, v in self.mergeable_ranks.items()
        }  # type: dict[int, bytes|str]
        self.decoder.update({v: k for k, v in self.special_tokens.items()})

        self.tokenizer = enc  # type: tiktoken.Encoding

        self.eod_id = self.tokenizer.eot_token
        self.im_start_id = self.special_tokens[IMSTART]
        self.im_end_id = self.special_tokens[IMEND]

    def __len__(self) -> int:
        return self.tokenizer.n_vocab

    def get_vocab(self) -> Dict[bytes, int]:
        return self.mergeable_ranks

    def convert_tokens_to_ids(
        self, tokens: Union[bytes, str, List[Union[bytes, str]]]
    ) -> List[int]:
        ids = []
        if isinstance(tokens, (str, bytes)):
            if tokens in self.special_tokens:
                return self.special_tokens[tokens]
            else:
                return self.mergeable_ranks.get(tokens)
        for token in tokens:
            if token in self.special_tokens:
                ids.append(self.special_tokens[token])
            else:
                ids.append(self.mergeable_ranks.get(token))
        return ids

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        if not special_tokens and new_tokens:
            raise ValueError('Adding regular tokens is not supported')
        for token in new_tokens:
            surface_form = token.content if isinstance(token, AddedToken) else token
            if surface_form not in SPECIAL_TOKENS:
                raise ValueError('Adding unknown special tokens is not supported')
        return 0

    def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
        """
        Save only the vocabulary of the tokenizer (vocabulary).

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        file_path = os.path.join(save_directory, "qwen.tiktoken")
        with open(file_path, "w", encoding="utf8") as w:
            for k, v in self.mergeable_ranks.items():
                line = base64.b64encode(k).decode("utf8") + " " + str(v) + "\n"
                w.write(line)
        return (file_path,)

    def tokenize(
        self,
        text: str,
        allowed_special: Union[Set, str] = "all",
        disallowed_special: Union[Collection, str] = (),
        **kwargs,
    ) -> List[Union[bytes, str]]:
        """
        Converts a string in a sequence of tokens.

        Args:
            text (`str`):
                The sequence to be encoded.
            allowed_special (`Literal["all"]` or `set`):
                The surface forms of the tokens to be encoded as special tokens in regular texts.
                Default to "all".
            disallowed_special (`Literal["all"]` or `Collection`):
                The surface forms of the tokens that should not be in regular texts and trigger errors.
                Default to an empty tuple.

            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific encode method.

        Returns:
            `List[bytes|str]`: The list of tokens.
        """
        tokens = []
        text = unicodedata.normalize("NFC", text)

        # this implementation takes a detour: text -> token id -> token surface forms
        for t in self.tokenizer.encode(
            text, allowed_special=allowed_special, disallowed_special=disallowed_special
        ):
            tokens.append(self.decoder[t])
        return tokens

    def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
        """
        Converts a sequence of tokens in a single string.
        """
        text = ""
        temp = b""
        for t in tokens:
            if isinstance(t, str):
                if temp:
                    text += temp.decode("utf-8", errors=self.errors)
                    temp = b""
                text += t
            elif isinstance(t, bytes):
                temp += t
            else:
                raise TypeError("token should only be of type types or str")
        if temp:
            text += temp.decode("utf-8", errors=self.errors)
        return text

    @property
    def vocab_size(self):
        return self.tokenizer.n_vocab

    def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
        """Converts an id to a token, special tokens included"""
        if index in self.decoder:
            return self.decoder[index]
        raise ValueError("unknown ids")

    def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
        """Converts a token to an id using the vocab, special tokens included"""
        if token in self.special_tokens:
            return self.special_tokens[token]
        if token in self.mergeable_ranks:
            return self.mergeable_ranks[token]
        raise ValueError("unknown token")

    def _tokenize(self, text: str, **kwargs):
        """
        Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based
        vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).

        Do NOT take care of added tokens.
        """
        raise NotImplementedError

    def _decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        errors: str = None,
        **kwargs,
    ) -> str:
        if isinstance(token_ids, int):
            token_ids = [token_ids]
        if skip_special_tokens:
            token_ids = [i for i in token_ids if i < self.eod_id]
        return self.tokenizer.decode(token_ids, errors=errors or self.errors)
```
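For completeness, a sketch of constructing the tokenizer class directly from the tiktoken rank file (normally it is loaded through `AutoTokenizer` with `trust_remote_code=True`, which the `auto_map` in `tokenizer_config.json` below wires up); the local path is a placeholder and behaviour may depend on the pinned `transformers` version:

```python
# Hypothetical example: direct use of QWenTokenizer with a local vocab file.
from tokenization_qwen import QWenTokenizer

tok = QWenTokenizer("qwen.tiktoken")        # placeholder path to the BPE rank file
ids = tok("OCR with format: ")["input_ids"]
print(len(tok), ids)                        # vocab size and encoded ids
print(tok.decode(ids))                      # round-trip back to the prompt text
```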
tokenizer_config.json
ADDED
@@ -0,0 +1,14 @@
```json
{
  "added_tokens_decoder": {},
  "auto_map": {
    "AutoTokenizer": [
      "tokenization_qwen.QWenTokenizer",
      null
    ]
  },
  "clean_up_tokenization_spaces": true,
  "model_max_length": 8000,
  "pad_token": "<|endoftext|>",
  "padding_side": "right",
  "tokenizer_class": "QWenTokenizer"
}
```