Update README.md
Browse files
README.md
CHANGED
@@ -13,9 +13,9 @@ new_version: OpenGVLab/InternViT-300M-448px-V2_5
|
|
13 |
|
14 |
# InternViT-300M-448px
|
15 |
|
16 |
-
[\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL) [\[
|
17 |
|
18 |
-
[\[🗨️ Chat Demo\]](https://internvl.opengvlab.com/) [\[🤗 HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL) [\[🚀 Quick Start\]](#quick-start) [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/)
|
19 |
|
20 |
<div align="center">
|
21 |
<img width="500" alt="image" src="https://cdn-uploads.huggingface.co/production/uploads/64006c09330a45b03605bba3/zJsd2hqd3EevgXo6fNgC-.png">
|
@@ -67,6 +67,12 @@ This project is released under the MIT License.
|
|
67 |
If you find this project useful in your research, please consider citing:
|
68 |
|
69 |
```BibTeX
|
|
|
|
|
|
|
|
|
|
|
|
|
70 |
@article{gao2024mini,
|
71 |
title={Mini-internvl: A flexible-transfer pocket multimodal model with 5\% parameters and 90\% performance},
|
72 |
author={Gao, Zhangwei and Chen, Zhe and Cui, Erfei and Ren, Yiming and Wang, Weiyun and Zhu, Jinguo and Tian, Hao and Ye, Shenglong and He, Junjun and Zhu, Xizhou and others},
|
@@ -79,10 +85,11 @@ If you find this project useful in your research, please consider citing:
|
|
79 |
journal={arXiv preprint arXiv:2404.16821},
|
80 |
year={2024}
|
81 |
}
|
82 |
-
@
|
83 |
-
title={
|
84 |
-
author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and
|
85 |
-
|
86 |
-
|
|
|
87 |
}
|
88 |
```
|
|
|
13 |
|
14 |
# InternViT-300M-448px
|
15 |
|
16 |
+
[\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL) [\[📜 InternVL 1.0\]](https://huggingface.co/papers/2312.14238) [\[📜 InternVL 1.5\]](https://huggingface.co/papers/2404.16821) [\[📜 Mini-InternVL\]](https://arxiv.org/abs/2410.16261) [\[📜 InternVL 2.5\]](https://huggingface.co/papers/2412.05271)
|
17 |
|
18 |
+
[\[🆕 Blog\]](https://internvl.github.io/blog/) [\[🗨️ Chat Demo\]](https://internvl.opengvlab.com/) [\[🤗 HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL) [\[🚀 Quick Start\]](#quick-start) [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/)
|
19 |
|
20 |
<div align="center">
|
21 |
<img width="500" alt="image" src="https://cdn-uploads.huggingface.co/production/uploads/64006c09330a45b03605bba3/zJsd2hqd3EevgXo6fNgC-.png">
|
|
|
67 |
If you find this project useful in your research, please consider citing:
|
68 |
|
69 |
```BibTeX
|
70 |
+
@article{chen2024expanding,
|
71 |
+
title={Expanding Performance Boundaries of Open-Source Multimodal Models with Model, Data, and Test-Time Scaling},
|
72 |
+
author={Chen, Zhe and Wang, Weiyun and Cao, Yue and Liu, Yangzhou and Gao, Zhangwei and Cui, Erfei and Zhu, Jinguo and Ye, Shenglong and Tian, Hao and Liu, Zhaoyang and others},
|
73 |
+
journal={arXiv preprint arXiv:2412.05271},
|
74 |
+
year={2024}
|
75 |
+
}
|
76 |
@article{gao2024mini,
|
77 |
title={Mini-internvl: A flexible-transfer pocket multimodal model with 5\% parameters and 90\% performance},
|
78 |
author={Gao, Zhangwei and Chen, Zhe and Cui, Erfei and Ren, Yiming and Wang, Weiyun and Zhu, Jinguo and Tian, Hao and Ye, Shenglong and He, Junjun and Zhu, Xizhou and others},
|
|
|
85 |
journal={arXiv preprint arXiv:2404.16821},
|
86 |
year={2024}
|
87 |
}
|
88 |
+
@inproceedings{chen2024internvl,
|
89 |
+
title={Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks},
|
90 |
+
author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and others},
|
91 |
+
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
|
92 |
+
pages={24185--24198},
|
93 |
+
year={2024}
|
94 |
}
|
95 |
```
|