shuaijiang committed on
Commit
f040a31
·
1 Parent(s): 426adde
.gitattributes CHANGED
@@ -46,3 +46,6 @@ static/videos/shiba.mp4 filter=lfs diff=lfs merge=lfs -text
46
  static/videos/steve.mp4 filter=lfs diff=lfs merge=lfs -text
47
  static/videos/teaser.mp4 filter=lfs diff=lfs merge=lfs -text
48
  static/videos/toby.mp4 filter=lfs diff=lfs merge=lfs -text
 
 
 
 
46
  static/videos/steve.mp4 filter=lfs diff=lfs merge=lfs -text
47
  static/videos/teaser.mp4 filter=lfs diff=lfs merge=lfs -text
48
  static/videos/toby.mp4 filter=lfs diff=lfs merge=lfs -text
49
+ static/audios/*wav filter=lfs diff=lfs merge=lfs -text
50
+ static/*pdf filter=lfs diff=lfs merge=lfs -text
51
+ static/images/*png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,25 +1,26 @@
1
  ---
2
- title: KE Omni
3
  emoji: 🧠
4
  colorFrom: yellow
5
  colorTo: indigo
6
  sdk: static
7
  pinned: false
 
 
8
  ---
9
 
10
- # Nerfies
11
 
12
- This is the repository that contains source code for the [Nerfies website](https://nerfies.github.io).
13
 
14
- If you find Nerfies useful for your work please cite:
15
  ```
16
- @article{park2021nerfies
17
- author = {Park, Keunhong and Sinha, Utkarsh and Barron, Jonathan T. and Bouaziz, Sofien and Goldman, Dan B and Seitz, Steven M. and Martin-Brualla, Ricardo},
18
- title = {Nerfies: Deformable Neural Radiance Fields},
19
- journal = {ICCV},
20
- year = {2021},
21
  }
22
  ```
23
 
24
  # Website License
25
- <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-sa/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/">Creative Commons Attribution-ShareAlike 4.0 International License</a>.
 
1
  ---
2
+ title: KE-Omni
3
  emoji: 🧠
4
  colorFrom: yellow
5
  colorTo: indigo
6
  sdk: static
7
  pinned: false
8
+ license: apache-2.0
9
+ short_description: KE-Omni
10
  ---
11
 
12
+ # AM-Omni
13
 
14
+ This is the repository that contains the description of AM-Omni.
15
 
16
+ If you find AM-Omni useful for your work please cite:
17
  ```
18
+ @article{zhao2024am,
19
+ author = {Zhao, Shuaijiang and Guo, Tingwei and Xiang, Bajian and Wan, Tongtang and Niu, Qiang and Zou, Wei and Li, Xiangang},
20
+ title = {KE-Omni: A Seamless Speech Language Model with 60,000+ Hours Synthesized Chinese and English Speech Dialogues},
21
+ year = {2024},
 
22
  }
23
  ```
24
 
25
  # Website License
26
+ <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-sa/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/">Creative Commons Attribution-ShareAlike 4.0 International License</a>.
index.html CHANGED
@@ -3,10 +3,10 @@
3
  <head>
4
  <meta charset="utf-8">
5
  <meta name="description"
6
- content="Deformable Neural Radiance Fields creates free-viewpoint portraits (nerfies) from casually captured videos.">
7
- <meta name="keywords" content="Nerfies, D-NeRF, NeRF">
8
  <meta name="viewport" content="width=device-width, initial-scale=1">
9
- <title>Nerfies: Deformable Neural Radiance Fields</title>
10
 
11
  <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro"
12
  rel="stylesheet">
@@ -33,39 +33,40 @@
33
  <div class="container is-max-desktop">
34
  <div class="columns is-centered">
35
  <div class="column has-text-centered">
36
- <h1 class="title is-1 publication-title">Nerfies: Deformable Neural Radiance Fields</h1>
37
  <div class="is-size-5 publication-authors">
38
  <span class="author-block">
39
- <a href="https://keunhong.com" target="_blank">Keunhong Park</a><sup>1</sup>,</span>
 
40
  <span class="author-block">
41
- <a href="https://utkarshsinha.com" target="_blank">Utkarsh Sinha</a><sup>2</sup>,</span>
 
42
  <span class="author-block">
43
- <a href="https://jonbarron.info" target="_blank">Jonathan T. Barron</a><sup>2</sup>,
44
  </span>
45
  <span class="author-block">
46
- <a href="http://sofienbouaziz.com" target="_blank">Sofien Bouaziz</a><sup>2</sup>,
47
  </span>
48
  <span class="author-block">
49
- <a href="https://www.danbgoldman.com" target="_blank">Dan B Goldman</a><sup>2</sup>,
50
  </span>
51
  <span class="author-block">
52
- <a href="https://homes.cs.washington.edu/~seitz/" target="_blank">Steven M. Seitz</a><sup>1,2</sup>,
53
  </span>
54
  <span class="author-block">
55
- <a href="http://www.ricardomartinbrualla.com" target="_blank">Ricardo Martin-Brualla</a><sup>2</sup>
56
  </span>
57
  </div>
58
 
59
  <div class="is-size-5 publication-authors">
60
- <span class="author-block"><sup>1</sup>University of Washington,</span>
61
- <span class="author-block"><sup>2</sup>Google Research</span>
62
  </div>
63
 
64
  <div class="column has-text-centered">
65
  <div class="publication-links">
66
  <!-- PDF Link. -->
67
  <span class="link-block">
68
- <a href="https://arxiv.org/pdf/2011.12948" target="_blank"
69
  class="external-link button is-normal is-rounded is-dark">
70
  <span class="icon">
71
  <i class="fas fa-file-pdf"></i>
@@ -74,7 +75,7 @@
74
  </a>
75
  </span>
76
  <span class="link-block">
77
- <a href="https://arxiv.org/abs/2011.12948" target="_blank"
78
  class="external-link button is-normal is-rounded is-dark">
79
  <span class="icon">
80
  <i class="ai ai-arxiv"></i>
@@ -82,7 +83,7 @@
82
  <span>arXiv</span>
83
  </a>
84
  </span>
85
- <!-- Video Link. -->
86
  <span class="link-block">
87
  <a href="https://www.youtube.com/watch?v=MrKrnHhk8IA" target="_blank"
88
  class="external-link button is-normal is-rounded is-dark">
@@ -92,9 +93,10 @@
92
  <span>Video</span>
93
  </a>
94
  </span>
 
95
  <!-- Code Link. -->
96
  <span class="link-block">
97
- <a href="https://github.com/google/nerfies" target="_blank"
98
  class="external-link button is-normal is-rounded is-dark">
99
  <span class="icon">
100
  <i class="fab fa-github"></i>
@@ -104,7 +106,7 @@
104
  </span>
105
  <!-- Dataset Link. -->
106
  <span class="link-block">
107
- <a href="https://github.com/google/nerfies/releases/tag/0.1" target="_blank"
108
  class="external-link button is-normal is-rounded is-dark">
109
  <span class="icon">
110
  <i class="far fa-images"></i>
@@ -112,7 +114,6 @@
112
  <span>Data</span>
113
  </a>
114
  </div>
115
-
116
  </div>
117
  </div>
118
  </div>
@@ -120,80 +121,6 @@
120
  </div>
121
  </section>
122
 
123
- <section class="hero teaser">
124
- <div class="container is-max-desktop">
125
- <div class="hero-body">
126
- <video id="teaser" autoplay muted loop playsinline height="100%">
127
- <source src="./static/videos/teaser.mp4"
128
- type="video/mp4">
129
- </video>
130
- <h2 class="subtitle has-text-centered">
131
- <span class="dnerf">Nerfies</span> turns selfie videos from your phone into
132
- free-viewpoint
133
- portraits.
134
- </h2>
135
- </div>
136
- </div>
137
- </section>
138
-
139
-
140
- <section class="hero is-light is-small">
141
- <div class="hero-body">
142
- <div class="container">
143
- <div id="results-carousel" class="carousel results-carousel">
144
- <div class="item item-steve">
145
- <video poster="" id="steve" autoplay controls muted loop playsinline height="100%">
146
- <source src="./static/videos/steve.mp4"
147
- type="video/mp4">
148
- </video>
149
- </div>
150
- <div class="item item-chair-tp">
151
- <video poster="" id="chair-tp" autoplay controls muted loop playsinline height="100%">
152
- <source src="./static/videos/chair-tp.mp4"
153
- type="video/mp4">
154
- </video>
155
- </div>
156
- <div class="item item-shiba">
157
- <video poster="" id="shiba" autoplay controls muted loop playsinline height="100%">
158
- <source src="./static/videos/shiba.mp4"
159
- type="video/mp4">
160
- </video>
161
- </div>
162
- <div class="item item-fullbody">
163
- <video poster="" id="fullbody" autoplay controls muted loop playsinline height="100%">
164
- <source src="./static/videos/fullbody.mp4"
165
- type="video/mp4">
166
- </video>
167
- </div>
168
- <div class="item item-blueshirt">
169
- <video poster="" id="blueshirt" autoplay controls muted loop playsinline height="100%">
170
- <source src="./static/videos/blueshirt.mp4"
171
- type="video/mp4">
172
- </video>
173
- </div>
174
- <div class="item item-mask">
175
- <video poster="" id="mask" autoplay controls muted loop playsinline height="100%">
176
- <source src="./static/videos/mask.mp4"
177
- type="video/mp4">
178
- </video>
179
- </div>
180
- <div class="item item-coffee">
181
- <video poster="" id="coffee" autoplay controls muted loop playsinline height="100%">
182
- <source src="./static/videos/coffee.mp4"
183
- type="video/mp4">
184
- </video>
185
- </div>
186
- <div class="item item-toby">
187
- <video poster="" id="toby" autoplay controls muted loop playsinline height="100%">
188
- <source src="./static/videos/toby2.mp4"
189
- type="video/mp4">
190
- </video>
191
- </div>
192
- </div>
193
- </div>
194
- </div>
195
- </section>
196
-
197
 
198
  <section class="section">
199
  <div class="container is-max-desktop">
@@ -203,185 +130,279 @@
203
  <h2 class="title is-3">Abstract</h2>
204
  <div class="content has-text-justified">
205
  <p>
206
- We present the first method capable of photorealistically reconstructing a non-rigidly
207
- deforming scene using photos/videos captured casually from mobile phones.
 
 
208
  </p>
209
- <p>
210
- Our approach augments neural radiance fields
211
- (NeRF) by optimizing an
212
- additional continuous volumetric deformation field that warps each observed point into a
213
- canonical 5D NeRF.
214
- We observe that these NeRF-like deformation fields are prone to local minima, and
215
- propose a coarse-to-fine optimization method for coordinate-based models that allows for
216
- more robust optimization.
217
- By adapting principles from geometry processing and physical simulation to NeRF-like
218
- models, we propose an elastic regularization of the deformation field that further
219
- improves robustness.
220
- </p>
221
- <p>
222
- We show that <span class="dnerf">Nerfies</span> can turn casually captured selfie
223
- photos/videos into deformable NeRF
224
- models that allow for photorealistic renderings of the subject from arbitrary
225
- viewpoints, which we dub <i>"nerfies"</i>. We evaluate our method by collecting data
226
- using a
227
- rig with two mobile phones that take time-synchronized photos, yielding train/validation
228
- images of the same pose at different viewpoints. We show that our method faithfully
229
- reconstructs non-rigidly deforming scenes and reproduces unseen views with high
230
- fidelity.
231
- </p>
232
- </div>
233
- </div>
234
- </div>
235
- <!--/ Abstract. -->
236
 
237
- <!-- Paper video. -->
238
- <div class="columns is-centered has-text-centered">
239
- <div class="column is-four-fifths">
240
- <h2 class="title is-3">Video</h2>
241
- <div class="publication-video">
242
- <iframe src="https://www.youtube.com/embed/MrKrnHhk8IA?rel=0&amp;showinfo=0"
243
- frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
244
  </div>
245
  </div>
246
  </div>
247
- <!--/ Paper video. -->
248
  </div>
249
  </section>
250
 
251
-
252
  <section class="section">
253
  <div class="container is-max-desktop">
254
-
 
 
 
255
  <div class="columns is-centered">
256
-
257
- <!-- Visual Effects. -->
258
  <div class="column">
259
- <div class="content">
260
- <h2 class="title is-3">Visual Effects</h2>
261
- <p>
262
- Using <i>nerfies</i> you can create fun visual effects. This Dolly zoom effect
263
- would be impossible without nerfies since it would require going through a wall.
264
- </p>
265
- <video id="dollyzoom" autoplay controls muted loop playsinline height="100%">
266
- <source src="./static/videos/dollyzoom-stacked.mp4"
267
- type="video/mp4">
268
- </video>
269
  </div>
270
  </div>
271
- <!--/ Visual Effects. -->
272
 
273
- <!-- Matting. -->
 
274
  <div class="column">
275
- <h2 class="title is-3">Matting</h2>
276
- <div class="columns is-centered">
277
- <div class="column content">
278
- <p>
279
- As a byproduct of our method, we can also solve the matting problem by ignoring
280
- samples that fall outside of a bounding box during rendering.
281
- </p>
282
- <video id="matting-video" controls playsinline height="100%">
283
- <source src="./static/videos/matting.mp4"
284
- type="video/mp4">
285
- </video>
286
- </div>
287
-
288
  </div>
289
  </div>
290
  </div>
291
- <!--/ Matting. -->
292
 
293
- <!-- Animation. -->
294
  <div class="columns is-centered">
295
- <div class="column is-full-width">
296
- <h2 class="title is-3">Animation</h2>
 
 
 
 
 
297
 
298
- <!-- Interpolating. -->
299
- <h3 class="title is-4">Interpolating states</h3>
300
- <div class="content has-text-justified">
301
- <p>
302
- We can also animate the scene by interpolating the deformation latent codes of two input
303
- frames. Use the slider here to linearly interpolate between the left frame and the right
304
- frame.
305
- </p>
306
  </div>
307
- <div class="columns is-vcentered interpolation-panel">
308
- <div class="column is-3 has-text-centered">
309
- <img src="./static/images/interpolate_start.jpg"
310
- class="interpolation-image"
311
- alt="Interpolate start reference image."/>
312
- <p>Start Frame</p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
313
  </div>
314
- <div class="column interpolation-video-column">
315
- <div id="interpolation-image-wrapper">
316
- Loading...
317
- </div>
318
- <input class="slider is-fullwidth is-large is-info"
319
- id="interpolation-slider"
320
- step="1" min="0" max="100" value="0" type="range">
 
 
 
 
 
 
 
 
321
  </div>
322
- <div class="column is-3 has-text-centered">
323
- <img src="./static/images/interpolate_end.jpg"
324
- class="interpolation-image"
325
- alt="Interpolation end reference image."/>
326
- <p class="is-bold">End Frame</p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
327
  </div>
328
  </div>
329
- <br/>
330
- <!--/ Interpolating. -->
 
 
331
 
332
- <!-- Re-rendering. -->
333
- <h3 class="title is-4">Re-rendering the input video</h3>
334
- <div class="content has-text-justified">
335
- <p>
336
- Using <span class="dnerf">Nerfies</span>, you can re-render a video from a novel
337
- viewpoint such as a stabilized camera by playing back the training deformations.
338
- </p>
 
 
 
 
 
 
 
 
 
 
339
  </div>
340
- <div class="content has-text-centered">
341
- <video id="replay-video"
342
- controls
343
- muted
344
- preload
345
- playsinline
346
- width="75%">
347
- <source src="./static/videos/replay.mp4"
348
- type="video/mp4">
349
- </video>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
350
  </div>
351
- <!--/ Re-rendering. -->
352
-
353
  </div>
354
  </div>
355
- <!--/ Animation. -->
 
356
 
357
 
358
- <!-- Concurrent Work. -->
 
 
 
359
  <div class="columns is-centered">
360
- <div class="column is-full-width">
361
- <h2 class="title is-3">Related Links</h2>
362
-
363
- <div class="content has-text-justified">
364
- <p>
365
- There's a lot of excellent work that was introduced around the same time as ours.
366
- </p>
367
- <p>
368
- <a href="https://arxiv.org/abs/2104.09125" target="_blank">Progressive Encoding for Neural Optimization</a> introduces an idea similar to our windowed position encoding for coarse-to-fine optimization.
369
- </p>
370
- <p>
371
- <a href="https://www.albertpumarola.com/research/D-NeRF/index.html" target="_blank">D-NeRF</a> and <a href="https://gvv.mpi-inf.mpg.de/projects/nonrigid_nerf/" target="_blank">NR-NeRF</a>
372
- both use deformation fields to model non-rigid scenes.
373
- </p>
374
- <p>
375
- Some works model videos with a NeRF by directly modulating the density, such as <a href="https://video-nerf.github.io/" target="_blank">Video-NeRF</a>, <a href="https://www.cs.cornell.edu/~zl548/NSFF/" target="_blank">NSFF</a>, and <a href="https://neural-3d-video.github.io/" target="_blank">DyNeRF</a>
376
- </p>
377
- <p>
378
- There are probably many more by the time you are reading this. Check out <a href="https://dellaert.github.io/NeRF/" target="_blank">Frank Dellart's survey on recent NeRF papers</a>, and <a href="https://github.com/yenchenlin/awesome-NeRF" target="_blank">Yen-Chen Lin's curated list of NeRF papers</a>.
379
- </p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
380
  </div>
381
  </div>
382
  </div>
383
- <!--/ Concurrent Work. -->
384
-
385
  </div>
386
  </section>
387
 
@@ -389,11 +410,10 @@
389
  <section class="section" id="BibTeX">
390
  <div class="container is-max-desktop content">
391
  <h2 class="title">BibTeX</h2>
392
- <pre><code>@article{park2021nerfies,
393
- author = {Park, Keunhong and Sinha, Utkarsh and Barron, Jonathan T. and Bouaziz, Sofien and Goldman, Dan B and Seitz, Steven M. and Martin-Brualla, Ricardo},
394
- title = {Nerfies: Deformable Neural Radiance Fields},
395
- journal = {ICCV},
396
- year = {2021},
397
  }</code></pre>
398
  </div>
399
  </section>
@@ -403,10 +423,10 @@
403
  <div class="container">
404
  <div class="content has-text-centered">
405
  <a class="icon-link" target="_blank"
406
- href="./static/videos/nerfies_paper.pdf">
407
  <i class="fas fa-file-pdf"></i>
408
  </a>
409
- <a class="icon-link" href="https://github.com/keunhong" target="_blank" class="external-link" disabled>
410
  <i class="fab fa-github"></i>
411
  </a>
412
  </div>
 
3
  <head>
4
  <meta charset="utf-8">
5
  <meta name="description"
6
+ content="KE-Omni">
7
+ <meta name="keywords" content="speech language models, speech interaction">
8
  <meta name="viewport" content="width=device-width, initial-scale=1">
9
+ <title>KE-Omni</title>
10
 
11
  <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro"
12
  rel="stylesheet">
 
33
  <div class="container is-max-desktop">
34
  <div class="columns is-centered">
35
  <div class="column has-text-centered">
36
+ <h1 class="title is-1 publication-title">KE-Omni: A Seamless Speech Language Model with 60,000+ Hours Synthesized Chinese and English Speech Dialogues</h1>
37
  <div class="is-size-5 publication-authors">
38
  <span class="author-block">
39
+ Shuaijiang Zhao,
40
+ </span>
41
  <span class="author-block">
42
+ Tingwei Guo,
43
+ </span>
44
  <span class="author-block">
45
+ Bajian Xiang,
46
  </span>
47
  <span class="author-block">
48
+ Tongtang Wan,
49
  </span>
50
  <span class="author-block">
51
+ Qiang Niu,
52
  </span>
53
  <span class="author-block">
54
+ Wei Zou,
55
  </span>
56
  <span class="author-block">
57
+ Xiangang Li
58
  </span>
59
  </div>
60
 
61
  <div class="is-size-5 publication-authors">
62
+ <span class="author-block">Beike Inc., Beijing, China</span>
 
63
  </div>
64
 
65
  <div class="column has-text-centered">
66
  <div class="publication-links">
67
  <!-- PDF Link. -->
68
  <span class="link-block">
69
+ <a href="./static/KE_Omni.pdf" target="_blank"
70
  class="external-link button is-normal is-rounded is-dark">
71
  <span class="icon">
72
  <i class="fas fa-file-pdf"></i>
 
75
  </a>
76
  </span>
77
  <span class="link-block">
78
+ <a href="https://arxiv.org/" target="_blank"
79
  class="external-link button is-normal is-rounded is-dark">
80
  <span class="icon">
81
  <i class="ai ai-arxiv"></i>
 
83
  <span>arXiv</span>
84
  </a>
85
  </span>
86
+ <!-- Video Link.
87
  <span class="link-block">
88
  <a href="https://www.youtube.com/watch?v=MrKrnHhk8IA" target="_blank"
89
  class="external-link button is-normal is-rounded is-dark">
 
93
  <span>Video</span>
94
  </a>
95
  </span>
96
+ -->
97
  <!-- Code Link. -->
98
  <span class="link-block">
99
+ <a href="https://github.com/LianjiaTech/BELLE" target="_blank"
100
  class="external-link button is-normal is-rounded is-dark">
101
  <span class="icon">
102
  <i class="fab fa-github"></i>
 
106
  </span>
107
  <!-- Dataset Link. -->
108
  <span class="link-block">
109
+ <a href="https://huggingface.co/datasets/AM-Team/KESpeechChat" target="_blank"
110
  class="external-link button is-normal is-rounded is-dark">
111
  <span class="icon">
112
  <i class="far fa-images"></i>
 
114
  <span>Data</span>
115
  </a>
116
  </div>
 
117
  </div>
118
  </div>
119
  </div>
 
121
  </div>
122
  </section>
123
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
124
 
125
  <section class="section">
126
  <div class="container is-max-desktop">
 
130
  <h2 class="title is-3">Abstract</h2>
131
  <div class="content has-text-justified">
132
  <p>
133
+ GPT-4o represents a significant milestone in enabling real-time interaction with large language models (LLMs) through speech; its remarkably low latency and high fluency not only capture attention but also stimulate research interest in the field.
134
+ This real-time speech interaction is particularly valuable in scenarios requiring rapid feedback and immediate responses, dramatically enhancing user experience.
135
+ However, there is a notable lack of research focused on real-time large speech language models, particularly for Chinese.
136
+ In this work, we present KE-Omni, a seamless large speech language model built upon Ke-SpeechChat, a large-scale high-quality synthesized speech interaction dataset consisting of 7 million Chinese and English conversations, featuring 42,002 speakers, and totaling over 60,000 hours. This contributes significantly to the advancement of research and development in this field.
137
  </p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
138
 
 
 
 
 
 
 
 
139
  </div>
140
  </div>
141
  </div>
142
+ <!--/ Abstract. -->
143
  </div>
144
  </section>
145
 
 
146
  <section class="section">
147
  <div class="container is-max-desktop">
148
+ <h2 class="title is-2 has-text-centered" >KE-Omni Model Architecture</h2>
149
+ <p>
150
+ KE-Omni comprises three main components: a speech encoder, a large language model(LLM), and a speech decoder.
151
+ </p>
152
  <div class="columns is-centered">
 
 
153
  <div class="column">
154
+ <div class="content dialogue-block" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
155
+ <img src="./static/images/model.jpg" alt="am-omni architecture">
 
 
 
 
 
 
 
 
156
  </div>
157
  </div>
158
+ </div>
159
 
160
+ <h2 class="title is-2 has-text-centered" >KE-SpeechChat Dataset Construction</h2>
161
+ <div class="columns is-centered">
162
  <div class="column">
163
+ <div class="content dialogue-block" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
164
+ <h3 class="title is-4">Textual Dialogue Construction</h3>
165
+ <img src="./static/images/textual.png" alt="textual dialogue construction">
166
+ <h3 class="title is-4">Speech Dialogue Construction</h3>
167
+ <img src="./static/images/speech.png" alt="speech dialogue construction">
 
 
 
 
 
 
 
 
168
  </div>
169
  </div>
170
  </div>
 
171
 
172
+ <h2 class="title is-2 has-text-centered">KE-SpeechChat Dataset Details</h2>
173
  <div class="columns is-centered">
174
+ <!-- Left image -->
175
+ <div class="column is-half">
176
+ <div class="content dialogue-block" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
177
+ <h3 class="title is-4 has-text-centered">Statistics of KESpeechChat</h3>
178
+ <img src="./static/images/am_speech.png" alt="Statistics of KESpeechChat " style="width: 100%; height: 100%;">
179
+ </div>
180
+ </div>
181
 
182
+ <!-- Right image -->
183
+ <div class="column is-half">
184
+ <div class="content dialogue-block" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
185
+ <h3 class="title is-4 has-text-centered">Subsets of KESpeechChat</h3>
186
+ <img src="./static/images/subset.png" alt="Subsets of KESpeechChat" style="width: 100%; height: 100%;">
 
 
 
187
  </div>
188
+ </div>
189
+ </div>
190
+ </div>
191
+ </section>
192
+
193
+ <section class="section">
194
+ <div class="container is-max-desktop">
195
+ <h2 class="title is-2 has-text-centered" >KESpeechChat Dialogue Demos</h2>
196
+ <h2 class="title is-3 has-text-centered" >Chinese Dialogues</h2>
197
+ <div class="columns is-centered">
198
+ <div class="column">
199
+ <div class="content dialogue-block" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
200
+ <h3 class="title is-4">User-male, Agent-male</h3>
201
+ <p>User: 奥运会是不是每四年举办一次啊</p>
202
+ <p>Agent: 是的,奥运会确实是每四年举办一次,不过有时候会因为特殊情况有所调整,比如之前就因为全球健康危机延期过。</p>
203
+ <div style="margin-top: auto;">
204
+ <audio controls>
205
+ <source src="./static/audios/zh_26157_0_male.wav" type="audio/wav">
206
+ </audio>
207
+ <audio controls>
208
+ <source src="./static/audios/zh_26157_1_male.wav" type="audio/wav">
209
+ </audio>
210
  </div>
211
+ </div>
212
+ </div>
213
+
214
+ <div class="column">
215
+ <div class="content dialogue-block" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
216
+ <h3 class="title is-4">User-male, Agent-female</h3>
217
+ <p>User: 人工智能现在都用在哪些地方了?</p>
218
+ <p>Agent: 人工智能的应用范围非常广泛,比如智能家居、自动驾驶汽车、医疗诊断、金融分析、教育辅导、娱乐推荐系统等等,几乎涵盖了我们生活的方方面面。</p>
219
+ <div style="margin-top: auto;">
220
+ <audio controls>
221
+ <source src="./static/audios/zh_2615759_0_male.wav" type="audio/wav">
222
+ </audio>
223
+ <audio controls>
224
+ <source src="./static/audios/zh_2615759_1_female.wav" type="audio/wav">
225
+ </audio>
226
  </div>
227
+ </div>
228
+ </div>
229
+
230
+ <div class="column">
231
+ <div class="content" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
232
+ <h3 class="title is-4">User-female, Agent-male</h3>
233
+ <p>User: 人类的平均体温一般是多少度呀?</p>
234
+ <p>Agent: 人类的平均体温通常是在三十六点五到三十七点五摄氏度之间,这个范围内的体温都算是正常的。</p>
235
+ <div style="margin-top: auto;">
236
+ <audio controls>
237
+ <source src="./static/audios/zh_26178_0_female.wav" type="audio/wav">
238
+ </audio>
239
+ <audio controls>
240
+ <source src="./static/audios/zh_26178_1_male.wav" type="audio/wav">
241
+ </audio>
242
+ </div>
243
+ </div>
244
+ </div>
245
+
246
+ <div class="column">
247
+ <div class="content" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
248
+ <h3 class="title is-4">User-female, Agent-female</h3>
249
+ <p>User: 如果我想提高写作水平,多读书真的有帮助吗?</p>
250
+ <p>Agent: 当然有帮助,多读书可以让你接触到不同的写作风格和表达方式,还能增加词汇量,对提高写作水平特别有用。</p>
251
+ <div style="margin-top: auto;">
252
+ <audio controls>
253
+ <source src="./static/audios/zh_2518949_0_female.wav" type="audio/wav">
254
+ </audio>
255
+ <audio controls>
256
+ <source src="./static/audios/zh_2518949_1_female.wav" type="audio/wav">
257
+ </audio>
258
  </div>
259
  </div>
260
+ </div>
261
+ </div>
262
+ </div>
263
+ </section>
264
 
265
+ <section class="section">
266
+ <div class="container is-max-desktop">
267
+ <h2 class="title is-3 has-text-centered">English Dialogues</h2>
268
+ <div class="columns is-centered">
269
+ <div class="column">
270
+ <div class="content dialogue-block" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
271
+ <h3 class="title is-4">User-male, Agent-male</h3>
272
+ <p>User: What kinds of birds and trees would you usually see in a peaceful forest clearing?</p>
273
+ <p>Agent: In a peaceful forest clearing, you might see a variety of birds like robins, sparrows, and woodpeckers. As for trees, you could expect to find oaks, maples, and pines, which provide a beautiful and serene backdrop.</p>
274
+ <div style="margin-top: auto;">
275
+ <audio controls>
276
+ <source src="./static/audios/en_21092_0_male.wav" type="audio/wav">
277
+ </audio>
278
+ <audio controls>
279
+ <source src="./static/audios/en_21092_1_male.wav" type="audio/wav">
280
+ </audio>
281
+ </div>
282
  </div>
283
+ </div>
284
+
285
+ <div class="column">
286
+ <div class="content dialogue-block" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
287
+ <h3 class="title is-4">User-male, Agent-female</h3>
288
+ <p>User: What are some things I should think about before agreeing to speak at a conference?</p>
289
+ <p>Agent: When considering a speaking engagement, think about the alignment of the conference theme with your expertise, the audience demographics, the potential networking opportunities, the time commitment required, and any travel or financial arrangements. Also, consider how this opportunity fits into your career goals and whether it can help you reach a wider audience or gain new insights.</p>
290
+ <div style="margin-top: auto;">
291
+ <audio controls>
292
+ <source src="./static/audios/en_7241_0_male.wav" type="audio/wav">
293
+ </audio>
294
+ <audio controls>
295
+ <source src="./static/audios/en_7241_1_female.wav" type="audio/wav">
296
+ </audio>
297
+ </div>
298
+ </div>
299
+ </div>
300
+
301
+ <div class="column">
302
+ <div class="content" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
303
+ <h3 class="title is-4">User-female, Agent-male</h3>
304
+ <p>User: Could you tell me what the capital of France is?</p>
305
+ <p>Agent: The capital of France is Paris.</p>
306
+ <div style="margin-top: auto;">
307
+ <audio controls>
308
+ <source src="./static/audios/en_162359_0_female.wav" type="audio/wav">
309
+ </audio>
310
+ <audio controls>
311
+ <source src="./static/audios/en_162359_1_male.wav" type="audio/wav">
312
+ </audio>
313
+ </div>
314
+ </div>
315
+ </div>
316
+
317
+ <div class="column">
318
+ <div class="content" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
319
+ <h3 class="title is-4">User-female, Agent-female</h3>
320
+ <p>User: What's the best way to store different types of garden produce to keep them fresh for as long as possible?</p>
321
+ <p>Agent: To keep your garden produce fresh, it's best to store each type according to its specific needs. For example, leafy greens like lettuce and spinach should be kept in a cool, humid environment, while root vegetables like carrots and potatoes prefer a cooler, drier place. Berries and stone fruits do well in the refrigerator, but tomatoes should be stored at room temperature. Make sure to avoid storing ethylene-producing fruits like apples and bananas with sensitive vegetables, as this can cause premature ripening.</p>
322
+ <div style="margin-top: auto;">
323
+ <audio controls>
324
+ <source src="./static/audios/en_52918_0_female.wav" type="audio/wav">
325
+ </audio>
326
+ <audio controls>
327
+ <source src="./static/audios/en_52918_1_female.wav" type="audio/wav">
328
+ </audio>
329
+ </div>
330
  </div>
 
 
331
  </div>
332
  </div>
333
+ </div>
334
+ </section>
335
 
336
 
337
+ <section class="section">
338
+ <div class="container is-max-desktop">
339
+ <h2 class="title is-2 has-text-centered">Experiment Results</h2>
340
+ <h2 class="title is-3 has-text-centered">English Dialogues</h2>
341
  <div class="columns is-centered">
342
+ <div class="column">
343
+ <div class="content dialogue-block" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
344
+ <h3 class="title is-4">Ground Truth</h3>
345
+ <p>User: What kind of job would suit someone who likes solving problems and interacting with people?</p>
346
+ <p>Agent: A career in fields like customer service, human resources, or project management could be a great fit. These roles often require strong problem-solving skills and regular interaction with people, which can be both challenging and rewarding.</p>
347
+ <div style="margin-top: auto;">
348
+ <audio controls>
349
+ <source src="./static/audios/exp_en_131152_0_mark.wav" type="audio/wav">
350
+ </audio>
351
+ <audio controls>
352
+ <source src="./static/audios/exp_en_131152_1_mark.wav" type="audio/wav">
353
+ </audio>
354
+ </div>
355
+ </div>
356
+ </div>
357
+
358
+ <div class="column">
359
+ <div class="content dialogue-block" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
360
+ <h3 class="title is-4">SpeechGPT</h3>
361
+ <p>User: What kind of job would suit someone who likes solving problems and interacting with people?</p>
362
+ <p>Agent: You may be referring to a job in customer service. This type of job involves interacting with customers and solving their problems. It requires excellent communication skills and the ability to remain calm under pressure.</p>
363
+ <div style="margin-top: auto;">
364
+ <audio controls>
365
+ <source src="./static/audios/exp_en_131152_0_mark.wav" type="audio/wav">
366
+ </audio>
367
+ <audio controls>
368
+ <source src="./static/audios/exp_en_speechgpt_90.wav" type="audio/wav">
369
+ </audio>
370
+ </div>
371
+ </div>
372
+ </div>
373
+
374
+ <div class="column">
375
+ <div class="content" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
376
+ <h3 class="title is-4">LLaMA-Omni</h3>
377
+ <p>User: What kind of job would suit someone who likes solving problems and interacting with people?</p>
378
+ <p>Agent: A job that suits someone who likes solving problems and interacting with people is a consultant or a project manager.</p>
379
+ <div style="margin-top: auto;">
380
+ <audio controls>
381
+ <source src="./static/audios/exp_en_131152_0_mark.wav" type="audio/wav">
382
+ </audio>
383
+ <audio controls>
384
+ <source src="./static/audios/exp_en_llamaomni_90.wav" type="audio/wav">
385
+ </audio>
386
+ </div>
387
+ </div>
388
+ </div>
389
+
390
+ <div class="column">
391
+ <div class="content" style="display: flex; flex-direction: column; justify-content: space-between; height: 100%;">
392
+ <h3 class="title is-4">KE-Omni</h3>
393
+ <p>User: What kind of job would suit someone who likes solving problems and interacting with people?</p>
394
+ <p>Agent: A job that combines problem-solving skills with people interaction could be a customer service representative or a project manager. These roles often require creative thinking to resolve conflicts and communicate effectively with various clients and team members. Another great option might be a salesperson, where you can build strong relationships and help clients find the right solutions.</p>
395
+ <div style="margin-top: auto;">
396
+ <audio controls>
397
+ <source src="./static/audios/exp_en_131152_0_mark.wav" type="audio/wav">
398
+ </audio>
399
+ <audio controls>
400
+ <source src="./static/audios/exp_en_amomni_female_90.wav" type="audio/wav">
401
+ </audio>
402
+ </div>
403
  </div>
404
  </div>
405
  </div>
 
 
406
  </div>
407
  </section>
408
 
 
410
  <section class="section" id="BibTeX">
411
  <div class="container is-max-desktop content">
412
  <h2 class="title">BibTeX</h2>
413
+ <pre><code>@article{zhao2024keomni,
414
+ author = {Zhao, Shuaijiang and Guo, Tingwei and Xiang, Bajian and Wan, Tongtang and Niu, Qiang and Zou, Wei and Li, Xiangang},
415
+ title = {KE-Omni: A Seamless Speech Language Model with 60,000+ Hours Synthesized Chinese and English Speech Dialogues},
416
+ year = {2024},
 
417
  }</code></pre>
418
  </div>
419
  </section>
 
423
  <div class="container">
424
  <div class="content has-text-centered">
425
  <a class="icon-link" target="_blank"
426
+ href="./static/am_omni.pdf">
427
  <i class="fas fa-file-pdf"></i>
428
  </a>
429
+ <a class="icon-link" href="https://github.com/LianjiaTech/BELLE" target="_blank" class="external-link" disabled>
430
  <i class="fab fa-github"></i>
431
  </a>
432
  </div>
static/KE_Omni.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e54bf3b22998263825af29eefb0d16ebe9db3d818abb7723c5fc47a9bbcbdb8b
3
+ size 1824220
static/audios/en_162359_0_female.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:575823556925c1e8b72d3271586a268a816302ff87711ddc20e1f5e3f999fff8
3
+ size 257930
static/audios/en_162359_1_male.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dccd4e7382403e9c64e15b13c601cd8b06105bc84a14016887f68e9b7d1dae09
3
+ size 160650
static/audios/en_21092_0_male.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df12f9421f723ce5f888728f24bcb3f25e2af77ebb98b544c32dfd5ce02f2e60
3
+ size 532362
static/audios/en_21092_1_male.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:70a9475dad1c5daa4c5823a3971138803abaf0b0fd8c444fb01f4bce86ce0712
3
+ size 1451914
static/audios/en_52918_0_female.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:859760f911b518295dba512f14e55aefa90c5983a4f55411103c8938e4a8d200
3
+ size 539530
static/audios/en_52918_1_female.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:64f3bb84808f1098976f2e82fa00b0605eb406133dea2d1ef99de3f5a799f45a
3
+ size 2726630
static/audios/en_7241_0_male.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce4f307dc7fa53808a4c12d56d23b6f89b40e330a3272172ba972642472e65ac
3
+ size 379786
static/audios/en_7241_1_female.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e6b9865ca8d1dfc7c33679f9921cee3579a04117d52a3fdfa32014495dbbadb
3
+ size 2069386
static/audios/exp_en_131152_0_mark.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ff873833fefe83390399efa6e1482c21d2978b952850f1f89dd6024fe9b85f7
3
+ size 495498
static/audios/exp_en_131152_1_mark.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e451a1423a8ffed1e86286b64840c104c2ffdcd9f80190ff2e6da9a03a538693
3
+ size 1371018
static/audios/exp_en_15479_0_mark.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7caa8384db06ab9ff1ca51930bbd5946e0acbcc2150a819b0eccf1b533596c09
3
+ size 626570
static/audios/exp_en_15479_1_mark.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c618df62c94b5dea7889679355d4f8428045286b62c1f4e42b181085f46b101a
3
+ size 2147210
static/audios/exp_en_90.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48d5c875b4f36bfbb9ab7c3be4704a8da8f18de96711fc7e80bbe0f959899348
3
+ size 291244
static/audios/exp_en_amomni_female_90.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:23c8dd75b89815c6a3c802eb3fab649fb9873480e930e0c32b165f3026f5f84d
3
+ size 518444
static/audios/exp_en_amomni_male_90.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48d5c875b4f36bfbb9ab7c3be4704a8da8f18de96711fc7e80bbe0f959899348
3
+ size 291244
static/audios/exp_en_llamaomni_90.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c25cc2cd6011b97bbb97dfafb62f7b910e21865861d921b2d3db0772f237548c
3
+ size 212524
static/audios/exp_en_speechgpt_90.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d2cd8d2833ceb7d50c9f2807794edbefc333c171a938fb28656e82a60d30038
3
+ size 407724
static/audios/zh_2518949_0_female.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9355c80c26bafbead3abe5cb4201d95aef0b4572913c91c02107ffa14d402e1
3
+ size 462730
static/audios/zh_2518949_1_female.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a47318dfdc021492a0630dc893be8b7748ea642387301225070ef7f9fee76878
3
+ size 946058
static/audios/zh_2615759_0_male.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91c737e5767879c4f539ef5b750f3f595c28e069bcc0c17174e29e9254f8d48f
3
+ size 375690
static/audios/zh_2615759_1_female.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae0f5af806a9628f99bb8f964589d79964fa5147ccec523c35e6862c32f0a8c7
3
+ size 1183626
static/audios/zh_26157_0_male.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1beb2ed3f0bf3711615be8271be82ecee5919814dd2e79eef54cf1db0ed055a
3
+ size 341898
static/audios/zh_26157_1_male.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bae4b72b41e6fe9f42ef3dd4071cf53ca28bc717773dbdb66d4fde8082c5ff96
3
+ size 907146
static/audios/zh_26178_0_female.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e869fa9b2052a7d3a61554975ab6baad7c8e77080c59350ee3e3353e5176f65
3
+ size 354186
static/audios/zh_26178_1_male.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac98f8baaafeb171f49ef3e715ced8d41144a42c665f1a6bc99a45a168a6a11a
3
+ size 855946
static/images/am_speech.png ADDED

Git LFS Details

  • SHA256: ad7d6f09cc77afe84403672eaaed0ad7f9a68fff083bda2344c26415243f4af4
  • Pointer size: 130 Bytes
  • Size of remote file: 67.8 kB
static/images/model.jpg ADDED
static/images/speech.jpg ADDED
static/images/speech.pdf ADDED
Binary file (714 kB). View file
 
static/images/speech.png ADDED

Git LFS Details

  • SHA256: 218b6f488cb25264198ae40833a07d501696dca4f121330e00419522a13c9913
  • Pointer size: 132 Bytes
  • Size of remote file: 1.05 MB
static/images/subset.png ADDED

Git LFS Details

  • SHA256: 4e958e802a6ddde79923993cfce9af288c7174c7546481e9483e09d1a42e80d4
  • Pointer size: 130 Bytes
  • Size of remote file: 57.2 kB
static/images/textual.jpg ADDED
static/images/textual.pdf ADDED
Binary file (457 kB). View file
 
static/images/textual.png ADDED

Git LFS Details

  • SHA256: 0084d37bf73bdbf16a5d8b2e2b25c5156ac0b486df511c1f45d5ce32a827a508
  • Pointer size: 132 Bytes
  • Size of remote file: 1.22 MB