add omni case for inference
- .gitattributes +1 -0
- README.md +6 -1
- assets/Skiing.mp4 +3 -0
.gitattributes
CHANGED
@@ -37,3 +37,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *jpg filter=lfs diff=lfs merge=lfs -text
 *gif filter=lfs diff=lfs merge=lfs -text
 *.wav filter=lfs diff=lfs merge=lfs -text
+assets/Skiing.mp4 filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1029,7 +1029,7 @@ def get_video_chunk_content(video_path, flatten=True):
 
     return contents
 
-video_path="/
+video_path="assets/Skiing.mp4"
 # if use voice clone prompt, please set ref_audio
 ref_audio_path = 'assets/demo.wav'
 ref_audio, _ = librosa.load(ref_audio_path, sr=16000, mono=True)
@@ -1060,6 +1060,11 @@ res = model.chat(
     return_dict=True
 )
 print(res)
+
+## You will get the answer: The person in the picture is skiing down a snowy slope.
+# import IPython
+# IPython.display.Audio('output.wav')
+
 ```
 #### Streaming inference
 ```python
assets/Skiing.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:479ace116d6ac92487ad90f415b3ef817cd019bba4521043ef0d5faaa1a8415d
+size 8534409
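The video itself is committed as a Git LFS pointer, so only the small pointer text above lives in git history while the 8.5 MB file is fetched by LFS. As a quick sanity check (a sketch, not part of the repo), the downloaded asset can be compared against the oid and size recorded in the pointer:

```python
import hashlib
import os

# Values copied from the LFS pointer committed above.
EXPECTED_OID = "479ace116d6ac92487ad90f415b3ef817cd019bba4521043ef0d5faaa1a8415d"
EXPECTED_SIZE = 8534409

path = "assets/Skiing.mp4"

# Hash the file in chunks so large assets do not need to fit in memory.
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

# A size mismatch usually means only the pointer file was checked out.
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch: LFS object not fetched?"
assert sha256.hexdigest() == EXPECTED_OID, "sha256 mismatch: corrupted download?"
print("assets/Skiing.mp4 matches its LFS pointer")
```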