diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..eaf46a125533baf87f5f1e924303342d4f97b922 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Ho Kei Cheng + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index 2ddcedb8c801596633db7408c2a03d416cf72005..920f60211132a1e2ca86880ecd96a0fd252ad648 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,152 @@ ---- -title: MMAudio -emoji: 🐨 -colorFrom: yellow -colorTo: gray -sdk: gradio -sdk_version: 5.8.0 -app_file: app.py -pinned: false -license: mit -short_description: Generating synchronizated audio given video/text inputs. ---- - -Check out the configuration reference at https://huggingface.co./docs/hub/spaces-config-reference +# [Taming Multimodal Joint Training for High-Quality Video-to-Audio Synthesis](https://hkchengrex.github.io/MMAudio) + +[Ho Kei Cheng](https://hkchengrex.github.io/), [Masato Ishii](https://scholar.google.co.jp/citations?user=RRIO1CcAAAAJ), [Akio Hayakawa](https://scholar.google.com/citations?user=sXAjHFIAAAAJ), [Takashi Shibuya](https://scholar.google.com/citations?user=XCRO260AAAAJ), [Alexander Schwing](https://www.alexander-schwing.de/), [Yuki Mitsufuji](https://www.yukimitsufuji.com/) + +University of Illinois Urbana-Champaign, Sony AI, and Sony Group Corporation + + +[[Paper (being prepared)]](https://hkchengrex.github.io/MMAudio) [[Project Page]](https://hkchengrex.github.io/MMAudio) + + +**Note: This repository is still under construction. Single-example inference should work as expected. The training code will be added. Code is subject to non-backward-compatible changes.** + +## Highlight + +MMAudio generates synchronized audio given video and/or text inputs. +Our key innovation is multimodal joint training which allows training on a wide range of audio-visual and audio-text datasets. +Moreover, a synchronization module aligns the generated audio with the video frames. + + +## Results + +(All audio from our algorithm MMAudio) + +Videos from Sora: + +https://github.com/user-attachments/assets/82afd192-0cee-48a1-86ca-bd39b8c8f330 + + +Videos from MovieGen/Hunyuan Video/VGGSound: + +https://github.com/user-attachments/assets/29230d4e-21c1-4cf8-a221-c28f2af6d0ca + +For more results, visit https://hkchengrex.com/MMAudio/video_main.html. + +## Installation + +We have only tested this on Ubuntu. + +### Prerequisites + +We recommend using a [miniforge](https://github.com/conda-forge/miniforge) environment. 
+
+- Python 3.8+
+- PyTorch **2.5.1+** and the corresponding torchvision/torchaudio (pick your CUDA version at https://pytorch.org/)
+- ffmpeg<7 ([required by torchaudio](https://pytorch.org/audio/master/installation.html#optional-dependencies); you can install it in a miniforge environment with `conda install -c conda-forge 'ffmpeg<7'`)
+
+**Clone our repository:**
+
+```bash
+git clone https://github.com/hkchengrex/MMAudio.git
+```
+
+**Install with pip:**
+
+```bash
+cd MMAudio
+pip install -e .
+```
+
+(If you encounter a `File "setup.py" not found` error, upgrade pip with `pip install --upgrade pip`.)
+
+**Pretrained models:**
+
+The models will be downloaded automatically when you run the demo script. MD5 checksums are provided in `mmaudio/utils/download_utils.py`.
+
+| Model | Download link | File size |
+| -------- | ------- | ------- |
+| Flow prediction network, small 16kHz | mmaudio_small_16k.pth | 601M |
+| Flow prediction network, small 44.1kHz | mmaudio_small_44k.pth | 601M |
+| Flow prediction network, medium 44.1kHz | mmaudio_medium_44k.pth | 2.4G |
+| Flow prediction network, large 44.1kHz **(recommended)** | mmaudio_large_44k.pth | 3.9G |
+| 16kHz VAE | v1-16.pth | 655M |
+| 16kHz BigVGAN vocoder | best_netG.pt | 429M |
+| 44.1kHz VAE | v1-44.pth | 1.2G |
+| Synchformer visual encoder | synchformer_state_dict.pth | 907M |
+
+The 44.1kHz vocoder will be downloaded automatically.
+
+The expected directory structure (full):
+
+```bash
+MMAudio
+├── ext_weights
+│   ├── best_netG.pt
+│   ├── synchformer_state_dict.pth
+│   ├── v1-16.pth
+│   └── v1-44.pth
+├── weights
+│   ├── mmaudio_small_16k.pth
+│   ├── mmaudio_small_44k.pth
+│   ├── mmaudio_medium_44k.pth
+│   └── mmaudio_large_44k.pth
+└── ...
+```
+
+The expected directory structure (minimal, for the recommended model only):
+
+```bash
+MMAudio
+├── ext_weights
+│   ├── synchformer_state_dict.pth
+│   └── v1-44.pth
+├── weights
+│   └── mmaudio_large_44k.pth
+└── ...
+```
+
+## Demo
+
+By default, these scripts use the `large_44k` model.
+In our experiments, inference takes only around 6GB of GPU memory (in 16-bit mode), so it should fit on most modern GPUs.
+
+### Command-line interface
+
+With `demo.py`:
+```bash
+python demo.py --duration=8 --video=<path to your video> --prompt "your prompt"
+```
+The output (audio in `.flac` format, and video in `.mp4` format) will be saved in `./output`.
+See `demo.py` for more options.
+Simply omit the `--video` option for text-to-audio synthesis.
+The default output (and training) duration is 8 seconds. Longer/shorter durations could also work, but a large deviation from the training duration may result in lower quality.
+
+### Gradio interface
+
+The Gradio interface supports both video-to-audio and text-to-audio synthesis.
+
+```bash
+python gradio_demo.py
+```
+
+### Known limitations
+
+1. The model sometimes generates undesired, unintelligible human speech-like sounds.
+2. The model sometimes generates undesired background music.
+3. The model struggles with unfamiliar concepts, e.g., it can generate "gunfires" but not "RPG firing".
+
+We believe all three limitations can be addressed with more high-quality training data.
+
+## Training
+Work in progress.
+
+## Evaluation
+Work in progress.
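+## Programmatic usage
+
+If you prefer calling MMAudio from Python instead of the command line, the sketch below mirrors what `demo.py` does for video-to-audio with the `large_44k_v2` variant (the default in `demo.py`). Treat it as a rough guide rather than a stable API, since the code is still subject to non-backward-compatible changes; `input.mp4`, the prompt, and `output.flac` are placeholders.
+
+```python
+from pathlib import Path
+
+import torch
+import torchaudio
+
+from mmaudio.eval_utils import all_model_cfg, generate, load_video, setup_eval_logging
+from mmaudio.model.flow_matching import FlowMatching
+from mmaudio.model.networks import MMAudio, get_my_mmaudio
+from mmaudio.model.utils.features_utils import FeaturesUtils
+
+setup_eval_logging()
+device, dtype = 'cuda', torch.bfloat16
+
+# Pick a variant and fetch its checkpoints (see the table above).
+model = all_model_cfg['large_44k_v2']
+model.download_if_needed()
+seq_cfg = model.seq_cfg
+
+# Flow-prediction network plus the feature/latent utilities (CLIP, Synchformer, text, VAE, vocoder).
+net: MMAudio = get_my_mmaudio(model.model_name).to(device, dtype).eval()
+net.load_weights(torch.load(model.model_path, map_location=device, weights_only=True))
+feature_utils = FeaturesUtils(tod_vae_ckpt=model.vae_path,
+                              synchformer_ckpt=model.synchformer_ckpt,
+                              enable_conditions=True,
+                              mode=model.mode,
+                              bigvgan_vocoder_ckpt=model.bigvgan_16k_path).to(device, dtype).eval()
+
+with torch.inference_mode():
+    # Load the video at the CLIP and Synchformer frame rates; the returned duration may be
+    # shorter than requested if the clip is short.
+    clip_frames, sync_frames, duration = load_video(Path('input.mp4'), duration_sec=8.0)
+    seq_cfg.duration = duration
+    net.update_seq_lengths(seq_cfg.latent_seq_len, seq_cfg.clip_seq_len, seq_cfg.sync_seq_len)
+
+    rng = torch.Generator(device=device)
+    rng.manual_seed(42)
+    fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=25)
+    audios = generate(clip_frames.unsqueeze(0),
+                      sync_frames.unsqueeze(0), ['your prompt'],
+                      negative_text=[''],
+                      feature_utils=feature_utils,
+                      net=net,
+                      fm=fm,
+                      rng=rng,
+                      cfg_strength=4.5)
+    torchaudio.save('output.flac', audios.float().cpu()[0], seq_cfg.sampling_rate)
+```
+
+As in `demo.py`, `update_seq_lengths` must be called again whenever the duration changes, and passing `None` for the clip and sync frames switches `generate` to text-to-audio.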
+ +## Acknowledgement +Many thanks to: +- [Make-An-Audio 2](https://github.com/bytedance/Make-An-Audio-2) for the 16kHz BigVGAN pretrained model +- [BigVGAN](https://github.com/NVIDIA/BigVGAN) +- [Synchformer](https://github.com/v-iashin/Synchformer) + diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..23c611b4ce3b46688a76b30d7f465c2b951766b1 --- /dev/null +++ b/app.py @@ -0,0 +1,149 @@ +import logging +from datetime import datetime +from pathlib import Path + +import gradio as gr +import torch +import torchaudio + +from mmaudio.eval_utils import (ModelConfig, all_model_cfg, generate, load_video, make_video, + setup_eval_logging) +from mmaudio.model.flow_matching import FlowMatching +from mmaudio.model.networks import MMAudio, get_my_mmaudio +from mmaudio.model.sequence_config import SequenceConfig +from mmaudio.model.utils.features_utils import FeaturesUtils + +torch.backends.cuda.matmul.allow_tf32 = True +torch.backends.cudnn.allow_tf32 = True + +log = logging.getLogger() + +device = 'cuda' +dtype = torch.bfloat16 + +model: ModelConfig = all_model_cfg['large_44k_v2'] +model.download_if_needed() +output_dir = Path('./output/gradio') + +setup_eval_logging() + + +def get_model() -> tuple[MMAudio, FeaturesUtils, SequenceConfig]: + seq_cfg = model.seq_cfg + + net: MMAudio = get_my_mmaudio(model.model_name).to(device, dtype).eval() + net.load_weights(torch.load(model.model_path, map_location=device, weights_only=True)) + log.info(f'Loaded weights from {model.model_path}') + + feature_utils = FeaturesUtils(tod_vae_ckpt=model.vae_path, + synchformer_ckpt=model.synchformer_ckpt, + enable_conditions=True, + mode=model.mode, + bigvgan_vocoder_ckpt=model.bigvgan_16k_path) + feature_utils = feature_utils.to(device, dtype).eval() + + return net, feature_utils, seq_cfg + + +net, feature_utils, seq_cfg = get_model() + + +@torch.inference_mode() +def video_to_audio(video: gr.Video, prompt: str, negative_prompt: str, seed: int, num_steps: int, + cfg_strength: float, duration: float): + + rng = torch.Generator(device=device) + rng.manual_seed(seed) + fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=num_steps) + + clip_frames, sync_frames, duration = load_video(video, duration) + clip_frames = clip_frames.unsqueeze(0) + sync_frames = sync_frames.unsqueeze(0) + seq_cfg.duration = duration + net.update_seq_lengths(seq_cfg.latent_seq_len, seq_cfg.clip_seq_len, seq_cfg.sync_seq_len) + + audios = generate(clip_frames, + sync_frames, [prompt], + negative_text=[negative_prompt], + feature_utils=feature_utils, + net=net, + fm=fm, + rng=rng, + cfg_strength=cfg_strength) + audio = audios.float().cpu()[0] + + current_time_string = datetime.now().strftime('%Y%m%d_%H%M%S') + output_dir.mkdir(exist_ok=True, parents=True) + video_save_path = output_dir / f'{current_time_string}.mp4' + make_video(video, + video_save_path, + audio, + sampling_rate=seq_cfg.sampling_rate, + duration_sec=seq_cfg.duration) + return video_save_path + + +@torch.inference_mode() +def text_to_audio(prompt: str, negative_prompt: str, seed: int, num_steps: int, cfg_strength: float, + duration: float): + + rng = torch.Generator(device=device) + rng.manual_seed(seed) + fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=num_steps) + + clip_frames = sync_frames = None + seq_cfg.duration = duration + net.update_seq_lengths(seq_cfg.latent_seq_len, seq_cfg.clip_seq_len, seq_cfg.sync_seq_len) + + audios = generate(clip_frames, + sync_frames, [prompt], + 
negative_text=[negative_prompt], + feature_utils=feature_utils, + net=net, + fm=fm, + rng=rng, + cfg_strength=cfg_strength) + audio = audios.float().cpu()[0] + + current_time_string = datetime.now().strftime('%Y%m%d_%H%M%S') + output_dir.mkdir(exist_ok=True, parents=True) + audio_save_path = output_dir / f'{current_time_string}.flac' + torchaudio.save(audio_save_path, audio, seq_cfg.sampling_rate) + return audio_save_path + + +video_to_audio_tab = gr.Interface( + fn=video_to_audio, + inputs=[ + gr.Video(), + gr.Text(label='Prompt'), + gr.Text(label='Negative prompt', value='music'), + gr.Number(label='Seed', value=0, precision=0, minimum=0), + gr.Number(label='Num steps', value=25, precision=0, minimum=1), + gr.Number(label='Guidance Strength', value=4.5, minimum=1), + gr.Number(label='Duration (sec)', value=8, minimum=1), + ], + outputs='playable_video', + cache_examples=False, + title='MMAudio — Video-to-Audio Synthesis', +) + +text_to_audio_tab = gr.Interface( + fn=text_to_audio, + inputs=[ + gr.Text(label='Prompt'), + gr.Text(label='Negative prompt'), + gr.Number(label='Seed', value=0, precision=0, minimum=0), + gr.Number(label='Num steps', value=25, precision=0, minimum=1), + gr.Number(label='Guidance Strength', value=4.5, minimum=1), + gr.Number(label='Duration (sec)', value=8, minimum=1), + ], + outputs='audio', + cache_examples=False, + title='MMAudio — Text-to-Audio Synthesis', +) + +if __name__ == "__main__": + gr.TabbedInterface([video_to_audio_tab, text_to_audio_tab], + ['Video-to-Audio', 'Text-to-Audio']).launch(server_port=17888, + allowed_paths=[output_dir]) diff --git a/demo.py b/demo.py new file mode 100644 index 0000000000000000000000000000000000000000..2789afcbe29360ff8896bda461a70875614890ac --- /dev/null +++ b/demo.py @@ -0,0 +1,135 @@ +import logging +from argparse import ArgumentParser +from pathlib import Path + +import torch +import torchaudio + +from mmaudio.eval_utils import (ModelConfig, all_model_cfg, generate, + load_video, make_video, setup_eval_logging) +from mmaudio.model.flow_matching import FlowMatching +from mmaudio.model.networks import MMAudio, get_my_mmaudio +from mmaudio.model.utils.features_utils import FeaturesUtils + +torch.backends.cuda.matmul.allow_tf32 = True +torch.backends.cudnn.allow_tf32 = True + +log = logging.getLogger() + + +@torch.inference_mode() +def main(): + setup_eval_logging() + + parser = ArgumentParser() + parser.add_argument('--variant', + type=str, + default='large_44k_v2', + help='small_16k, small_44k, medium_44k, large_44k, large_44k_v2') + parser.add_argument('--video', type=Path, help='Path to the video file') + parser.add_argument('--prompt', type=str, help='Input prompt', default='') + parser.add_argument('--negative_prompt', type=str, help='Negative prompt', default='') + parser.add_argument('--duration', type=float, default=8.0) + parser.add_argument('--cfg_strength', type=float, default=4.5) + parser.add_argument('--num_steps', type=int, default=25) + + parser.add_argument('--mask_away_clip', action='store_true') + + parser.add_argument('--output', type=Path, help='Output directory', default='./output') + parser.add_argument('--seed', type=int, help='Random seed', default=42) + parser.add_argument('--skip_video_composite', action='store_true') + parser.add_argument('--full_precision', action='store_true') + + args = parser.parse_args() + + if args.variant not in all_model_cfg: + raise ValueError(f'Unknown model variant: {args.variant}') + model: ModelConfig = all_model_cfg[args.variant] + model.download_if_needed() + 
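+    # download_if_needed() fetches any checkpoints missing from ./weights and ./ext_weights
+    # (flow-prediction network, VAE, the 16kHz BigVGAN vocoder when applicable, and Synchformer);
+    # expected MD5 checksums live in mmaudio/utils/download_utils.py.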
seq_cfg = model.seq_cfg + + if args.video: + video_path: Path = Path(args.video).expanduser() + else: + video_path = None + prompt: str = args.prompt + negative_prompt: str = args.negative_prompt + output_dir: str = args.output.expanduser() + seed: int = args.seed + num_steps: int = args.num_steps + duration: float = args.duration + cfg_strength: float = args.cfg_strength + skip_video_composite: bool = args.skip_video_composite + mask_away_clip: bool = args.mask_away_clip + + device = 'cuda' + dtype = torch.float32 if args.full_precision else torch.bfloat16 + + output_dir.mkdir(parents=True, exist_ok=True) + + # load a pretrained model + net: MMAudio = get_my_mmaudio(model.model_name).to(device, dtype).eval() + net.load_weights(torch.load(model.model_path, map_location=device, weights_only=True)) + log.info(f'Loaded weights from {model.model_path}') + + # misc setup + rng = torch.Generator(device=device) + rng.manual_seed(seed) + fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=num_steps) + + feature_utils = FeaturesUtils(tod_vae_ckpt=model.vae_path, + synchformer_ckpt=model.synchformer_ckpt, + enable_conditions=True, + mode=model.mode, + bigvgan_vocoder_ckpt=model.bigvgan_16k_path) + feature_utils = feature_utils.to(device, dtype).eval() + + if video_path is not None: + log.info(f'Using video {video_path}') + clip_frames, sync_frames, duration = load_video(video_path, duration) + if mask_away_clip: + clip_frames = None + else: + clip_frames = clip_frames.unsqueeze(0) + sync_frames = sync_frames.unsqueeze(0) + else: + log.info('No video provided -- text-to-audio mode') + clip_frames = sync_frames = None + + seq_cfg.duration = duration + net.update_seq_lengths(seq_cfg.latent_seq_len, seq_cfg.clip_seq_len, seq_cfg.sync_seq_len) + + log.info(f'Prompt: {prompt}') + log.info(f'Negative prompt: {negative_prompt}') + + audios = generate(clip_frames, + sync_frames, [prompt], + negative_text=[negative_prompt], + feature_utils=feature_utils, + net=net, + fm=fm, + rng=rng, + cfg_strength=cfg_strength) + audio = audios.float().cpu()[0] + if video_path is not None: + save_path = output_dir / f'{video_path.stem}.flac' + else: + safe_filename = prompt.replace(' ', '_').replace('/', '_').replace('.', '') + save_path = output_dir / f'{safe_filename}.flac' + torchaudio.save(save_path, audio, seq_cfg.sampling_rate) + + log.info(f'Audio saved to {save_path}') + if video_path is not None and not skip_video_composite: + video_save_path = output_dir / f'{video_path.stem}.mp4' + make_video(video_path, + video_save_path, + audio, + sampling_rate=seq_cfg.sampling_rate, + duration_sec=seq_cfg.duration) + log.info(f'Video saved to {output_dir / video_save_path}') + + log.info('Memory usage: %.2f GB', torch.cuda.max_memory_allocated() / (2**30)) + + +if __name__ == '__main__': + main() diff --git a/docs/images/icon.png b/docs/images/icon.png new file mode 100644 index 0000000000000000000000000000000000000000..c337eee9868e61173e61f583ef098668681555f5 Binary files /dev/null and b/docs/images/icon.png differ diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 0000000000000000000000000000000000000000..c2792c3baef3baad9e0220853d1232eebf3c266d --- /dev/null +++ b/docs/index.html @@ -0,0 +1,147 @@ + + + + + + + + + + + + MMAudio + + + + + + + + + + + + + +



+Taming Multimodal Joint Training for High-Quality Video-to-Audio Synthesis
+arXiv 2024
+1University of Illinois Urbana-Champaign  2Sony AI  3Sony Group Corporation
+[Code]
+TL;DR: MMAudio generates synchronized audio given video and/or text inputs.
+Demo
+ + + \ No newline at end of file diff --git a/docs/style.css b/docs/style.css new file mode 100644 index 0000000000000000000000000000000000000000..4946ef1f17b794d2122351bf24e4eb08f19b9637 --- /dev/null +++ b/docs/style.css @@ -0,0 +1,78 @@ +body { + font-family: 'Source Sans 3', sans-serif; + font-size: 18px; + margin-left: auto; + margin-right: auto; + font-weight: 400; + height: 100%; + max-width: 1000px; +} + +table { + width: 100%; + border-collapse: collapse; +} +th, td { + border: 1px solid #ddd; + padding: 8px; + text-align: center; +} +th { + background-color: #f2f2f2; +} +video { + width: 100%; + height: auto; +} +p { + font-size: 28px; +} +h2 { + font-size: 36px; +} + +.strong { + font-weight: 700; +} + +.light { + font-weight: 100; +} + +.heavy { + font-weight: 900; +} + +.column { + float: left; +} + +a:link, +a:visited { + color: #05538f; + text-decoration: none; +} + +a:hover { + color: #63cbdd; +} + +hr { + border: 0; + height: 1px; + background-image: linear-gradient(to right, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.75), rgba(0, 0, 0, 0)); +} + +.video-container { + position: relative; + padding-bottom: 56.25%; /* 16:9 */ + height: 0; + } + +.video-container iframe { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; +} \ No newline at end of file diff --git a/docs/style_videos.css b/docs/style_videos.css new file mode 100644 index 0000000000000000000000000000000000000000..9d641122166e3c3fdd8f3e104628686ed5dc9258 --- /dev/null +++ b/docs/style_videos.css @@ -0,0 +1,52 @@ +body { + font-family: 'Source Sans 3', sans-serif; + font-size: 1.5vh; + font-weight: 400; +} + +table { + width: 100%; + border-collapse: collapse; +} +th, td { + border: 1px solid #ddd; + padding: 8px; + text-align: center; +} +th { + background-color: #f2f2f2; +} +video { + width: 100%; + height: auto; +} +p { + font-size: 1.5vh; + font-weight: bold; +} +h2 { + font-size: 2vh; + font-weight: bold; +} + +.video-container { + position: relative; + padding-bottom: 56.25%; /* 16:9 */ + height: 0; + } + +.video-container iframe { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; +} + +.video-header { + background-color: #f2f2f2; + text-align: center; + font-size: 1.5vh; + font-weight: bold; + padding: 8px; +} \ No newline at end of file diff --git a/docs/video_gen.html b/docs/video_gen.html new file mode 100644 index 0000000000000000000000000000000000000000..da1a9d95393153b1264cc4c58ee78bda23379a2a --- /dev/null +++ b/docs/video_gen.html @@ -0,0 +1,254 @@ + + + + + + + + + + MMAudio + + + + + + + + + + + + +
+Comparisons with Movie Gen Audio on Videos Generated by MovieGen (each example: Movie Gen Audio vs. Ours)
+Example 1: Ice cracking with sharp snapping sound, and metal tool scraping against the ice surface.
+Example 2: Rhythmic splashing and lapping of water.
+Example 3: Shovel scrapes against dry earth.
+(Failure case) Example 4: Creamy sound of mashed potatoes being scooped.
+
+Results on Videos Generated by Hunyuan: Typing; Water is rushing down a stream and pouring; Waves on beach; Water droplet
+
+Results on Videos Generated by Sora: Ships riding waves; Train (no text prompt given); Seashore (no text prompt given); Surfing (failure: unprompted music)
+
+Results on Videos Generated by Mochi 1: Magical fire and lightning (no text prompt given); Storm (no text prompt given)
+
+Results on Videos Generated by LTX-Video: Firewood burning and cracking; Waterfall, water splashing
+ + + \ No newline at end of file diff --git a/docs/video_main.html b/docs/video_main.html new file mode 100644 index 0000000000000000000000000000000000000000..36c3d996cb5bc0e9050fd217b2b1a056b085a88e --- /dev/null +++ b/docs/video_main.html @@ -0,0 +1,98 @@ + + + + + + + + + + MMAudio + + + + + + + + + + + + + +

+Index
+(Click on the links to load the corresponding videos) Back to project page
+1. Comparisons with Movie Gen Audio on Videos Generated by MovieGen
+2. Results on Videos Generated by Hunyuan and Sora
+3. Results on Videos Generated by Mochi 1 and LTX-Video
+4. On VGGSound
+   - Example 1: Wolf howling
+   - Example 2: Striking a golf ball
+   - Example 3: Hitting a drum
+   - Example 4: Dog barking
+   - Example 5: Playing a string instrument
+   - Example 6: A group of people playing tambourines
+   - Extra results & failure cases
+ + + \ No newline at end of file diff --git a/docs/video_vgg.html b/docs/video_vgg.html new file mode 100644 index 0000000000000000000000000000000000000000..945b33660ed46c3f7acad3157c4181219b248533 --- /dev/null +++ b/docs/video_vgg.html @@ -0,0 +1,452 @@ + + + + + + + + + + MMAudio + + + + + + + + + + +
+Comparisons with state-of-the-art methods in VGGSound (each example: Ground-truth, Ours, V2A-Mapper, FoleyCrafter, Frieren, VATT, V-AURA, Seeing and Hearing)
+Example 1: Wolf howling.
+Example 2: Striking a golf ball.
+Example 3: Hitting a drum.
+Example 4: Dog barking.
+Example 5: Playing a string instrument.
+Example 6: A group of people playing tambourines.
+
+Extra results: Moving train; Water splashing; Skateboarding; Synchronized clapping
+
+Failure cases: Human speech; Unfamiliar vision input
+ + + \ No newline at end of file diff --git a/mmaudio/__init__.py b/mmaudio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mmaudio/eval_utils.py b/mmaudio/eval_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..620e97ce16f49d3bcd53b86815dea2ec6a567689 --- /dev/null +++ b/mmaudio/eval_utils.py @@ -0,0 +1,245 @@ +import dataclasses +import logging +from pathlib import Path +from typing import Optional + +import torch +from colorlog import ColoredFormatter +from torchvision.transforms import v2 +from torio.io import StreamingMediaDecoder, StreamingMediaEncoder + +from mmaudio.model.flow_matching import FlowMatching +from mmaudio.model.networks import MMAudio +from mmaudio.model.sequence_config import (CONFIG_16K, CONFIG_44K, SequenceConfig) +from mmaudio.model.utils.features_utils import FeaturesUtils +from mmaudio.utils.download_utils import download_model_if_needed + +log = logging.getLogger() + + +@dataclasses.dataclass +class ModelConfig: + model_name: str + model_path: Path + vae_path: Path + bigvgan_16k_path: Optional[Path] + mode: str + synchformer_ckpt: Path = Path('./ext_weights/synchformer_state_dict.pth') + + @property + def seq_cfg(self) -> SequenceConfig: + if self.mode == '16k': + return CONFIG_16K + elif self.mode == '44k': + return CONFIG_44K + + def download_if_needed(self): + download_model_if_needed(self.model_path) + download_model_if_needed(self.vae_path) + if self.bigvgan_16k_path is not None: + download_model_if_needed(self.bigvgan_16k_path) + download_model_if_needed(self.synchformer_ckpt) + + +small_16k = ModelConfig(model_name='small_16k', + model_path=Path('./weights/mmaudio_small_16k.pth'), + vae_path=Path('./ext_weights/v1-16.pth'), + bigvgan_16k_path=Path('./ext_weights/best_netG.pt'), + mode='16k') +small_44k = ModelConfig(model_name='small_44k', + model_path=Path('./weights/mmaudio_small_44k.pth'), + vae_path=Path('./ext_weights/v1-44.pth'), + bigvgan_16k_path=None, + mode='44k') +medium_44k = ModelConfig(model_name='medium_44k', + model_path=Path('./weights/mmaudio_medium_44k.pth'), + vae_path=Path('./ext_weights/v1-44.pth'), + bigvgan_16k_path=None, + mode='44k') +large_44k = ModelConfig(model_name='large_44k', + model_path=Path('./weights/mmaudio_large_44k.pth'), + vae_path=Path('./ext_weights/v1-44.pth'), + bigvgan_16k_path=None, + mode='44k') +large_44k_v2 = ModelConfig(model_name='large_44k_v2', + model_path=Path('./weights/mmaudio_large_44k_v2.pth'), + vae_path=Path('./ext_weights/v1-44.pth'), + bigvgan_16k_path=None, + mode='44k') +all_model_cfg: dict[str, ModelConfig] = { + 'small_16k': small_16k, + 'small_44k': small_44k, + 'medium_44k': medium_44k, + 'large_44k': large_44k, + 'large_44k_v2': large_44k_v2, +} + + +def generate(clip_video: Optional[torch.Tensor], + sync_video: Optional[torch.Tensor], + text: Optional[list[str]], + *, + negative_text: Optional[list[str]] = None, + feature_utils: FeaturesUtils, + net: MMAudio, + fm: FlowMatching, + rng: torch.Generator, + cfg_strength: float): + device = feature_utils.device + dtype = feature_utils.dtype + + bs = len(text) + if clip_video is not None: + clip_video = clip_video.to(device, dtype, non_blocking=True) + clip_features = feature_utils.encode_video_with_clip(clip_video, batch_size=bs) + else: + clip_features = net.get_empty_clip_sequence(bs) + + if sync_video is not None: + sync_video = sync_video.to(device, dtype, non_blocking=True) + sync_features = 
feature_utils.encode_video_with_sync(sync_video, batch_size=bs) + else: + sync_features = net.get_empty_sync_sequence(bs) + + if text is not None: + text_features = feature_utils.encode_text(text) + else: + text_features = net.get_empty_string_sequence(bs) + + if negative_text is not None: + assert len(negative_text) == bs + negative_text_features = feature_utils.encode_text(negative_text) + else: + negative_text_features = net.get_empty_string_sequence(bs) + + x0 = torch.randn(bs, + net.latent_seq_len, + net.latent_dim, + device=device, + dtype=dtype, + generator=rng) + preprocessed_conditions = net.preprocess_conditions(clip_features, sync_features, text_features) + empty_conditions = net.get_empty_conditions( + bs, negative_text_features=negative_text_features if negative_text is not None else None) + + cfg_ode_wrapper = lambda t, x: net.ode_wrapper(t, x, preprocessed_conditions, empty_conditions, + cfg_strength) + x1 = fm.to_data(cfg_ode_wrapper, x0) + x1 = net.unnormalize(x1) + spec = feature_utils.decode(x1) + audio = feature_utils.vocode(spec) + return audio + + +LOGFORMAT = " %(log_color)s%(levelname)-8s%(reset)s | %(log_color)s%(message)s%(reset)s" + + +def setup_eval_logging(log_level: int = logging.INFO): + logging.root.setLevel(log_level) + formatter = ColoredFormatter(LOGFORMAT) + stream = logging.StreamHandler() + stream.setLevel(log_level) + stream.setFormatter(formatter) + log = logging.getLogger() + log.setLevel(log_level) + log.addHandler(stream) + + +def load_video(video_path: Path, duration_sec: float) -> tuple[torch.Tensor, torch.Tensor, float]: + _CLIP_SIZE = 384 + _CLIP_FPS = 8.0 + + _SYNC_SIZE = 224 + _SYNC_FPS = 25.0 + + clip_transform = v2.Compose([ + v2.Resize((_CLIP_SIZE, _CLIP_SIZE), interpolation=v2.InterpolationMode.BICUBIC), + v2.ToImage(), + v2.ToDtype(torch.float32, scale=True), + ]) + + sync_transform = v2.Compose([ + v2.Resize(_SYNC_SIZE, interpolation=v2.InterpolationMode.BICUBIC), + v2.CenterCrop(_SYNC_SIZE), + v2.ToImage(), + v2.ToDtype(torch.float32, scale=True), + v2.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + ]) + + reader = StreamingMediaDecoder(video_path) + reader.add_basic_video_stream( + frames_per_chunk=int(_CLIP_FPS * duration_sec), + frame_rate=_CLIP_FPS, + format='rgb24', + ) + reader.add_basic_video_stream( + frames_per_chunk=int(_SYNC_FPS * duration_sec), + frame_rate=_SYNC_FPS, + format='rgb24', + ) + + reader.fill_buffer() + data_chunk = reader.pop_chunks() + clip_chunk = data_chunk[0] + sync_chunk = data_chunk[1] + assert clip_chunk is not None + assert sync_chunk is not None + + clip_frames = clip_transform(clip_chunk) + sync_frames = sync_transform(sync_chunk) + + clip_length_sec = clip_frames.shape[0] / _CLIP_FPS + sync_length_sec = sync_frames.shape[0] / _SYNC_FPS + + if clip_length_sec < duration_sec: + log.warning(f'Clip video is too short: {clip_length_sec:.2f} < {duration_sec:.2f}') + log.warning(f'Truncating to {clip_length_sec:.2f} sec') + duration_sec = clip_length_sec + + if sync_length_sec < duration_sec: + log.warning(f'Sync video is too short: {sync_length_sec:.2f} < {duration_sec:.2f}') + log.warning(f'Truncating to {sync_length_sec:.2f} sec') + duration_sec = sync_length_sec + + clip_frames = clip_frames[:int(_CLIP_FPS * duration_sec)] + sync_frames = sync_frames[:int(_SYNC_FPS * duration_sec)] + + return clip_frames, sync_frames, duration_sec + + +def make_video(video_path: Path, output_path: Path, audio: torch.Tensor, sampling_rate: int, + duration_sec: float): + + approx_max_length = int(duration_sec * 
60) + reader = StreamingMediaDecoder(video_path) + reader.add_basic_video_stream( + frames_per_chunk=approx_max_length, + format='rgb24', + ) + reader.fill_buffer() + video_chunk = reader.pop_chunks()[0] + assert video_chunk is not None + + fps = int(reader.get_out_stream_info(0).frame_rate) + if fps > 60: + log.warning(f'This code supports only up to 60 fps, but the video has {fps} fps') + log.warning(f'Just change the *60 above me') + + h, w = video_chunk.shape[-2:] + video_chunk = video_chunk[:int(fps * duration_sec)] + + writer = StreamingMediaEncoder(output_path) + writer.add_audio_stream( + sample_rate=sampling_rate, + num_channels=audio.shape[0], + encoder='aac', # 'flac' does not work for some reason? + ) + writer.add_video_stream(frame_rate=fps, + width=w, + height=h, + format='rgb24', + encoder='libx264', + encoder_format='yuv420p') + with writer.open(): + writer.write_audio_chunk(0, audio.float().transpose(0, 1)) + writer.write_video_chunk(1, video_chunk) diff --git a/mmaudio/ext/__init__.py b/mmaudio/ext/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/mmaudio/ext/__init__.py @@ -0,0 +1 @@ + diff --git a/mmaudio/ext/autoencoder/__init__.py b/mmaudio/ext/autoencoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e5a876391c1e48970e93ff45f212f21f86d4d0c9 --- /dev/null +++ b/mmaudio/ext/autoencoder/__init__.py @@ -0,0 +1 @@ +from .autoencoder import AutoEncoderModule diff --git a/mmaudio/ext/autoencoder/autoencoder.py b/mmaudio/ext/autoencoder/autoencoder.py new file mode 100644 index 0000000000000000000000000000000000000000..2a2aa3941627f1dac764292e1f2dd7d79beecb32 --- /dev/null +++ b/mmaudio/ext/autoencoder/autoencoder.py @@ -0,0 +1,48 @@ +from typing import Literal, Optional + +import torch +import torch.nn as nn + +from mmaudio.ext.autoencoder.vae import VAE, get_my_vae +from mmaudio.ext.bigvgan import BigVGAN +from mmaudio.ext.bigvgan_v2.bigvgan import BigVGAN as BigVGANv2 +from mmaudio.model.utils.distributions import DiagonalGaussianDistribution + + +class AutoEncoderModule(nn.Module): + + def __init__(self, + *, + vae_ckpt_path, + vocoder_ckpt_path: Optional[str] = None, + mode: Literal['16k', '44k']): + super().__init__() + self.vae: VAE = get_my_vae(mode).eval() + vae_state_dict = torch.load(vae_ckpt_path, weights_only=True, map_location='cpu') + self.vae.load_state_dict(vae_state_dict) + self.vae.remove_weight_norm() + + if mode == '16k': + assert vocoder_ckpt_path is not None + self.vocoder = BigVGAN(vocoder_ckpt_path).eval() + elif mode == '44k': + self.vocoder = BigVGANv2.from_pretrained('nvidia/bigvgan_v2_44khz_128band_512x', + use_cuda_kernel=False) + self.vocoder.remove_weight_norm() + else: + raise ValueError(f'Unknown mode: {mode}') + + for param in self.parameters(): + param.requires_grad = False + + @torch.inference_mode() + def encode(self, x: torch.Tensor) -> DiagonalGaussianDistribution: + return self.vae.encode(x) + + @torch.inference_mode() + def decode(self, z: torch.Tensor) -> torch.Tensor: + return self.vae.decode(z) + + @torch.inference_mode() + def vocode(self, spec: torch.Tensor) -> torch.Tensor: + return self.vocoder(spec) diff --git a/mmaudio/ext/autoencoder/edm2_utils.py b/mmaudio/ext/autoencoder/edm2_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a18ffba5cc42214fddf1300034be2eff2760025c --- /dev/null +++ b/mmaudio/ext/autoencoder/edm2_utils.py @@ -0,0 +1,168 @@ +# Copyright (c) 2024, NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. +# +# This work is licensed under a Creative Commons +# Attribution-NonCommercial-ShareAlike 4.0 International License. +# You should have received a copy of the license along with this +# work. If not, see http://creativecommons.org/licenses/by-nc-sa/4.0/ +"""Improved diffusion model architecture proposed in the paper +"Analyzing and Improving the Training Dynamics of Diffusion Models".""" + +import numpy as np +import torch + +#---------------------------------------------------------------------------- +# Variant of constant() that inherits dtype and device from the given +# reference tensor by default. + +_constant_cache = dict() + + +def constant(value, shape=None, dtype=None, device=None, memory_format=None): + value = np.asarray(value) + if shape is not None: + shape = tuple(shape) + if dtype is None: + dtype = torch.get_default_dtype() + if device is None: + device = torch.device('cpu') + if memory_format is None: + memory_format = torch.contiguous_format + + key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format) + tensor = _constant_cache.get(key, None) + if tensor is None: + tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device) + if shape is not None: + tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape)) + tensor = tensor.contiguous(memory_format=memory_format) + _constant_cache[key] = tensor + return tensor + + +def const_like(ref, value, shape=None, dtype=None, device=None, memory_format=None): + if dtype is None: + dtype = ref.dtype + if device is None: + device = ref.device + return constant(value, shape=shape, dtype=dtype, device=device, memory_format=memory_format) + + +#---------------------------------------------------------------------------- +# Normalize given tensor to unit magnitude with respect to the given +# dimensions. Default = all dimensions except the first. + + +def normalize(x, dim=None, eps=1e-4): + if dim is None: + dim = list(range(1, x.ndim)) + norm = torch.linalg.vector_norm(x, dim=dim, keepdim=True, dtype=torch.float32) + norm = torch.add(eps, norm, alpha=np.sqrt(norm.numel() / x.numel())) + return x / norm.to(x.dtype) + + +class Normalize(torch.nn.Module): + + def __init__(self, dim=None, eps=1e-4): + super().__init__() + self.dim = dim + self.eps = eps + + def forward(self, x): + return normalize(x, dim=self.dim, eps=self.eps) + + +#---------------------------------------------------------------------------- +# Upsample or downsample the given tensor with the given filter, +# or keep it as is. + + +def resample(x, f=[1, 1], mode='keep'): + if mode == 'keep': + return x + f = np.float32(f) + assert f.ndim == 1 and len(f) % 2 == 0 + pad = (len(f) - 1) // 2 + f = f / f.sum() + f = np.outer(f, f)[np.newaxis, np.newaxis, :, :] + f = const_like(x, f) + c = x.shape[1] + if mode == 'down': + return torch.nn.functional.conv2d(x, + f.tile([c, 1, 1, 1]), + groups=c, + stride=2, + padding=(pad, )) + assert mode == 'up' + return torch.nn.functional.conv_transpose2d(x, (f * 4).tile([c, 1, 1, 1]), + groups=c, + stride=2, + padding=(pad, )) + + +#---------------------------------------------------------------------------- +# Magnitude-preserving SiLU (Equation 81). + + +def mp_silu(x): + return torch.nn.functional.silu(x) / 0.596 + + +class MPSiLU(torch.nn.Module): + + def forward(self, x): + return mp_silu(x) + + +#---------------------------------------------------------------------------- +# Magnitude-preserving sum (Equation 88). 
+ + +def mp_sum(a, b, t=0.5): + return a.lerp(b, t) / np.sqrt((1 - t)**2 + t**2) + + +#---------------------------------------------------------------------------- +# Magnitude-preserving concatenation (Equation 103). + + +def mp_cat(a, b, dim=1, t=0.5): + Na = a.shape[dim] + Nb = b.shape[dim] + C = np.sqrt((Na + Nb) / ((1 - t)**2 + t**2)) + wa = C / np.sqrt(Na) * (1 - t) + wb = C / np.sqrt(Nb) * t + return torch.cat([wa * a, wb * b], dim=dim) + + +#---------------------------------------------------------------------------- +# Magnitude-preserving convolution or fully-connected layer (Equation 47) +# with force weight normalization (Equation 66). + + +class MPConv1D(torch.nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size): + super().__init__() + self.out_channels = out_channels + self.weight = torch.nn.Parameter(torch.randn(out_channels, in_channels, kernel_size)) + + self.weight_norm_removed = False + + def forward(self, x, gain=1): + assert self.weight_norm_removed, 'call remove_weight_norm() before inference' + + w = self.weight * gain + if w.ndim == 2: + return x @ w.t() + assert w.ndim == 3 + return torch.nn.functional.conv1d(x, w, padding=(w.shape[-1] // 2, )) + + def remove_weight_norm(self): + w = self.weight.to(torch.float32) + w = normalize(w) # traditional weight normalization + w = w / np.sqrt(w[0].numel()) + w = w.to(self.weight.dtype) + self.weight.data.copy_(w) + + self.weight_norm_removed = True + return self diff --git a/mmaudio/ext/autoencoder/vae.py b/mmaudio/ext/autoencoder/vae.py new file mode 100644 index 0000000000000000000000000000000000000000..7b7181c680b90557345847d02f41699b604565ac --- /dev/null +++ b/mmaudio/ext/autoencoder/vae.py @@ -0,0 +1,369 @@ +import logging +from typing import Optional + +import torch +import torch.nn as nn + +from mmaudio.ext.autoencoder.edm2_utils import MPConv1D +from mmaudio.ext.autoencoder.vae_modules import (AttnBlock1D, Downsample1D, ResnetBlock1D, + Upsample1D, nonlinearity) +from mmaudio.model.utils.distributions import DiagonalGaussianDistribution + +log = logging.getLogger() + +DATA_MEAN_80D = [ + -1.6058, -1.3676, -1.2520, -1.2453, -1.2078, -1.2224, -1.2419, -1.2439, -1.2922, -1.2927, + -1.3170, -1.3543, -1.3401, -1.3836, -1.3907, -1.3912, -1.4313, -1.4152, -1.4527, -1.4728, + -1.4568, -1.5101, -1.5051, -1.5172, -1.5623, -1.5373, -1.5746, -1.5687, -1.6032, -1.6131, + -1.6081, -1.6331, -1.6489, -1.6489, -1.6700, -1.6738, -1.6953, -1.6969, -1.7048, -1.7280, + -1.7361, -1.7495, -1.7658, -1.7814, -1.7889, -1.8064, -1.8221, -1.8377, -1.8417, -1.8643, + -1.8857, -1.8929, -1.9173, -1.9379, -1.9531, -1.9673, -1.9824, -2.0042, -2.0215, -2.0436, + -2.0766, -2.1064, -2.1418, -2.1855, -2.2319, -2.2767, -2.3161, -2.3572, -2.3954, -2.4282, + -2.4659, -2.5072, -2.5552, -2.6074, -2.6584, -2.7107, -2.7634, -2.8266, -2.8981, -2.9673 +] + +DATA_STD_80D = [ + 1.0291, 1.0411, 1.0043, 0.9820, 0.9677, 0.9543, 0.9450, 0.9392, 0.9343, 0.9297, 0.9276, 0.9263, + 0.9242, 0.9254, 0.9232, 0.9281, 0.9263, 0.9315, 0.9274, 0.9247, 0.9277, 0.9199, 0.9188, 0.9194, + 0.9160, 0.9161, 0.9146, 0.9161, 0.9100, 0.9095, 0.9145, 0.9076, 0.9066, 0.9095, 0.9032, 0.9043, + 0.9038, 0.9011, 0.9019, 0.9010, 0.8984, 0.8983, 0.8986, 0.8961, 0.8962, 0.8978, 0.8962, 0.8973, + 0.8993, 0.8976, 0.8995, 0.9016, 0.8982, 0.8972, 0.8974, 0.8949, 0.8940, 0.8947, 0.8936, 0.8939, + 0.8951, 0.8956, 0.9017, 0.9167, 0.9436, 0.9690, 1.0003, 1.0225, 1.0381, 1.0491, 1.0545, 1.0604, + 1.0761, 1.0929, 1.1089, 1.1196, 1.1176, 1.1156, 1.1117, 1.1070 +] + +DATA_MEAN_128D = [ 
+ -3.3462, -2.6723, -2.4893, -2.3143, -2.2664, -2.3317, -2.1802, -2.4006, -2.2357, -2.4597, + -2.3717, -2.4690, -2.5142, -2.4919, -2.6610, -2.5047, -2.7483, -2.5926, -2.7462, -2.7033, + -2.7386, -2.8112, -2.7502, -2.9594, -2.7473, -3.0035, -2.8891, -2.9922, -2.9856, -3.0157, + -3.1191, -2.9893, -3.1718, -3.0745, -3.1879, -3.2310, -3.1424, -3.2296, -3.2791, -3.2782, + -3.2756, -3.3134, -3.3509, -3.3750, -3.3951, -3.3698, -3.4505, -3.4509, -3.5089, -3.4647, + -3.5536, -3.5788, -3.5867, -3.6036, -3.6400, -3.6747, -3.7072, -3.7279, -3.7283, -3.7795, + -3.8259, -3.8447, -3.8663, -3.9182, -3.9605, -3.9861, -4.0105, -4.0373, -4.0762, -4.1121, + -4.1488, -4.1874, -4.2461, -4.3170, -4.3639, -4.4452, -4.5282, -4.6297, -4.7019, -4.7960, + -4.8700, -4.9507, -5.0303, -5.0866, -5.1634, -5.2342, -5.3242, -5.4053, -5.4927, -5.5712, + -5.6464, -5.7052, -5.7619, -5.8410, -5.9188, -6.0103, -6.0955, -6.1673, -6.2362, -6.3120, + -6.3926, -6.4797, -6.5565, -6.6511, -6.8130, -6.9961, -7.1275, -7.2457, -7.3576, -7.4663, + -7.6136, -7.7469, -7.8815, -8.0132, -8.1515, -8.3071, -8.4722, -8.7418, -9.3975, -9.6628, + -9.7671, -9.8863, -9.9992, -10.0860, -10.1709, -10.5418, -11.2795, -11.3861 +] + +DATA_STD_128D = [ + 2.3804, 2.4368, 2.3772, 2.3145, 2.2803, 2.2510, 2.2316, 2.2083, 2.1996, 2.1835, 2.1769, 2.1659, + 2.1631, 2.1618, 2.1540, 2.1606, 2.1571, 2.1567, 2.1612, 2.1579, 2.1679, 2.1683, 2.1634, 2.1557, + 2.1668, 2.1518, 2.1415, 2.1449, 2.1406, 2.1350, 2.1313, 2.1415, 2.1281, 2.1352, 2.1219, 2.1182, + 2.1327, 2.1195, 2.1137, 2.1080, 2.1179, 2.1036, 2.1087, 2.1036, 2.1015, 2.1068, 2.0975, 2.0991, + 2.0902, 2.1015, 2.0857, 2.0920, 2.0893, 2.0897, 2.0910, 2.0881, 2.0925, 2.0873, 2.0960, 2.0900, + 2.0957, 2.0958, 2.0978, 2.0936, 2.0886, 2.0905, 2.0845, 2.0855, 2.0796, 2.0840, 2.0813, 2.0817, + 2.0838, 2.0840, 2.0917, 2.1061, 2.1431, 2.1976, 2.2482, 2.3055, 2.3700, 2.4088, 2.4372, 2.4609, + 2.4731, 2.4847, 2.5072, 2.5451, 2.5772, 2.6147, 2.6529, 2.6596, 2.6645, 2.6726, 2.6803, 2.6812, + 2.6899, 2.6916, 2.6931, 2.6998, 2.7062, 2.7262, 2.7222, 2.7158, 2.7041, 2.7485, 2.7491, 2.7451, + 2.7485, 2.7233, 2.7297, 2.7233, 2.7145, 2.6958, 2.6788, 2.6439, 2.6007, 2.4786, 2.2469, 2.1877, + 2.1392, 2.0717, 2.0107, 1.9676, 1.9140, 1.7102, 0.9101, 0.7164 +] + + +class VAE(nn.Module): + + def __init__( + self, + *, + data_dim: int, + embed_dim: int, + hidden_dim: int, + ): + super().__init__() + + if data_dim == 80: + self.data_mean = nn.Buffer(torch.tensor(DATA_MEAN_80D, dtype=torch.float32).cuda()) + self.data_std = nn.Buffer(torch.tensor(DATA_STD_80D, dtype=torch.float32).cuda()) + elif data_dim == 128: + self.data_mean = nn.Buffer(torch.tensor(DATA_MEAN_128D, dtype=torch.float32).cuda()) + self.data_std = nn.Buffer(torch.tensor(DATA_STD_128D, dtype=torch.float32).cuda()) + + self.data_mean = self.data_mean.view(1, -1, 1) + self.data_std = self.data_std.view(1, -1, 1) + + self.encoder = Encoder1D( + dim=hidden_dim, + ch_mult=(1, 2, 4), + num_res_blocks=2, + attn_layers=[3], + down_layers=[0], + in_dim=data_dim, + embed_dim=embed_dim, + ) + self.decoder = Decoder1D( + dim=hidden_dim, + ch_mult=(1, 2, 4), + num_res_blocks=2, + attn_layers=[3], + down_layers=[0], + in_dim=data_dim, + out_dim=data_dim, + embed_dim=embed_dim, + ) + + self.embed_dim = embed_dim + # self.quant_conv = nn.Conv1d(2 * embed_dim, 2 * embed_dim, 1) + # self.post_quant_conv = nn.Conv1d(embed_dim, embed_dim, 1) + + self.initialize_weights() + + def initialize_weights(self): + pass + + def encode(self, x: torch.Tensor, normalize: bool = True) -> 
DiagonalGaussianDistribution: + if normalize: + x = self.normalize(x) + moments = self.encoder(x) + posterior = DiagonalGaussianDistribution(moments) + return posterior + + def decode(self, z: torch.Tensor, unnormalize: bool = True) -> torch.Tensor: + dec = self.decoder(z) + if unnormalize: + dec = self.unnormalize(dec) + return dec + + def normalize(self, x: torch.Tensor) -> torch.Tensor: + return (x - self.data_mean) / self.data_std + + def unnormalize(self, x: torch.Tensor) -> torch.Tensor: + return x * self.data_std + self.data_mean + + def forward( + self, + x: torch.Tensor, + sample_posterior: bool = True, + rng: Optional[torch.Generator] = None, + normalize: bool = True, + unnormalize: bool = True, + ) -> tuple[torch.Tensor, DiagonalGaussianDistribution]: + + posterior = self.encode(x, normalize=normalize) + if sample_posterior: + z = posterior.sample(rng) + else: + z = posterior.mode() + dec = self.decode(z, unnormalize=unnormalize) + return dec, posterior + + def load_weights(self, src_dict) -> None: + self.load_state_dict(src_dict, strict=True) + + @property + def device(self) -> torch.device: + return next(self.parameters()).device + + def get_last_layer(self): + return self.decoder.conv_out.weight + + def remove_weight_norm(self): + for name, m in self.named_modules(): + if isinstance(m, MPConv1D): + m.remove_weight_norm() + log.debug(f"Removed weight norm from {name}") + return self + + +class Encoder1D(nn.Module): + + def __init__(self, + *, + dim: int, + ch_mult: tuple[int] = (1, 2, 4, 8), + num_res_blocks: int, + attn_layers: list[int] = [], + down_layers: list[int] = [], + resamp_with_conv: bool = True, + in_dim: int, + embed_dim: int, + double_z: bool = True, + kernel_size: int = 3, + clip_act: float = 256.0): + super().__init__() + self.dim = dim + self.num_layers = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.in_channels = in_dim + self.clip_act = clip_act + self.down_layers = down_layers + self.attn_layers = attn_layers + self.conv_in = MPConv1D(in_dim, self.dim, kernel_size=kernel_size) + + in_ch_mult = (1, ) + tuple(ch_mult) + self.in_ch_mult = in_ch_mult + # downsampling + self.down = nn.ModuleList() + for i_level in range(self.num_layers): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = dim * in_ch_mult[i_level] + block_out = dim * ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append( + ResnetBlock1D(in_dim=block_in, + out_dim=block_out, + kernel_size=kernel_size, + use_norm=True)) + block_in = block_out + if i_level in attn_layers: + attn.append(AttnBlock1D(block_in)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level in down_layers: + down.downsample = Downsample1D(block_in, resamp_with_conv) + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock1D(in_dim=block_in, + out_dim=block_in, + kernel_size=kernel_size, + use_norm=True) + self.mid.attn_1 = AttnBlock1D(block_in) + self.mid.block_2 = ResnetBlock1D(in_dim=block_in, + out_dim=block_in, + kernel_size=kernel_size, + use_norm=True) + + # end + self.conv_out = MPConv1D(block_in, + 2 * embed_dim if double_z else embed_dim, + kernel_size=kernel_size) + + self.learnable_gain = nn.Parameter(torch.zeros([])) + + def forward(self, x): + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_layers): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1]) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + h = 
h.clamp(-self.clip_act, self.clip_act) + hs.append(h) + if i_level in self.down_layers: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + h = h.clamp(-self.clip_act, self.clip_act) + + # end + h = nonlinearity(h) + h = self.conv_out(h, gain=(self.learnable_gain + 1)) + return h + + +class Decoder1D(nn.Module): + + def __init__(self, + *, + dim: int, + out_dim: int, + ch_mult: tuple[int] = (1, 2, 4, 8), + num_res_blocks: int, + attn_layers: list[int] = [], + down_layers: list[int] = [], + kernel_size: int = 3, + resamp_with_conv: bool = True, + in_dim: int, + embed_dim: int, + clip_act: float = 256.0): + super().__init__() + self.ch = dim + self.num_layers = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.in_channels = in_dim + self.clip_act = clip_act + self.down_layers = [i + 1 for i in down_layers] # each downlayer add one + + # compute in_ch_mult, block_in and curr_res at lowest res + block_in = dim * ch_mult[self.num_layers - 1] + + # z to block_in + self.conv_in = MPConv1D(embed_dim, block_in, kernel_size=kernel_size) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock1D(in_dim=block_in, out_dim=block_in, use_norm=True) + self.mid.attn_1 = AttnBlock1D(block_in) + self.mid.block_2 = ResnetBlock1D(in_dim=block_in, out_dim=block_in, use_norm=True) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_layers)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = dim * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + block.append(ResnetBlock1D(in_dim=block_in, out_dim=block_out, use_norm=True)) + block_in = block_out + if i_level in attn_layers: + attn.append(AttnBlock1D(block_in)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level in self.down_layers: + up.upsample = Upsample1D(block_in, resamp_with_conv) + self.up.insert(0, up) # prepend to get consistent order + + # end + self.conv_out = MPConv1D(block_in, out_dim, kernel_size=kernel_size) + self.learnable_gain = nn.Parameter(torch.zeros([])) + + def forward(self, z): + # z to block_in + h = self.conv_in(z) + + # middle + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + h = h.clamp(-self.clip_act, self.clip_act) + + # upsampling + for i_level in reversed(range(self.num_layers)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block](h) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + h = h.clamp(-self.clip_act, self.clip_act) + if i_level in self.down_layers: + h = self.up[i_level].upsample(h) + + h = nonlinearity(h) + h = self.conv_out(h, gain=(self.learnable_gain + 1)) + return h + + +def VAE_16k(**kwargs) -> VAE: + return VAE(data_dim=80, embed_dim=20, hidden_dim=384, **kwargs) + + +def VAE_44k(**kwargs) -> VAE: + return VAE(data_dim=128, embed_dim=40, hidden_dim=512, **kwargs) + + +def get_my_vae(name: str, **kwargs) -> VAE: + if name == '16k': + return VAE_16k(**kwargs) + if name == '44k': + return VAE_44k(**kwargs) + raise ValueError(f'Unknown model: {name}') + + +if __name__ == '__main__': + network = get_my_vae('standard') + + # print the number of parameters in terms of millions + num_params = sum(p.numel() for p in network.parameters()) / 1e6 + print(f'Number of parameters: {num_params:.2f}M') diff --git a/mmaudio/ext/autoencoder/vae_modules.py b/mmaudio/ext/autoencoder/vae_modules.py new file mode 100644 index 
0000000000000000000000000000000000000000..c59ff41e86303e518688fd3f56ade08f4550f2aa --- /dev/null +++ b/mmaudio/ext/autoencoder/vae_modules.py @@ -0,0 +1,117 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange + +from mmaudio.ext.autoencoder.edm2_utils import (MPConv1D, mp_silu, mp_sum, normalize) + + +def nonlinearity(x): + # swish + return mp_silu(x) + + +class ResnetBlock1D(nn.Module): + + def __init__(self, *, in_dim, out_dim=None, conv_shortcut=False, kernel_size=3, use_norm=True): + super().__init__() + self.in_dim = in_dim + out_dim = in_dim if out_dim is None else out_dim + self.out_dim = out_dim + self.use_conv_shortcut = conv_shortcut + self.use_norm = use_norm + + self.conv1 = MPConv1D(in_dim, out_dim, kernel_size=kernel_size) + self.conv2 = MPConv1D(out_dim, out_dim, kernel_size=kernel_size) + if self.in_dim != self.out_dim: + if self.use_conv_shortcut: + self.conv_shortcut = MPConv1D(in_dim, out_dim, kernel_size=kernel_size) + else: + self.nin_shortcut = MPConv1D(in_dim, out_dim, kernel_size=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + + # pixel norm + if self.use_norm: + x = normalize(x, dim=1) + + h = x + h = nonlinearity(h) + h = self.conv1(h) + + h = nonlinearity(h) + h = self.conv2(h) + + if self.in_dim != self.out_dim: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return mp_sum(x, h, t=0.3) + + +class AttnBlock1D(nn.Module): + + def __init__(self, in_channels, num_heads=1): + super().__init__() + self.in_channels = in_channels + + self.num_heads = num_heads + self.qkv = MPConv1D(in_channels, in_channels * 3, kernel_size=1) + self.proj_out = MPConv1D(in_channels, in_channels, kernel_size=1) + + def forward(self, x): + h = x + y = self.qkv(h) + y = y.reshape(y.shape[0], self.num_heads, -1, 3, y.shape[-1]) + q, k, v = normalize(y, dim=2).unbind(3) + + q = rearrange(q, 'b h c l -> b h l c') + k = rearrange(k, 'b h c l -> b h l c') + v = rearrange(v, 'b h c l -> b h l c') + + h = F.scaled_dot_product_attention(q, k, v) + h = rearrange(h, 'b h l c -> b (h c) l') + + h = self.proj_out(h) + + return mp_sum(x, h, t=0.3) + + +class Upsample1D(nn.Module): + + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = MPConv1D(in_channels, in_channels, kernel_size=3) + + def forward(self, x): + x = F.interpolate(x, scale_factor=2.0, mode='nearest-exact') # support 3D tensor(B,C,T) + if self.with_conv: + x = self.conv(x) + return x + + +class Downsample1D(nn.Module): + + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv1 = MPConv1D(in_channels, in_channels, kernel_size=1) + self.conv2 = MPConv1D(in_channels, in_channels, kernel_size=1) + + def forward(self, x): + + if self.with_conv: + x = self.conv1(x) + + x = F.avg_pool1d(x, kernel_size=2, stride=2) + + if self.with_conv: + x = self.conv2(x) + + return x diff --git a/mmaudio/ext/bigvgan/LICENSE b/mmaudio/ext/bigvgan/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e9663595cc28938f88d6299acd3ba791542e4c0c --- /dev/null +++ b/mmaudio/ext/bigvgan/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 NVIDIA CORPORATION. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/__init__.py b/mmaudio/ext/bigvgan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..00f13e9bf9ccb0b4ec37e1c70869f9a9a538871f --- /dev/null +++ b/mmaudio/ext/bigvgan/__init__.py @@ -0,0 +1 @@ +from .bigvgan import BigVGAN diff --git a/mmaudio/ext/bigvgan/activations.py b/mmaudio/ext/bigvgan/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..61f2808a5466b3cf4d041059700993af5527dd29 --- /dev/null +++ b/mmaudio/ext/bigvgan/activations.py @@ -0,0 +1,120 @@ +# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license. +# LICENSE is in incl_licenses directory. + +import torch +from torch import nn, sin, pow +from torch.nn import Parameter + + +class Snake(nn.Module): + ''' + Implementation of a sine-based periodic activation function + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter + References: + - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snake(256) + >>> x = torch.randn(256) + >>> x = a1(x) + ''' + def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False): + ''' + Initialization. + INPUT: + - in_features: shape of the input + - alpha: trainable parameter + alpha is initialized to 1 by default, higher values = higher-frequency. + alpha will be trained along with the rest of your model. + ''' + super(Snake, self).__init__() + self.in_features = in_features + + # initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + else: # linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + ''' + Forward pass of the function. + Applies the function to the input elementwise. 
+ Snake ∶= x + 1/a * sin^2 (xa) + ''' + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T] + if self.alpha_logscale: + alpha = torch.exp(alpha) + x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x + + +class SnakeBeta(nn.Module): + ''' + A modified Snake function which uses separate parameters for the magnitude of the periodic components + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + References: + - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snakebeta(256) + >>> x = torch.randn(256) + >>> x = a1(x) + ''' + def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False): + ''' + Initialization. + INPUT: + - in_features: shape of the input + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + alpha is initialized to 1 by default, higher values = higher-frequency. + beta is initialized to 1 by default, higher values = higher-magnitude. + alpha will be trained along with the rest of your model. + ''' + super(SnakeBeta, self).__init__() + self.in_features = in_features + + # initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + self.beta = Parameter(torch.zeros(in_features) * alpha) + else: # linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + self.beta = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + self.beta.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + ''' + Forward pass of the function. + Applies the function to the input elementwise. + SnakeBeta ∶= x + 1/b * sin^2 (xa) + ''' + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T] + beta = self.beta.unsqueeze(0).unsqueeze(-1) + if self.alpha_logscale: + alpha = torch.exp(alpha) + beta = torch.exp(beta) + x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/alias_free_torch/__init__.py b/mmaudio/ext/bigvgan/alias_free_torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a2318b63198250856809c0cb46210a4147b829bc --- /dev/null +++ b/mmaudio/ext/bigvgan/alias_free_torch/__init__.py @@ -0,0 +1,6 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +from .filter import * +from .resample import * +from .act import * \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/alias_free_torch/act.py b/mmaudio/ext/bigvgan/alias_free_torch/act.py new file mode 100644 index 0000000000000000000000000000000000000000..028debd697dd60458aae75010057df038bd3518a --- /dev/null +++ b/mmaudio/ext/bigvgan/alias_free_torch/act.py @@ -0,0 +1,28 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. 
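The Snake/SnakeBeta docstrings above give the activation in closed form, x + (1/β)·sin²(αx). A minimal sanity-check sketch (not part of the patch; it assumes the `mmaudio` package from this diff is importable): with the default initialization (α = β = 1, linear scale), SnakeBeta reduces to x + sin²(x).

```python
# Minimal sketch (assumes this patch is installed as the `mmaudio` package).
# With defaults (alpha = beta = 1, alpha_logscale=False), SnakeBeta(x) == x + sin(x)**2.
import torch

from mmaudio.ext.bigvgan.activations import SnakeBeta

act = SnakeBeta(in_features=4)      # one alpha/beta parameter per channel
x = torch.randn(2, 4, 16)           # (B, C, T), as the docstring specifies
y = act(x)

expected = x + torch.sin(x) ** 2    # 1/beta * sin^2(alpha * x) with alpha = beta = 1
assert torch.allclose(y, expected, atol=1e-6)
```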
+ +import torch.nn as nn +from .resample import UpSample1d, DownSample1d + + +class Activation1d(nn.Module): + def __init__(self, + activation, + up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + # x: [B,C,T] + def forward(self, x): + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + + return x \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/alias_free_torch/filter.py b/mmaudio/ext/bigvgan/alias_free_torch/filter.py new file mode 100644 index 0000000000000000000000000000000000000000..7ad6ea87c1f10ddd94c544037791d7a4634d5ae1 --- /dev/null +++ b/mmaudio/ext/bigvgan/alias_free_torch/filter.py @@ -0,0 +1,95 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch +import torch.nn as nn +import torch.nn.functional as F +import math + +if 'sinc' in dir(torch): + sinc = torch.sinc +else: + # This code is adopted from adefossez's julius.core.sinc under the MIT License + # https://adefossez.github.io/julius/julius/core.html + # LICENSE is in incl_licenses directory. + def sinc(x: torch.Tensor): + """ + Implementation of sinc, i.e. sin(pi * x) / (pi * x) + __Warning__: Different to julius.sinc, the input is multiplied by `pi`! + """ + return torch.where(x == 0, + torch.tensor(1., device=x.device, dtype=x.dtype), + torch.sin(math.pi * x) / math.pi / x) + + +# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License +# https://adefossez.github.io/julius/julius/lowpass.html +# LICENSE is in incl_licenses directory. +def kaiser_sinc_filter1d(cutoff, half_width, kernel_size): # return filter [1,1,kernel_size] + even = (kernel_size % 2 == 0) + half_size = kernel_size // 2 + + #For kaiser window + delta_f = 4 * half_width + A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95 + if A > 50.: + beta = 0.1102 * (A - 8.7) + elif A >= 21.: + beta = 0.5842 * (A - 21)**0.4 + 0.07886 * (A - 21.) + else: + beta = 0. + window = torch.kaiser_window(kernel_size, beta=beta, periodic=False) + + # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio + if even: + time = (torch.arange(-half_size, half_size) + 0.5) + else: + time = torch.arange(kernel_size) - half_size + if cutoff == 0: + filter_ = torch.zeros_like(time) + else: + filter_ = 2 * cutoff * window * sinc(2 * cutoff * time) + # Normalize filter to have sum = 1, otherwise we will have a small leakage + # of the constant component in the input signal. + filter_ /= filter_.sum() + filter = filter_.view(1, 1, kernel_size) + + return filter + + +class LowPassFilter1d(nn.Module): + def __init__(self, + cutoff=0.5, + half_width=0.6, + stride: int = 1, + padding: bool = True, + padding_mode: str = 'replicate', + kernel_size: int = 12): + # kernel_size should be even number for stylegan3 setup, + # in this implementation, odd number is also possible. 
+ super().__init__() + if cutoff < -0.: + raise ValueError("Minimum cutoff must be larger than zero.") + if cutoff > 0.5: + raise ValueError("A cutoff above 0.5 does not make sense.") + self.kernel_size = kernel_size + self.even = (kernel_size % 2 == 0) + self.pad_left = kernel_size // 2 - int(self.even) + self.pad_right = kernel_size // 2 + self.stride = stride + self.padding = padding + self.padding_mode = padding_mode + filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size) + self.register_buffer("filter", filter) + + #input [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + if self.padding: + x = F.pad(x, (self.pad_left, self.pad_right), + mode=self.padding_mode) + out = F.conv1d(x, self.filter.expand(C, -1, -1), + stride=self.stride, groups=C) + + return out \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/alias_free_torch/resample.py b/mmaudio/ext/bigvgan/alias_free_torch/resample.py new file mode 100644 index 0000000000000000000000000000000000000000..750e6c3402cc5ac939c4b9d075246562e0e1d1a7 --- /dev/null +++ b/mmaudio/ext/bigvgan/alias_free_torch/resample.py @@ -0,0 +1,49 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch.nn as nn +from torch.nn import functional as F +from .filter import LowPassFilter1d +from .filter import kaiser_sinc_filter1d + + +class UpSample1d(nn.Module): + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size + self.stride = ratio + self.pad = self.kernel_size // ratio - 1 + self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2 + self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2 + filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, + half_width=0.6 / ratio, + kernel_size=self.kernel_size) + self.register_buffer("filter", filter) + + # x: [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + x = F.pad(x, (self.pad, self.pad), mode='replicate') + x = self.ratio * F.conv_transpose1d( + x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C) + x = x[..., self.pad_left:-self.pad_right] + + return x + + +class DownSample1d(nn.Module): + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size + self.lowpass = LowPassFilter1d(cutoff=0.5 / ratio, + half_width=0.6 / ratio, + stride=ratio, + kernel_size=self.kernel_size) + + def forward(self, x): + xx = self.lowpass(x) + + return xx \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/bigvgan.py b/mmaudio/ext/bigvgan/bigvgan.py new file mode 100644 index 0000000000000000000000000000000000000000..032ea1d03e96165571c9ae22d66e00911a605870 --- /dev/null +++ b/mmaudio/ext/bigvgan/bigvgan.py @@ -0,0 +1,32 @@ +from pathlib import Path + +import torch +import torch.nn as nn +from omegaconf import OmegaConf + +from mmaudio.ext.bigvgan.models import BigVGANVocoder + +_bigvgan_vocoder_path = Path(__file__).parent / 'bigvgan_vocoder.yml' + + +class BigVGAN(nn.Module): + + def __init__(self, ckpt_path, config_path=_bigvgan_vocoder_path): + super().__init__() + vocoder_cfg = OmegaConf.load(config_path) + self.vocoder = BigVGANVocoder(vocoder_cfg).eval() + vocoder_ckpt = torch.load(ckpt_path, map_location='cpu', weights_only=True)['generator'] + self.vocoder.load_state_dict(vocoder_ckpt) + + 
self.weight_norm_removed = False + self.remove_weight_norm() + + @torch.inference_mode() + def forward(self, x): + assert self.weight_norm_removed, 'call remove_weight_norm() before inference' + return self.vocoder(x) + + def remove_weight_norm(self): + self.vocoder.remove_weight_norm() + self.weight_norm_removed = True + return self diff --git a/mmaudio/ext/bigvgan/bigvgan_vocoder.yml b/mmaudio/ext/bigvgan/bigvgan_vocoder.yml new file mode 100644 index 0000000000000000000000000000000000000000..d4db31ec45336e757d94d5099ed16cb3c906c24a --- /dev/null +++ b/mmaudio/ext/bigvgan/bigvgan_vocoder.yml @@ -0,0 +1,63 @@ +resblock: '1' +num_gpus: 0 +batch_size: 64 +num_mels: 80 +learning_rate: 0.0001 +adam_b1: 0.8 +adam_b2: 0.99 +lr_decay: 0.999 +seed: 1234 +upsample_rates: +- 4 +- 4 +- 2 +- 2 +- 2 +- 2 +upsample_kernel_sizes: +- 8 +- 8 +- 4 +- 4 +- 4 +- 4 +upsample_initial_channel: 1536 +resblock_kernel_sizes: +- 3 +- 7 +- 11 +resblock_dilation_sizes: +- - 1 + - 3 + - 5 +- - 1 + - 3 + - 5 +- - 1 + - 3 + - 5 +activation: snakebeta +snake_logscale: true +resolutions: +- - 1024 + - 120 + - 600 +- - 2048 + - 240 + - 1200 +- - 512 + - 50 + - 240 +mpd_reshapes: +- 2 +- 3 +- 5 +- 7 +- 11 +use_spectral_norm: false +discriminator_channel_mult: 1 +num_workers: 4 +dist_config: + dist_backend: nccl + dist_url: tcp://localhost:54341 + world_size: 1 diff --git a/mmaudio/ext/bigvgan/env.py b/mmaudio/ext/bigvgan/env.py new file mode 100644 index 0000000000000000000000000000000000000000..b8be238d4db710c8c9a338d336baea0138f18d1f --- /dev/null +++ b/mmaudio/ext/bigvgan/env.py @@ -0,0 +1,18 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. + +import os +import shutil + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +def build_env(config, config_name, path): + t_path = os.path.join(path, config_name) + if config != t_path: + os.makedirs(path, exist_ok=True) + shutil.copyfile(config, os.path.join(path, config_name)) \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/incl_licenses/LICENSE_1 b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_1 new file mode 100644 index 0000000000000000000000000000000000000000..5afae394d6b37da0e12ba6b290d2512687f421ac --- /dev/null +++ b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_1 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Jungil Kong + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
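For orientation, the `BigVGAN` wrapper above loads `bigvgan_vocoder.yml`, restores the `generator` weights from the given checkpoint, and strips weight norm before inference. A minimal usage sketch, assuming a compatible generator checkpoint is available locally (the path below is an assumption) and a log-mel input with `num_mels: 80` as in the config:

```python
# Minimal usage sketch of the BigVGAN wrapper defined above.
# The checkpoint path is an assumption; any compatible generator checkpoint works.
import torch

from mmaudio.ext.bigvgan import BigVGAN

vocoder = BigVGAN('ext_weights/best_netG.pt')   # loads config + weights, removes weight norm
mel = torch.randn(1, 80, 128)                   # (B, num_mels=80, frames) per bigvgan_vocoder.yml
waveform = vocoder(mel)                         # (B, 1, T) waveform; forward runs in inference mode
print(waveform.shape)
```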
\ No newline at end of file diff --git a/mmaudio/ext/bigvgan/incl_licenses/LICENSE_2 b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_2 new file mode 100644 index 0000000000000000000000000000000000000000..322b758863c4219be68291ae3826218baa93cb4c --- /dev/null +++ b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_2 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Edward Dixon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/incl_licenses/LICENSE_3 b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_3 new file mode 100644 index 0000000000000000000000000000000000000000..56ee3c8c4cc2b4b32e0975d17258f9ba515fdbcc --- /dev/null +++ b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_3 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/incl_licenses/LICENSE_4 b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_4 new file mode 100644 index 0000000000000000000000000000000000000000..48fd1a1ba8d81a94b6c7d1c2ff1a1f307cc5371d --- /dev/null +++ b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_4 @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, Seungwon Park 박승원 +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. 
Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/incl_licenses/LICENSE_5 b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_5 new file mode 100644 index 0000000000000000000000000000000000000000..01ae5538e6b7c787bb4f5d6f2cd9903520d6e465 --- /dev/null +++ b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_5 @@ -0,0 +1,16 @@ +Copyright 2020 Alexandre Défossez + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/models.py b/mmaudio/ext/bigvgan/models.py new file mode 100644 index 0000000000000000000000000000000000000000..36938e659ebc0e4cb045f10e4893525907c2d1f7 --- /dev/null +++ b/mmaudio/ext/bigvgan/models.py @@ -0,0 +1,255 @@ +# Copyright (c) 2022 NVIDIA CORPORATION. +# Licensed under the MIT license. + +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
+ +import torch +import torch.nn as nn +from torch.nn import Conv1d, ConvTranspose1d +from torch.nn.utils.parametrizations import weight_norm +from torch.nn.utils.parametrize import remove_parametrizations + +from mmaudio.ext.bigvgan import activations +from mmaudio.ext.bigvgan.alias_free_torch import * +from mmaudio.ext.bigvgan.utils import get_padding, init_weights + +LRELU_SLOPE = 0.1 + + +class AMPBlock1(torch.nn.Module): + + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5), activation=None): + super(AMPBlock1, self).__init__() + self.h = h + + self.convs1 = nn.ModuleList([ + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]))) + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))) + ]) + self.convs2.apply(init_weights) + + self.num_layers = len(self.convs1) + len(self.convs2) # total number of conv layers + + if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." 
+ ) + + def forward(self, x): + acts1, acts2 = self.activations[::2], self.activations[1::2] + for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2): + xt = a1(x) + xt = c1(xt) + xt = a2(xt) + xt = c2(xt) + x = xt + x + + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_parametrizations(l, 'weight') + for l in self.convs2: + remove_parametrizations(l, 'weight') + + +class AMPBlock2(torch.nn.Module): + + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3), activation=None): + super(AMPBlock2, self).__init__() + self.h = h + + self.convs = nn.ModuleList([ + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))) + ]) + self.convs.apply(init_weights) + + self.num_layers = len(self.convs) # total number of conv layers + + if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + for c, a in zip(self.convs, self.activations): + xt = a(x) + xt = c(xt) + x = xt + x + + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_parametrizations(l, 'weight') + + +class BigVGANVocoder(torch.nn.Module): + # this is our main BigVGAN model. Applies anti-aliased periodic activation for resblocks. + def __init__(self, h): + super().__init__() + self.h = h + + self.num_kernels = len(h.resblock_kernel_sizes) + self.num_upsamples = len(h.upsample_rates) + + # pre conv + self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)) + + # define which AMPBlock to use. BigVGAN uses AMPBlock1 as default + resblock = AMPBlock1 if h.resblock == '1' else AMPBlock2 + + # transposed conv-based upsamplers. 
does not apply anti-aliasing + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): + self.ups.append( + nn.ModuleList([ + weight_norm( + ConvTranspose1d(h.upsample_initial_channel // (2**i), + h.upsample_initial_channel // (2**(i + 1)), + k, + u, + padding=(k - u) // 2)) + ])) + + # residual blocks using anti-aliased multi-periodicity composition modules (AMP) + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = h.upsample_initial_channel // (2**(i + 1)) + for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): + self.resblocks.append(resblock(h, ch, k, d, activation=h.activation)) + + # post conv + if h.activation == "snake": # periodic nonlinearity with snake function and anti-aliasing + activation_post = activations.Snake(ch, alpha_logscale=h.snake_logscale) + self.activation_post = Activation1d(activation=activation_post) + elif h.activation == "snakebeta": # periodic nonlinearity with snakebeta function and anti-aliasing + activation_post = activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale) + self.activation_post = Activation1d(activation=activation_post) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) + + # weight initialization + for i in range(len(self.ups)): + self.ups[i].apply(init_weights) + self.conv_post.apply(init_weights) + + def forward(self, x): + # pre conv + x = self.conv_pre(x) + + for i in range(self.num_upsamples): + # upsampling + for i_up in range(len(self.ups[i])): + x = self.ups[i][i_up](x) + # AMP blocks + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + + # post conv + x = self.activation_post(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + for l_i in l: + remove_parametrizations(l_i, 'weight') + for l in self.resblocks: + l.remove_weight_norm() + remove_parametrizations(self.conv_pre, 'weight') + remove_parametrizations(self.conv_post, 'weight') diff --git a/mmaudio/ext/bigvgan/utils.py b/mmaudio/ext/bigvgan/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aff7e653533d3390756c53a0215801b06cc924b5 --- /dev/null +++ b/mmaudio/ext/bigvgan/utils.py @@ -0,0 +1,31 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
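The AMP blocks and the post-activation above wrap Snake/SnakeBeta in `Activation1d`, which upsamples by 2, applies the nonlinearity, then low-pass filters and downsamples by 2 to suppress aliasing. A standalone sketch of that pattern, again assuming the `mmaudio` package from this patch is importable:

```python
# Sketch of the anti-aliased activation pattern used by the AMP blocks above:
# upsample x2 -> Snake -> low-pass filter + downsample x2, preserving the (B, C, T) shape.
import torch

from mmaudio.ext.bigvgan.activations import Snake
from mmaudio.ext.bigvgan.alias_free_torch.act import Activation1d

act = Activation1d(activation=Snake(64, alpha_logscale=True))
x = torch.randn(1, 64, 256)   # (B, C, T)
y = act(x)
assert y.shape == x.shape
```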
+ +import os + +import torch +from torch.nn.utils.parametrizations import weight_norm + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def apply_weight_norm(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + weight_norm(m) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +def load_checkpoint(filepath, device): + assert os.path.isfile(filepath) + print("Loading '{}'".format(filepath)) + checkpoint_dict = torch.load(filepath, map_location=device) + print("Complete.") + return checkpoint_dict diff --git a/mmaudio/ext/bigvgan_v2/LICENSE b/mmaudio/ext/bigvgan_v2/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4c78361c86d4f685117d60d6623e2197fcfed706 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 NVIDIA CORPORATION. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/mmaudio/ext/bigvgan_v2/__init__.py b/mmaudio/ext/bigvgan_v2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mmaudio/ext/bigvgan_v2/activations.py b/mmaudio/ext/bigvgan_v2/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..4f08ddab5b55d6dcaf3e968af98889e0770c44f5 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/activations.py @@ -0,0 +1,126 @@ +# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license. +# LICENSE is in incl_licenses directory. + +import torch +from torch import nn, sin, pow +from torch.nn import Parameter + + +class Snake(nn.Module): + """ + Implementation of a sine-based periodic activation function + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter + References: + - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snake(256) + >>> x = torch.randn(256) + >>> x = a1(x) + """ + + def __init__( + self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False + ): + """ + Initialization. + INPUT: + - in_features: shape of the input + - alpha: trainable parameter + alpha is initialized to 1 by default, higher values = higher-frequency. + alpha will be trained along with the rest of your model. 
+ """ + super(Snake, self).__init__() + self.in_features = in_features + + # Initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # Log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + else: # Linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + """ + Forward pass of the function. + Applies the function to the input elementwise. + Snake ∶= x + 1/a * sin^2 (xa) + """ + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T] + if self.alpha_logscale: + alpha = torch.exp(alpha) + x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x + + +class SnakeBeta(nn.Module): + """ + A modified Snake function which uses separate parameters for the magnitude of the periodic components + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + References: + - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snakebeta(256) + >>> x = torch.randn(256) + >>> x = a1(x) + """ + + def __init__( + self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False + ): + """ + Initialization. + INPUT: + - in_features: shape of the input + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + alpha is initialized to 1 by default, higher values = higher-frequency. + beta is initialized to 1 by default, higher values = higher-magnitude. + alpha will be trained along with the rest of your model. + """ + super(SnakeBeta, self).__init__() + self.in_features = in_features + + # Initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # Log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + self.beta = Parameter(torch.zeros(in_features) * alpha) + else: # Linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + self.beta = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + self.beta.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + """ + Forward pass of the function. + Applies the function to the input elementwise. 
+ SnakeBeta ∶= x + 1/b * sin^2 (xa) + """ + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T] + beta = self.beta.unsqueeze(0).unsqueeze(-1) + if self.alpha_logscale: + alpha = torch.exp(alpha) + beta = torch.exp(beta) + x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/__init__.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/activation1d.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/activation1d.py new file mode 100644 index 0000000000000000000000000000000000000000..fbc0fd8f28a37ad949fbdb9832f51b5b933c6ff2 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/activation1d.py @@ -0,0 +1,77 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +import torch +import torch.nn as nn +from alias_free_activation.torch.resample import UpSample1d, DownSample1d + +# load fused CUDA kernel: this enables importing anti_alias_activation_cuda +from alias_free_activation.cuda import load + +anti_alias_activation_cuda = load.load() + + +class FusedAntiAliasActivation(torch.autograd.Function): + """ + Assumes filter size 12, replication padding on upsampling/downsampling, and logscale alpha/beta parameters as inputs. + The hyperparameters are hard-coded in the kernel to maximize speed. + NOTE: The fused kenrel is incorrect for Activation1d with different hyperparameters. + """ + + @staticmethod + def forward(ctx, inputs, up_ftr, down_ftr, alpha, beta): + activation_results = anti_alias_activation_cuda.forward( + inputs, up_ftr, down_ftr, alpha, beta + ) + + return activation_results + + @staticmethod + def backward(ctx, output_grads): + raise NotImplementedError + return output_grads, None, None + + +class Activation1d(nn.Module): + def __init__( + self, + activation, + up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12, + fused: bool = True, + ): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + self.fused = fused # Whether to use fused CUDA kernel or not + + def forward(self, x): + if not self.fused: + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + return x + else: + if self.act.__class__.__name__ == "Snake": + beta = self.act.alpha.data # Snake uses same params for alpha and beta + else: + beta = ( + self.act.beta.data + ) # Snakebeta uses different params for alpha and beta + alpha = self.act.alpha.data + if ( + not self.act.alpha_logscale + ): # Exp baked into cuda kernel, cancel it out with a log + alpha = torch.log(alpha) + beta = torch.log(beta) + + x = FusedAntiAliasActivation.apply( + x, self.upsample.filter, self.downsample.lowpass.filter, alpha, beta + ) + return x diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation.cpp b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c5651f77143bd678169eb11564a7cf7a7969a59e --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation.cpp @@ -0,0 +1,23 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA 
CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + #include + +extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta); + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &fwd_cuda, "Anti-Alias Activation forward (CUDA)"); +} \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation_cuda.cu b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..8c442334869fe72d639ec203fa4fac07f96a0ee1 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation_cuda.cu @@ -0,0 +1,246 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "type_shim.h" +#include +#include +#include +#include +#include + +namespace +{ + // Hard-coded hyperparameters + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + constexpr int ELEMENTS_PER_LDG_STG = 1; //(WARP_ITERATIONS < 4) ? 
1 : 4; + constexpr int BUFFER_SIZE = 32; + constexpr int FILTER_SIZE = 12; + constexpr int HALF_FILTER_SIZE = 6; + constexpr int UPSAMPLE_REPLICATION_PAD = 5; // 5 on each side, matching torch impl + constexpr int DOWNSAMPLE_REPLICATION_PAD_LEFT = 5; // matching torch impl + constexpr int DOWNSAMPLE_REPLICATION_PAD_RIGHT = 6; // matching torch impl + + template + __global__ void anti_alias_activation_forward( + output_t *dst, + const input_t *src, + const input_t *up_ftr, + const input_t *down_ftr, + const input_t *alpha, + const input_t *beta, + int batch_size, + int channels, + int seq_len) + { + // Up and downsample filters + input_t up_filter[FILTER_SIZE]; + input_t down_filter[FILTER_SIZE]; + + // Load data from global memory including extra indices reserved for replication paddings + input_t elements[2 * FILTER_SIZE + 2 * BUFFER_SIZE + 2 * UPSAMPLE_REPLICATION_PAD] = {0}; + input_t intermediates[2 * FILTER_SIZE + 2 * BUFFER_SIZE + DOWNSAMPLE_REPLICATION_PAD_LEFT + DOWNSAMPLE_REPLICATION_PAD_RIGHT] = {0}; + + // Output stores downsampled output before writing to dst + output_t output[BUFFER_SIZE]; + + // blockDim/threadIdx = (128, 1, 1) + // gridDim/blockIdx = (seq_blocks, channels, batches) + int block_offset = (blockIdx.x * 128 * BUFFER_SIZE + seq_len * (blockIdx.y + gridDim.y * blockIdx.z)); + int local_offset = threadIdx.x * BUFFER_SIZE; + int seq_offset = blockIdx.x * 128 * BUFFER_SIZE + local_offset; + + // intermediate have double the seq_len + int intermediate_local_offset = threadIdx.x * BUFFER_SIZE * 2; + int intermediate_seq_offset = blockIdx.x * 128 * BUFFER_SIZE * 2 + intermediate_local_offset; + + // Get values needed for replication padding before moving pointer + const input_t *right_most_pntr = src + (seq_len * (blockIdx.y + gridDim.y * blockIdx.z)); + input_t seq_left_most_value = right_most_pntr[0]; + input_t seq_right_most_value = right_most_pntr[seq_len - 1]; + + // Move src and dst pointers + src += block_offset + local_offset; + dst += block_offset + local_offset; + + // Alpha and beta values for snake activatons. Applies exp by default + alpha = alpha + blockIdx.y; + input_t alpha_val = expf(alpha[0]); + beta = beta + blockIdx.y; + input_t beta_val = expf(beta[0]); + + #pragma unroll + for (int it = 0; it < FILTER_SIZE; it += 1) + { + up_filter[it] = up_ftr[it]; + down_filter[it] = down_ftr[it]; + } + + // Apply replication padding for upsampling, matching torch impl + #pragma unroll + for (int it = -HALF_FILTER_SIZE; it < BUFFER_SIZE + HALF_FILTER_SIZE; it += 1) + { + int element_index = seq_offset + it; // index for element + if ((element_index < 0) && (element_index >= -UPSAMPLE_REPLICATION_PAD)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_left_most_value; + } + if ((element_index >= seq_len) && (element_index < seq_len + UPSAMPLE_REPLICATION_PAD)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_right_most_value; + } + if ((element_index >= 0) && (element_index < seq_len)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * src[it]; + } + } + + // Apply upsampling strided convolution and write to intermediates. 
It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT for replication padding of the downsampilng conv later + #pragma unroll + for (int it = 0; it < (2 * BUFFER_SIZE + 2 * FILTER_SIZE); it += 1) + { + input_t acc = 0.0; + int element_index = intermediate_seq_offset + it; // index for intermediate + #pragma unroll + for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1) + { + if ((element_index + f_idx) >= 0) + { + acc += up_filter[f_idx] * elements[it + f_idx]; + } + } + intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] = acc; + } + + // Apply activation function. It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT and DOWNSAMPLE_REPLICATION_PAD_RIGHT for replication padding of the downsampilng conv later + double no_div_by_zero = 0.000000001; + #pragma unroll + for (int it = 0; it < 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it += 1) + { + intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] += (1.0 / (beta_val + no_div_by_zero)) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val); + } + + // Apply replication padding before downsampling conv from intermediates + #pragma unroll + for (int it = 0; it < DOWNSAMPLE_REPLICATION_PAD_LEFT; it += 1) + { + intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT]; + } + #pragma unroll + for (int it = DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it < DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE + DOWNSAMPLE_REPLICATION_PAD_RIGHT; it += 1) + { + intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE - 1]; + } + + // Apply downsample strided convolution (assuming stride=2) from intermediates + #pragma unroll + for (int it = 0; it < BUFFER_SIZE; it += 1) + { + input_t acc = 0.0; + #pragma unroll + for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1) + { + // Add constant DOWNSAMPLE_REPLICATION_PAD_RIGHT to match torch implementation + acc += down_filter[f_idx] * intermediates[it * 2 + f_idx + DOWNSAMPLE_REPLICATION_PAD_RIGHT]; + } + output[it] = acc; + } + + // Write output to dst + #pragma unroll + for (int it = 0; it < BUFFER_SIZE; it += ELEMENTS_PER_LDG_STG) + { + int element_index = seq_offset + it; + if (element_index < seq_len) + { + dst[it] = output[it]; + } + } + + } + + template + void dispatch_anti_alias_activation_forward( + output_t *dst, + const input_t *src, + const input_t *up_ftr, + const input_t *down_ftr, + const input_t *alpha, + const input_t *beta, + int batch_size, + int channels, + int seq_len) + { + if (seq_len == 0) + { + return; + } + else + { + // Use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + constexpr int seq_len_per_block = 4096; + int blocks_per_seq_len = (seq_len + seq_len_per_block - 1) / seq_len_per_block; + dim3 blocks(blocks_per_seq_len, channels, batch_size); + dim3 threads(threads_per_block, 1, 1); + + anti_alias_activation_forward + <<>>(dst, src, up_ftr, down_ftr, alpha, beta, batch_size, channels, seq_len); + } + } +} + +extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta) +{ + // Input is a 3d tensor with dimensions [batches, channels, seq_len] + const int batches = input.size(0); + const int channels = input.size(1); + const int seq_len = input.size(2); + + // Output + auto act_options = input.options().requires_grad(false); + + torch::Tensor 
anti_alias_activation_results = + torch::empty({batches, channels, seq_len}, act_options); + + void *input_ptr = static_cast(input.data_ptr()); + void *up_filter_ptr = static_cast(up_filter.data_ptr()); + void *down_filter_ptr = static_cast(down_filter.data_ptr()); + void *alpha_ptr = static_cast(alpha.data_ptr()); + void *beta_ptr = static_cast(beta.data_ptr()); + void *anti_alias_activation_results_ptr = static_cast(anti_alias_activation_results.data_ptr()); + + DISPATCH_FLOAT_HALF_AND_BFLOAT( + input.scalar_type(), + "dispatch anti alias activation_forward", + dispatch_anti_alias_activation_forward( + reinterpret_cast(anti_alias_activation_results_ptr), + reinterpret_cast(input_ptr), + reinterpret_cast(up_filter_ptr), + reinterpret_cast(down_filter_ptr), + reinterpret_cast(alpha_ptr), + reinterpret_cast(beta_ptr), + batches, + channels, + seq_len);); + return anti_alias_activation_results; +} \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/compat.h b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/compat.h new file mode 100644 index 0000000000000000000000000000000000000000..25818b2edf4cb0dc9130e62c7c4de8d16a01baa5 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/compat.h @@ -0,0 +1,29 @@ +/* coding=utf-8 + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*This code is copied fron NVIDIA apex: + * https://github.com/NVIDIA/apex + * with minor changes. */ + +#ifndef TORCH_CHECK +#define TORCH_CHECK AT_CHECK +#endif + +#ifdef VERSION_GE_1_3 +#define DATA_PTR data_ptr +#else +#define DATA_PTR data +#endif diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/load.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/load.py new file mode 100644 index 0000000000000000000000000000000000000000..ca5d01de398249e75e9e2298958764acb436edba --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/load.py @@ -0,0 +1,86 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +import os +import pathlib +import subprocess + +from torch.utils import cpp_extension + +""" +Setting this param to a list has a problem of generating different compilation commands (with diferent order of architectures) and leading to recompilation of fused kernels. +Set it to empty stringo avoid recompilation and assign arch flags explicity in extra_cuda_cflags below +""" +os.environ["TORCH_CUDA_ARCH_LIST"] = "" + + +def load(): + # Check if cuda 11 is installed for compute capability 8.0 + cc_flag = [] + _, bare_metal_major, _ = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME) + if int(bare_metal_major) >= 11: + cc_flag.append("-gencode") + cc_flag.append("arch=compute_80,code=sm_80") + + # Build path + srcpath = pathlib.Path(__file__).parent.absolute() + buildpath = srcpath / "build" + _create_build_dir(buildpath) + + # Helper function to build the kernels. 
+ def _cpp_extention_load_helper(name, sources, extra_cuda_flags): + return cpp_extension.load( + name=name, + sources=sources, + build_directory=buildpath, + extra_cflags=[ + "-O3", + ], + extra_cuda_cflags=[ + "-O3", + "-gencode", + "arch=compute_70,code=sm_70", + "--use_fast_math", + ] + + extra_cuda_flags + + cc_flag, + verbose=True, + ) + + extra_cuda_flags = [ + "-U__CUDA_NO_HALF_OPERATORS__", + "-U__CUDA_NO_HALF_CONVERSIONS__", + "--expt-relaxed-constexpr", + "--expt-extended-lambda", + ] + + sources = [ + srcpath / "anti_alias_activation.cpp", + srcpath / "anti_alias_activation_cuda.cu", + ] + anti_alias_activation_cuda = _cpp_extention_load_helper( + "anti_alias_activation_cuda", sources, extra_cuda_flags + ) + + return anti_alias_activation_cuda + + +def _get_cuda_bare_metal_version(cuda_dir): + raw_output = subprocess.check_output( + [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True + ) + output = raw_output.split() + release_idx = output.index("release") + 1 + release = output[release_idx].split(".") + bare_metal_major = release[0] + bare_metal_minor = release[1][0] + + return raw_output, bare_metal_major, bare_metal_minor + + +def _create_build_dir(buildpath): + try: + os.mkdir(buildpath) + except OSError: + if not os.path.isdir(buildpath): + print(f"Creation of the build directory {buildpath} failed") diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/type_shim.h b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/type_shim.h new file mode 100644 index 0000000000000000000000000000000000000000..5db7e8a397e982d4d30d16ab6060814b98b7ab83 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/type_shim.h @@ -0,0 +1,92 @@ +/* coding=utf-8 + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "compat.h" + +#define DISPATCH_FLOAT_HALF_AND_BFLOAT(TYPE, NAME, ...) \ + switch (TYPE) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(TYPEIN, TYPEOUT, NAME, ...) 
\ + switch (TYPEIN) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_in = float; \ + switch (TYPEOUT) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_out = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \ + } \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_in = at::Half; \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_in = at::BFloat16; \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEIN), "'"); \ + } diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/__init__.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8f756ed83f87f9839e457b240f60469bc187707d --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/__init__.py @@ -0,0 +1,6 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +from .filter import * +from .resample import * +from .act import * diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/act.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/act.py new file mode 100644 index 0000000000000000000000000000000000000000..92445a8652d1998f80e2952224b18d0e1a89dc9f --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/act.py @@ -0,0 +1,32 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch.nn as nn + +from mmaudio.ext.bigvgan_v2.alias_free_activation.torch.resample import (DownSample1d, UpSample1d) + + +class Activation1d(nn.Module): + + def __init__( + self, + activation, + up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12, + ): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + # x: [B,C,T] + def forward(self, x): + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + + return x diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/filter.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/filter.py new file mode 100644 index 0000000000000000000000000000000000000000..0fa35b0d5ddf8d6cb04cd9d47364ca033cebcd32 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/filter.py @@ -0,0 +1,101 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch +import torch.nn as nn +import torch.nn.functional as F +import math + +if "sinc" in dir(torch): + sinc = torch.sinc +else: + # This code is adopted from adefossez's julius.core.sinc under the MIT License + # https://adefossez.github.io/julius/julius/core.html + # LICENSE is in incl_licenses directory. + def sinc(x: torch.Tensor): + """ + Implementation of sinc, i.e. 
sin(pi * x) / (pi * x) + __Warning__: Different to julius.sinc, the input is multiplied by `pi`! + """ + return torch.where( + x == 0, + torch.tensor(1.0, device=x.device, dtype=x.dtype), + torch.sin(math.pi * x) / math.pi / x, + ) + + +# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License +# https://adefossez.github.io/julius/julius/lowpass.html +# LICENSE is in incl_licenses directory. +def kaiser_sinc_filter1d( + cutoff, half_width, kernel_size +): # return filter [1,1,kernel_size] + even = kernel_size % 2 == 0 + half_size = kernel_size // 2 + + # For kaiser window + delta_f = 4 * half_width + A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95 + if A > 50.0: + beta = 0.1102 * (A - 8.7) + elif A >= 21.0: + beta = 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21.0) + else: + beta = 0.0 + window = torch.kaiser_window(kernel_size, beta=beta, periodic=False) + + # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio + if even: + time = torch.arange(-half_size, half_size) + 0.5 + else: + time = torch.arange(kernel_size) - half_size + if cutoff == 0: + filter_ = torch.zeros_like(time) + else: + filter_ = 2 * cutoff * window * sinc(2 * cutoff * time) + """ + Normalize filter to have sum = 1, otherwise we will have a small leakage of the constant component in the input signal. + """ + filter_ /= filter_.sum() + filter = filter_.view(1, 1, kernel_size) + + return filter + + +class LowPassFilter1d(nn.Module): + def __init__( + self, + cutoff=0.5, + half_width=0.6, + stride: int = 1, + padding: bool = True, + padding_mode: str = "replicate", + kernel_size: int = 12, + ): + """ + kernel_size should be even number for stylegan3 setup, in this implementation, odd number is also possible. + """ + super().__init__() + if cutoff < -0.0: + raise ValueError("Minimum cutoff must be larger than zero.") + if cutoff > 0.5: + raise ValueError("A cutoff above 0.5 does not make sense.") + self.kernel_size = kernel_size + self.even = kernel_size % 2 == 0 + self.pad_left = kernel_size // 2 - int(self.even) + self.pad_right = kernel_size // 2 + self.stride = stride + self.padding = padding + self.padding_mode = padding_mode + filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size) + self.register_buffer("filter", filter) + + # Input [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + if self.padding: + x = F.pad(x, (self.pad_left, self.pad_right), mode=self.padding_mode) + out = F.conv1d(x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C) + + return out diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/resample.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/resample.py new file mode 100644 index 0000000000000000000000000000000000000000..33faa1518c3bcf34b63cc44374905df83542f614 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/resample.py @@ -0,0 +1,54 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. 
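# A minimal usage sketch of the Kaiser-windowed sinc low-pass filter defined in filter.py above,
# added for illustration. The cutoff/half_width values are the ones DownSample1d picks for
# ratio=2; the input shape is an arbitrary example.
import torch

from mmaudio.ext.bigvgan_v2.alias_free_activation.torch.filter import (LowPassFilter1d,
                                                                        kaiser_sinc_filter1d)

taps = kaiser_sinc_filter1d(cutoff=0.25, half_width=0.3, kernel_size=12)  # [1, 1, 12], sums to 1
lpf = LowPassFilter1d(cutoff=0.25, half_width=0.3, stride=2, kernel_size=12)

x = torch.randn(1, 4, 1024)  # [B, C, T]
y = lpf(x)                   # [1, 4, 512]: anti-aliased, then decimated by the stride of 2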
+ +import torch.nn as nn +from torch.nn import functional as F + +from mmaudio.ext.bigvgan_v2.alias_free_activation.torch.filter import (LowPassFilter1d, + kaiser_sinc_filter1d) + + +class UpSample1d(nn.Module): + + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = (int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size) + self.stride = ratio + self.pad = self.kernel_size // ratio - 1 + self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2 + self.pad_right = (self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2) + filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, + half_width=0.6 / ratio, + kernel_size=self.kernel_size) + self.register_buffer("filter", filter) + + # x: [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + x = F.pad(x, (self.pad, self.pad), mode="replicate") + x = self.ratio * F.conv_transpose1d( + x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C) + x = x[..., self.pad_left:-self.pad_right] + + return x + + +class DownSample1d(nn.Module): + + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = (int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size) + self.lowpass = LowPassFilter1d( + cutoff=0.5 / ratio, + half_width=0.6 / ratio, + stride=ratio, + kernel_size=self.kernel_size, + ) + + def forward(self, x): + xx = self.lowpass(x) + + return xx diff --git a/mmaudio/ext/bigvgan_v2/bigvgan.py b/mmaudio/ext/bigvgan_v2/bigvgan.py new file mode 100644 index 0000000000000000000000000000000000000000..ff2b6c4c87e20d147130d0b608d2467557347caf --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/bigvgan.py @@ -0,0 +1,439 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. + +import json +import os +from pathlib import Path +from typing import Dict, Optional, Union + +import torch +import torch.nn as nn +from huggingface_hub import PyTorchModelHubMixin, hf_hub_download +from torch.nn import Conv1d, ConvTranspose1d +from torch.nn.utils.parametrizations import weight_norm +from torch.nn.utils.parametrize import remove_parametrizations + +from mmaudio.ext.bigvgan_v2 import activations +from mmaudio.ext.bigvgan_v2.alias_free_activation.torch.act import \ + Activation1d as TorchActivation1d +from mmaudio.ext.bigvgan_v2.env import AttrDict +from mmaudio.ext.bigvgan_v2.utils import get_padding, init_weights + + +def load_hparams_from_json(path) -> AttrDict: + with open(path) as f: + data = f.read() + return AttrDict(json.loads(data)) + + +class AMPBlock1(torch.nn.Module): + """ + AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer. + AMPBlock1 has additional self.convs2 that contains additional Conv1d layers with a fixed dilation=1 followed by each layer in self.convs1 + + Args: + h (AttrDict): Hyperparameters. + channels (int): Number of convolution channels. + kernel_size (int): Size of the convolution kernel. Default is 3. + dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5). + activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None. 
+ """ + + def __init__( + self, + h: AttrDict, + channels: int, + kernel_size: int = 3, + dilation: tuple = (1, 3, 5), + activation: str = None, + ): + super().__init__() + + self.h = h + + self.convs1 = nn.ModuleList([ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=d, + padding=get_padding(kernel_size, d), + )) for d in dilation + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=1, + padding=get_padding(kernel_size, 1), + )) for _ in range(len(dilation)) + ]) + self.convs2.apply(init_weights) + + self.num_layers = len(self.convs1) + len(self.convs2) # Total number of conv layers + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from alias_free_activation.cuda.activation1d import \ + Activation1d as CudaActivation1d + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + # Activation functions + if activation == "snake": + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == "snakebeta": + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + acts1, acts2 = self.activations[::2], self.activations[1::2] + for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2): + xt = a1(x) + xt = c1(xt) + xt = a2(xt) + xt = c2(xt) + x = xt + x + + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_parametrizations(l, 'weight') + for l in self.convs2: + remove_parametrizations(l, 'weight') + + +class AMPBlock2(torch.nn.Module): + """ + AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer. + Unlike AMPBlock1, AMPBlock2 does not contain extra Conv1d layers with fixed dilation=1 + + Args: + h (AttrDict): Hyperparameters. + channels (int): Number of convolution channels. + kernel_size (int): Size of the convolution kernel. Default is 3. + dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5). + activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None. 
+ """ + + def __init__( + self, + h: AttrDict, + channels: int, + kernel_size: int = 3, + dilation: tuple = (1, 3, 5), + activation: str = None, + ): + super().__init__() + + self.h = h + + self.convs = nn.ModuleList([ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=d, + padding=get_padding(kernel_size, d), + )) for d in dilation + ]) + self.convs.apply(init_weights) + + self.num_layers = len(self.convs) # Total number of conv layers + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from alias_free_activation.cuda.activation1d import \ + Activation1d as CudaActivation1d + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + # Activation functions + if activation == "snake": + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == "snakebeta": + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + for c, a in zip(self.convs, self.activations): + xt = a(x) + xt = c(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class BigVGAN( + torch.nn.Module, + PyTorchModelHubMixin, + library_name="bigvgan", + repo_url="https://github.com/NVIDIA/BigVGAN", + docs_url="https://github.com/NVIDIA/BigVGAN/blob/main/README.md", + pipeline_tag="audio-to-audio", + license="mit", + tags=["neural-vocoder", "audio-generation", "arxiv:2206.04658"], +): + """ + BigVGAN is a neural vocoder model that applies anti-aliased periodic activation for residual blocks (resblocks). + New in BigVGAN-v2: it can optionally use optimized CUDA kernels for AMP (anti-aliased multi-periodicity) blocks. + + Args: + h (AttrDict): Hyperparameters. + use_cuda_kernel (bool): If set to True, loads optimized CUDA kernels for AMP. This should be used for inference only, as training is not supported with CUDA kernels. + + Note: + - The `use_cuda_kernel` parameter should be used for inference only, as training with CUDA kernels is not supported. + - Ensure that the activation function is correctly specified in the hyperparameters (h.activation). + """ + + def __init__(self, h: AttrDict, use_cuda_kernel: bool = False): + super().__init__() + self.h = h + self.h["use_cuda_kernel"] = use_cuda_kernel + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from alias_free_activation.cuda.activation1d import \ + Activation1d as CudaActivation1d + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + self.num_kernels = len(h.resblock_kernel_sizes) + self.num_upsamples = len(h.upsample_rates) + + # Pre-conv + self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)) + + # Define which AMPBlock to use. BigVGAN uses AMPBlock1 as default + if h.resblock == "1": + resblock_class = AMPBlock1 + elif h.resblock == "2": + resblock_class = AMPBlock2 + else: + raise ValueError( + f"Incorrect resblock class specified in hyperparameters. Got {h.resblock}") + + # Transposed conv-based upsamplers. 
does not apply anti-aliasing + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): + self.ups.append( + nn.ModuleList([ + weight_norm( + ConvTranspose1d( + h.upsample_initial_channel // (2**i), + h.upsample_initial_channel // (2**(i + 1)), + k, + u, + padding=(k - u) // 2, + )) + ])) + + # Residual blocks using anti-aliased multi-periodicity composition modules (AMP) + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = h.upsample_initial_channel // (2**(i + 1)) + for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): + self.resblocks.append(resblock_class(h, ch, k, d, activation=h.activation)) + + # Post-conv + activation_post = (activations.Snake(ch, alpha_logscale=h.snake_logscale) + if h.activation == "snake" else + (activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale) + if h.activation == "snakebeta" else None)) + if activation_post is None: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + self.activation_post = Activation1d(activation=activation_post) + + # Whether to use bias for the final conv_post. Default to True for backward compatibility + self.use_bias_at_final = h.get("use_bias_at_final", True) + self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final)) + + # Weight initialization + for i in range(len(self.ups)): + self.ups[i].apply(init_weights) + self.conv_post.apply(init_weights) + + # Final tanh activation. Defaults to True for backward compatibility + self.use_tanh_at_final = h.get("use_tanh_at_final", True) + + def forward(self, x): + # Pre-conv + x = self.conv_pre(x) + + for i in range(self.num_upsamples): + # Upsampling + for i_up in range(len(self.ups[i])): + x = self.ups[i][i_up](x) + # AMP blocks + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + + # Post-conv + x = self.activation_post(x) + x = self.conv_post(x) + # Final tanh activation + if self.use_tanh_at_final: + x = torch.tanh(x) + else: + x = torch.clamp(x, min=-1.0, max=1.0) # Bound the output to [-1, 1] + + return x + + def remove_weight_norm(self): + try: + print("Removing weight norm...") + for l in self.ups: + for l_i in l: + remove_parametrizations(l_i, 'weight') + for l in self.resblocks: + l.remove_weight_norm() + remove_parametrizations(self.conv_pre, 'weight') + remove_parametrizations(self.conv_post, 'weight') + except ValueError: + print("[INFO] Model already removed weight norm. 
Skipping!") + pass + + # Additional methods for huggingface_hub support + def _save_pretrained(self, save_directory: Path) -> None: + """Save weights and config.json from a Pytorch model to a local directory.""" + + model_path = save_directory / "bigvgan_generator.pt" + torch.save({"generator": self.state_dict()}, model_path) + + config_path = save_directory / "config.json" + with open(config_path, "w") as config_file: + json.dump(self.h, config_file, indent=4) + + @classmethod + def _from_pretrained( + cls, + *, + model_id: str, + revision: str, + cache_dir: str, + force_download: bool, + proxies: Optional[Dict], + resume_download: bool, + local_files_only: bool, + token: Union[str, bool, None], + map_location: str = "cpu", # Additional argument + strict: bool = False, # Additional argument + use_cuda_kernel: bool = False, + **model_kwargs, + ): + """Load Pytorch pretrained weights and return the loaded model.""" + + # Download and load hyperparameters (h) used by BigVGAN + if os.path.isdir(model_id): + print("Loading config.json from local directory") + config_file = os.path.join(model_id, "config.json") + else: + config_file = hf_hub_download( + repo_id=model_id, + filename="config.json", + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + h = load_hparams_from_json(config_file) + + # instantiate BigVGAN using h + if use_cuda_kernel: + print( + f"[WARNING] You have specified use_cuda_kernel=True during BigVGAN.from_pretrained(). Only inference is supported (training is not implemented)!" + ) + print( + f"[WARNING] You need nvcc and ninja installed in your system that matches your PyTorch build is using to build the kernel. If not, the model will fail to initialize or generate incorrect waveform!" + ) + print( + f"[WARNING] For detail, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis" + ) + model = cls(h, use_cuda_kernel=use_cuda_kernel) + + # Download and load pretrained generator weight + if os.path.isdir(model_id): + print("Loading weights from local directory") + model_file = os.path.join(model_id, "bigvgan_generator.pt") + else: + print(f"Loading weights from {model_id}") + model_file = hf_hub_download( + repo_id=model_id, + filename="bigvgan_generator.pt", + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + + checkpoint_dict = torch.load(model_file, map_location=map_location, weights_only=True) + + try: + model.load_state_dict(checkpoint_dict["generator"]) + except RuntimeError: + print( + f"[INFO] the pretrained checkpoint does not contain weight norm. Loading the checkpoint after removing weight norm!" + ) + model.remove_weight_norm() + model.load_state_dict(checkpoint_dict["generator"]) + + return model diff --git a/mmaudio/ext/bigvgan_v2/env.py b/mmaudio/ext/bigvgan_v2/env.py new file mode 100644 index 0000000000000000000000000000000000000000..b8be238d4db710c8c9a338d336baea0138f18d1f --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/env.py @@ -0,0 +1,18 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
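# A minimal inference sketch for the BigVGAN class defined above, added for illustration. The
# Hugging Face repo id is an assumed example checkpoint; any BigVGAN-v2 checkpoint that ships a
# config.json and bigvgan_generator.pt is loaded the same way via _from_pretrained.
import torch

from mmaudio.ext.bigvgan_v2.bigvgan import BigVGAN

model = BigVGAN.from_pretrained("nvidia/bigvgan_v2_44khz_128band_512x",  # assumed checkpoint id
                                use_cuda_kernel=False)
model.remove_weight_norm()  # inference only: folds weight norm back into the conv weights
model.eval()

mel = torch.randn(1, model.h.num_mels, 100)  # [B, num_mels, frames]
with torch.no_grad():
    wav = model(mel)  # [B, 1, frames * prod(upsample_rates)], bounded to [-1, 1]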
+ +import os +import shutil + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +def build_env(config, config_name, path): + t_path = os.path.join(path, config_name) + if config != t_path: + os.makedirs(path, exist_ok=True) + shutil.copyfile(config, os.path.join(path, config_name)) \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_1 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_1 new file mode 100644 index 0000000000000000000000000000000000000000..5afae394d6b37da0e12ba6b290d2512687f421ac --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_1 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Jungil Kong + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_2 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_2 new file mode 100644 index 0000000000000000000000000000000000000000..322b758863c4219be68291ae3826218baa93cb4c --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_2 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Edward Dixon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_3 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_3 new file mode 100644 index 0000000000000000000000000000000000000000..56ee3c8c4cc2b4b32e0975d17258f9ba515fdbcc --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_3 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_4 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_4 new file mode 100644 index 0000000000000000000000000000000000000000..48fd1a1ba8d81a94b6c7d1c2ff1a1f307cc5371d --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_4 @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, Seungwon Park 박승원 +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_5 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_5 new file mode 100644 index 0000000000000000000000000000000000000000..01ae5538e6b7c787bb4f5d6f2cd9903520d6e465 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_5 @@ -0,0 +1,16 @@ +Copyright 2020 Alexandre Défossez + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_6 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_6 new file mode 100644 index 0000000000000000000000000000000000000000..2569ec0b6c85f94f3cd071ba16e9028ccf156be2 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_6 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023-present, Descript + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_7 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_7 new file mode 100644 index 0000000000000000000000000000000000000000..c37bdaf99c6921f5849425d546069e972f52d7fa --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_7 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Charactr Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_8 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_8 new file mode 100644 index 0000000000000000000000000000000000000000..ab3d7ffe795779f54e339078e4e752ad9019aae8 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_8 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Amphion + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/utils.py b/mmaudio/ext/bigvgan_v2/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3b1d41670fa1ee257b2ed22c61086ba7a32c7cb0 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/utils.py @@ -0,0 +1,31 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
+ +import os + +import torch +from torch.nn.utils import weight_norm + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def apply_weight_norm(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + weight_norm(m) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +def load_checkpoint(filepath, device): + assert os.path.isfile(filepath) + print(f"Loading '{filepath}'") + checkpoint_dict = torch.load(filepath, map_location=device) + print("Complete.") + return checkpoint_dict diff --git a/mmaudio/ext/mel_converter.py b/mmaudio/ext/mel_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..6fc589c9468e077fc580965db250fd502e229672 --- /dev/null +++ b/mmaudio/ext/mel_converter.py @@ -0,0 +1,82 @@ +# Reference: # https://github.com/bytedance/Make-An-Audio-2 + +import torch +import torch.nn as nn +from librosa.filters import mel as librosa_mel_fn + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5, norm_fn=torch.log10): + return norm_fn(torch.clamp(x, min=clip_val) * C) + + +def spectral_normalize_torch(magnitudes, norm_fn): + output = dynamic_range_compression_torch(magnitudes, norm_fn=norm_fn) + return output + + +class MelConverter(nn.Module): + + def __init__( + self, + *, + sampling_rate: float = 16_000, + n_fft: int = 1024, + num_mels: int = 80, + hop_size: int = 256, + win_size: int = 1024, + fmin: float = 0, + fmax: float = 8_000, + norm_fn=torch.log10, + ): + super().__init__() + self.sampling_rate = sampling_rate + self.n_fft = n_fft + self.num_mels = num_mels + self.hop_size = hop_size + self.win_size = win_size + self.fmin = fmin + self.fmax = fmax + self.norm_fn = norm_fn + + mel = librosa_mel_fn(sr=self.sampling_rate, + n_fft=self.n_fft, + n_mels=self.num_mels, + fmin=self.fmin, + fmax=self.fmax) + mel_basis = torch.from_numpy(mel).float() + hann_window = torch.hann_window(self.win_size) + + self.register_buffer('mel_basis', mel_basis) + self.register_buffer('hann_window', hann_window) + + @property + def device(self): + return self.mel_basis.device + + def forward(self, waveform: torch.Tensor, center: bool = False) -> torch.Tensor: + waveform = waveform.clamp(min=-1., max=1.).to(self.device) + + waveform = torch.nn.functional.pad( + waveform.unsqueeze(1), + [int((self.n_fft - self.hop_size) / 2), + int((self.n_fft - self.hop_size) / 2)], + mode='reflect') + waveform = waveform.squeeze(1) + + spec = torch.stft(waveform, + self.n_fft, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=center, + pad_mode='reflect', + normalized=False, + onesided=True, + return_complex=True) + + spec = torch.view_as_real(spec) + spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) + spec = torch.matmul(self.mel_basis, spec) + spec = spectral_normalize_torch(spec, self.norm_fn) + + return spec diff --git a/mmaudio/ext/rotary_embeddings.py b/mmaudio/ext/rotary_embeddings.py new file mode 100644 index 0000000000000000000000000000000000000000..1ea9d56278cb68b7577ed13148227c30ed98fd02 --- /dev/null +++ b/mmaudio/ext/rotary_embeddings.py @@ -0,0 +1,35 @@ +from typing import Union + +import torch +from einops import rearrange +from torch import Tensor + +# Ref: https://github.com/black-forest-labs/flux/blob/main/src/flux/math.py +# Ref: https://github.com/lucidrains/rotary-embedding-torch + + +def compute_rope_rotations(length: int, + dim: int, + theta: int, + 
*, + freq_scaling: float = 1.0, + device: Union[torch.device, str] = 'cpu') -> Tensor: + assert dim % 2 == 0 + + with torch.amp.autocast(device_type='cuda', enabled=False): + pos = torch.arange(length, dtype=torch.float32, device=device) + freqs = 1.0 / (theta**(torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim)) + freqs *= freq_scaling + + rot = torch.einsum('..., f -> ... f', pos, freqs) + rot = torch.stack([torch.cos(rot), -torch.sin(rot), torch.sin(rot), torch.cos(rot)], dim=-1) + rot = rearrange(rot, 'n d (i j) -> 1 n d i j', i=2, j=2) + return rot + + +def apply_rope(x: Tensor, rot: Tensor) -> tuple[Tensor, Tensor]: + with torch.amp.autocast(device_type='cuda', enabled=False): + _x = x.float() + _x = _x.view(*_x.shape[:-1], -1, 1, 2) + x_out = rot[..., 0] * _x[..., 0] + rot[..., 1] * _x[..., 1] + return x_out.reshape(*x.shape).to(dtype=x.dtype) diff --git a/mmaudio/ext/stft_converter.py b/mmaudio/ext/stft_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..62922067ef3b1d3b8727ec39e7d664ccb304d9fe --- /dev/null +++ b/mmaudio/ext/stft_converter.py @@ -0,0 +1,183 @@ +# Reference: # https://github.com/bytedance/Make-An-Audio-2 + +import torch +import torch.nn as nn +import torchaudio +from einops import rearrange +from librosa.filters import mel as librosa_mel_fn + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5, norm_fn=torch.log10): + return norm_fn(torch.clamp(x, min=clip_val) * C) + + +def spectral_normalize_torch(magnitudes, norm_fn): + output = dynamic_range_compression_torch(magnitudes, norm_fn=norm_fn) + return output + + +class STFTConverter(nn.Module): + + def __init__( + self, + *, + sampling_rate: float = 16_000, + n_fft: int = 1024, + num_mels: int = 128, + hop_size: int = 256, + win_size: int = 1024, + fmin: float = 0, + fmax: float = 8_000, + norm_fn=torch.log, + ): + super().__init__() + self.sampling_rate = sampling_rate + self.n_fft = n_fft + self.num_mels = num_mels + self.hop_size = hop_size + self.win_size = win_size + self.fmin = fmin + self.fmax = fmax + self.norm_fn = norm_fn + + mel = librosa_mel_fn(sr=self.sampling_rate, + n_fft=self.n_fft, + n_mels=self.num_mels, + fmin=self.fmin, + fmax=self.fmax) + mel_basis = torch.from_numpy(mel).float() + hann_window = torch.hann_window(self.win_size) + + self.register_buffer('mel_basis', mel_basis) + self.register_buffer('hann_window', hann_window) + + @property + def device(self): + return self.hann_window.device + + def forward(self, waveform: torch.Tensor) -> torch.Tensor: + # input: batch_size * length + bs = waveform.shape[0] + waveform = waveform.clamp(min=-1., max=1.) 
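# A minimal usage sketch of the rotary-embedding helpers defined in rotary_embeddings.py above,
# added for illustration. The [batch, heads, seq, head_dim] layout for queries/keys is an
# assumption for this example; the helpers only require an even last dimension and a sequence
# axis that lines up with `rot`.
import torch

from mmaudio.ext.rotary_embeddings import apply_rope, compute_rope_rotations

B, H, N, D = 2, 8, 345, 64                        # D (head_dim) must be even
rot = compute_rope_rotations(N, D, theta=10_000)  # [1, N, D // 2, 2, 2] rotation matrices
q = torch.randn(B, H, N, D)
k = torch.randn(B, H, N, D)
q_rot = apply_rope(q, rot)  # same shape as q; each (even, odd) channel pair is rotated by
k_rot = apply_rope(k, rot)  # a position-dependent angle, so q.k scores reflect relative position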
+ + spec = torch.stft(waveform, + self.n_fft, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=True, + pad_mode='reflect', + normalized=False, + onesided=True, + return_complex=True) + + spec = torch.view_as_real(spec) + # print('After stft', spec.shape, spec.min(), spec.max(), spec.mean()) + + power = spec.pow(2).sum(-1) + angle = torch.atan2(spec[..., 1], spec[..., 0]) + + print('power', power.shape, power.min(), power.max(), power.mean()) + print('angle', angle.shape, angle.min(), angle.max(), angle.mean()) + + # print('mel', self.mel_basis.shape, self.mel_basis.min(), self.mel_basis.max(), + # self.mel_basis.mean()) + + # spec = rearrange(spec, 'b f t c -> (b c) f t') + + # spec = self.mel_transform(spec) + + # spec = torch.matmul(self.mel_basis, spec) + + # print('After mel', spec.shape, spec.min(), spec.max(), spec.mean()) + + # spec = spectral_normalize_torch(spec, self.norm_fn) + + # print('After norm', spec.shape, spec.min(), spec.max(), spec.mean()) + + # compute magnitude + # magnitude = torch.sqrt((spec**2).sum(-1)) + # normalize by magnitude + # scaled_magnitude = torch.log10(magnitude.clamp(min=1e-5)) * 10 + # spec = spec / magnitude.unsqueeze(-1) * scaled_magnitude.unsqueeze(-1) + + # power = torch.log10(power.clamp(min=1e-5)) * 10 + power = torch.log10(power.clamp(min=1e-5)) + + print('After scaling', power.shape, power.min(), power.max(), power.mean()) + + spec = torch.stack([power, angle], dim=-1) + + # spec = rearrange(spec, '(b c) f t -> b c f t', b=bs) + spec = rearrange(spec, 'b f t c -> b c f t', b=bs) + + # spec[:, :, 400:] = 0 + + return spec + + def invert(self, spec: torch.Tensor, length: int) -> torch.Tensor: + bs = spec.shape[0] + + # spec = rearrange(spec, 'b c f t -> (b c) f t') + # print(spec.shape, self.mel_basis.shape) + # spec = torch.linalg.lstsq(self.mel_basis.unsqueeze(0), spec).solution + # spec = torch.linalg.pinv(self.mel_basis.unsqueeze(0)) @ spec + + # spec = self.invmel_transform(spec) + + spec = rearrange(spec, 'b c f t -> b f t c', b=bs).contiguous() + + # spec[..., 0] = 10**(spec[..., 0] / 10) + + power = spec[..., 0] + power = 10**power + + # print('After unscaling', spec[..., 0].shape, spec[..., 0].min(), spec[..., 0].max(), + # spec[..., 0].mean()) + + unit_vector = torch.stack([ + torch.cos(spec[..., 1]), + torch.sin(spec[..., 1]), + ], dim=-1) + + spec = torch.sqrt(power) * unit_vector + + # spec = rearrange(spec, '(b c) f t -> b f t c', b=bs).contiguous() + spec = torch.view_as_complex(spec) + + waveform = torch.istft( + spec, + self.n_fft, + length=length, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=True, + normalized=False, + onesided=True, + return_complex=False, + ) + + return waveform + + +if __name__ == '__main__': + + converter = STFTConverter(sampling_rate=16000) + + signal = torchaudio.load('./output/ZZ6GRocWW38_000090.wav')[0] + # resample signal at 44100 Hz + # signal = torchaudio.transforms.Resample(16_000, 44_100)(signal) + + L = signal.shape[1] + print('Input signal', signal.shape) + spec = converter(signal) + + print('Final spec', spec.shape) + + signal_recon = converter.invert(spec, length=L) + print('Output signal', signal_recon.shape, signal_recon.min(), signal_recon.max(), + signal_recon.mean()) + + print('MSE', torch.nn.functional.mse_loss(signal, signal_recon)) + torchaudio.save('./output/ZZ6GRocWW38_000090_recon.wav', signal_recon, 16000) diff --git a/mmaudio/ext/stft_converter_mel.py b/mmaudio/ext/stft_converter_mel.py new 
file mode 100644 index 0000000000000000000000000000000000000000..f6b32d4cb9a23cd74f723e7d8307fd82fa1abba0 --- /dev/null +++ b/mmaudio/ext/stft_converter_mel.py @@ -0,0 +1,234 @@ +# Reference: # https://github.com/bytedance/Make-An-Audio-2 + +import torch +import torch.nn as nn +import torchaudio +from einops import rearrange +from librosa.filters import mel as librosa_mel_fn + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5, norm_fn=torch.log10): + return norm_fn(torch.clamp(x, min=clip_val) * C) + + +def spectral_normalize_torch(magnitudes, norm_fn): + output = dynamic_range_compression_torch(magnitudes, norm_fn=norm_fn) + return output + + +class STFTConverter(nn.Module): + + def __init__( + self, + *, + sampling_rate: float = 16_000, + n_fft: int = 1024, + num_mels: int = 128, + hop_size: int = 256, + win_size: int = 1024, + fmin: float = 0, + fmax: float = 8_000, + norm_fn=torch.log, + ): + super().__init__() + self.sampling_rate = sampling_rate + self.n_fft = n_fft + self.num_mels = num_mels + self.hop_size = hop_size + self.win_size = win_size + self.fmin = fmin + self.fmax = fmax + self.norm_fn = norm_fn + + mel = librosa_mel_fn(sr=self.sampling_rate, + n_fft=self.n_fft, + n_mels=self.num_mels, + fmin=self.fmin, + fmax=self.fmax) + mel_basis = torch.from_numpy(mel).float() + hann_window = torch.hann_window(self.win_size) + + self.register_buffer('mel_basis', mel_basis) + self.register_buffer('hann_window', hann_window) + + @property + def device(self): + return self.hann_window.device + + def forward(self, waveform: torch.Tensor) -> torch.Tensor: + # input: batch_size * length + bs = waveform.shape[0] + waveform = waveform.clamp(min=-1., max=1.) + + spec = torch.stft(waveform, + self.n_fft, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=True, + pad_mode='reflect', + normalized=False, + onesided=True, + return_complex=True) + + spec = torch.view_as_real(spec) + # print('After stft', spec.shape, spec.min(), spec.max(), spec.mean()) + + power = (spec.pow(2).sum(-1))**(0.5) + angle = torch.atan2(spec[..., 1], spec[..., 0]) + + print('power 1', power.shape, power.min(), power.max(), power.mean()) + print('angle 1', angle.shape, angle.min(), angle.max(), angle.mean(), angle[:, :2, :2]) + + # print('mel', self.mel_basis.shape, self.mel_basis.min(), self.mel_basis.max(), + # self.mel_basis.mean()) + + # spec = self.mel_transform(spec) + + # power = torch.matmul(self.mel_basis, power) + + spec = rearrange(spec, 'b f t c -> (b c) f t') + spec = self.mel_basis.unsqueeze(0) @ spec + spec = rearrange(spec, '(b c) f t -> b f t c', b=bs) + + power = (spec.pow(2).sum(-1))**(0.5) + angle = torch.atan2(spec[..., 1], spec[..., 0]) + + print('power', power.shape, power.min(), power.max(), power.mean()) + print('angle', angle.shape, angle.min(), angle.max(), angle.mean(), angle[:, :2, :2]) + + # print('After mel', spec.shape, spec.min(), spec.max(), spec.mean()) + + # spec = spectral_normalize_torch(spec, self.norm_fn) + + # print('After norm', spec.shape, spec.min(), spec.max(), spec.mean()) + + # compute magnitude + # magnitude = torch.sqrt((spec**2).sum(-1)) + # normalize by magnitude + # scaled_magnitude = torch.log10(magnitude.clamp(min=1e-5)) * 10 + # spec = spec / magnitude.unsqueeze(-1) * scaled_magnitude.unsqueeze(-1) + + # power = torch.log10(power.clamp(min=1e-5)) * 10 + power = torch.log10(power.clamp(min=1e-8)) + + print('After scaling', power.shape, power.min(), power.max(), power.mean()) + + # spec = torch.stack([power, angle], 
dim=-1) + + # spec = rearrange(spec, '(b c) f t -> b c f t', b=bs) + # spec = rearrange(spec, 'b f t c -> b c f t', b=bs) + + # spec[:, :, 400:] = 0 + + return power, angle + # return spec[..., 0], spec[..., 1] + + def invert(self, spec: torch.Tensor, length: int) -> torch.Tensor: + + power, angle = spec + + bs = power.shape[0] + + # spec = rearrange(spec, 'b c f t -> (b c) f t') + # print(spec.shape, self.mel_basis.shape) + # spec = torch.linalg.lstsq(self.mel_basis.unsqueeze(0), spec).solution + # spec = torch.linalg.pinv(self.mel_basis.unsqueeze(0)) @ spec + + # spec = self.invmel_transform(spec) + + # spec = rearrange(spec, 'b c f t -> b f t c', b=bs).contiguous() + + # spec[..., 0] = 10**(spec[..., 0] / 10) + + # power = spec[..., 0] + power = 10**power + + # print('After unscaling', spec[..., 0].shape, spec[..., 0].min(), spec[..., 0].max(), + # spec[..., 0].mean()) + + unit_vector = torch.stack([ + torch.cos(angle), + torch.sin(angle), + ], dim=-1) + + spec = power.unsqueeze(-1) * unit_vector + + # power = torch.linalg.lstsq(self.mel_basis.unsqueeze(0), power).solution + spec = rearrange(spec, 'b f t c -> (b c) f t') + spec = torch.linalg.pinv(self.mel_basis.unsqueeze(0)) @ spec + # spec = torch.linalg.lstsq(self.mel_basis.unsqueeze(0), spec).solution + spec = rearrange(spec, '(b c) f t -> b f t c', b=bs).contiguous() + + power = (spec.pow(2).sum(-1))**(0.5) + angle = torch.atan2(spec[..., 1], spec[..., 0]) + + print('power 2', power.shape, power.min(), power.max(), power.mean()) + print('angle 2', angle.shape, angle.min(), angle.max(), angle.mean(), angle[:, :2, :2]) + + # spec = rearrange(spec, '(b c) f t -> b f t c', b=bs).contiguous() + spec = torch.view_as_complex(spec) + + waveform = torch.istft( + spec, + self.n_fft, + length=length, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=True, + normalized=False, + onesided=True, + return_complex=False, + ) + + return waveform + + +if __name__ == '__main__': + + converter = STFTConverter(sampling_rate=16000) + + signal = torchaudio.load('./output/ZZ6GRocWW38_000090.wav')[0] + # resample signal at 44100 Hz + # signal = torchaudio.transforms.Resample(16_000, 44_100)(signal) + + L = signal.shape[1] + print('Input signal', signal.shape) + spec = converter(signal) + + power, angle = spec + + # print(power.shape, angle.shape) + # print(power, power.min(), power.max(), power.mean()) + # power = power.clamp(-1, 1) + # angle = angle.clamp(-1, 1) + + import matplotlib.pyplot as plt + + # Visualize power + plt.figure() + plt.imshow(power[0].detach().numpy(), aspect='auto', origin='lower') + plt.colorbar() + plt.title('Power') + plt.xlabel('Time') + plt.ylabel('Frequency') + plt.savefig('./output/power.png') + + # Visualize angle + plt.figure() + plt.imshow(angle[0].detach().numpy(), aspect='auto', origin='lower') + plt.colorbar() + plt.title('Angle') + plt.xlabel('Time') + plt.ylabel('Frequency') + plt.savefig('./output/angle.png') + + # print('Final spec', spec.shape) + + signal_recon = converter.invert(spec, length=L) + print('Output signal', signal_recon.shape, signal_recon.min(), signal_recon.max(), + signal_recon.mean()) + + print('MSE', torch.nn.functional.mse_loss(signal, signal_recon)) + torchaudio.save('./output/ZZ6GRocWW38_000090_recon.wav', signal_recon, 16000) diff --git a/mmaudio/ext/synchformer/LICENSE b/mmaudio/ext/synchformer/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2f70bf24b6f45f458998bdf5746376c4832352ea --- /dev/null +++ 
b/mmaudio/ext/synchformer/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Vladimir Iashin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/mmaudio/ext/synchformer/__init__.py b/mmaudio/ext/synchformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3aa1c4b6464593722e557505d721f3ca5e05f4e8 --- /dev/null +++ b/mmaudio/ext/synchformer/__init__.py @@ -0,0 +1 @@ +from mmaudio.ext.synchformer.synchformer import Synchformer diff --git a/mmaudio/ext/synchformer/divided_224_16x4.yaml b/mmaudio/ext/synchformer/divided_224_16x4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f9d20b76302a8af7928391643bd4b2d184e970aa --- /dev/null +++ b/mmaudio/ext/synchformer/divided_224_16x4.yaml @@ -0,0 +1,84 @@ +TRAIN: + ENABLE: True + DATASET: Ssv2 + BATCH_SIZE: 32 + EVAL_PERIOD: 5 + CHECKPOINT_PERIOD: 5 + AUTO_RESUME: True + CHECKPOINT_EPOCH_RESET: True + CHECKPOINT_FILE_PATH: /checkpoint/fmetze/neurips_sota/40944587/checkpoints/checkpoint_epoch_00035.pyth +DATA: + NUM_FRAMES: 16 + SAMPLING_RATE: 4 + TRAIN_JITTER_SCALES: [256, 320] + TRAIN_CROP_SIZE: 224 + TEST_CROP_SIZE: 224 + INPUT_CHANNEL_NUM: [3] + MEAN: [0.5, 0.5, 0.5] + STD: [0.5, 0.5, 0.5] + PATH_TO_DATA_DIR: /private/home/mandelapatrick/slowfast/data/ssv2 + PATH_PREFIX: /datasets01/SomethingV2/092720/20bn-something-something-v2-frames + INV_UNIFORM_SAMPLE: True + RANDOM_FLIP: False + REVERSE_INPUT_CHANNEL: True + USE_RAND_AUGMENT: True + RE_PROB: 0.0 + USE_REPEATED_AUG: False + USE_RANDOM_RESIZE_CROPS: False + COLORJITTER: False + GRAYSCALE: False + GAUSSIAN: False +SOLVER: + BASE_LR: 1e-4 + LR_POLICY: steps_with_relative_lrs + LRS: [1, 0.1, 0.01] + STEPS: [0, 20, 30] + MAX_EPOCH: 35 + MOMENTUM: 0.9 + WEIGHT_DECAY: 5e-2 + WARMUP_EPOCHS: 0.0 + OPTIMIZING_METHOD: adamw + USE_MIXED_PRECISION: True + SMOOTHING: 0.2 +SLOWFAST: + ALPHA: 8 +VIT: + PATCH_SIZE: 16 + PATCH_SIZE_TEMP: 2 + CHANNELS: 3 + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 + MLP_RATIO: 4 + QKV_BIAS: True + VIDEO_INPUT: True + TEMPORAL_RESOLUTION: 8 + USE_MLP: True + DROP: 0.0 + POS_DROPOUT: 0.0 + DROP_PATH: 0.2 + IM_PRETRAINED: True + HEAD_DROPOUT: 0.0 + HEAD_ACT: tanh + PRETRAINED_WEIGHTS: vit_1k + ATTN_LAYER: divided +MODEL: + NUM_CLASSES: 174 + ARCH: slow + MODEL_NAME: VisionTransformer + LOSS_FUNC: cross_entropy +TEST: + ENABLE: True + DATASET: Ssv2 + BATCH_SIZE: 64 + NUM_ENSEMBLE_VIEWS: 1 + NUM_SPATIAL_CROPS: 3 +DATA_LOADER: + NUM_WORKERS: 4 + PIN_MEMORY: True +NUM_GPUS: 8 +NUM_SHARDS: 4 +RNG_SEED: 0 +OUTPUT_DIR: . 
+TENSORBOARD: + ENABLE: True diff --git a/mmaudio/ext/synchformer/motionformer.py b/mmaudio/ext/synchformer/motionformer.py new file mode 100644 index 0000000000000000000000000000000000000000..f02141e7cf3a3a133553b6a25341b4b68a483de4 --- /dev/null +++ b/mmaudio/ext/synchformer/motionformer.py @@ -0,0 +1,400 @@ +import logging +from pathlib import Path + +import einops +import torch +from omegaconf import OmegaConf +from timm.layers import trunc_normal_ +from torch import nn + +from mmaudio.ext.synchformer.utils import check_if_file_exists_else_download +from mmaudio.ext.synchformer.video_model_builder import VisionTransformer + +FILE2URL = { + # cfg + 'motionformer_224_16x4.yaml': + 'https://raw.githubusercontent.com/facebookresearch/Motionformer/bf43d50/configs/SSV2/motionformer_224_16x4.yaml', + 'joint_224_16x4.yaml': + 'https://raw.githubusercontent.com/facebookresearch/Motionformer/bf43d50/configs/SSV2/joint_224_16x4.yaml', + 'divided_224_16x4.yaml': + 'https://raw.githubusercontent.com/facebookresearch/Motionformer/bf43d50/configs/SSV2/divided_224_16x4.yaml', + # ckpt + 'ssv2_motionformer_224_16x4.pyth': + 'https://dl.fbaipublicfiles.com/motionformer/ssv2_motionformer_224_16x4.pyth', + 'ssv2_joint_224_16x4.pyth': + 'https://dl.fbaipublicfiles.com/motionformer/ssv2_joint_224_16x4.pyth', + 'ssv2_divided_224_16x4.pyth': + 'https://dl.fbaipublicfiles.com/motionformer/ssv2_divided_224_16x4.pyth', +} + + +class MotionFormer(VisionTransformer): + ''' This class serves three puposes: + 1. Renames the class to MotionFormer. + 2. Downloads the cfg from the original repo and patches it if needed. + 3. Takes care of feature extraction by redefining .forward() + - if `extract_features=True` and `factorize_space_time=False`, + the output is of shape (B, T, D) where T = 1 + (224 // 16) * (224 // 16) * 8 + - if `extract_features=True` and `factorize_space_time=True`, the output is of shape (B*S, D) + and spatial and temporal transformer encoder layers are used. + - if `extract_features=True` and `factorize_space_time=True` as well as `add_global_repr=True` + the output is of shape (B, D) and spatial and temporal transformer encoder layers + are used as well as the global representation is extracted from segments (extra pos emb + is added). 
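+        - In this codebase, `Synchformer` instantiates it with `extract_features=True`,
+          `factorize_space_time=True`, an Identity temporal aggregator and `add_global_repr=False`,
+          so `.forward()` returns per-frame features for each segment, reshaped to (B, S, t, D),
+          e.g. (B, 7, 8, 768).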
+ ''' + + def __init__( + self, + extract_features: bool = False, + ckpt_path: str = None, + factorize_space_time: bool = None, + agg_space_module: str = None, + agg_time_module: str = None, + add_global_repr: bool = True, + agg_segments_module: str = None, + max_segments: int = None, + ): + self.extract_features = extract_features + self.ckpt_path = ckpt_path + self.factorize_space_time = factorize_space_time + + if self.ckpt_path is not None: + check_if_file_exists_else_download(self.ckpt_path, FILE2URL) + ckpt = torch.load(self.ckpt_path, map_location='cpu') + mformer_ckpt2cfg = { + 'ssv2_motionformer_224_16x4.pyth': 'motionformer_224_16x4.yaml', + 'ssv2_joint_224_16x4.pyth': 'joint_224_16x4.yaml', + 'ssv2_divided_224_16x4.pyth': 'divided_224_16x4.yaml', + } + # init from motionformer ckpt or from our Stage I ckpt + # depending on whether the feat extractor was pre-trained on AVCLIPMoCo or not, we need to + # load the state dict differently + was_pt_on_avclip = self.ckpt_path.endswith( + '.pt') # checks if it is a stage I ckpt (FIXME: a bit generic) + if self.ckpt_path.endswith(tuple(mformer_ckpt2cfg.keys())): + cfg_fname = mformer_ckpt2cfg[Path(self.ckpt_path).name] + elif was_pt_on_avclip: + # TODO: this is a hack, we should be able to get the cfg from the ckpt (earlier ckpt didn't have it) + s1_cfg = ckpt.get('args', None) # Stage I cfg + if s1_cfg is not None: + s1_vfeat_extractor_ckpt_path = s1_cfg.model.params.vfeat_extractor.params.ckpt_path + # if the stage I ckpt was initialized from a motionformer ckpt or train from scratch + if s1_vfeat_extractor_ckpt_path is not None: + cfg_fname = mformer_ckpt2cfg[Path(s1_vfeat_extractor_ckpt_path).name] + else: + cfg_fname = 'divided_224_16x4.yaml' + else: + cfg_fname = 'divided_224_16x4.yaml' + else: + raise ValueError(f'ckpt_path {self.ckpt_path} is not supported.') + else: + was_pt_on_avclip = False + cfg_fname = 'divided_224_16x4.yaml' + # logging.info(f'No ckpt_path provided, using {cfg_fname} config.') + + if cfg_fname in ['motionformer_224_16x4.yaml', 'divided_224_16x4.yaml']: + pos_emb_type = 'separate' + elif cfg_fname == 'joint_224_16x4.yaml': + pos_emb_type = 'joint' + + self.mformer_cfg_path = Path(__file__).absolute().parent / cfg_fname + + check_if_file_exists_else_download(self.mformer_cfg_path, FILE2URL) + mformer_cfg = OmegaConf.load(self.mformer_cfg_path) + logging.info(f'Loading MotionFormer config from {self.mformer_cfg_path.absolute()}') + + # patch the cfg (from the default cfg defined in the repo `Motionformer/slowfast/config/defaults.py`) + mformer_cfg.VIT.ATTN_DROPOUT = 0.0 + mformer_cfg.VIT.POS_EMBED = pos_emb_type + mformer_cfg.VIT.USE_ORIGINAL_TRAJ_ATTN_CODE = True + mformer_cfg.VIT.APPROX_ATTN_TYPE = 'none' # guessing + mformer_cfg.VIT.APPROX_ATTN_DIM = 64 # from ckpt['cfg'] + + # finally init VisionTransformer with the cfg + super().__init__(mformer_cfg) + + # load the ckpt now if ckpt is provided and not from AVCLIPMoCo-pretrained ckpt + if (self.ckpt_path is not None) and (not was_pt_on_avclip): + _ckpt_load_status = self.load_state_dict(ckpt['model_state'], strict=False) + if len(_ckpt_load_status.missing_keys) > 0 or len( + _ckpt_load_status.unexpected_keys) > 0: + logging.warning(f'Loading exact vfeat_extractor ckpt from {self.ckpt_path} failed.' 
\ + f'Missing keys: {_ckpt_load_status.missing_keys}, ' \ + f'Unexpected keys: {_ckpt_load_status.unexpected_keys}') + else: + logging.info(f'Loading vfeat_extractor ckpt from {self.ckpt_path} succeeded.') + + if self.extract_features: + assert isinstance(self.norm, + nn.LayerNorm), 'early x[:, 1:, :] may not be safe for per-tr weights' + # pre-logits are Sequential(nn.Linear(emb, emd), act) and `act` is tanh but see the logger + self.pre_logits = nn.Identity() + # we don't need the classification head (saving memory) + self.head = nn.Identity() + self.head_drop = nn.Identity() + # avoiding code duplication (used only if agg_*_module is TransformerEncoderLayer) + transf_enc_layer_kwargs = dict( + d_model=self.embed_dim, + nhead=self.num_heads, + activation=nn.GELU(), + batch_first=True, + dim_feedforward=self.mlp_ratio * self.embed_dim, + dropout=self.drop_rate, + layer_norm_eps=1e-6, + norm_first=True, + ) + # define adapters if needed + if self.factorize_space_time: + if agg_space_module == 'TransformerEncoderLayer': + self.spatial_attn_agg = SpatialTransformerEncoderLayer( + **transf_enc_layer_kwargs) + elif agg_space_module == 'AveragePooling': + self.spatial_attn_agg = AveragePooling(avg_pattern='BS D t h w -> BS D t', + then_permute_pattern='BS D t -> BS t D') + if agg_time_module == 'TransformerEncoderLayer': + self.temp_attn_agg = TemporalTransformerEncoderLayer(**transf_enc_layer_kwargs) + elif agg_time_module == 'AveragePooling': + self.temp_attn_agg = AveragePooling(avg_pattern='BS t D -> BS D') + elif 'Identity' in agg_time_module: + self.temp_attn_agg = nn.Identity() + # define a global aggregation layer (aggregarate over segments) + self.add_global_repr = add_global_repr + if add_global_repr: + if agg_segments_module == 'TransformerEncoderLayer': + # we can reuse the same layer as for temporal factorization (B, dim_to_agg, D) -> (B, D) + # we need to add pos emb (PE) because previously we added the same PE for each segment + pos_max_len = max_segments if max_segments is not None else 16 # 16 = 10sec//0.64sec + 1 + self.global_attn_agg = TemporalTransformerEncoderLayer( + add_pos_emb=True, + pos_emb_drop=mformer_cfg.VIT.POS_DROPOUT, + pos_max_len=pos_max_len, + **transf_enc_layer_kwargs) + elif agg_segments_module == 'AveragePooling': + self.global_attn_agg = AveragePooling(avg_pattern='B S D -> B D') + + if was_pt_on_avclip: + # we need to filter out the state_dict of the AVCLIP model (has both A and V extractors) + # and keep only the state_dict of the feat extractor + ckpt_weights = dict() + for k, v in ckpt['state_dict'].items(): + if k.startswith(('module.v_encoder.', 'v_encoder.')): + k = k.replace('module.', '').replace('v_encoder.', '') + ckpt_weights[k] = v + _load_status = self.load_state_dict(ckpt_weights, strict=False) + if len(_load_status.missing_keys) > 0 or len(_load_status.unexpected_keys) > 0: + logging.warning(f'Loading exact vfeat_extractor ckpt from {self.ckpt_path} failed. 
\n' \ + f'Missing keys ({len(_load_status.missing_keys)}): ' \ + f'{_load_status.missing_keys}, \n' \ + f'Unexpected keys ({len(_load_status.unexpected_keys)}): ' \ + f'{_load_status.unexpected_keys} \n' \ + f'temp_attn_agg are expected to be missing if ckpt was pt contrastively.') + else: + logging.info(f'Loading vfeat_extractor ckpt from {self.ckpt_path} succeeded.') + + # patch_embed is not used in MotionFormer, only patch_embed_3d, because cfg.VIT.PATCH_SIZE_TEMP > 1 + # but it used to calculate the number of patches, so we need to set keep it + self.patch_embed.requires_grad_(False) + + def forward(self, x): + ''' + x is of shape (B, S, C, T, H, W) where S is the number of segments. + ''' + # Batch, Segments, Channels, T=frames, Height, Width + B, S, C, T, H, W = x.shape + # Motionformer expects a tensor of shape (1, B, C, T, H, W). + # The first dimension (1) is a dummy dimension to make the input tensor and won't be used: + # see `video_model_builder.video_input`. + # x = x.unsqueeze(0) # (1, B, S, C, T, H, W) + + orig_shape = (B, S, C, T, H, W) + x = x.view(B * S, C, T, H, W) # flatten batch and segments + x = self.forward_segments(x, orig_shape=orig_shape) + # unpack the segments (using rest dimensions to support different shapes e.g. (BS, D) or (BS, t, D)) + x = x.view(B, S, *x.shape[1:]) + # x is now of shape (B*S, D) or (B*S, t, D) if `self.temp_attn_agg` is `Identity` + + return x # x is (B, S, ...) + + def forward_segments(self, x, orig_shape: tuple) -> torch.Tensor: + '''x is of shape (1, BS, C, T, H, W) where S is the number of segments.''' + x, x_mask = self.forward_features(x) + + assert self.extract_features + + # (BS, T, D) where T = 1 + (224 // 16) * (224 // 16) * 8 + x = x[:, + 1:, :] # without the CLS token for efficiency (should be safe for LayerNorm and FC) + x = self.norm(x) + x = self.pre_logits(x) + if self.factorize_space_time: + x = self.restore_spatio_temp_dims(x, orig_shape) # (B*S, D, t, h, w) <- (B*S, t*h*w, D) + + x = self.spatial_attn_agg(x, x_mask) # (B*S, t, D) + x = self.temp_attn_agg( + x) # (B*S, D) or (BS, t, D) if `self.temp_attn_agg` is `Identity` + + return x + + def restore_spatio_temp_dims(self, feats: torch.Tensor, orig_shape: tuple) -> torch.Tensor: + ''' + feats are of shape (B*S, T, D) where T = 1 + (224 // 16) * (224 // 16) * 8 + Our goal is to make them of shape (B*S, t, h, w, D) where h, w are the spatial dimensions. + From `self.patch_embed_3d`, it follows that we could reshape feats with: + `feats.transpose(1, 2).view(B*S, D, t, h, w)` + ''' + B, S, C, T, H, W = orig_shape + D = self.embed_dim + + # num patches in each dimension + t = T // self.patch_embed_3d.z_block_size + h = self.patch_embed_3d.height + w = self.patch_embed_3d.width + + feats = feats.permute(0, 2, 1) # (B*S, D, T) + feats = feats.view(B * S, D, t, h, w) # (B*S, D, t, h, w) + + return feats + + +class BaseEncoderLayer(nn.TransformerEncoderLayer): + ''' + This is a wrapper around nn.TransformerEncoderLayer that adds a CLS token + to the sequence and outputs the CLS token's representation. + This base class parents both SpatialEncoderLayer and TemporalEncoderLayer for the RGB stream + and the FrequencyEncoderLayer and TemporalEncoderLayer for the audio stream stream. + We also, optionally, add a positional embedding to the input sequence which + allows to reuse it for global aggregation (of segments) for both streams. 
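+    The optional `x_mask` follows the 1 = keep / 0 = mask convention; internally it is expanded
+    per attention head and inverted before being passed to `nn.TransformerEncoderLayer` as `src_mask`.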
+ ''' + + def __init__(self, + add_pos_emb: bool = False, + pos_emb_drop: float = None, + pos_max_len: int = None, + *args_transformer_enc, + **kwargs_transformer_enc): + super().__init__(*args_transformer_enc, **kwargs_transformer_enc) + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.self_attn.embed_dim)) + trunc_normal_(self.cls_token, std=.02) + + # add positional embedding + self.add_pos_emb = add_pos_emb + if add_pos_emb: + self.pos_max_len = 1 + pos_max_len # +1 (for CLS) + self.pos_emb = nn.Parameter(torch.zeros(1, self.pos_max_len, self.self_attn.embed_dim)) + self.pos_drop = nn.Dropout(pos_emb_drop) + trunc_normal_(self.pos_emb, std=.02) + + self.apply(self._init_weights) + + def forward(self, x: torch.Tensor, x_mask: torch.Tensor = None): + ''' x is of shape (B, N, D); if provided x_mask is of shape (B, N)''' + batch_dim = x.shape[0] + + # add CLS token + cls_tokens = self.cls_token.expand(batch_dim, -1, -1) # expanding to match batch dimension + x = torch.cat((cls_tokens, x), dim=-2) # (batch_dim, 1+seq_len, D) + if x_mask is not None: + cls_mask = torch.ones((batch_dim, 1), dtype=torch.bool, + device=x_mask.device) # 1=keep; 0=mask + x_mask_w_cls = torch.cat((cls_mask, x_mask), dim=-1) # (batch_dim, 1+seq_len) + B, N = x_mask_w_cls.shape + # torch expects (N, N) or (B*num_heads, N, N) mask (sadness ahead); torch masks + x_mask_w_cls = x_mask_w_cls.reshape(B, 1, 1, N)\ + .expand(-1, self.self_attn.num_heads, N, -1)\ + .reshape(B * self.self_attn.num_heads, N, N) + assert x_mask_w_cls.dtype == x_mask_w_cls.bool().dtype, 'x_mask_w_cls.dtype != bool' + x_mask_w_cls = ~x_mask_w_cls # invert mask (1=mask) + else: + x_mask_w_cls = None + + # add positional embedding + if self.add_pos_emb: + seq_len = x.shape[ + 1] # (don't even think about moving it before the CLS token concatenation) + assert seq_len <= self.pos_max_len, f'Seq len ({seq_len}) > pos_max_len ({self.pos_max_len})' + x = x + self.pos_emb[:, :seq_len, :] + x = self.pos_drop(x) + + # apply encoder layer (calls nn.TransformerEncoderLayer.forward); + x = super().forward(src=x, src_mask=x_mask_w_cls) # (batch_dim, 1+seq_len, D) + + # CLS token is expected to hold spatial information for each frame + x = x[:, 0, :] # (batch_dim, D) + + return x + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'cls_token', 'pos_emb'} + + +class SpatialTransformerEncoderLayer(BaseEncoderLayer): + ''' Aggregates spatial dimensions by applying attention individually to each frame. ''' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def forward(self, x: torch.Tensor, x_mask: torch.Tensor = None) -> torch.Tensor: + ''' x is of shape (B*S, D, t, h, w) where S is the number of segments. + if specified x_mask (B*S, t, h, w), 0=masked, 1=kept + Returns a tensor of shape (B*S, t, D) pooling spatial information for each frame. 
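+        Internally, time is folded into the batch dimension and the h*w patch tokens of each frame
+        form the sequence, so the CLS output of the base layer yields one D-dimensional vector per frame.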
''' + BS, D, t, h, w = x.shape + + # time as a batch dimension and flatten spatial dimensions as sequence + x = einops.rearrange(x, 'BS D t h w -> (BS t) (h w) D') + # similar to mask + if x_mask is not None: + x_mask = einops.rearrange(x_mask, 'BS t h w -> (BS t) (h w)') + + # apply encoder layer (BaseEncoderLayer.forward) - it will add CLS token and output its representation + x = super().forward(x=x, x_mask=x_mask) # (B*S*t, D) + + # reshape back to (B*S, t, D) + x = einops.rearrange(x, '(BS t) D -> BS t D', BS=BS, t=t) + + # (B*S, t, D) + return x + + +class TemporalTransformerEncoderLayer(BaseEncoderLayer): + ''' Aggregates temporal dimension with attention. Also used with pos emb as global aggregation + in both streams. ''' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def forward(self, x): + ''' x is of shape (B*S, t, D) where S is the number of segments. + Returns a tensor of shape (B*S, D) pooling temporal information. ''' + BS, t, D = x.shape + + # apply encoder layer (BaseEncoderLayer.forward) - it will add CLS token and output its representation + x = super().forward(x) # (B*S, D) + + return x # (B*S, D) + + +class AveragePooling(nn.Module): + + def __init__(self, avg_pattern: str, then_permute_pattern: str = None) -> None: + ''' patterns are e.g. "bs t d -> bs d" ''' + super().__init__() + # TODO: need to register them as buffers (but fails because these are strings) + self.reduce_fn = 'mean' + self.avg_pattern = avg_pattern + self.then_permute_pattern = then_permute_pattern + + def forward(self, x: torch.Tensor, x_mask: torch.Tensor = None) -> torch.Tensor: + x = einops.reduce(x, self.avg_pattern, self.reduce_fn) + if self.then_permute_pattern is not None: + x = einops.rearrange(x, self.then_permute_pattern) + return x diff --git a/mmaudio/ext/synchformer/synchformer.py b/mmaudio/ext/synchformer/synchformer.py new file mode 100644 index 0000000000000000000000000000000000000000..80871f004d6f4c57f48594d90195f84f89d7cb0a --- /dev/null +++ b/mmaudio/ext/synchformer/synchformer.py @@ -0,0 +1,55 @@ +import logging +from typing import Any, Mapping + +import torch +from torch import nn + +from mmaudio.ext.synchformer.motionformer import MotionFormer + + +class Synchformer(nn.Module): + + def __init__(self): + super().__init__() + + self.vfeat_extractor = MotionFormer(extract_features=True, + factorize_space_time=True, + agg_space_module='TransformerEncoderLayer', + agg_time_module='torch.nn.Identity', + add_global_repr=False) + + # self.vfeat_extractor = instantiate_from_config(vfeat_extractor) + # self.afeat_extractor = instantiate_from_config(afeat_extractor) + # # bridging the s3d latent dim (1024) into what is specified in the config + # # to match e.g. the transformer dim + # self.vproj = instantiate_from_config(vproj) + # self.aproj = instantiate_from_config(aproj) + # self.transformer = instantiate_from_config(transformer) + + def forward(self, vis): + B, S, Tv, C, H, W = vis.shape + vis = vis.permute(0, 1, 3, 2, 4, 5) # (B, S, C, Tv, H, W) + # feat extractors return a tuple of segment-level and global features (ignored for sync) + # (B, S, tv, D), e.g. 
(B, 7, 8, 768) + vis = self.vfeat_extractor(vis) + return vis + + def load_state_dict(self, sd: Mapping[str, Any], strict: bool = True): + # discard all entries except vfeat_extractor + sd = {k: v for k, v in sd.items() if k.startswith('vfeat_extractor')} + + return super().load_state_dict(sd, strict) + + +if __name__ == "__main__": + model = Synchformer().cuda().eval() + sd = torch.load('./ext_weights/synchformer_state_dict.pth', weights_only=True) + model.load_state_dict(sd) + + vid = torch.randn(2, 7, 16, 3, 224, 224).cuda() + features = model.extract_vfeats(vid, for_loop=False).detach().cpu() + print(features.shape) + + # extract and save the state dict only + # sd = torch.load('./ext_weights/sync_model_audioset.pt')['model'] + # torch.save(sd, './ext_weights/synchformer_state_dict.pth') diff --git a/mmaudio/ext/synchformer/utils.py b/mmaudio/ext/synchformer/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a797eb9c66f04b7c29934bfc384c935cdf441a62 --- /dev/null +++ b/mmaudio/ext/synchformer/utils.py @@ -0,0 +1,92 @@ +from hashlib import md5 +from pathlib import Path + +import requests +from tqdm import tqdm + +PARENT_LINK = 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a' +FNAME2LINK = { + # S3: Synchability: AudioSet (run 2) + '24-01-22T20-34-52.pt': + f'{PARENT_LINK}/sync/sync_models/24-01-22T20-34-52/24-01-22T20-34-52.pt', + 'cfg-24-01-22T20-34-52.yaml': + f'{PARENT_LINK}/sync/sync_models/24-01-22T20-34-52/cfg-24-01-22T20-34-52.yaml', + # S2: Synchformer: AudioSet (run 2) + '24-01-04T16-39-21.pt': + f'{PARENT_LINK}/sync/sync_models/24-01-04T16-39-21/24-01-04T16-39-21.pt', + 'cfg-24-01-04T16-39-21.yaml': + f'{PARENT_LINK}/sync/sync_models/24-01-04T16-39-21/cfg-24-01-04T16-39-21.yaml', + # S2: Synchformer: AudioSet (run 1) + '23-08-28T11-23-23.pt': + f'{PARENT_LINK}/sync/sync_models/23-08-28T11-23-23/23-08-28T11-23-23.pt', + 'cfg-23-08-28T11-23-23.yaml': + f'{PARENT_LINK}/sync/sync_models/23-08-28T11-23-23/cfg-23-08-28T11-23-23.yaml', + # S2: Synchformer: LRS3 (run 2) + '23-12-23T18-33-57.pt': + f'{PARENT_LINK}/sync/sync_models/23-12-23T18-33-57/23-12-23T18-33-57.pt', + 'cfg-23-12-23T18-33-57.yaml': + f'{PARENT_LINK}/sync/sync_models/23-12-23T18-33-57/cfg-23-12-23T18-33-57.yaml', + # S2: Synchformer: VGS (run 2) + '24-01-02T10-00-53.pt': + f'{PARENT_LINK}/sync/sync_models/24-01-02T10-00-53/24-01-02T10-00-53.pt', + 'cfg-24-01-02T10-00-53.yaml': + f'{PARENT_LINK}/sync/sync_models/24-01-02T10-00-53/cfg-24-01-02T10-00-53.yaml', + # SparseSync: ft VGGSound-Full + '22-09-21T21-00-52.pt': + f'{PARENT_LINK}/sync/sync_models/22-09-21T21-00-52/22-09-21T21-00-52.pt', + 'cfg-22-09-21T21-00-52.yaml': + f'{PARENT_LINK}/sync/sync_models/22-09-21T21-00-52/cfg-22-09-21T21-00-52.yaml', + # SparseSync: ft VGGSound-Sparse + '22-07-28T15-49-45.pt': + f'{PARENT_LINK}/sync/sync_models/22-07-28T15-49-45/22-07-28T15-49-45.pt', + 'cfg-22-07-28T15-49-45.yaml': + f'{PARENT_LINK}/sync/sync_models/22-07-28T15-49-45/cfg-22-07-28T15-49-45.yaml', + # SparseSync: only pt on LRS3 + '22-07-13T22-25-49.pt': + f'{PARENT_LINK}/sync/sync_models/22-07-13T22-25-49/22-07-13T22-25-49.pt', + 'cfg-22-07-13T22-25-49.yaml': + f'{PARENT_LINK}/sync/sync_models/22-07-13T22-25-49/cfg-22-07-13T22-25-49.yaml', + # SparseSync: feature extractors + 'ResNetAudio-22-08-04T09-51-04.pt': + f'{PARENT_LINK}/sync/ResNetAudio-22-08-04T09-51-04.pt', # 2s + 'ResNetAudio-22-08-03T23-14-49.pt': + f'{PARENT_LINK}/sync/ResNetAudio-22-08-03T23-14-49.pt', # 3s + 'ResNetAudio-22-08-03T23-14-28.pt': + 
f'{PARENT_LINK}/sync/ResNetAudio-22-08-03T23-14-28.pt', # 4s + 'ResNetAudio-22-06-24T08-10-33.pt': + f'{PARENT_LINK}/sync/ResNetAudio-22-06-24T08-10-33.pt', # 5s + 'ResNetAudio-22-06-24T17-31-07.pt': + f'{PARENT_LINK}/sync/ResNetAudio-22-06-24T17-31-07.pt', # 6s + 'ResNetAudio-22-06-24T23-57-11.pt': + f'{PARENT_LINK}/sync/ResNetAudio-22-06-24T23-57-11.pt', # 7s + 'ResNetAudio-22-06-25T04-35-42.pt': + f'{PARENT_LINK}/sync/ResNetAudio-22-06-25T04-35-42.pt', # 8s +} + + +def check_if_file_exists_else_download(path, fname2link=FNAME2LINK, chunk_size=1024): + '''Checks if file exists, if not downloads it from the link to the path''' + path = Path(path) + if not path.exists(): + path.parent.mkdir(exist_ok=True, parents=True) + link = fname2link.get(path.name, None) + if link is None: + raise ValueError(f'Cant find the checkpoint file: {path}.', + f'Please download it manually and ensure the path exists.') + with requests.get(fname2link[path.name], stream=True) as r: + total_size = int(r.headers.get('content-length', 0)) + with tqdm(total=total_size, unit='B', unit_scale=True) as pbar: + with open(path, 'wb') as f: + for data in r.iter_content(chunk_size=chunk_size): + if data: + f.write(data) + pbar.update(chunk_size) + + +def get_md5sum(path): + hash_md5 = md5() + with open(path, 'rb') as f: + for chunk in iter(lambda: f.read(4096 * 8), b''): + hash_md5.update(chunk) + md5sum = hash_md5.hexdigest() + return md5sum diff --git a/mmaudio/ext/synchformer/video_model_builder.py b/mmaudio/ext/synchformer/video_model_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..3defae4d07806086fd654906fab3d9f64ba4544f --- /dev/null +++ b/mmaudio/ext/synchformer/video_model_builder.py @@ -0,0 +1,277 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+# Copyright 2020 Ross Wightman +# Modified Model definition + +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +from timm.layers import trunc_normal_ + +from mmaudio.ext.synchformer import vit_helper + + +class VisionTransformer(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage """ + + def __init__(self, cfg): + super().__init__() + self.img_size = cfg.DATA.TRAIN_CROP_SIZE + self.patch_size = cfg.VIT.PATCH_SIZE + self.in_chans = cfg.VIT.CHANNELS + if cfg.TRAIN.DATASET == "Epickitchens": + self.num_classes = [97, 300] + else: + self.num_classes = cfg.MODEL.NUM_CLASSES + self.embed_dim = cfg.VIT.EMBED_DIM + self.depth = cfg.VIT.DEPTH + self.num_heads = cfg.VIT.NUM_HEADS + self.mlp_ratio = cfg.VIT.MLP_RATIO + self.qkv_bias = cfg.VIT.QKV_BIAS + self.drop_rate = cfg.VIT.DROP + self.drop_path_rate = cfg.VIT.DROP_PATH + self.head_dropout = cfg.VIT.HEAD_DROPOUT + self.video_input = cfg.VIT.VIDEO_INPUT + self.temporal_resolution = cfg.VIT.TEMPORAL_RESOLUTION + self.use_mlp = cfg.VIT.USE_MLP + self.num_features = self.embed_dim + norm_layer = partial(nn.LayerNorm, eps=1e-6) + self.attn_drop_rate = cfg.VIT.ATTN_DROPOUT + self.head_act = cfg.VIT.HEAD_ACT + self.cfg = cfg + + # Patch Embedding + self.patch_embed = vit_helper.PatchEmbed(img_size=224, + patch_size=self.patch_size, + in_chans=self.in_chans, + embed_dim=self.embed_dim) + + # 3D Patch Embedding + self.patch_embed_3d = vit_helper.PatchEmbed3D(img_size=self.img_size, + temporal_resolution=self.temporal_resolution, + patch_size=self.patch_size, + in_chans=self.in_chans, + embed_dim=self.embed_dim, + z_block_size=self.cfg.VIT.PATCH_SIZE_TEMP) + self.patch_embed_3d.proj.weight.data = torch.zeros_like( + self.patch_embed_3d.proj.weight.data) + + # Number of patches + if self.video_input: + num_patches = self.patch_embed.num_patches * self.temporal_resolution + else: + num_patches = self.patch_embed.num_patches + self.num_patches = num_patches + + # CLS token + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) + trunc_normal_(self.cls_token, std=.02) + + # Positional embedding + self.pos_embed = nn.Parameter( + torch.zeros(1, self.patch_embed.num_patches + 1, self.embed_dim)) + self.pos_drop = nn.Dropout(p=cfg.VIT.POS_DROPOUT) + trunc_normal_(self.pos_embed, std=.02) + + if self.cfg.VIT.POS_EMBED == "joint": + self.st_embed = nn.Parameter(torch.zeros(1, num_patches + 1, self.embed_dim)) + trunc_normal_(self.st_embed, std=.02) + elif self.cfg.VIT.POS_EMBED == "separate": + self.temp_embed = nn.Parameter(torch.zeros(1, self.temporal_resolution, self.embed_dim)) + + # Layer Blocks + dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate, self.depth)] + if self.cfg.VIT.ATTN_LAYER == "divided": + self.blocks = nn.ModuleList([ + vit_helper.DividedSpaceTimeBlock( + attn_type=cfg.VIT.ATTN_LAYER, + dim=self.embed_dim, + num_heads=self.num_heads, + mlp_ratio=self.mlp_ratio, + qkv_bias=self.qkv_bias, + drop=self.drop_rate, + attn_drop=self.attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + ) for i in range(self.depth) + ]) + else: + self.blocks = nn.ModuleList([ + vit_helper.Block(attn_type=cfg.VIT.ATTN_LAYER, + dim=self.embed_dim, + num_heads=self.num_heads, + mlp_ratio=self.mlp_ratio, + qkv_bias=self.qkv_bias, + drop=self.drop_rate, + attn_drop=self.attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + use_original_code=self.cfg.VIT.USE_ORIGINAL_TRAJ_ATTN_CODE) + for i in range(self.depth) + ]) + self.norm = 
norm_layer(self.embed_dim) + + # MLP head + if self.use_mlp: + hidden_dim = self.embed_dim + if self.head_act == 'tanh': + # logging.info("Using TanH activation in MLP") + act = nn.Tanh() + elif self.head_act == 'gelu': + # logging.info("Using GELU activation in MLP") + act = nn.GELU() + else: + # logging.info("Using ReLU activation in MLP") + act = nn.ReLU() + self.pre_logits = nn.Sequential( + OrderedDict([ + ('fc', nn.Linear(self.embed_dim, hidden_dim)), + ('act', act), + ])) + else: + self.pre_logits = nn.Identity() + + # Classifier Head + self.head_drop = nn.Dropout(p=self.head_dropout) + if isinstance(self.num_classes, (list, )) and len(self.num_classes) > 1: + for a, i in enumerate(range(len(self.num_classes))): + setattr(self, "head%d" % a, nn.Linear(self.embed_dim, self.num_classes[i])) + else: + self.head = nn.Linear(self.embed_dim, + self.num_classes) if self.num_classes > 0 else nn.Identity() + + # Initialize weights + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + if self.cfg.VIT.POS_EMBED == "joint": + return {'pos_embed', 'cls_token', 'st_embed'} + else: + return {'pos_embed', 'cls_token', 'temp_embed'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = (nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()) + + def forward_features(self, x): + # if self.video_input: + # x = x[0] + B = x.shape[0] + + # Tokenize input + # if self.cfg.VIT.PATCH_SIZE_TEMP > 1: + # for simplicity of mapping between content dimensions (input x) and token dims (after patching) + # we use the same trick as for AST (see modeling_ast.ASTModel.forward for the details): + + # apply patching on input + x = self.patch_embed_3d(x) + tok_mask = None + + # else: + # tok_mask = None + # # 2D tokenization + # if self.video_input: + # x = x.permute(0, 2, 1, 3, 4) + # (B, T, C, H, W) = x.shape + # x = x.reshape(B * T, C, H, W) + + # x = self.patch_embed(x) + + # if self.video_input: + # (B2, T2, D2) = x.shape + # x = x.reshape(B, T * T2, D2) + + # Append CLS token + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + # if tok_mask is not None: + # # prepend 1(=keep) to the mask to account for the CLS token as well + # tok_mask = torch.cat((torch.ones_like(tok_mask[:, [0]]), tok_mask), dim=1) + + # Interpolate positinoal embeddings + # if self.cfg.DATA.TRAIN_CROP_SIZE != 224: + # pos_embed = self.pos_embed + # N = pos_embed.shape[1] - 1 + # npatch = int((x.size(1) - 1) / self.temporal_resolution) + # class_emb = pos_embed[:, 0] + # pos_embed = pos_embed[:, 1:] + # dim = x.shape[-1] + # pos_embed = torch.nn.functional.interpolate( + # pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2), + # scale_factor=math.sqrt(npatch / N), + # mode='bicubic', + # ) + # pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + # new_pos_embed = torch.cat((class_emb.unsqueeze(0), pos_embed), dim=1) + # else: + new_pos_embed = self.pos_embed + npatch = self.patch_embed.num_patches + + # Add positional embeddings to input + if self.video_input: + if self.cfg.VIT.POS_EMBED == "separate": + cls_embed = self.pos_embed[:, 
0, :].unsqueeze(1) + tile_pos_embed = new_pos_embed[:, 1:, :].repeat(1, self.temporal_resolution, 1) + tile_temporal_embed = self.temp_embed.repeat_interleave(npatch, 1) + total_pos_embed = tile_pos_embed + tile_temporal_embed + total_pos_embed = torch.cat([cls_embed, total_pos_embed], dim=1) + x = x + total_pos_embed + elif self.cfg.VIT.POS_EMBED == "joint": + x = x + self.st_embed + else: + # image input + x = x + new_pos_embed + + # Apply positional dropout + x = self.pos_drop(x) + + # Encoding using transformer layers + for i, blk in enumerate(self.blocks): + x = blk(x, + seq_len=npatch, + num_frames=self.temporal_resolution, + approx=self.cfg.VIT.APPROX_ATTN_TYPE, + num_landmarks=self.cfg.VIT.APPROX_ATTN_DIM, + tok_mask=tok_mask) + + ### v-iashin: I moved it to the forward pass + # x = self.norm(x)[:, 0] + # x = self.pre_logits(x) + ### + return x, tok_mask + + # def forward(self, x): + # x = self.forward_features(x) + # ### v-iashin: here. This should leave the same forward output as before + # x = self.norm(x)[:, 0] + # x = self.pre_logits(x) + # ### + # x = self.head_drop(x) + # if isinstance(self.num_classes, (list, )) and len(self.num_classes) > 1: + # output = [] + # for head in range(len(self.num_classes)): + # x_out = getattr(self, "head%d" % head)(x) + # if not self.training: + # x_out = torch.nn.functional.softmax(x_out, dim=-1) + # output.append(x_out) + # return output + # else: + # x = self.head(x) + # if not self.training: + # x = torch.nn.functional.softmax(x, dim=-1) + # return x diff --git a/mmaudio/ext/synchformer/vit_helper.py b/mmaudio/ext/synchformer/vit_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..6af730a135bf49240ec439c81c9ad0aa5c9a505e --- /dev/null +++ b/mmaudio/ext/synchformer/vit_helper.py @@ -0,0 +1,399 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+# Copyright 2020 Ross Wightman +# Modified Model definition +"""Video models.""" + +import math + +import torch +import torch.nn as nn +from einops import rearrange, repeat +from timm.layers import to_2tuple +from torch import einsum +from torch.nn import functional as F + +default_cfgs = { + 'vit_1k': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth', + 'vit_1k_large': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth', +} + + +def qkv_attn(q, k, v, tok_mask: torch.Tensor = None): + sim = einsum('b i d, b j d -> b i j', q, k) + # apply masking if provided, tok_mask is (B*S*H, N): 1s - keep; sim is (B*S*H, H, N, N) + if tok_mask is not None: + BSH, N = tok_mask.shape + sim = sim.masked_fill(tok_mask.view(BSH, 1, N) == 0, + float('-inf')) # 1 - broadcasts across N + attn = sim.softmax(dim=-1) + out = einsum('b i j, b j d -> b i d', attn, v) + return out + + +class DividedAttention(nn.Module): + + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim**-0.5 + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + # init to zeros + self.qkv.weight.data.fill_(0) + self.qkv.bias.data.fill_(0) + self.proj.weight.data.fill_(1) + self.proj.bias.data.fill_(0) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, einops_from, einops_to, tok_mask: torch.Tensor = None, **einops_dims): + # num of heads variable + h = self.num_heads + + # project x to q, k, v vaalues + q, k, v = self.qkv(x).chunk(3, dim=-1) + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + if tok_mask is not None: + # replicate token mask across heads (b, n) -> (b, h, n) -> (b*h, n) -- same as qkv but w/o d + assert len(tok_mask.shape) == 2 + tok_mask = tok_mask.unsqueeze(1).expand(-1, h, -1).reshape(-1, tok_mask.shape[1]) + + # Scale q + q *= self.scale + + # Take out cls_q, cls_k, cls_v + (cls_q, q_), (cls_k, k_), (cls_v, v_) = map(lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v)) + # the same for masking + if tok_mask is not None: + cls_mask, mask_ = tok_mask[:, 0:1], tok_mask[:, 1:] + else: + cls_mask, mask_ = None, None + + # let CLS token attend to key / values of all patches across time and space + cls_out = qkv_attn(cls_q, k, v, tok_mask=tok_mask) + + # rearrange across time or space + q_, k_, v_ = map(lambda t: rearrange(t, f'{einops_from} -> {einops_to}', **einops_dims), + (q_, k_, v_)) + + # expand CLS token keys and values across time or space and concat + r = q_.shape[0] // cls_k.shape[0] + cls_k, cls_v = map(lambda t: repeat(t, 'b () d -> (b r) () d', r=r), (cls_k, cls_v)) + + k_ = torch.cat((cls_k, k_), dim=1) + v_ = torch.cat((cls_v, v_), dim=1) + + # the same for masking (if provided) + if tok_mask is not None: + # since mask does not have the latent dim (d), we need to remove it from einops dims + mask_ = rearrange(mask_, f'{einops_from} -> {einops_to}'.replace(' d', ''), + **einops_dims) + cls_mask = repeat(cls_mask, 'b () -> (b r) ()', + r=r) # expand cls_mask across time or space + mask_ = torch.cat((cls_mask, mask_), dim=1) + + # attention + out = qkv_attn(q_, k_, v_, tok_mask=mask_) + + # merge back time or space + out = rearrange(out, f'{einops_to} -> {einops_from}', **einops_dims) + + # concat back the cls token + out = torch.cat((cls_out, 
out), dim=1) + + # merge back the heads + out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + + ## to out + x = self.proj(out) + x = self.proj_drop(x) + return x + + +class DividedSpaceTimeBlock(nn.Module): + + def __init__(self, + dim=768, + num_heads=12, + attn_type='divided', + mlp_ratio=4., + qkv_bias=False, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm): + super().__init__() + + self.einops_from_space = 'b (f n) d' + self.einops_to_space = '(b f) n d' + self.einops_from_time = 'b (f n) d' + self.einops_to_time = '(b n) f d' + + self.norm1 = norm_layer(dim) + + self.attn = DividedAttention(dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop) + + self.timeattn = DividedAttention(dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop) + + # self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.drop_path = nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + self.norm3 = norm_layer(dim) + + def forward(self, + x, + seq_len=196, + num_frames=8, + approx='none', + num_landmarks=128, + tok_mask: torch.Tensor = None): + time_output = self.timeattn(self.norm3(x), + self.einops_from_time, + self.einops_to_time, + n=seq_len, + tok_mask=tok_mask) + time_residual = x + time_output + + space_output = self.attn(self.norm1(time_residual), + self.einops_from_space, + self.einops_to_space, + f=num_frames, + tok_mask=tok_mask) + space_residual = time_residual + self.drop_path(space_output) + + x = space_residual + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Mlp(nn.Module): + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = img_size if type(img_size) is tuple else to_2tuple(img_size) + patch_size = img_size if type(patch_size) is tuple else to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class PatchEmbed3D(nn.Module): + """ Image to Patch Embedding """ + + def __init__(self, + img_size=224, + temporal_resolution=4, + in_chans=3, + patch_size=16, + z_block_size=2, + embed_dim=768, + flatten=True): + super().__init__() + self.height = (img_size // patch_size) + self.width = (img_size // patch_size) + ### v-iashin: these two are incorrect + # self.frames = (temporal_resolution // z_block_size) + # self.num_patches = self.height * self.width * self.frames + self.z_block_size = z_block_size + ### + self.proj = 
nn.Conv3d(in_chans, + embed_dim, + kernel_size=(z_block_size, patch_size, patch_size), + stride=(z_block_size, patch_size, patch_size)) + self.flatten = flatten + + def forward(self, x): + B, C, T, H, W = x.shape + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) + return x + + +class HeadMLP(nn.Module): + + def __init__(self, n_input, n_classes, n_hidden=512, p=0.1): + super(HeadMLP, self).__init__() + self.n_input = n_input + self.n_classes = n_classes + self.n_hidden = n_hidden + if n_hidden is None: + # use linear classifier + self.block_forward = nn.Sequential(nn.Dropout(p=p), + nn.Linear(n_input, n_classes, bias=True)) + else: + # use simple MLP classifier + self.block_forward = nn.Sequential(nn.Dropout(p=p), + nn.Linear(n_input, n_hidden, bias=True), + nn.BatchNorm1d(n_hidden), nn.ReLU(inplace=True), + nn.Dropout(p=p), + nn.Linear(n_hidden, n_classes, bias=True)) + print(f"Dropout-NLP: {p}") + + def forward(self, x): + return self.block_forward(x) + + +def _conv_filter(state_dict, patch_size=16): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + for k, v in state_dict.items(): + if 'patch_embed.proj.weight' in k: + v = v.reshape((v.shape[0], 3, patch_size, patch_size)) + out_dict[k] = v + return out_dict + + +def adapt_input_conv(in_chans, conv_weight, agg='sum'): + conv_type = conv_weight.dtype + conv_weight = conv_weight.float() + O, I, J, K = conv_weight.shape + if in_chans == 1: + if I > 3: + assert conv_weight.shape[1] % 3 == 0 + # For models with space2depth stems + conv_weight = conv_weight.reshape(O, I // 3, 3, J, K) + conv_weight = conv_weight.sum(dim=2, keepdim=False) + else: + if agg == 'sum': + print("Summing conv1 weights") + conv_weight = conv_weight.sum(dim=1, keepdim=True) + else: + print("Averaging conv1 weights") + conv_weight = conv_weight.mean(dim=1, keepdim=True) + elif in_chans != 3: + if I != 3: + raise NotImplementedError('Weight format not supported by conversion.') + else: + if agg == 'sum': + print("Summing conv1 weights") + repeat = int(math.ceil(in_chans / 3)) + conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :] + conv_weight *= (3 / float(in_chans)) + else: + print("Averaging conv1 weights") + conv_weight = conv_weight.mean(dim=1, keepdim=True) + conv_weight = conv_weight.repeat(1, in_chans, 1, 1) + conv_weight = conv_weight.to(conv_type) + return conv_weight + + +def load_pretrained(model, + cfg=None, + num_classes=1000, + in_chans=3, + filter_fn=None, + strict=True, + progress=False): + # Load state dict + assert (f"{cfg.VIT.PRETRAINED_WEIGHTS} not in [vit_1k, vit_1k_large]") + state_dict = torch.hub.load_state_dict_from_url(url=default_cfgs[cfg.VIT.PRETRAINED_WEIGHTS]) + + if filter_fn is not None: + state_dict = filter_fn(state_dict) + + input_convs = 'patch_embed.proj' + if input_convs is not None and in_chans != 3: + if isinstance(input_convs, str): + input_convs = (input_convs, ) + for input_conv_name in input_convs: + weight_name = input_conv_name + '.weight' + try: + state_dict[weight_name] = adapt_input_conv(in_chans, + state_dict[weight_name], + agg='avg') + print( + f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)' + ) + except NotImplementedError as e: + del state_dict[weight_name] + strict = False + print( + f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.' 
+ ) + + classifier_name = 'head' + label_offset = cfg.get('label_offset', 0) + pretrain_classes = 1000 + if num_classes != pretrain_classes: + # completely discard fully connected if model num_classes doesn't match pretrained weights + del state_dict[classifier_name + '.weight'] + del state_dict[classifier_name + '.bias'] + strict = False + elif label_offset > 0: + # special case for pretrained weights with an extra background class in pretrained weights + classifier_weight = state_dict[classifier_name + '.weight'] + state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:] + classifier_bias = state_dict[classifier_name + '.bias'] + state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:] + + loaded_state = state_dict + self_state = model.state_dict() + all_names = set(self_state.keys()) + saved_names = set([]) + for name, param in loaded_state.items(): + param = param + if 'module.' in name: + name = name.replace('module.', '') + if name in self_state.keys() and param.shape == self_state[name].shape: + saved_names.add(name) + self_state[name].copy_(param) + else: + print(f"didnt load: {name} of shape: {param.shape}") + print("Missing Keys:") + print(all_names - saved_names) diff --git a/mmaudio/model/__init__.py b/mmaudio/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mmaudio/model/embeddings.py b/mmaudio/model/embeddings.py new file mode 100644 index 0000000000000000000000000000000000000000..297feb4d2c79d306771f5436dbd4ada1a976b3bc --- /dev/null +++ b/mmaudio/model/embeddings.py @@ -0,0 +1,49 @@ +import torch +import torch.nn as nn + +# https://github.com/facebookresearch/DiT + + +class TimestepEmbedder(nn.Module): + """ + Embeds scalar timesteps into vector representations. + """ + + def __init__(self, dim, frequency_embedding_size, max_period): + super().__init__() + self.mlp = nn.Sequential( + nn.Linear(frequency_embedding_size, dim), + nn.SiLU(), + nn.Linear(dim, dim), + ) + self.dim = dim + self.max_period = max_period + assert dim % 2 == 0, 'dim must be even.' + + with torch.autocast('cuda', enabled=False): + self.freqs = nn.Buffer( + 1.0 / (10000**(torch.arange(0, frequency_embedding_size, 2, dtype=torch.float32) / + frequency_embedding_size)), + persistent=False) + freq_scale = 10000 / max_period + self.freqs = freq_scale * self.freqs + + def timestep_embedding(self, t): + """ + Create sinusoidal timestep embeddings. + :param t: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an (N, D) Tensor of positional embeddings. 
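+        Concretely, this returns [cos(t * f), sin(t * f)] over the half-size frequency bank
+        precomputed in __init__, i.e. f_k = (10000 / max_period) * 10000^(-2k / frequency_embedding_size),
+        giving an (N, frequency_embedding_size) tensor that the MLP then projects to `dim`.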
+ """ + # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py + + args = t[:, None].float() * self.freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + return embedding + + def forward(self, t): + t_freq = self.timestep_embedding(t).to(t.dtype) + t_emb = self.mlp(t_freq) + return t_emb diff --git a/mmaudio/model/flow_matching.py b/mmaudio/model/flow_matching.py new file mode 100644 index 0000000000000000000000000000000000000000..a04510ab888c0c3c3398360f97b8b7e3c55998ad --- /dev/null +++ b/mmaudio/model/flow_matching.py @@ -0,0 +1,88 @@ +import logging +from typing import Callable, Iterable, Optional + +import torch +from torchdiffeq import odeint + +# from torchcfm.conditional_flow_matching import ExactOptimalTransportConditionalFlowMatcher + +log = logging.getLogger() + + +# Partially from https://github.com/gle-bellier/flow-matching +class FlowMatching: + + def __init__(self, min_sigma: float = 0.0, inference_mode='euler', num_steps: int = 25): + # inference_mode: 'euler' or 'adaptive' + # num_steps: number of steps in the euler inference mode + super().__init__() + self.min_sigma = min_sigma + self.inference_mode = inference_mode + self.num_steps = num_steps + + # self.fm = ExactOptimalTransportConditionalFlowMatcher(sigma=min_sigma) + + assert self.inference_mode in ['euler', 'adaptive'] + if self.inference_mode == 'adaptive' and num_steps > 0: + log.info('The number of steps is ignored in adaptive inference mode ') + + def get_conditional_flow(self, x0: torch.Tensor, x1: torch.Tensor, + t: torch.Tensor) -> torch.Tensor: + # which is psi_t(x), eq 22 in flow matching for generative models + t = t[:, None, None].expand_as(x0) + return (1 - (1 - self.min_sigma) * t) * x0 + t * x1 + + def loss(self, predicted_v: torch.Tensor, x0: torch.Tensor, x1: torch.Tensor) -> torch.Tensor: + # return the mean error without reducing the batch dimension + reduce_dim = list(range(1, len(predicted_v.shape))) + target_v = x1 - (1 - self.min_sigma) * x0 + return (predicted_v - target_v).pow(2).mean(dim=reduce_dim) + + def get_x0_xt_c( + self, + x1: torch.Tensor, + t: torch.Tensor, + Cs: list[torch.Tensor], + generator: Optional[torch.Generator] = None + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + # x0 = torch.randn_like(x1, generator=generator) + x0 = torch.empty_like(x1).normal_(generator=generator) + + # find mini-batch optimal transport + # x0, x1, _, Cs = self.fm.ot_sampler.sample_plan_with_labels(x0, x1, None, Cs, replace=True) + + xt = self.get_conditional_flow(x0, x1, t) + return x0, x1, xt, Cs + + def to_prior(self, fn: Callable, x1: torch.Tensor) -> torch.Tensor: + return self.run_t0_to_t1(fn, x1, 1, 0) + + def to_data(self, fn: Callable, x0: torch.Tensor) -> torch.Tensor: + return self.run_t0_to_t1(fn, x0, 0, 1) + + def run_t0_to_t1(self, fn: Callable, x0: torch.Tensor, t0: float, t1: float) -> torch.Tensor: + # fn: a function that takes (t, x) and returns the direction x0->x1 + + if self.inference_mode == 'adaptive': + return odeint(fn, x0, torch.tensor([t0, t1], device=x0.device, dtype=x0.dtype)) + elif self.inference_mode == 'euler': + x = x0 + steps = torch.linspace(t0, t1 - self.min_sigma, self.num_steps + 1) + for ti, t in enumerate(steps[:-1]): + flow = fn(t, x) + next_t = steps[ti + 1] + dt = next_t - t + x = x + dt * flow + + # return odeint(fn, + # x0, + # torch.tensor([t0, t1], device=x0.device, dtype=x0.dtype), + # method='rk4', + # options=dict(step_size=(t1 - t0) / self.num_steps))[-1] + # return odeint(fn, + # 
x0, + # torch.tensor([t0, t1], device=x0.device, dtype=x0.dtype), + # method='euler', + # options=dict(step_size=(t1 - t0) / self.num_steps))[-1] + + return x diff --git a/mmaudio/model/low_level.py b/mmaudio/model/low_level.py new file mode 100644 index 0000000000000000000000000000000000000000..c8326a8bec99f1be08b92e76fda4b59e777b39d2 --- /dev/null +++ b/mmaudio/model/low_level.py @@ -0,0 +1,95 @@ +import torch +from torch import nn +from torch.nn import functional as F + + +class ChannelLastConv1d(nn.Conv1d): + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = x.permute(0, 2, 1) + x = super().forward(x) + x = x.permute(0, 2, 1) + return x + + +# https://github.com/Stability-AI/sd3-ref +class MLP(nn.Module): + + def __init__( + self, + dim: int, + hidden_dim: int, + multiple_of: int = 256, + ): + """ + Initialize the FeedForward module. + + Args: + dim (int): Input dimension. + hidden_dim (int): Hidden dimension of the feedforward layer. + multiple_of (int): Value to ensure hidden dimension is a multiple of this value. + + Attributes: + w1 (ColumnParallelLinear): Linear transformation for the first layer. + w2 (RowParallelLinear): Linear transformation for the second layer. + w3 (ColumnParallelLinear): Linear transformation for the third layer. + + """ + super().__init__() + hidden_dim = int(2 * hidden_dim / 3) + hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) + + self.w1 = nn.Linear(dim, hidden_dim, bias=False) + self.w2 = nn.Linear(hidden_dim, dim, bias=False) + self.w3 = nn.Linear(dim, hidden_dim, bias=False) + + def forward(self, x): + return self.w2(F.silu(self.w1(x)) * self.w3(x)) + + +class ConvMLP(nn.Module): + + def __init__( + self, + dim: int, + hidden_dim: int, + multiple_of: int = 256, + kernel_size: int = 3, + padding: int = 1, + ): + """ + Initialize the FeedForward module. + + Args: + dim (int): Input dimension. + hidden_dim (int): Hidden dimension of the feedforward layer. + multiple_of (int): Value to ensure hidden dimension is a multiple of this value. + + Attributes: + w1 (ColumnParallelLinear): Linear transformation for the first layer. + w2 (RowParallelLinear): Linear transformation for the second layer. + w3 (ColumnParallelLinear): Linear transformation for the third layer. 
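+            Note: kernel_size and padding are forwarded to the ChannelLastConv1d layers that implement w1, w2 and w3.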
+ + """ + super().__init__() + hidden_dim = int(2 * hidden_dim / 3) + hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) + + self.w1 = ChannelLastConv1d(dim, + hidden_dim, + bias=False, + kernel_size=kernel_size, + padding=padding) + self.w2 = ChannelLastConv1d(hidden_dim, + dim, + bias=False, + kernel_size=kernel_size, + padding=padding) + self.w3 = ChannelLastConv1d(dim, + hidden_dim, + bias=False, + kernel_size=kernel_size, + padding=padding) + + def forward(self, x): + return self.w2(F.silu(self.w1(x)) * self.w3(x)) diff --git a/mmaudio/model/networks.py b/mmaudio/model/networks.py new file mode 100644 index 0000000000000000000000000000000000000000..f378585402daef128ba92df2bc62756ada8f798d --- /dev/null +++ b/mmaudio/model/networks.py @@ -0,0 +1,469 @@ +import logging +from dataclasses import dataclass +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmaudio.ext.rotary_embeddings import compute_rope_rotations +from mmaudio.model.embeddings import TimestepEmbedder +from mmaudio.model.low_level import MLP, ChannelLastConv1d, ConvMLP +from mmaudio.model.transformer_layers import (FinalBlock, JointBlock, MMDitSingleBlock) + +log = logging.getLogger() + + +@dataclass +class PreprocessedConditions: + clip_f: torch.Tensor + sync_f: torch.Tensor + text_f: torch.Tensor + clip_f_c: torch.Tensor + text_f_c: torch.Tensor + + +# Partially from https://github.com/facebookresearch/DiT +class MMAudio(nn.Module): + + def __init__(self, + *, + latent_dim: int, + clip_dim: int, + sync_dim: int, + text_dim: int, + hidden_dim: int, + depth: int, + fused_depth: int, + num_heads: int, + mlp_ratio: float = 4.0, + latent_seq_len: int, + clip_seq_len: int, + sync_seq_len: int, + text_seq_len: int = 77, + latent_mean: Optional[torch.Tensor] = None, + latent_std: Optional[torch.Tensor] = None, + empty_string_feat: Optional[torch.Tensor] = None, + v2: bool = False) -> None: + super().__init__() + + self.v2 = v2 + self.latent_dim = latent_dim + self._latent_seq_len = latent_seq_len + self._clip_seq_len = clip_seq_len + self._sync_seq_len = sync_seq_len + self._text_seq_len = text_seq_len + self.hidden_dim = hidden_dim + self.num_heads = num_heads + + if v2: + self.audio_input_proj = nn.Sequential( + ChannelLastConv1d(latent_dim, hidden_dim, kernel_size=7, padding=3), + nn.SiLU(), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=7, padding=3), + ) + + self.clip_input_proj = nn.Sequential( + nn.Linear(clip_dim, hidden_dim), + nn.SiLU(), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=3, padding=1), + ) + + self.sync_input_proj = nn.Sequential( + ChannelLastConv1d(sync_dim, hidden_dim, kernel_size=7, padding=3), + nn.SiLU(), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=3, padding=1), + ) + + self.text_input_proj = nn.Sequential( + nn.Linear(text_dim, hidden_dim), + nn.SiLU(), + MLP(hidden_dim, hidden_dim * 4), + ) + else: + self.audio_input_proj = nn.Sequential( + ChannelLastConv1d(latent_dim, hidden_dim, kernel_size=7, padding=3), + nn.SELU(), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=7, padding=3), + ) + + self.clip_input_proj = nn.Sequential( + nn.Linear(clip_dim, hidden_dim), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=3, padding=1), + ) + + self.sync_input_proj = nn.Sequential( + ChannelLastConv1d(sync_dim, hidden_dim, kernel_size=7, padding=3), + nn.SELU(), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=3, padding=1), + ) + + self.text_input_proj = nn.Sequential( + nn.Linear(text_dim, hidden_dim), + 
MLP(hidden_dim, hidden_dim * 4), + ) + + self.clip_cond_proj = nn.Linear(hidden_dim, hidden_dim) + self.text_cond_proj = nn.Linear(hidden_dim, hidden_dim) + self.global_cond_mlp = MLP(hidden_dim, hidden_dim * 4) + # each synchformer output segment has 8 feature frames + self.sync_pos_emb = nn.Parameter(torch.zeros((1, 1, 8, sync_dim))) + + self.final_layer = FinalBlock(hidden_dim, latent_dim) + + if v2: + self.t_embed = TimestepEmbedder(hidden_dim, + frequency_embedding_size=hidden_dim, + max_period=1) + else: + self.t_embed = TimestepEmbedder(hidden_dim, + frequency_embedding_size=256, + max_period=10000) + self.joint_blocks = nn.ModuleList([ + JointBlock(hidden_dim, + num_heads, + mlp_ratio=mlp_ratio, + pre_only=(i == depth - fused_depth - 1)) for i in range(depth - fused_depth) + ]) + + self.fused_blocks = nn.ModuleList([ + MMDitSingleBlock(hidden_dim, num_heads, mlp_ratio=mlp_ratio, kernel_size=3, padding=1) + for i in range(fused_depth) + ]) + + if latent_mean is None: + # these values are not meant to be used + # if you don't provide mean/std here, we should load them later from a checkpoint + assert latent_std is None + latent_mean = torch.ones(latent_dim).view(1, 1, -1).fill_(float('nan')) + latent_std = torch.ones(latent_dim).view(1, 1, -1).fill_(float('nan')) + else: + assert latent_std is not None + assert latent_mean.numel() == latent_dim, f'{latent_mean.numel()=} != {latent_dim=}' + if empty_string_feat is None: + empty_string_feat = torch.zeros((text_seq_len, text_dim)) + self.latent_mean = nn.Parameter(latent_mean.view(1, 1, -1), requires_grad=False) + self.latent_std = nn.Parameter(latent_std.view(1, 1, -1), requires_grad=False) + + self.empty_string_feat = nn.Parameter(empty_string_feat, requires_grad=False) + self.empty_clip_feat = nn.Parameter(torch.zeros(1, clip_dim), requires_grad=True) + self.empty_sync_feat = nn.Parameter(torch.zeros(1, sync_dim), requires_grad=True) + + self.initialize_weights() + self.initialize_rotations() + + def initialize_rotations(self): + base_freq = 1.0 + latent_rot = compute_rope_rotations(self._latent_seq_len, + self.hidden_dim // self.num_heads, + 10000, + freq_scaling=base_freq, + device=self.device) + clip_rot = compute_rope_rotations(self._clip_seq_len, + self.hidden_dim // self.num_heads, + 10000, + freq_scaling=base_freq * self._latent_seq_len / + self._clip_seq_len, + device=self.device) + + self.latent_rot = nn.Buffer(latent_rot, persistent=False) + self.clip_rot = nn.Buffer(clip_rot, persistent=False) + + def update_seq_lengths(self, latent_seq_len: int, clip_seq_len: int, sync_seq_len: int) -> None: + self._latent_seq_len = latent_seq_len + self._clip_seq_len = clip_seq_len + self._sync_seq_len = sync_seq_len + self.initialize_rotations() + + def initialize_weights(self): + + def _basic_init(module): + if isinstance(module, nn.Linear): + torch.nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + + self.apply(_basic_init) + + # Initialize timestep embedding MLP: + nn.init.normal_(self.t_embed.mlp[0].weight, std=0.02) + nn.init.normal_(self.t_embed.mlp[2].weight, std=0.02) + + # Zero-out adaLN modulation layers in DiT blocks: + for block in self.joint_blocks: + nn.init.constant_(block.latent_block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.latent_block.adaLN_modulation[-1].bias, 0) + nn.init.constant_(block.clip_block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.clip_block.adaLN_modulation[-1].bias, 0) + 
nn.init.constant_(block.text_block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.text_block.adaLN_modulation[-1].bias, 0) + for block in self.fused_blocks: + nn.init.constant_(block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.adaLN_modulation[-1].bias, 0) + + # Zero-out output layers: + nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0) + nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0) + nn.init.constant_(self.final_layer.conv.weight, 0) + nn.init.constant_(self.final_layer.conv.bias, 0) + + # empty string feat shall be initialized by a CLIP encoder + nn.init.constant_(self.sync_pos_emb, 0) + nn.init.constant_(self.empty_clip_feat, 0) + nn.init.constant_(self.empty_sync_feat, 0) + + def normalize(self, x: torch.Tensor) -> torch.Tensor: + # return (x - self.latent_mean) / self.latent_std + return x.sub_(self.latent_mean).div_(self.latent_std) + + def unnormalize(self, x: torch.Tensor) -> torch.Tensor: + # return x * self.latent_std + self.latent_mean + return x.mul_(self.latent_std).add_(self.latent_mean) + + def preprocess_conditions(self, clip_f: torch.Tensor, sync_f: torch.Tensor, + text_f: torch.Tensor) -> PreprocessedConditions: + """ + cache computations that do not depend on the latent/time step + i.e., the features are reused over steps during inference + """ + assert clip_f.shape[1] == self._clip_seq_len, f'{clip_f.shape=} {self._clip_seq_len=}' + assert sync_f.shape[1] == self._sync_seq_len, f'{sync_f.shape=} {self._sync_seq_len=}' + assert text_f.shape[1] == self._text_seq_len, f'{text_f.shape=} {self._text_seq_len=}' + + bs = clip_f.shape[0] + + # B * num_segments (24) * 8 * 768 + num_sync_segments = self._sync_seq_len // 8 + sync_f = sync_f.view(bs, num_sync_segments, 8, -1) + self.sync_pos_emb + sync_f = sync_f.flatten(1, 2) # (B, VN, D) + + # extend vf to match x + clip_f = self.clip_input_proj(clip_f) # (B, VN, D) + sync_f = self.sync_input_proj(sync_f) # (B, VN, D) + text_f = self.text_input_proj(text_f) # (B, VN, D) + + # upsample the sync features to match the audio + sync_f = sync_f.transpose(1, 2) # (B, D, VN) + sync_f = F.interpolate(sync_f, size=self._latent_seq_len, mode='nearest-exact') + sync_f = sync_f.transpose(1, 2) # (B, N, D) + + # get conditional features from the clip side + clip_f_c = self.clip_cond_proj(clip_f.mean(dim=1)) # (B, D) + text_f_c = self.text_cond_proj(text_f.mean(dim=1)) # (B, D) + + return PreprocessedConditions(clip_f=clip_f, + sync_f=sync_f, + text_f=text_f, + clip_f_c=clip_f_c, + text_f_c=text_f_c) + + def predict_flow(self, latent: torch.Tensor, t: torch.Tensor, + conditions: PreprocessedConditions) -> torch.Tensor: + """ + for non-cacheable computations + """ + assert latent.shape[1] == self._latent_seq_len, f'{latent.shape=} {self._latent_seq_len=}' + + clip_f = conditions.clip_f + sync_f = conditions.sync_f + text_f = conditions.text_f + clip_f_c = conditions.clip_f_c + text_f_c = conditions.text_f_c + + latent = self.audio_input_proj(latent) # (B, N, D) + global_c = self.global_cond_mlp(clip_f_c + text_f_c) # (B, D) + + global_c = self.t_embed(t).unsqueeze(1) + global_c.unsqueeze(1) # (B, D) + extended_c = global_c + sync_f + + for block in self.joint_blocks: + latent, clip_f, text_f = block(latent, clip_f, text_f, global_c, extended_c, + self.latent_rot, self.clip_rot) # (B, N, D) + + for block in self.fused_blocks: + latent = block(latent, extended_c, self.latent_rot) + + flow = self.final_layer(latent, global_c) # (B, N, out_dim), remove t + return flow + + def 
forward(self, latent: torch.Tensor, clip_f: torch.Tensor, sync_f: torch.Tensor, + text_f: torch.Tensor, t: torch.Tensor) -> torch.Tensor: + """ + latent: (B, N, C) + vf: (B, T, C_V) + t: (B,) + """ + conditions = self.preprocess_conditions(clip_f, sync_f, text_f) + flow = self.predict_flow(latent, t, conditions) + return flow + + def get_empty_string_sequence(self, bs: int) -> torch.Tensor: + return self.empty_string_feat.unsqueeze(0).expand(bs, -1, -1) + + def get_empty_clip_sequence(self, bs: int) -> torch.Tensor: + return self.empty_clip_feat.unsqueeze(0).expand(bs, self._clip_seq_len, -1) + + def get_empty_sync_sequence(self, bs: int) -> torch.Tensor: + return self.empty_sync_feat.unsqueeze(0).expand(bs, self._sync_seq_len, -1) + + def get_empty_conditions( + self, + bs: int, + *, + negative_text_features: Optional[torch.Tensor] = None) -> PreprocessedConditions: + if negative_text_features is not None: + empty_text = negative_text_features + else: + empty_text = self.get_empty_string_sequence(1) + + empty_clip = self.get_empty_clip_sequence(1) + empty_sync = self.get_empty_sync_sequence(1) + conditions = self.preprocess_conditions(empty_clip, empty_sync, empty_text) + conditions.clip_f = conditions.clip_f.expand(bs, -1, -1) + conditions.sync_f = conditions.sync_f.expand(bs, -1, -1) + conditions.clip_f_c = conditions.clip_f_c.expand(bs, -1) + if negative_text_features is None: + conditions.text_f = conditions.text_f.expand(bs, -1, -1) + conditions.text_f_c = conditions.text_f_c.expand(bs, -1) + + return conditions + + def ode_wrapper(self, t: torch.Tensor, latent: torch.Tensor, conditions: PreprocessedConditions, + empty_conditions: PreprocessedConditions, cfg_strength: float) -> torch.Tensor: + t = t * torch.ones(len(latent), device=latent.device, dtype=latent.dtype) + + if cfg_strength < 1.0: + return self.predict_flow(latent, t, conditions) + else: + return (cfg_strength * self.predict_flow(latent, t, conditions) + + (1 - cfg_strength) * self.predict_flow(latent, t, empty_conditions)) + + def load_weights(self, src_dict) -> None: + if 't_embed.freqs' in src_dict: + del src_dict['t_embed.freqs'] + if 'latent_rot' in src_dict: + del src_dict['latent_rot'] + if 'clip_rot' in src_dict: + del src_dict['clip_rot'] + + self.load_state_dict(src_dict, strict=True) + + @property + def device(self) -> torch.device: + return self.latent_mean.device + + @property + def latent_seq_len(self) -> int: + return self._latent_seq_len + + @property + def clip_seq_len(self) -> int: + return self._clip_seq_len + + @property + def sync_seq_len(self) -> int: + return self._sync_seq_len + + +def small_16k(**kwargs) -> MMAudio: + num_heads = 7 + return MMAudio(latent_dim=20, + clip_dim=1024, + sync_dim=768, + text_dim=1024, + hidden_dim=64 * num_heads, + depth=12, + fused_depth=8, + num_heads=num_heads, + latent_seq_len=250, + clip_seq_len=64, + sync_seq_len=192, + **kwargs) + + +def small_44k(**kwargs) -> MMAudio: + num_heads = 7 + return MMAudio(latent_dim=40, + clip_dim=1024, + sync_dim=768, + text_dim=1024, + hidden_dim=64 * num_heads, + depth=12, + fused_depth=8, + num_heads=num_heads, + latent_seq_len=345, + clip_seq_len=64, + sync_seq_len=192, + **kwargs) + + +def medium_44k(**kwargs) -> MMAudio: + num_heads = 14 + return MMAudio(latent_dim=40, + clip_dim=1024, + sync_dim=768, + text_dim=1024, + hidden_dim=64 * num_heads, + depth=12, + fused_depth=8, + num_heads=num_heads, + latent_seq_len=345, + clip_seq_len=64, + sync_seq_len=192, + **kwargs) + + +def large_44k(**kwargs) -> MMAudio: + num_heads = 14 
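+    # large config: same 64-dim attention heads as the smaller variants (hidden_dim = 64 * 14 = 896) but a deeper stack (depth=21, fused_depth=14)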
+ return MMAudio(latent_dim=40, + clip_dim=1024, + sync_dim=768, + text_dim=1024, + hidden_dim=64 * num_heads, + depth=21, + fused_depth=14, + num_heads=num_heads, + latent_seq_len=345, + clip_seq_len=64, + sync_seq_len=192, + **kwargs) + + +def large_44k_v2(**kwargs) -> MMAudio: + num_heads = 14 + return MMAudio(latent_dim=40, + clip_dim=1024, + sync_dim=768, + text_dim=1024, + hidden_dim=64 * num_heads, + depth=21, + fused_depth=14, + num_heads=num_heads, + latent_seq_len=345, + clip_seq_len=64, + sync_seq_len=192, + v2=True, + **kwargs) + + +def get_my_mmaudio(name: str, **kwargs) -> MMAudio: + if name == 'small_16k': + return small_16k(**kwargs) + if name == 'small_44k': + return small_44k(**kwargs) + if name == 'medium_44k': + return medium_44k(**kwargs) + if name == 'large_44k': + return large_44k(**kwargs) + if name == 'large_44k_v2': + return large_44k_v2(**kwargs) + + raise ValueError(f'Unknown model name: {name}') + + +if __name__ == '__main__': + network = get_my_mmaudio('small_16k') + + # print the number of parameters in terms of millions + num_params = sum(p.numel() for p in network.parameters()) / 1e6 + print(f'Number of parameters: {num_params:.2f}M') diff --git a/mmaudio/model/sequence_config.py b/mmaudio/model/sequence_config.py new file mode 100644 index 0000000000000000000000000000000000000000..14269014dc401b4751d172466813a935fddda6c1 --- /dev/null +++ b/mmaudio/model/sequence_config.py @@ -0,0 +1,58 @@ +import dataclasses +import math + + +@dataclasses.dataclass +class SequenceConfig: + # general + duration: float + + # audio + sampling_rate: int + spectrogram_frame_rate: int + latent_downsample_rate: int = 2 + + # visual + clip_frame_rate: int = 8 + sync_frame_rate: int = 25 + sync_num_frames_per_segment: int = 16 + sync_step_size: int = 8 + sync_downsample_rate: int = 2 + + @property + def num_audio_frames(self) -> int: + # we need an integer number of latents + return self.latent_seq_len * self.spectrogram_frame_rate * self.latent_downsample_rate + + @property + def latent_seq_len(self) -> int: + return int( + math.ceil(self.duration * self.sampling_rate / self.spectrogram_frame_rate / + self.latent_downsample_rate)) + + @property + def clip_seq_len(self) -> int: + return int(self.duration * self.clip_frame_rate) + + @property + def sync_seq_len(self) -> int: + num_frames = self.duration * self.sync_frame_rate + num_segments = (num_frames - self.sync_num_frames_per_segment) // self.sync_step_size + 1 + return int(num_segments * self.sync_num_frames_per_segment / self.sync_downsample_rate) + + +CONFIG_16K = SequenceConfig(duration=8.0, sampling_rate=16000, spectrogram_frame_rate=256) +CONFIG_44K = SequenceConfig(duration=8.0, sampling_rate=44100, spectrogram_frame_rate=512) + +if __name__ == '__main__': + assert CONFIG_16K.latent_seq_len == 250 + assert CONFIG_16K.clip_seq_len == 64 + assert CONFIG_16K.sync_seq_len == 192 + assert CONFIG_16K.num_audio_frames == 128000 + + assert CONFIG_44K.latent_seq_len == 345 + assert CONFIG_44K.clip_seq_len == 64 + assert CONFIG_44K.sync_seq_len == 192 + assert CONFIG_44K.num_audio_frames == 353280 + + print('Passed') diff --git a/mmaudio/model/transformer_layers.py b/mmaudio/model/transformer_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..3ca02ec3b6c00b9c39624d97d55a211cdd2e427d --- /dev/null +++ b/mmaudio/model/transformer_layers.py @@ -0,0 +1,203 @@ +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from 
einops.layers.torch import Rearrange +from torch.nn.attention import SDPBackend, sdpa_kernel + +from mmaudio.ext.rotary_embeddings import apply_rope +from mmaudio.model.low_level import MLP, ChannelLastConv1d, ConvMLP + + +def modulate(x: torch.Tensor, shift: torch.Tensor, scale: torch.Tensor): + return x * (1 + scale) + shift + + +def attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor): + # training will crash without these contiguous calls and the CUDNN limitation + # I believe this is related to https://github.com/pytorch/pytorch/issues/133974 + # unresolved at the time of writing + q = q.contiguous() + k = k.contiguous() + v = v.contiguous() + out = F.scaled_dot_product_attention(q, k, v) + out = rearrange(out, 'b h n d -> b n (h d)').contiguous() + return out + + +class SelfAttention(nn.Module): + + def __init__(self, dim: int, nheads: int): + super().__init__() + self.dim = dim + self.nheads = nheads + + self.qkv = nn.Linear(dim, dim * 3, bias=True) + self.q_norm = nn.RMSNorm(dim // nheads) + self.k_norm = nn.RMSNorm(dim // nheads) + + self.split_into_heads = Rearrange('b n (h d j) -> b h n d j', + h=nheads, + d=dim // nheads, + j=3) + + def pre_attention( + self, x: torch.Tensor, + rot: Optional[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + # x: batch_size * n_tokens * n_channels + qkv = self.qkv(x) + q, k, v = self.split_into_heads(qkv).chunk(3, dim=-1) + q = q.squeeze(-1) + k = k.squeeze(-1) + v = v.squeeze(-1) + q = self.q_norm(q) + k = self.k_norm(k) + + if rot is not None: + q = apply_rope(q, rot) + k = apply_rope(k, rot) + + return q, k, v + + def forward( + self, + x: torch.Tensor, # batch_size * n_tokens * n_channels + ) -> torch.Tensor: + q, v, k = self.pre_attention(x) + out = attention(q, k, v) + return out + + +class MMDitSingleBlock(nn.Module): + + def __init__(self, + dim: int, + nhead: int, + mlp_ratio: float = 4.0, + pre_only: bool = False, + kernel_size: int = 7, + padding: int = 3): + super().__init__() + self.norm1 = nn.LayerNorm(dim, elementwise_affine=False) + self.attn = SelfAttention(dim, nhead) + + self.pre_only = pre_only + if pre_only: + self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim, 2 * dim, bias=True)) + else: + if kernel_size == 1: + self.linear1 = nn.Linear(dim, dim) + else: + self.linear1 = ChannelLastConv1d(dim, dim, kernel_size=kernel_size, padding=padding) + self.norm2 = nn.LayerNorm(dim, elementwise_affine=False) + + if kernel_size == 1: + self.ffn = MLP(dim, int(dim * mlp_ratio)) + else: + self.ffn = ConvMLP(dim, + int(dim * mlp_ratio), + kernel_size=kernel_size, + padding=padding) + + self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim, 6 * dim, bias=True)) + + def pre_attention(self, x: torch.Tensor, c: torch.Tensor, rot: Optional[torch.Tensor]): + # x: BS * N * D + # cond: BS * D + modulation = self.adaLN_modulation(c) + if self.pre_only: + (shift_msa, scale_msa) = modulation.chunk(2, dim=-1) + gate_msa = shift_mlp = scale_mlp = gate_mlp = None + else: + (shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, + gate_mlp) = modulation.chunk(6, dim=-1) + + x = modulate(self.norm1(x), shift_msa, scale_msa) + q, k, v = self.attn.pre_attention(x, rot) + return (q, k, v), (gate_msa, shift_mlp, scale_mlp, gate_mlp) + + def post_attention(self, x: torch.Tensor, attn_out: torch.Tensor, c: tuple[torch.Tensor]): + if self.pre_only: + return x + + (gate_msa, shift_mlp, scale_mlp, gate_mlp) = c + x = x + self.linear1(attn_out) * gate_msa + r = modulate(self.norm2(x), shift_mlp, scale_mlp) + x = x + 
self.ffn(r) * gate_mlp + + return x + + def forward(self, x: torch.Tensor, cond: torch.Tensor, + rot: Optional[torch.Tensor]) -> torch.Tensor: + # x: BS * N * D + # cond: BS * D + x_qkv, x_conditions = self.pre_attention(x, cond, rot) + attn_out = attention(*x_qkv) + x = self.post_attention(x, attn_out, x_conditions) + + return x + + +class JointBlock(nn.Module): + + def __init__(self, dim: int, nhead: int, mlp_ratio: float = 4.0, pre_only: bool = False): + super().__init__() + self.pre_only = pre_only + self.latent_block = MMDitSingleBlock(dim, + nhead, + mlp_ratio, + pre_only=False, + kernel_size=3, + padding=1) + self.clip_block = MMDitSingleBlock(dim, + nhead, + mlp_ratio, + pre_only=pre_only, + kernel_size=3, + padding=1) + self.text_block = MMDitSingleBlock(dim, nhead, mlp_ratio, pre_only=pre_only, kernel_size=1) + + def forward(self, latent: torch.Tensor, clip_f: torch.Tensor, text_f: torch.Tensor, + global_c: torch.Tensor, extended_c: torch.Tensor, latent_rot: torch.Tensor, + clip_rot: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: + # latent: BS * N1 * D + # clip_f: BS * N2 * D + # c: BS * (1/N) * D + x_qkv, x_mod = self.latent_block.pre_attention(latent, extended_c, latent_rot) + c_qkv, c_mod = self.clip_block.pre_attention(clip_f, global_c, clip_rot) + t_qkv, t_mod = self.text_block.pre_attention(text_f, global_c, rot=None) + + latent_len = latent.shape[1] + clip_len = clip_f.shape[1] + text_len = text_f.shape[1] + + joint_qkv = [torch.cat([x_qkv[i], c_qkv[i], t_qkv[i]], dim=2) for i in range(3)] + + attn_out = attention(*joint_qkv) + x_attn_out = attn_out[:, :latent_len] + c_attn_out = attn_out[:, latent_len:latent_len + clip_len] + t_attn_out = attn_out[:, latent_len + clip_len:] + + latent = self.latent_block.post_attention(latent, x_attn_out, x_mod) + if not self.pre_only: + clip_f = self.clip_block.post_attention(clip_f, c_attn_out, c_mod) + text_f = self.text_block.post_attention(text_f, t_attn_out, t_mod) + + return latent, clip_f, text_f + + +class FinalBlock(nn.Module): + + def __init__(self, dim, out_dim): + super().__init__() + self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim, 2 * dim, bias=True)) + self.norm = nn.LayerNorm(dim, elementwise_affine=False) + self.conv = ChannelLastConv1d(dim, out_dim, kernel_size=7, padding=3) + + def forward(self, latent, c): + shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1) + latent = modulate(self.norm(latent), shift, scale) + latent = self.conv(latent) + return latent diff --git a/mmaudio/model/utils/__init__.py b/mmaudio/model/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mmaudio/model/utils/distributions.py b/mmaudio/model/utils/distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..1d526a5b0b3dd2ae556d806a3397e1cf43c07fb9 --- /dev/null +++ b/mmaudio/model/utils/distributions.py @@ -0,0 +1,46 @@ +from typing import Optional + +import numpy as np +import torch + + +class DiagonalGaussianDistribution: + + def __init__(self, parameters, deterministic=False): + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) + + def sample(self, rng: Optional[torch.Generator] = None): 
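+        # reparameterization: draw eps ~ N(0, I), optionally from the caller-provided generator, then shift/scale by the predicted std/mean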
+ # x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) + + r = torch.empty_like(self.mean).normal_(generator=rng) + x = self.mean + self.std * r + + return x + + def kl(self, other=None): + if self.deterministic: + return torch.Tensor([0.]) + else: + if other is None: + + return 0.5 * torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar + else: + return 0.5 * (torch.pow(self.mean - other.mean, 2) / other.var + + self.var / other.var - 1.0 - self.logvar + other.logvar) + + def nll(self, sample, dims=[1, 2, 3]): + if self.deterministic: + return torch.Tensor([0.]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims) + + def mode(self): + return self.mean diff --git a/mmaudio/model/utils/features_utils.py b/mmaudio/model/utils/features_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c9385798bd5214f48f43804b3a21866765e00916 --- /dev/null +++ b/mmaudio/model/utils/features_utils.py @@ -0,0 +1,162 @@ +from typing import Literal, Optional + +import open_clip +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from open_clip import create_model_from_pretrained +from torchvision.transforms import Normalize + +from mmaudio.ext.autoencoder import AutoEncoderModule +from mmaudio.ext.mel_converter import MelConverter +from mmaudio.ext.synchformer import Synchformer +from mmaudio.model.utils.distributions import DiagonalGaussianDistribution + + +def patch_clip(clip_model): + # a hack to make it output last hidden states + # https://github.com/mlfoundations/open_clip/blob/fc5a37b72d705f760ebbc7915b84729816ed471f/src/open_clip/model.py#L269 + def new_encode_text(self, text, normalize: bool = False): + cast_dtype = self.transformer.get_cast_dtype() + + x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model] + + x = x + self.positional_embedding.to(cast_dtype) + x = self.transformer(x, attn_mask=self.attn_mask) + x = self.ln_final(x) # [batch_size, n_ctx, transformer.width] + return F.normalize(x, dim=-1) if normalize else x + + clip_model.encode_text = new_encode_text.__get__(clip_model) + return clip_model + + +class FeaturesUtils(nn.Module): + + def __init__( + self, + *, + tod_vae_ckpt: Optional[str] = None, + bigvgan_vocoder_ckpt: Optional[str] = None, + synchformer_ckpt: Optional[str] = None, + enable_conditions: bool = True, + mode=Literal['16k', '44k'], + ): + super().__init__() + + if enable_conditions: + self.clip_model = create_model_from_pretrained('hf-hub:apple/DFN5B-CLIP-ViT-H-14-384', + return_transform=False) + self.clip_preprocess = Normalize(mean=[0.48145466, 0.4578275, 0.40821073], + std=[0.26862954, 0.26130258, 0.27577711]) + self.clip_model = patch_clip(self.clip_model) + + self.synchformer = Synchformer() + self.synchformer.load_state_dict( + torch.load(synchformer_ckpt, weights_only=True, map_location='cpu')) + + self.tokenizer = open_clip.get_tokenizer('ViT-H-14-378-quickgelu') # same as 'ViT-H-14' + else: + self.clip_model = None + self.synchformer = None + self.tokenizer = None + + if tod_vae_ckpt is not None: + self.tod = AutoEncoderModule(vae_ckpt_path=tod_vae_ckpt, + vocoder_ckpt_path=bigvgan_vocoder_ckpt, + mode=mode) + else: + self.tod = None + self.mel_converter = MelConverter() + + def compile(self): + if self.clip_model is not None: + self.encode_video_with_clip = torch.compile(self.encode_video_with_clip) + self.clip_model.encode_image = 
torch.compile(self.clip_model.encode_image) + self.clip_model.encode_text = torch.compile(self.clip_model.encode_text) + if self.synchformer is not None: + self.synchformer = torch.compile(self.synchformer) + self.tod.encode = torch.compile(self.tod.encode) + self.decode = torch.compile(self.decode) + self.vocode = torch.compile(self.vocode) + + def train(self, mode: bool) -> None: + return super().train(False) + + @torch.inference_mode() + def encode_video_with_clip(self, x: torch.Tensor, batch_size: int = -1) -> torch.Tensor: + assert self.clip_model is not None, 'CLIP is not loaded' + # x: (B, T, C, H, W) H/W: 384 + b, t, c, h, w = x.shape + assert c == 3 and h == 384 and w == 384 + x = self.clip_preprocess(x) + x = rearrange(x, 'b t c h w -> (b t) c h w') + outputs = [] + if batch_size < 0: + batch_size = b * t + for i in range(0, b * t, batch_size): + outputs.append(self.clip_model.encode_image(x[i:i + batch_size], normalize=True)) + x = torch.cat(outputs, dim=0) + # x = self.clip_model.encode_image(x, normalize=True) + x = rearrange(x, '(b t) d -> b t d', b=b) + return x + + @torch.inference_mode() + def encode_video_with_sync(self, x: torch.Tensor, batch_size: int = -1) -> torch.Tensor: + assert self.synchformer is not None, 'Synchformer is not loaded' + # x: (B, T, C, H, W) H/W: 384 + + b, t, c, h, w = x.shape + assert c == 3 and h == 224 and w == 224 + + # partition the video + segment_size = 16 + step_size = 8 + num_segments = (t - segment_size) // step_size + 1 + segments = [] + for i in range(num_segments): + segments.append(x[:, i * step_size:i * step_size + segment_size]) + x = torch.stack(segments, dim=1) # (B, S, T, C, H, W) + + outputs = [] + if batch_size < 0: + batch_size = b + for i in range(0, b, batch_size): + outputs.append(self.synchformer(x[i:i + batch_size])) + x = torch.cat(outputs, dim=0).flatten(start_dim=1, end_dim=2) + return x + + @torch.inference_mode() + def encode_text(self, text: list[str]) -> torch.Tensor: + assert self.clip_model is not None, 'CLIP is not loaded' + assert self.tokenizer is not None, 'Tokenizer is not loaded' + # x: (B, L) + tokens = self.tokenizer(text).to(self.device) + return self.clip_model.encode_text(tokens, normalize=True) + + @torch.inference_mode() + def encode_audio(self, x) -> DiagonalGaussianDistribution: + assert self.tod is not None, 'VAE is not loaded' + # x: (B * L) + mel = self.mel_converter(x) + dist = self.tod.encode(mel) + + return dist + + @torch.inference_mode() + def vocode(self, mel: torch.Tensor) -> torch.Tensor: + assert self.tod is not None, 'VAE is not loaded' + return self.tod.vocode(mel) + + @torch.inference_mode() + def decode(self, z: torch.Tensor) -> torch.Tensor: + assert self.tod is not None, 'VAE is not loaded' + return self.tod.decode(z.transpose(1, 2)) + + @property + def device(self): + return next(self.parameters()).device + + @property + def dtype(self): + return next(self.parameters()).dtype diff --git a/mmaudio/model/utils/parameter_groups.py b/mmaudio/model/utils/parameter_groups.py new file mode 100644 index 0000000000000000000000000000000000000000..89c3993083f470dfc6b18a5c90f908ea37bde12b --- /dev/null +++ b/mmaudio/model/utils/parameter_groups.py @@ -0,0 +1,72 @@ +import logging + +log = logging.getLogger() + + +def get_parameter_groups(model, cfg, print_log=False): + """ + Assign different weight decays and learning rates to different parameters. + Returns a parameter group which can be passed to the optimizer. 
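+    Currently all trainable parameters go into a single group with the base learning rate and weight decay; the commented-out branches below are kept as hooks for backbone- or embedding-specific settings.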
+ """ + weight_decay = cfg.weight_decay + # embed_weight_decay = cfg.embed_weight_decay + # backbone_lr_ratio = cfg.backbone_lr_ratio + base_lr = cfg.learning_rate + + backbone_params = [] + embed_params = [] + other_params = [] + + # embedding_names = ['summary_pos', 'query_init', 'query_emb', 'obj_pe'] + # embedding_names = [e + '.weight' for e in embedding_names] + + # inspired by detectron2 + memo = set() + for name, param in model.named_parameters(): + if not param.requires_grad: + continue + # Avoid duplicating parameters + if param in memo: + continue + memo.add(param) + + if name.startswith('module'): + name = name[7:] + + inserted = False + # if name.startswith('pixel_encoder.'): + # backbone_params.append(param) + # inserted = True + # if print_log: + # log.info(f'{name} counted as a backbone parameter.') + # else: + # for e in embedding_names: + # if name.endswith(e): + # embed_params.append(param) + # inserted = True + # if print_log: + # log.info(f'{name} counted as an embedding parameter.') + # break + + # if not inserted: + other_params.append(param) + + parameter_groups = [ + # { + # 'params': backbone_params, + # 'lr': base_lr * backbone_lr_ratio, + # 'weight_decay': weight_decay + # }, + # { + # 'params': embed_params, + # 'lr': base_lr, + # 'weight_decay': embed_weight_decay + # }, + { + 'params': other_params, + 'lr': base_lr, + 'weight_decay': weight_decay + }, + ] + + return parameter_groups diff --git a/mmaudio/model/utils/sample_utils.py b/mmaudio/model/utils/sample_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d44cf278e0b464bc6ac7e240fcab4a23895caa2f --- /dev/null +++ b/mmaudio/model/utils/sample_utils.py @@ -0,0 +1,12 @@ +from typing import Optional + +import torch + + +def log_normal_sample(x: torch.Tensor, + generator: Optional[torch.Generator] = None, + m: float = 0.0, + s: float = 1.0) -> torch.Tensor: + bs = x.shape[0] + s = torch.randn(bs, device=x.device, generator=generator) * s + m + return torch.sigmoid(s) diff --git a/mmaudio/utils/__init__.py b/mmaudio/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mmaudio/utils/dist_utils.py b/mmaudio/utils/dist_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..354229b5d94bd03d104a07c7f16a06df9b519bdd --- /dev/null +++ b/mmaudio/utils/dist_utils.py @@ -0,0 +1,17 @@ +import os +from logging import Logger + +from mmaudio.utils.logger import TensorboardLogger + +local_rank = int(os.environ['LOCAL_RANK']) if 'LOCAL_RANK' in os.environ else 0 +world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 + + +def info_if_rank_zero(logger: Logger, msg: str): + if local_rank == 0: + logger.info(msg) + + +def string_if_rank_zero(logger: TensorboardLogger, tag: str, msg: str): + if local_rank == 0: + logger.log_string(tag, msg) diff --git a/mmaudio/utils/download_utils.py b/mmaudio/utils/download_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..09c48f5acd90e277a7b80d5ddddb37566948361c --- /dev/null +++ b/mmaudio/utils/download_utils.py @@ -0,0 +1,84 @@ +import hashlib +import logging +from pathlib import Path + +import requests +from tqdm import tqdm + +log = logging.getLogger() + +links = [ + { + 'name': 'mmaudio_small_16k.pth', + 'url': 'https://databank.illinois.edu/datafiles/k6jve/download', + 'md5': 'af93cde404179f58e3919ac085b8033b', + }, + { + 'name': 'mmaudio_small_44k.pth', + 'url': 
'https://databank.illinois.edu/datafiles/864ya/download', + 'md5': 'babd74c884783d13701ea2820a5f5b6d', + }, + { + 'name': 'mmaudio_medium_44k.pth', + 'url': 'https://databank.illinois.edu/datafiles/pa94t/download', + 'md5': '5a56b6665e45a1e65ada534defa903d0', + }, + { + 'name': 'mmaudio_large_44k.pth', + 'url': 'https://databank.illinois.edu/datafiles/4jx76/download', + 'md5': 'fed96c325a6785b85ce75ae1aafd2673' + }, + { + 'name': 'mmaudio_large_44k_v2.pth', + 'url': 'https://databank.illinois.edu/datafiles/16j46/download', + 'md5': '01ad4464f049b2d7efdaa4c1a59b8dfe' + }, + { + 'name': 'v1-16.pth', + 'url': 'https://github.com/hkchengrex/MMAudio/releases/download/v0.1/v1-16.pth', + 'md5': '69f56803f59a549a1a507c93859fd4d7' + }, + { + 'name': 'best_netG.pt', + 'url': 'https://github.com/hkchengrex/MMAudio/releases/download/v0.1/best_netG.pt', + 'md5': 'eeaf372a38a9c31c362120aba2dde292' + }, + { + 'name': 'v1-44.pth', + 'url': 'https://github.com/hkchengrex/MMAudio/releases/download/v0.1/v1-44.pth', + 'md5': 'fab020275fa44c6589820ce025191600' + }, + { + 'name': 'synchformer_state_dict.pth', + 'url': + 'https://github.com/hkchengrex/MMAudio/releases/download/v0.1/synchformer_state_dict.pth', + 'md5': '5b2f5594b0730f70e41e549b7c94390c' + }, +] + + +def download_model_if_needed(model_path: Path): + base_name = model_path.name + + for link in links: + if link['name'] == base_name: + target_link = link + break + else: + raise ValueError(f'No link found for {base_name}') + + model_path.parent.mkdir(parents=True, exist_ok=True) + if not model_path.exists() or hashlib.md5(open(model_path, + 'rb').read()).hexdigest() != target_link['md5']: + log.info(f'Downloading {base_name} to {model_path}...') + r = requests.get(target_link['url'], stream=True) + total_size = int(r.headers.get('content-length', 0)) + block_size = 1024 + t = tqdm(total=total_size, unit='iB', unit_scale=True) + with open(model_path, 'wb') as f: + for data in r.iter_content(block_size): + t.update(len(data)) + f.write(data) + t.close() + if total_size != 0 and t.n != total_size: + raise RuntimeError('Error while downloading %s' % base_name) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..160d9d00777a11dafb4b56f553f76c1be06213a6 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,52 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.metadata] +allow-direct-references = true + +[tool.yapf] +based_on_style = "pep8" +indent_width = 4 +column_limit = 100 + +[project] +name = "mmaudio" +version = "1.0.0" +authors = [{ name = "Rex Cheng", email = "hkchengrex@gmail.com" }] +description = "" +readme = "README.md" +requires-python = ">=3.9" +classifiers = [ + "Programming Language :: Python :: 3", + "Operating System :: OS Independent", +] +dependencies = [ + 'torch >= 2.5.1', + 'python-dotenv', + 'cython', + 'gitpython >= 3.1', + 'tensorboard >= 2.11', + 'numpy >= 1.21, <2.1', + 'Pillow >= 9.5', + 'opencv-python >= 4.8', + 'scipy >= 1.7', + 'tqdm >= 4.66.1', + 'gradio >= 3.34', + 'einops >= 0.6', + 'hydra-core >= 1.3.2', + 'requests', + 'torchdiffeq', + 'librosa >= 0.8.1', + 'nitrous-ema', + 'safetensors', + 'auraloss', + 'hydra_colorlog', + 'tensordict', + 'colorlog', + 'open_clip_torch', + 'soundfile', +] + +[tool.hatch.build.targets.wheel] +packages = ["mmaudio"] diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..efc1e0f41ab90a8327e06082834a9529f10bff14 --- /dev/null +++ 
b/requirements.txt @@ -0,0 +1,26 @@ +torch >= 2.5.1 +torchaudio +torchvision +python-dotenv +cython +gitpython >= 3.1 +tensorboard >= 2.11 +numpy >= 1.21, <2.1 +Pillow >= 9.5 +opencv-python >= 4.8 +scipy >= 1.7 +tqdm >= 4.66.1 +gradio >= 3.34 +einops >= 0.6 +hydra-core >= 1.3.2 +requests +torchdiffeq +librosa >= 0.8.1 +nitrous-ema +safetensors +auraloss +hydra_colorlog +tensordict +colorlog +open_clip_torch +soundfile \ No newline at end of file