sarinam committed on
Commit
09b2bbf
1 Parent(s): dcd7182

Initial commit

Browse files
Files changed (36) hide show
  1. IMSToucan/.gitignore +20 -0
  2. IMSToucan/InferenceInterfaces/AnonFastSpeech2.py +91 -0
  3. IMSToucan/InferenceInterfaces/InferenceArchitectures/InferenceFastSpeech2.py +256 -0
  4. IMSToucan/InferenceInterfaces/InferenceArchitectures/InferenceHiFiGAN.py +91 -0
  5. IMSToucan/InferenceInterfaces/InferenceArchitectures/__init__.py +0 -0
  6. IMSToucan/InferenceInterfaces/__init__.py +0 -0
  7. IMSToucan/LICENSE +202 -0
  8. IMSToucan/Layers/Attention.py +324 -0
  9. IMSToucan/Layers/Conformer.py +144 -0
  10. IMSToucan/Layers/Convolution.py +55 -0
  11. IMSToucan/Layers/DurationPredictor.py +139 -0
  12. IMSToucan/Layers/EncoderLayer.py +144 -0
  13. IMSToucan/Layers/LayerNorm.py +36 -0
  14. IMSToucan/Layers/LengthRegulator.py +62 -0
  15. IMSToucan/Layers/MultiLayeredConv1d.py +87 -0
  16. IMSToucan/Layers/MultiSequential.py +33 -0
  17. IMSToucan/Layers/PositionalEncoding.py +166 -0
  18. IMSToucan/Layers/PositionwiseFeedForward.py +26 -0
  19. IMSToucan/Layers/PostNet.py +74 -0
  20. IMSToucan/Layers/RNNAttention.py +282 -0
  21. IMSToucan/Layers/ResidualBlock.py +98 -0
  22. IMSToucan/Layers/ResidualStack.py +51 -0
  23. IMSToucan/Layers/STFT.py +118 -0
  24. IMSToucan/Layers/Swish.py +18 -0
  25. IMSToucan/Layers/VariancePredictor.py +65 -0
  26. IMSToucan/Layers/__init__.py +0 -0
  27. IMSToucan/Preprocessing/ArticulatoryCombinedTextFrontend.py +323 -0
  28. IMSToucan/Preprocessing/AudioPreprocessor.py +166 -0
  29. IMSToucan/Preprocessing/ProsodicConditionExtractor.py +40 -0
  30. IMSToucan/Preprocessing/__init__.py +0 -0
  31. IMSToucan/Preprocessing/papercup_features.py +637 -0
  32. IMSToucan/README.md +327 -0
  33. IMSToucan/Utility/SpeakerVisualization.py +94 -0
  34. IMSToucan/Utility/__init__.py +0 -0
  35. IMSToucan/Utility/utils.py +320 -0
  36. IMSToucan/requirements.txt +83 -0
IMSToucan/.gitignore ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .idea
2
+ *.pyc
3
+ *.png
4
+ *.pdf
5
+ tensorboard_logs
6
+ Corpora
7
+ Models
8
+ *_graph
9
+ *.out
10
+ *.wav
11
+ audios/notes.txt
12
+ audios/
13
+ *playground*
14
+ apex/
15
+ pretrained_models/
16
+ *.json
17
+ .tmp/
18
+ .vscode/
19
+ split/
20
+ singing/
IMSToucan/InferenceInterfaces/AnonFastSpeech2.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import librosa.display as lbd
2
+ import matplotlib.pyplot as plt
3
+ import soundfile
4
+ import torch
5
+
6
+ from .InferenceArchitectures.InferenceFastSpeech2 import FastSpeech2
7
+ from .InferenceArchitectures.InferenceHiFiGAN import HiFiGANGenerator
8
+ from ..Preprocessing.ArticulatoryCombinedTextFrontend import ArticulatoryCombinedTextFrontend
9
+ from ..Preprocessing.ArticulatoryCombinedTextFrontend import get_language_id
10
+
11
+
12
class AnonFastSpeech2(torch.nn.Module):
    """
    Text-to-speech pipeline used for speaker anonymization:
    text -> articulatory phone features -> FastSpeech2 spectrogram -> HiFiGAN waveform.

    The speaker identity of the produced speech is controlled by
    ``self.default_utterance_embedding``, which ``anonymize_to_file`` overwrites
    with an arbitrary target speaker embedding before synthesis.
    """

    def __init__(self, device: str, path_to_hifigan_model: str, path_to_fastspeech_model: str):
        """
        Args:
            device: Device to run on. CPU is feasible, still faster than real-time, but a GPU is significantly faster.
            path_to_hifigan_model: Path to the vocoder model, including filename and suffix.
            path_to_fastspeech_model: Path to the synthesis model, including filename and suffix.
        """
        super().__init__()
        language = "en"
        self.device = device
        self.text2phone = ArticulatoryCombinedTextFrontend(language=language, add_silence_to_end=True)
        checkpoint = torch.load(path_to_fastspeech_model, map_location='cpu')
        # lang_embs=None: the loaded checkpoint is used as a single-language (English) model.
        self.phone2mel = FastSpeech2(weights=checkpoint["model"], lang_embs=None).to(torch.device(device))
        self.mel2wav = HiFiGANGenerator(path_to_weights=path_to_hifigan_model).to(torch.device(device))
        # Fallback speaker embedding taken from the checkpoint; replaced per call in anonymize_to_file.
        self.default_utterance_embedding = checkpoint["default_emb"].to(self.device)
        self.phone2mel.eval()
        self.mel2wav.eval()
        self.lang_id = get_language_id(language)
        self.to(torch.device(device))

    def forward(self, text, view=False, text_is_phonemes=False):
        """
        Args:
            text: The text that the TTS should convert to speech
            view: Boolean flag whether to produce and display a graphic showing the generated audio
            text_is_phonemes: Boolean flag whether the text parameter contains phonemes (True) or graphemes (False)

        Returns:
            48kHz waveform as 1d tensor
        """
        with torch.no_grad():
            phones = self.text2phone.string_to_tensor(text, input_phonemes=text_is_phonemes).to(torch.device(self.device))
            mel, durations, pitch, energy = self.phone2mel(phones,
                                                           return_duration_pitch_energy=True,
                                                           utterance_embedding=self.default_utterance_embedding)
            # FastSpeech2 returns (frames, mel_bins); the vocoder expects channels first.
            mel = mel.transpose(0, 1)
            wave = self.mel2wav(mel)
            if view:
                # FIX: was `from Utility.utils import cumsum_durations` — an absolute
                # import that cannot resolve when this module lives inside the
                # IMSToucan package. Use the package-relative form, consistent with
                # the imports at the top of this file.
                from ..Utility.utils import cumsum_durations
                fig, ax = plt.subplots(nrows=2, ncols=1)
                ax[0].plot(wave.cpu().numpy())
                lbd.specshow(mel.cpu().numpy(),
                             ax=ax[1],
                             sr=16000,  # NOTE(review): display-only sample rate; docstring claims 48kHz output — confirm
                             cmap='GnBu',
                             y_axis='mel',
                             x_axis=None,
                             hop_length=256)
                ax[0].yaxis.set_visible(False)
                ax[1].yaxis.set_visible(False)
                # Mark phone boundaries and label each segment with its phone symbol.
                duration_splits, label_positions = cumsum_durations(durations.cpu().numpy())
                ax[1].set_xticks(duration_splits, minor=True)
                ax[1].xaxis.grid(True, which='minor')
                ax[1].set_xticks(label_positions, minor=False)
                ax[1].set_xticklabels(self.text2phone.get_phone_string(text))
                ax[0].set_title(text)
                plt.subplots_adjust(left=0.05, bottom=0.1, right=0.95, top=.9, wspace=0.0, hspace=0.0)
                plt.show()
            return wave

    def anonymize_to_file(self, text: str, text_is_phonemes: bool, target_speaker_embedding: torch.tensor, path_to_result_file: str):
        """
        Args:
            text: The text that the TTS should convert to speech
            text_is_phonemes: Boolean flag whether the text parameter contains phonemes (True) or graphemes (False)
            target_speaker_embedding: The speaker embedding that should be used for the produced speech
            path_to_result_file: The path to the location where the resulting speech should be saved (including the filename and .wav suffix)
        """
        assert text.strip() != ""
        assert path_to_result_file.endswith(".wav")

        # Overwrite the speaker identity for this (and all subsequent) synthesis calls.
        self.default_utterance_embedding = target_speaker_embedding.to(self.device)
        wav = self(text=text, text_is_phonemes=text_is_phonemes)
        soundfile.write(file=path_to_result_file, data=wav.cpu().numpy(), samplerate=48000)
IMSToucan/InferenceInterfaces/InferenceArchitectures/InferenceFastSpeech2.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABC
2
+
3
+ import torch
4
+
5
+ from ...Layers.Conformer import Conformer
6
+ from ...Layers.DurationPredictor import DurationPredictor
7
+ from ...Layers.LengthRegulator import LengthRegulator
8
+ from ...Layers.PostNet import PostNet
9
+ from ...Layers.VariancePredictor import VariancePredictor
10
+ from ...Utility.utils import make_non_pad_mask
11
+ from ...Utility.utils import make_pad_mask
12
+
13
+
14
class FastSpeech2(torch.nn.Module, ABC):
    """
    Inference-only FastSpeech 2 spectrogram synthesizer with Conformer
    encoder/decoder, variance adaptors (duration, pitch, energy) and a PostNet.

    The constructor builds the module hierarchy and immediately loads the given
    pretrained ``weights`` via ``load_state_dict`` — the submodule names below
    are therefore load-bearing and must match the training-side architecture.
    """

    def __init__(self, # network structure related
                 weights,
                 idim=66,
                 odim=80,
                 adim=384,
                 aheads=4,
                 elayers=6,
                 eunits=1536,
                 dlayers=6,
                 dunits=1536,
                 postnet_layers=5,
                 postnet_chans=256,
                 postnet_filts=5,
                 positionwise_conv_kernel_size=1,
                 use_scaled_pos_enc=True,
                 use_batch_norm=True,
                 encoder_normalize_before=True,
                 decoder_normalize_before=True,
                 encoder_concat_after=False,
                 decoder_concat_after=False,
                 reduction_factor=1,
                 # encoder / decoder
                 use_macaron_style_in_conformer=True,
                 use_cnn_in_conformer=True,
                 conformer_enc_kernel_size=7,
                 conformer_dec_kernel_size=31,
                 # duration predictor
                 duration_predictor_layers=2,
                 duration_predictor_chans=256,
                 duration_predictor_kernel_size=3,
                 # energy predictor
                 energy_predictor_layers=2,
                 energy_predictor_chans=256,
                 energy_predictor_kernel_size=3,
                 energy_predictor_dropout=0.5,
                 energy_embed_kernel_size=1,
                 energy_embed_dropout=0.0,
                 stop_gradient_from_energy_predictor=True,
                 # pitch predictor
                 pitch_predictor_layers=5,
                 pitch_predictor_chans=256,
                 pitch_predictor_kernel_size=5,
                 pitch_predictor_dropout=0.5,
                 pitch_embed_kernel_size=1,
                 pitch_embed_dropout=0.0,
                 stop_gradient_from_pitch_predictor=True,
                 # training related
                 transformer_enc_dropout_rate=0.2,
                 transformer_enc_positional_dropout_rate=0.2,
                 transformer_enc_attn_dropout_rate=0.2,
                 transformer_dec_dropout_rate=0.2,
                 transformer_dec_positional_dropout_rate=0.2,
                 transformer_dec_attn_dropout_rate=0.2,
                 duration_predictor_dropout_rate=0.2,
                 postnet_dropout_rate=0.5,
                 # additional features
                 utt_embed_dim=704,
                 connect_utt_emb_at_encoder_out=True,
                 lang_embs=100):
        super().__init__()
        self.idim = idim  # input feature dimension (articulatory feature vector size)
        self.odim = odim  # output dimension (mel bins)
        self.reduction_factor = reduction_factor
        self.stop_gradient_from_pitch_predictor = stop_gradient_from_pitch_predictor
        self.stop_gradient_from_energy_predictor = stop_gradient_from_energy_predictor
        self.use_scaled_pos_enc = use_scaled_pos_enc
        # Small MLP that projects articulatory features into the attention dimension,
        # used as the Conformer encoder's input layer.
        embed = torch.nn.Sequential(torch.nn.Linear(idim, 100),
                                    torch.nn.Tanh(),
                                    torch.nn.Linear(100, adim))
        self.encoder = Conformer(idim=idim, attention_dim=adim, attention_heads=aheads, linear_units=eunits, num_blocks=elayers,
                                 input_layer=embed, dropout_rate=transformer_enc_dropout_rate,
                                 positional_dropout_rate=transformer_enc_positional_dropout_rate, attention_dropout_rate=transformer_enc_attn_dropout_rate,
                                 normalize_before=encoder_normalize_before, concat_after=encoder_concat_after,
                                 positionwise_conv_kernel_size=positionwise_conv_kernel_size, macaron_style=use_macaron_style_in_conformer,
                                 use_cnn_module=use_cnn_in_conformer, cnn_module_kernel=conformer_enc_kernel_size, zero_triu=False,
                                 utt_embed=utt_embed_dim, connect_utt_emb_at_encoder_out=connect_utt_emb_at_encoder_out, lang_embs=lang_embs)
        self.duration_predictor = DurationPredictor(idim=adim, n_layers=duration_predictor_layers,
                                                    n_chans=duration_predictor_chans,
                                                    kernel_size=duration_predictor_kernel_size,
                                                    dropout_rate=duration_predictor_dropout_rate, )
        self.pitch_predictor = VariancePredictor(idim=adim, n_layers=pitch_predictor_layers,
                                                 n_chans=pitch_predictor_chans,
                                                 kernel_size=pitch_predictor_kernel_size,
                                                 dropout_rate=pitch_predictor_dropout)
        # Embeds the scalar per-token pitch value back into the attention dimension.
        self.pitch_embed = torch.nn.Sequential(torch.nn.Conv1d(in_channels=1, out_channels=adim,
                                                               kernel_size=pitch_embed_kernel_size,
                                                               padding=(pitch_embed_kernel_size - 1) // 2),
                                               torch.nn.Dropout(pitch_embed_dropout))
        self.energy_predictor = VariancePredictor(idim=adim, n_layers=energy_predictor_layers,
                                                  n_chans=energy_predictor_chans,
                                                  kernel_size=energy_predictor_kernel_size,
                                                  dropout_rate=energy_predictor_dropout)
        # Embeds the scalar per-token energy value back into the attention dimension.
        self.energy_embed = torch.nn.Sequential(torch.nn.Conv1d(in_channels=1, out_channels=adim,
                                                                kernel_size=energy_embed_kernel_size,
                                                                padding=(energy_embed_kernel_size - 1) // 2),
                                                torch.nn.Dropout(energy_embed_dropout))
        self.length_regulator = LengthRegulator()
        self.decoder = Conformer(idim=0,
                                 attention_dim=adim,
                                 attention_heads=aheads,
                                 linear_units=dunits,
                                 num_blocks=dlayers,
                                 input_layer=None,
                                 dropout_rate=transformer_dec_dropout_rate,
                                 positional_dropout_rate=transformer_dec_positional_dropout_rate,
                                 attention_dropout_rate=transformer_dec_attn_dropout_rate,
                                 normalize_before=decoder_normalize_before,
                                 concat_after=decoder_concat_after,
                                 positionwise_conv_kernel_size=positionwise_conv_kernel_size,
                                 macaron_style=use_macaron_style_in_conformer,
                                 use_cnn_module=use_cnn_in_conformer,
                                 cnn_module_kernel=conformer_dec_kernel_size)
        self.feat_out = torch.nn.Linear(adim, odim * reduction_factor)
        self.postnet = PostNet(idim=idim,
                               odim=odim,
                               n_layers=postnet_layers,
                               n_chans=postnet_chans,
                               n_filts=postnet_filts,
                               use_batch_norm=use_batch_norm,
                               dropout_rate=postnet_dropout_rate)
        # Load pretrained parameters; module names above must match the checkpoint.
        self.load_state_dict(weights)

    def _forward(self, text_tensors, text_lens, gold_speech=None, speech_lens=None,
                 gold_durations=None, gold_pitch=None, gold_energy=None,
                 is_inference=False, alpha=1.0, utterance_embedding=None, lang_ids=None):
        """
        Shared forward pass for training-style and inference-style invocation.

        In inference mode predicted (or supplied gold) durations/pitch/energy are
        used; otherwise gold values drive the variance embedding and length
        regulation while the predictors produce outputs for the loss.
        """
        # forward encoder
        text_masks = self._source_mask(text_lens)

        encoded_texts, _ = self.encoder(text_tensors, text_masks, utterance_embedding=utterance_embedding, lang_ids=lang_ids)  # (B, Tmax, adim)

        # forward duration predictor and variance predictors
        duration_masks = make_pad_mask(text_lens, device=text_lens.device)

        # Optionally detach so predictor gradients do not flow back into the encoder.
        if self.stop_gradient_from_pitch_predictor:
            pitch_predictions = self.pitch_predictor(encoded_texts.detach(), duration_masks.unsqueeze(-1))
        else:
            pitch_predictions = self.pitch_predictor(encoded_texts, duration_masks.unsqueeze(-1))

        if self.stop_gradient_from_energy_predictor:
            energy_predictions = self.energy_predictor(encoded_texts.detach(), duration_masks.unsqueeze(-1))
        else:
            energy_predictions = self.energy_predictor(encoded_texts, duration_masks.unsqueeze(-1))

        if is_inference:
            # Any supplied gold value overrides the corresponding prediction.
            if gold_durations is not None:
                duration_predictions = gold_durations
            else:
                duration_predictions = self.duration_predictor.inference(encoded_texts, duration_masks)
            if gold_pitch is not None:
                pitch_predictions = gold_pitch
            if gold_energy is not None:
                energy_predictions = gold_energy
            pitch_embeddings = self.pitch_embed(pitch_predictions.transpose(1, 2)).transpose(1, 2)
            energy_embeddings = self.energy_embed(energy_predictions.transpose(1, 2)).transpose(1, 2)
            encoded_texts = encoded_texts + energy_embeddings + pitch_embeddings
            # alpha scales the durations, controlling overall speaking rate.
            encoded_texts = self.length_regulator(encoded_texts, duration_predictions, alpha)
        else:
            duration_predictions = self.duration_predictor(encoded_texts, duration_masks)

            # use groundtruth in training
            pitch_embeddings = self.pitch_embed(gold_pitch.transpose(1, 2)).transpose(1, 2)
            energy_embeddings = self.energy_embed(gold_energy.transpose(1, 2)).transpose(1, 2)
            encoded_texts = encoded_texts + energy_embeddings + pitch_embeddings
            encoded_texts = self.length_regulator(encoded_texts, gold_durations)  # (B, Lmax, adim)

        # forward decoder
        if speech_lens is not None and not is_inference:
            if self.reduction_factor > 1:
                olens_in = speech_lens.new([olen // self.reduction_factor for olen in speech_lens])
            else:
                olens_in = speech_lens
            h_masks = self._source_mask(olens_in)
        else:
            h_masks = None
        zs, _ = self.decoder(encoded_texts, h_masks)  # (B, Lmax, adim)
        before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)  # (B, Lmax, odim)

        # postnet -> (B, Lmax//r * r, odim)
        after_outs = before_outs + self.postnet(before_outs.transpose(1, 2)).transpose(1, 2)

        return before_outs, after_outs, duration_predictions, pitch_predictions, energy_predictions

    @torch.no_grad()
    def forward(self,
                text,
                speech=None,
                durations=None,
                pitch=None,
                energy=None,
                utterance_embedding=None,
                return_duration_pitch_energy=False,
                lang_id=None):
        """
        Generate the sequence of features given the sequences of characters.

        Args:
            text: Input sequence of characters
            speech: Feature sequence to extract style
            durations: Groundtruth of duration
            pitch: Groundtruth of token-averaged pitch
            energy: Groundtruth of token-averaged energy
            return_duration_pitch_energy: whether to return the list of predicted durations for nicer plotting
            utterance_embedding: embedding of utterance wide parameters
                (NOTE(review): must not be None — it is unconditionally unsqueezed below)

        Returns:
            Mel Spectrogram
        """
        self.eval()
        # setup batch axis
        ilens = torch.tensor([text.shape[0]], dtype=torch.long, device=text.device)
        if speech is not None:
            gold_speech = speech.unsqueeze(0)
        else:
            gold_speech = None
        if durations is not None:
            durations = durations.unsqueeze(0)
        if pitch is not None:
            pitch = pitch.unsqueeze(0)
        if energy is not None:
            energy = energy.unsqueeze(0)
        if lang_id is not None:
            lang_id = lang_id.unsqueeze(0)

        before_outs, after_outs, d_outs, pitch_predictions, energy_predictions = self._forward(text.unsqueeze(0),
                                                                                               ilens,
                                                                                               gold_speech=gold_speech,
                                                                                               gold_durations=durations,
                                                                                               is_inference=True,
                                                                                               gold_pitch=pitch,
                                                                                               gold_energy=energy,
                                                                                               utterance_embedding=utterance_embedding.unsqueeze(0),
                                                                                               lang_ids=lang_id)
        self.train()
        if return_duration_pitch_energy:
            # Strip the batch axis from every returned tensor.
            return after_outs[0], d_outs[0], pitch_predictions[0], energy_predictions[0]
        return after_outs[0]

    def _source_mask(self, ilens):
        """Build a (B, 1, Tmax) non-padding attention mask from sequence lengths."""
        x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)
        return x_masks.unsqueeze(-2)
IMSToucan/InferenceInterfaces/InferenceArchitectures/InferenceHiFiGAN.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ from ...Layers.ResidualBlock import HiFiGANResidualBlock as ResidualBlock
4
+
5
+
6
class HiFiGANGenerator(torch.nn.Module):
    """
    Inference-only HiFi-GAN generator: upsamples a mel spectrogram to a raw
    waveform through a stack of transposed convolutions, each followed by a
    bank of multi-receptive-field residual blocks.

    The constructor builds the module hierarchy, optionally applies weight
    normalization, and then loads pretrained weights from ``path_to_weights``
    (expects a checkpoint dict with a "generator" entry) — the submodule names
    below are load-bearing for ``load_state_dict``.
    """

    def __init__(self,
                 path_to_weights,
                 in_channels=80,
                 out_channels=1,
                 channels=512,
                 kernel_size=7,
                 upsample_scales=(8, 6, 4, 4),
                 upsample_kernel_sizes=(16, 12, 8, 8),
                 resblock_kernel_sizes=(3, 7, 11),
                 resblock_dilations=[(1, 3, 5), (1, 3, 5), (1, 3, 5)],
                 use_additional_convs=True,
                 bias=True,
                 nonlinear_activation="LeakyReLU",
                 nonlinear_activation_params={"negative_slope": 0.1},
                 use_weight_norm=True, ):
        super().__init__()
        # Odd kernel sizes keep the "same" padding symmetric.
        assert kernel_size % 2 == 1, "Kernal size must be odd number."
        assert len(upsample_scales) == len(upsample_kernel_sizes)
        assert len(resblock_dilations) == len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_kernel_sizes)
        self.num_blocks = len(resblock_kernel_sizes)
        self.input_conv = torch.nn.Conv1d(in_channels,
                                          channels,
                                          kernel_size,
                                          1,
                                          padding=(kernel_size - 1) // 2, )
        self.upsamples = torch.nn.ModuleList()
        self.blocks = torch.nn.ModuleList()
        # Channel count halves at every upsampling stage; each stage owns
        # num_blocks residual blocks (stored flat in self.blocks).
        for i in range(len(upsample_kernel_sizes)):
            self.upsamples += [
                torch.nn.Sequential(getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
                                    torch.nn.ConvTranspose1d(channels // (2 ** i),
                                                             channels // (2 ** (i + 1)),
                                                             upsample_kernel_sizes[i],
                                                             upsample_scales[i],
                                                             padding=(upsample_kernel_sizes[i] - upsample_scales[i]) // 2, ), )]
            for j in range(len(resblock_kernel_sizes)):
                self.blocks += [ResidualBlock(kernel_size=resblock_kernel_sizes[j],
                                              channels=channels // (2 ** (i + 1)),
                                              dilations=resblock_dilations[j],
                                              bias=bias,
                                              use_additional_convs=use_additional_convs,
                                              nonlinear_activation=nonlinear_activation,
                                              nonlinear_activation_params=nonlinear_activation_params, )]
        self.output_conv = torch.nn.Sequential(
            torch.nn.LeakyReLU(),
            torch.nn.Conv1d(channels // (2 ** (i + 1)),
                            out_channels,
                            kernel_size,
                            1,
                            padding=(kernel_size - 1) // 2, ),
            torch.nn.Tanh(), )
        # Weight norm must be applied BEFORE loading, so parameter names
        # (weight_g/weight_v) match the checkpoint.
        if use_weight_norm:
            self.apply_weight_norm()
        self.load_state_dict(torch.load(path_to_weights, map_location='cpu')["generator"])

    def forward(self, c, normalize_before=False):
        """
        Convert a mel spectrogram ``c`` (channels, frames) into a 1d waveform tensor.

        NOTE(review): ``normalize_before=True`` reads ``self.mean`` and
        ``self.scale``, which are never defined in this class — it would raise
        AttributeError unless those attributes are assigned externally; confirm
        before using this flag.
        """
        if normalize_before:
            c = (c - self.mean) / self.scale
        c = self.input_conv(c.unsqueeze(0))
        for i in range(self.num_upsamples):
            c = self.upsamples[i](c)
            cs = 0.0  # initialize
            # Average the outputs of this stage's residual-block bank.
            for j in range(self.num_blocks):
                cs = cs + self.blocks[i * self.num_blocks + j](c)
            c = cs / self.num_blocks
        c = self.output_conv(c)
        return c.squeeze(0).squeeze(0)

    def remove_weight_norm(self):
        """Strip weight normalization from all conv layers (ignores layers without it)."""
        def _remove_weight_norm(m):
            try:
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization to every Conv1d / ConvTranspose1d in the module."""
        def _apply_weight_norm(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
                torch.nn.utils.weight_norm(m)

        self.apply(_apply_weight_norm)
IMSToucan/InferenceInterfaces/InferenceArchitectures/__init__.py ADDED
File without changes
IMSToucan/InferenceInterfaces/__init__.py ADDED
File without changes
IMSToucan/LICENSE ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright for most of the PyTorch modules: 2017 Johns Hopkins University (Shinji Watanabe)
190
+ Copyright for the rest: 2021 University of Stuttgart (Florian Lux)
191
+
192
+ Licensed under the Apache License, Version 2.0 (the "License");
193
+ you may not use this file except in compliance with the License.
194
+ You may obtain a copy of the License at
195
+
196
+ http://www.apache.org/licenses/LICENSE-2.0
197
+
198
+ Unless required by applicable law or agreed to in writing, software
199
+ distributed under the License is distributed on an "AS IS" BASIS,
200
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
+ See the License for the specific language governing permissions and
202
+ limitations under the License.
IMSToucan/Layers/Attention.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Written by Shigeki Karita, 2019
2
+ # Published under Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
3
+ # Adapted by Florian Lux, 2021
4
+
5
+ """Multi-Head Attention layer definition."""
6
+
7
+ import math
8
+
9
+ import numpy
10
+ import torch
11
+ from torch import nn
12
+
13
+ from ..Utility.utils import make_non_pad_mask
14
+
15
+
16
class MultiHeadedAttention(nn.Module):
    """
    Multi-Head Attention layer (scaled dot-product attention over several heads).

    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features; must be divisible by n_head.
        dropout_rate (float): Dropout rate applied to the attention weights.
    """

    def __init__(self, n_head, n_feat, dropout_rate):
        """Construct a MultiHeadedAttention object."""
        super(MultiHeadedAttention, self).__init__()
        assert n_feat % n_head == 0
        # we assume d_v always equals d_k
        self.d_k = n_feat // n_head
        self.h = n_head
        self.linear_q = nn.Linear(n_feat, n_feat)
        self.linear_k = nn.Linear(n_feat, n_feat)
        self.linear_v = nn.Linear(n_feat, n_feat)
        self.linear_out = nn.Linear(n_feat, n_feat)
        # most recent attention weights, kept for inspection/visualization
        self.attn = None
        self.dropout = nn.Dropout(p=dropout_rate)

    def forward_qkv(self, query, key, value):
        """
        Project query, key and value and split them into heads.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).

        Returns:
            torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
            torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
            torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
        """
        batch = query.size(0)
        # project, split the feature dim into (head, d_k), then move head before time
        q = self.linear_q(query).view(batch, -1, self.h, self.d_k).transpose(1, 2)
        k = self.linear_k(key).view(batch, -1, self.h, self.d_k).transpose(1, 2)
        v = self.linear_v(value).view(batch, -1, self.h, self.d_k).transpose(1, 2)
        return q, k, v

    def forward_attention(self, value, scores, mask):
        """
        Compute attention context vector.

        Args:
            value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
            scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
            mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).

        Returns:
            torch.Tensor: Transformed value (#batch, time1, d_model)
                weighted by the attention score (#batch, time1, time2).
        """
        batch = value.size(0)
        if mask is None:
            self.attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)
        else:
            pad_positions = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            # most negative finite value for this dtype, so softmax gives ~0 there
            fill_value = float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
            masked_scores = scores.masked_fill(pad_positions, fill_value)
            # re-zero the padded positions to guard against numerical leakage
            self.attn = torch.softmax(masked_scores, dim=-1).masked_fill(pad_positions, 0.0)

        weights = self.dropout(self.attn)
        context = torch.matmul(weights, value)  # (batch, head, time1, d_k)
        # merge the heads back into a single feature dimension
        context = context.transpose(1, 2).contiguous().view(batch, -1, self.h * self.d_k)
        return self.linear_out(context)  # (batch, time1, d_model)

    def forward(self, query, key, value, mask):
        """
        Compute scaled dot product attention.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).

        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
        """
        q, k, v = self.forward_qkv(query, key, value)
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
        return self.forward_attention(v, scores, mask)
111
+
112
+
113
class RelPositionMultiHeadedAttention(MultiHeadedAttention):
    """
    Multi-Head Attention layer with relative position encoding.
    Details can be found in https://github.com/espnet/espnet/pull/2816.
    Paper: https://arxiv.org/abs/1901.02860
    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
    """

    def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):
        """Construct an RelPositionMultiHeadedAttention object."""
        super().__init__(n_head, n_feat, dropout_rate)
        self.zero_triu = zero_triu
        # linear transformation for positional encoding
        self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
        # these two learnable bias are used in matrix c and matrix d
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
        self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
        torch.nn.init.xavier_uniform_(self.pos_bias_u)
        torch.nn.init.xavier_uniform_(self.pos_bias_v)

    def rel_shift(self, x):
        """
        Compute relative positional encoding.
        Args:
            x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).
                time1 means the length of query vector.
        Returns:
            torch.Tensor: Output tensor.
        """
        # Transformer-XL "rel-shift" trick: pad one zero column, then reinterpret
        # the flat memory layout so each row is shifted by one relative to the
        # previous row, aligning relative offsets with absolute key indices.
        zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=-1)

        x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
        x = x_padded[:, :, 1:].view_as(x)[:, :, :, : x.size(-1) // 2 + 1]  # only keep the positions from 0 to time2

        if self.zero_triu:
            # zero out attention to future positions (strict upper triangle)
            ones = torch.ones((x.size(2), x.size(3)), device=x.device)
            x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]

        return x

    def forward(self, query, key, value, pos_emb, mask):
        """
        Compute 'Scaled Dot Product Attention' with rel. positional encoding.
        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            pos_emb (torch.Tensor): Positional embedding tensor
                (#batch, 2*time1-1, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).
        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
        """
        q, k, v = self.forward_qkv(query, key, value)
        q = q.transpose(1, 2)  # (batch, time1, head, d_k)

        # project the positional embeddings into per-head space
        n_batch_pos = pos_emb.size(0)
        p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
        p = p.transpose(1, 2)  # (batch, head, 2*time1-1, d_k)

        # (batch, head, time1, d_k)
        q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
        # (batch, head, time1, d_k)
        q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)

        # compute attention score
        # first compute matrix a and matrix c
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        # (batch, head, time1, time2)
        matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))

        # compute matrix b and matrix d
        # (batch, head, time1, 2*time1-1)
        matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
        matrix_bd = self.rel_shift(matrix_bd)

        scores = (matrix_ac + matrix_bd) / math.sqrt(self.d_k)  # (batch, head, time1, time2)

        return self.forward_attention(v, scores, mask)
199
+
200
+
201
class GuidedAttentionLoss(torch.nn.Module):
    """
    Guided attention loss function module.

    Calculates the guided attention loss described in
    `Efficiently Trainable Text-to-Speech System Based
    on Deep Convolutional Networks with Guided Attention`_,
    which forces the attention to be diagonal.

    .. _`Efficiently Trainable Text-to-Speech System
        Based on Deep Convolutional Networks with Guided Attention`:
        https://arxiv.org/abs/1710.08969
    """

    def __init__(self, sigma=0.4, alpha=1.0):
        """
        Initialize guided attention loss module.

        Args:
            sigma (float, optional): Standard deviation controlling how close
                to the diagonal the attention is pushed.
            alpha (float, optional): Scaling coefficient (lambda).
        """
        super(GuidedAttentionLoss, self).__init__()
        self.sigma = sigma
        self.alpha = alpha
        self.guided_attn_masks = None
        self.masks = None

    def _reset_masks(self):
        # drop cached masks so the next forward pass recomputes them
        self.guided_attn_masks = None
        self.masks = None

    def forward(self, att_ws, ilens, olens):
        """
        Calculate forward propagation.

        Args:
            att_ws (Tensor): Batch of attention weights (B, T_max_out, T_max_in).
            ilens (LongTensor): Batch of input lengths (B,).
            olens (LongTensor): Batch of output lengths (B,).

        Returns:
            Tensor: Guided attention loss value.
        """
        self._reset_masks()
        self.guided_attn_masks = self._make_guided_attention_masks(ilens, olens).to(att_ws.device)
        self.masks = self._make_masks(ilens, olens).to(att_ws.device)
        # penalize attention mass that lands far from the diagonal,
        # averaged over the non-padded positions only
        penalties = self.guided_attn_masks * att_ws
        loss = torch.mean(penalties.masked_select(self.masks))
        self._reset_masks()
        return self.alpha * loss

    def _make_guided_attention_masks(self, ilens, olens):
        # one (T_out, T_in) soft-diagonal mask per utterance, zero-padded to the batch max
        batch_size = len(ilens)
        guided_attn_masks = torch.zeros((batch_size, max(olens), max(ilens)), device=ilens.device)
        for idx, (in_len, out_len) in enumerate(zip(ilens, olens)):
            guided_attn_masks[idx, :out_len, :in_len] = self._make_guided_attention_mask(in_len, out_len, self.sigma)
        return guided_attn_masks

    @staticmethod
    def _make_guided_attention_mask(ilen, olen, sigma):
        """
        Make a single guided attention mask of shape (olen, ilen).
        """
        # assumes ilen/olen are 0-dim tensors (``.device`` is read) — TODO confirm callers
        out_positions = torch.arange(olen, device=olen.device).float()
        in_positions = torch.arange(ilen, device=ilen.device).float()
        grid_out, grid_in = torch.meshgrid(out_positions, in_positions)
        # penalty grows with distance from the normalized diagonal
        return 1.0 - torch.exp(-((grid_in / ilen - grid_out / olen) ** 2) / (2 * (sigma ** 2)))

    @staticmethod
    def _make_masks(ilens, olens):
        """
        Make masks indicating non-padded part.

        Args:
            ilens (LongTensor or List): Batch of lengths (B,).
            olens (LongTensor or List): Batch of lengths (B,).

        Returns:
            Tensor: Mask tensor indicating non-padded part.
                dtype=torch.uint8 in PyTorch 1.2-
                dtype=torch.bool in PyTorch 1.2+ (including 1.2)
        """
        in_masks = make_non_pad_mask(ilens, device=ilens.device)  # (B, T_in)
        out_masks = make_non_pad_mask(olens, device=olens.device)  # (B, T_out)
        # outer product of the two 1D masks gives the valid (out, in) region
        return out_masks.unsqueeze(-1) & in_masks.unsqueeze(-2)  # (B, T_out, T_in)
289
+
290
+
291
class GuidedMultiHeadAttentionLoss(GuidedAttentionLoss):
    """
    Guided attention loss function module for multi head attention.

    Args:
        sigma (float, optional): Standard deviation to control
            how close attention to a diagonal.
        alpha (float, optional): Scaling coefficient (lambda).
    """

    def forward(self, att_ws, ilens, olens):
        """
        Calculate forward propagation.

        Args:
            att_ws (Tensor):
                Batch of multi head attention weights (B, H, T_max_out, T_max_in).
            ilens (LongTensor): Batch of input lengths (B,).
            olens (LongTensor): Batch of output lengths (B,).

        Returns:
            Tensor: Guided attention loss value.
        """
        if self.guided_attn_masks is None:
            # unsqueeze(1) adds the head axis so the masks broadcast over H
            self.guided_attn_masks = (self._make_guided_attention_masks(ilens, olens).to(att_ws.device).unsqueeze(1))
        if self.masks is None:
            self.masks = self._make_masks(ilens, olens).to(att_ws.device).unsqueeze(1)
        losses = self.guided_attn_masks * att_ws
        loss = torch.mean(losses.masked_select(self.masks))
        # BUGFIX: this previously read ``if self.reset_always:`` but no class in
        # this hierarchy ever defines ``reset_always`` (it was removed from the
        # parent's __init__), so every call raised AttributeError after computing
        # the loss. Reset unconditionally, matching GuidedAttentionLoss.forward.
        self._reset_masks()

        return self.alpha * loss
IMSToucan/Layers/Conformer.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Taken from ESPNet
3
+ """
4
+
5
+ import torch
6
+ import torch.nn.functional as F
7
+
8
+ from .Attention import RelPositionMultiHeadedAttention
9
+ from .Convolution import ConvolutionModule
10
+ from .EncoderLayer import EncoderLayer
11
+ from .LayerNorm import LayerNorm
12
+ from .MultiLayeredConv1d import MultiLayeredConv1d
13
+ from .MultiSequential import repeat
14
+ from .PositionalEncoding import RelPositionalEncoding
15
+ from .Swish import Swish
16
+
17
+
18
class Conformer(torch.nn.Module):
    """
    Conformer encoder module.

    Args:
        idim (int): Input dimension. NOTE(review): not referenced in this
            trimmed version — the input layer is handed in as a module or None.
        attention_dim (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of encoder blocks.
        dropout_rate (float): Dropout rate.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        attention_dropout_rate (float): Dropout rate in attention.
        input_layer (Union[None, torch.nn.Module]): Input layer module, or None.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied. i.e. x -> x + att(x)
        positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        macaron_style (bool): Whether to use macaron style for positionwise layer.
        use_cnn_module (bool): Whether to use convolution module.
        cnn_module_kernel (int): Kernel size of convolution module.
        zero_triu (bool): Whether to zero the upper triangle of the attention matrix.
        utt_embed (int or None): Dimensionality of the utterance (speaker) embedding, if any.
        connect_utt_emb_at_encoder_out (bool): Fuse the utterance embedding after
            the encoder stack (True) or before positional encoding (False).
        spk_emb_bottleneck_size (int): Bottleneck size of the speaker embedding projection.
        lang_embs (int or None): Number of language-embedding entries, if any.
    """

    def __init__(self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1,
                 attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, positionwise_conv_kernel_size=1,
                 macaron_style=False, use_cnn_module=False, cnn_module_kernel=31, zero_triu=False, utt_embed=None, connect_utt_emb_at_encoder_out=True,
                 spk_emb_bottleneck_size=128, lang_embs=None):
        super(Conformer, self).__init__()

        activation = Swish()
        self.conv_subsampling_factor = 1

        if isinstance(input_layer, torch.nn.Module):
            self.embed = input_layer
            self.pos_enc = RelPositionalEncoding(attention_dim, positional_dropout_rate)
        elif input_layer is None:
            self.embed = None
            self.pos_enc = torch.nn.Sequential(RelPositionalEncoding(attention_dim, positional_dropout_rate))
        else:
            # NOTE(review): string input layers — including the default "conv2d" —
            # are not supported here and always raise; callers must pass a module or None.
            raise ValueError("unknown input_layer: " + input_layer)

        self.normalize_before = normalize_before

        self.connect_utt_emb_at_encoder_out = connect_utt_emb_at_encoder_out
        if utt_embed is not None:
            self.hs_emb_projection = torch.nn.Linear(attention_dim + spk_emb_bottleneck_size, attention_dim)
            # embedding projection derived from https://arxiv.org/pdf/1705.08947.pdf
            self.embedding_projection = torch.nn.Sequential(torch.nn.Linear(utt_embed, spk_emb_bottleneck_size),
                                                            torch.nn.Softsign())
        if lang_embs is not None:
            self.language_embedding = torch.nn.Embedding(num_embeddings=lang_embs, embedding_dim=attention_dim)

        # self-attention module definition
        encoder_selfattn_layer = RelPositionMultiHeadedAttention
        encoder_selfattn_layer_args = (attention_heads, attention_dim, attention_dropout_rate, zero_triu)

        # feed-forward module definition
        positionwise_layer = MultiLayeredConv1d
        positionwise_layer_args = (attention_dim, linear_units, positionwise_conv_kernel_size, dropout_rate,)

        # convolution module definition
        convolution_layer = ConvolutionModule
        convolution_layer_args = (attention_dim, cnn_module_kernel, activation)

        # stack of num_blocks identical conformer encoder layers
        self.encoders = repeat(num_blocks, lambda lnum: EncoderLayer(attention_dim, encoder_selfattn_layer(*encoder_selfattn_layer_args),
                                                                     positionwise_layer(*positionwise_layer_args),
                                                                     positionwise_layer(*positionwise_layer_args) if macaron_style else None,
                                                                     convolution_layer(*convolution_layer_args) if use_cnn_module else None, dropout_rate,
                                                                     normalize_before, concat_after))
        if self.normalize_before:
            self.after_norm = LayerNorm(attention_dim)

    def forward(self, xs, masks, utterance_embedding=None, lang_ids=None):
        """
        Encode input sequence.

        Args:
            xs (torch.Tensor): Input tensor (#batch, time, idim).
            masks (torch.Tensor): Mask tensor (#batch, time).
            utterance_embedding (torch.Tensor, optional): Utterance-level
                conditioning embedding (e.g. speaker identity).
            lang_ids (torch.LongTensor, optional): Language ids used to look up
                an additive language embedding.

        Returns:
            torch.Tensor: Output tensor (#batch, time, attention_dim).
            torch.Tensor: Mask tensor (#batch, time).

        """

        if self.embed is not None:
            xs = self.embed(xs)

        if lang_ids is not None:
            lang_embs = self.language_embedding(lang_ids)
            xs = xs + lang_embs  # offset the phoneme distribution of a language

        # optionally fuse the utterance embedding BEFORE the encoder stack
        if utterance_embedding is not None and not self.connect_utt_emb_at_encoder_out:
            xs = self._integrate_with_utt_embed(xs, utterance_embedding)

        xs = self.pos_enc(xs)

        xs, masks = self.encoders(xs, masks)
        # encoder layers return (x, pos_emb) tuples when positional embeddings flow through
        if isinstance(xs, tuple):
            xs = xs[0]

        if self.normalize_before:
            xs = self.after_norm(xs)

        # optionally fuse the utterance embedding AFTER the encoder stack
        if utterance_embedding is not None and self.connect_utt_emb_at_encoder_out:
            xs = self._integrate_with_utt_embed(xs, utterance_embedding)

        return xs, masks

    def _integrate_with_utt_embed(self, hs, utt_embeddings):
        # project embedding into smaller space
        speaker_embeddings_projected = self.embedding_projection(utt_embeddings)
        # concat hidden states with spk embeds and then apply projection
        speaker_embeddings_expanded = F.normalize(speaker_embeddings_projected).unsqueeze(1).expand(-1, hs.size(1), -1)
        hs = self.hs_emb_projection(torch.cat([hs, speaker_embeddings_expanded], dim=-1))
        return hs
+ return hs
IMSToucan/Layers/Convolution.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 Johns Hopkins University (Shinji Watanabe)
2
+ # Northwestern Polytechnical University (Pengcheng Guo)
3
+ # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
4
+ # Adapted by Florian Lux 2021
5
+
6
+
7
+ from torch import nn
8
+
9
+
10
class ConvolutionModule(nn.Module):
    """
    ConvolutionModule in Conformer model.

    Pipeline: pointwise conv -> GLU -> depthwise conv -> GroupNorm ->
    activation -> pointwise conv, applied along the channel dimension.

    Args:
        channels (int): The number of channels of conv layers.
        kernel_size (int): Kernel size of the depthwise conv (must be odd).
        activation (nn.Module): Activation applied after normalization.
        bias (bool): Whether the convolutions use a bias term.
    """

    def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):
        super(ConvolutionModule, self).__init__()
        # an odd kernel is required so that 'SAME' padding preserves the length
        assert (kernel_size - 1) % 2 == 0
        same_padding = (kernel_size - 1) // 2

        self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels, kernel_size=1, stride=1, padding=0, bias=bias, )
        self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size, stride=1, padding=same_padding, groups=channels, bias=bias, )
        self.norm = nn.GroupNorm(num_groups=32, num_channels=channels)
        self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1, stride=1, padding=0, bias=bias, )
        self.activation = activation

    def forward(self, x):
        """
        Compute convolution module.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, channels).

        Returns:
            torch.Tensor: Output tensor (#batch, time, channels).

        """
        # convolutions operate on (batch, channels, time)
        hidden = x.transpose(1, 2)

        # GLU mechanism: expand to 2*channels, gate back down to channels
        hidden = nn.functional.glu(self.pointwise_conv1(hidden), dim=1)

        # 1D depthwise conv followed by normalization and activation
        hidden = self.activation(self.norm(self.depthwise_conv(hidden)))

        hidden = self.pointwise_conv2(hidden)

        # restore (batch, time, channels) layout
        return hidden.transpose(1, 2)
+ return x.transpose(1, 2)
IMSToucan/Layers/DurationPredictor.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2019 Tomoki Hayashi
2
+ # MIT License (https://opensource.org/licenses/MIT)
3
+ # Adapted by Florian Lux 2021
4
+
5
+
6
+ import torch
7
+
8
+ from .LayerNorm import LayerNorm
9
+
10
+
11
class DurationPredictor(torch.nn.Module):
    """
    Duration predictor module.

    This is a module of duration predictor described
    in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
    The duration predictor predicts a duration of each frame in log domain
    from the hidden embeddings of encoder.

    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf

    Note:
        The calculation domain of outputs is different
        between in `forward` and in `inference`. In `forward`,
        the outputs are calculated in log domain but in `inference`,
        those are calculated in linear domain.

    """

    def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0):
        """
        Initialize duration predictor module.

        Args:
            idim (int): Input dimension.
            n_layers (int, optional): Number of convolutional layers.
            n_chans (int, optional): Number of channels of convolutional layers.
            kernel_size (int, optional): Kernel size of convolutional layers.
            dropout_rate (float, optional): Dropout rate.
            offset (float, optional): Offset value to avoid nan in log domain.

        """
        super(DurationPredictor, self).__init__()
        self.offset = offset
        self.conv = torch.nn.ModuleList()
        for layer_index in range(n_layers):
            # the first layer maps from the input dim, later ones stay at n_chans
            input_channels = n_chans if layer_index else idim
            self.conv.append(torch.nn.Sequential(
                torch.nn.Conv1d(input_channels, n_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2, ),
                torch.nn.ReLU(),
                LayerNorm(n_chans, dim=1),
                torch.nn.Dropout(dropout_rate), ))
        self.linear = torch.nn.Linear(n_chans, 1)

    def _forward(self, xs, x_masks=None, is_inference=False):
        # convolutions operate on (B, idim, Tmax)
        hidden = xs.transpose(1, -1)
        for conv_block in self.conv:
            hidden = conv_block(hidden)  # (B, C, Tmax)

        # NOTE: predictions are made in the log domain
        preds = self.linear(hidden.transpose(1, -1)).squeeze(-1)  # (B, Tmax)

        if is_inference:
            # NOTE: convert to the linear domain; clamp avoids negative durations
            preds = torch.clamp(torch.round(preds.exp() - self.offset), min=0).long()

        if x_masks is not None:
            preds = preds.masked_fill(x_masks, 0.0)

        return preds

    def forward(self, xs, x_masks=None):
        """
        Calculate forward propagation.

        Args:
            xs (Tensor): Batch of input sequences (B, Tmax, idim).
            x_masks (ByteTensor, optional):
                Batch of masks indicating padded part (B, Tmax).

        Returns:
            Tensor: Batch of predicted durations in log domain (B, Tmax).

        """
        return self._forward(xs, x_masks, False)

    def inference(self, xs, x_masks=None):
        """
        Inference duration.

        Args:
            xs (Tensor): Batch of input sequences (B, Tmax, idim).
            x_masks (ByteTensor, optional):
                Batch of masks indicating padded part (B, Tmax).

        Returns:
            LongTensor: Batch of predicted durations in linear domain (B, Tmax).

        """
        return self._forward(xs, x_masks, True)
+ return self._forward(xs, x_masks, True)
99
+
100
+
101
class DurationPredictorLoss(torch.nn.Module):
    """
    Loss function module for duration predictor.

    The loss value is calculated in log domain to make it Gaussian.

    """

    def __init__(self, offset=1.0, reduction="mean"):
        """
        Args:
            offset (float, optional): Offset value to avoid nan in log domain.
            reduction (str): Reduction type in loss calculation.

        """
        super(DurationPredictorLoss, self).__init__()
        self.criterion = torch.nn.MSELoss(reduction=reduction)
        self.offset = offset

    def forward(self, outputs, targets):
        """
        Calculate forward propagation.

        Args:
            outputs (Tensor): Batch of prediction durations in log domain (B, T)
            targets (LongTensor): Batch of groundtruth durations in linear domain (B, T)

        Returns:
            Tensor: Mean squared error loss value.

        Note:
            `outputs` is in log domain but `targets` is in linear domain.

        """
        # targets arrive in the linear domain; move them to the log domain
        # (with offset) before comparing against the log-domain predictions
        log_targets = torch.log(targets.float() + self.offset)
        return self.criterion(outputs, log_targets)
+ return loss
IMSToucan/Layers/EncoderLayer.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 Johns Hopkins University (Shinji Watanabe)
2
+ # Northwestern Polytechnical University (Pengcheng Guo)
3
+ # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
4
+ # Adapted by Florian Lux 2021
5
+
6
+
7
+ import torch
8
+ from torch import nn
9
+
10
+ from .LayerNorm import LayerNorm
11
+
12
+
13
class EncoderLayer(nn.Module):
    """
    Encoder layer module.

    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance
            can be used as the argument.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
            can be used as the argument.
        feed_forward_macaron (torch.nn.Module): Additional feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
            can be used as the argument.
        conv_module (torch.nn.Module): Convolution module instance.
            `ConvlutionModule` instance can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied. i.e. x -> x + att(x)

    """

    def __init__(self, size, self_attn, feed_forward, feed_forward_macaron, conv_module, dropout_rate, normalize_before=True, concat_after=False, ):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.feed_forward_macaron = feed_forward_macaron
        self.conv_module = conv_module
        self.norm_ff = LayerNorm(size)  # for the FNN module
        self.norm_mha = LayerNorm(size)  # for the MHA module
        if feed_forward_macaron is not None:
            self.norm_ff_macaron = LayerNorm(size)
            # macaron style halves each feed-forward contribution
            self.ff_scale = 0.5
        else:
            self.ff_scale = 1.0
        if self.conv_module is not None:
            self.norm_conv = LayerNorm(size)  # for the CNN module
            self.norm_final = LayerNorm(size)  # for the final output of the block
        self.dropout = nn.Dropout(dropout_rate)
        self.size = size
        self.normalize_before = normalize_before
        self.concat_after = concat_after
        if self.concat_after:
            self.concat_linear = nn.Linear(size + size, size)

    def forward(self, x_input, mask, cache=None):
        """
        Compute encoded features.

        Args:
            x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb.
                - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)].
                - w/o pos emb: Tensor (#batch, time, size).
            mask (torch.Tensor): Mask tensor for the input (#batch, time).
            cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).

        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, time).

        """
        # unpack positional embedding if the caller threaded one through
        if isinstance(x_input, tuple):
            x, pos_emb = x_input[0], x_input[1]
        else:
            x, pos_emb = x_input, None

        # whether to use macaron style (extra half-weighted FF before attention)
        if self.feed_forward_macaron is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_ff_macaron(x)
            x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x))
            if not self.normalize_before:
                x = self.norm_ff_macaron(x)

        # multi-headed self-attention module
        residual = x
        if self.normalize_before:
            x = self.norm_mha(x)

        if cache is None:
            x_q = x
        else:
            # incremental decoding: only the newest time step attends,
            # the cached prefix provides the keys/values
            assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)
            x_q = x[:, -1:, :]
            residual = residual[:, -1:, :]
            mask = None if mask is None else mask[:, -1:, :]

        if pos_emb is not None:
            x_att = self.self_attn(x_q, x, x, pos_emb, mask)
        else:
            x_att = self.self_attn(x_q, x, x, mask)

        if self.concat_after:
            x_concat = torch.cat((x, x_att), dim=-1)
            x = residual + self.concat_linear(x_concat)
        else:
            x = residual + self.dropout(x_att)
        if not self.normalize_before:
            x = self.norm_mha(x)

        # convolution module
        if self.conv_module is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_conv(x)
            x = residual + self.dropout(self.conv_module(x))
            if not self.normalize_before:
                x = self.norm_conv(x)

        # feed forward module
        residual = x
        if self.normalize_before:
            x = self.norm_ff(x)
        x = residual + self.ff_scale * self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm_ff(x)

        if self.conv_module is not None:
            x = self.norm_final(x)

        if cache is not None:
            # re-attach the cached prefix so the output covers the full sequence
            x = torch.cat([cache, x], dim=1)

        if pos_emb is not None:
            return (x, pos_emb), mask

        return x, mask
IMSToucan/Layers/LayerNorm.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Written by Shigeki Karita, 2019
2
+ # Published under Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
3
+ # Adapted by Florian Lux, 2021
4
+
5
+ import torch
6
+
7
+
8
class LayerNorm(torch.nn.LayerNorm):
    """
    Layer normalization module that can normalize over an arbitrary axis.

    Args:
        nout (int): Output dim size.
        dim (int): Dimension to be normalized.
    """

    def __init__(self, nout, dim=-1):
        """
        Construct a LayerNorm object normalizing `nout` features along `dim`.
        """
        super(LayerNorm, self).__init__(nout, eps=1e-12)
        self.dim = dim

    def forward(self, x):
        """
        Apply layer normalization along the configured dimension.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Normalized tensor.
        """
        if self.dim != -1:
            # move the target axis to the last position, normalize, move it back
            return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
        return super(LayerNorm, self).forward(x)
IMSToucan/Layers/LengthRegulator.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2019 Tomoki Hayashi
2
+ # MIT License (https://opensource.org/licenses/MIT)
3
+ # Adapted by Florian Lux 2021
4
+
5
+ from abc import ABC
6
+
7
+ import torch
8
+
9
+ from ..Utility.utils import pad_list
10
+
11
+
12
class LengthRegulator(torch.nn.Module, ABC):
    """
    Length regulator module for feed-forward Transformer.

    This is a module of length regulator described in
    `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
    It expands char- or phoneme-level embedding features to frame level
    by repeating each feature vector according to its predicted duration.

    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf

    """

    def __init__(self, pad_value=0.0):
        """
        Initialize length regulator module.

        Args:
            pad_value (float, optional): Value used for padding.
        """
        super(LengthRegulator, self).__init__()
        self.pad_value = pad_value

    def forward(self, xs, ds, alpha=1.0):
        """
        Calculate forward propagation.

        Args:
            xs (Tensor): Batch of sequences of char or phoneme embeddings (B, Tmax, D).
            ds (LongTensor): Batch of durations of each frame (B, T).
            alpha (float, optional): Alpha value to control speed of speech.

        Returns:
            Tensor: replicated input tensor based on durations (B, T*, D).
        """
        if alpha != 1.0:
            # rescale all durations to speed up / slow down the speech
            assert alpha > 0
            ds = torch.round(ds.float() * alpha).long()

        if ds.sum() == 0:
            # avoid producing empty outputs when every duration collapsed to zero
            ds[ds.sum(dim=1).eq(0)] = 1

        expanded = [self._repeat_one_sequence(seq, dur) for seq, dur in zip(xs, ds)]
        return pad_list(expanded, self.pad_value)

    def _repeat_one_sequence(self, x, d):
        """
        Repeat each frame according to duration
        """
        return torch.repeat_interleave(x, d, dim=0)
IMSToucan/Layers/MultiLayeredConv1d.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2019 Tomoki Hayashi
2
+ # MIT License (https://opensource.org/licenses/MIT)
3
+ # Adapted by Florian Lux 2021
4
+
5
+ """
6
+ Layer modules for FFT block in FastSpeech (Feed-forward Transformer).
7
+ """
8
+
9
+ import torch
10
+
11
+
12
class MultiLayeredConv1d(torch.nn.Module):
    """
    Multi-layered conv1d for Transformer block.

    This is a module of multi-layered conv1d designed
    to replace positionwise feed-forward network
    in Transformer block, which is introduced in
    `FastSpeech: Fast, Robust and Controllable Text to Speech`_.

    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf
    """

    def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
        """
        Initialize MultiLayeredConv1d module.

        Args:
            in_chans (int): Number of input channels.
            hidden_chans (int): Number of hidden channels.
            kernel_size (int): Kernel size of conv1d.
            dropout_rate (float): Dropout rate.
        """
        super(MultiLayeredConv1d, self).__init__()
        # "same" padding so the time dimension is preserved
        self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2, )
        self.w_2 = torch.nn.Conv1d(hidden_chans, in_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2, )
        self.dropout = torch.nn.Dropout(dropout_rate)

    def forward(self, x):
        """
        Calculate forward propagation.

        Args:
            x (torch.Tensor): Batch of input tensors (B, T, in_chans).

        Returns:
            torch.Tensor: Batch of output tensors (B, T, in_chans).
        """
        # transposes move channels to dim 1 for Conv1d and back again
        x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
        return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)
52
+
53
+
54
class Conv1dLinear(torch.nn.Module):
    """
    Conv1D + Linear for Transformer block.

    A variant of MultiLayeredConv1d, which replaces second conv-layer to linear.
    """

    def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
        """
        Initialize Conv1dLinear module.

        Args:
            in_chans (int): Number of input channels.
            hidden_chans (int): Number of hidden channels.
            kernel_size (int): Kernel size of conv1d.
            dropout_rate (float): Dropout rate.
        """
        super(Conv1dLinear, self).__init__()
        # "same" padding so the time dimension is preserved
        self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2, )
        self.w_2 = torch.nn.Linear(hidden_chans, in_chans)
        self.dropout = torch.nn.Dropout(dropout_rate)

    def forward(self, x):
        """
        Calculate forward propagation.

        Args:
            x (torch.Tensor): Batch of input tensors (B, T, in_chans).

        Returns:
            torch.Tensor: Batch of output tensors (B, T, in_chans).
        """
        x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
        return self.w_2(self.dropout(x))
IMSToucan/Layers/MultiSequential.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Written by Shigeki Karita, 2019
2
+ # Published under Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
3
+ # Adapted by Florian Lux, 2021
4
+
5
+ import torch
6
+
7
+
8
class MultiSequential(torch.nn.Sequential):
    """
    Multi-input multi-output torch.nn.Sequential.

    Each submodule receives the (unpacked) outputs of the previous one,
    so modules that return tuples can be chained.
    """

    def forward(self, *args):
        """
        Feed the argument tuple through every submodule in order.
        """
        for module in self:
            args = module(*args)
        return args


def repeat(N, fn):
    """
    Repeat module N times.

    Args:
        N (int): Number of repeat time.
        fn (Callable): Function to generate module.

    Returns:
        MultiSequential: Repeated model instance.
    """
    modules = (fn(idx) for idx in range(N))
    return MultiSequential(*modules)
IMSToucan/Layers/PositionalEncoding.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Taken from ESPNet
3
+ """
4
+
5
+ import math
6
+
7
+ import torch
8
+
9
+
10
class PositionalEncoding(torch.nn.Module):
    """
    Positional encoding.

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
        reverse (bool): Whether to reverse the input position.
    """

    def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):
        """
        Construct an PositionalEncoding object.
        """
        super(PositionalEncoding, self).__init__()
        self.d_model = d_model
        self.reverse = reverse
        self.xscale = math.sqrt(self.d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.pe = None
        # BUGFIX: d_model is a plain int and has no .device attribute, so the
        # dummy tensor is created on the default device (matching the sibling
        # RelPositionalEncoding); extend_pe re-targets self.pe to the
        # dtype/device of the real input on every forward call.
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))

    def extend_pe(self, x):
        """
        Reset the positional encodings if the cached table is too short for x.
        """
        if self.pe is not None:
            if self.pe.size(1) >= x.size(1):
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        pe = torch.zeros(x.size(1), self.d_model)
        if self.reverse:
            position = torch.arange(x.size(1) - 1, -1, -1.0, dtype=torch.float32).unsqueeze(1)
        else:
            position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        # standard sinusoidal table: sin on even dims, cos on odd dims
        div_term = torch.exp(torch.arange(0, self.d_model, 2, dtype=torch.float32) * -(math.log(10000.0) / self.d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.pe = pe.to(device=x.device, dtype=x.dtype)

    def forward(self, x):
        """
        Add positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
        """
        self.extend_pe(x)
        x = x * self.xscale + self.pe[:, : x.size(1)]
        return self.dropout(x)
+
67
+
68
class RelPositionalEncoding(torch.nn.Module):
    """
    Relative positional encoding module (new implementation).
    Details can be found in https://github.com/espnet/espnet/pull/2816.
    See : Appendix B in https://arxiv.org/abs/1901.02860
    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        """
        Construct an PositionalEncoding object.
        """
        super(RelPositionalEncoding, self).__init__()
        self.d_model = d_model
        self.xscale = math.sqrt(self.d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.pe = None
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))

    def extend_pe(self, x):
        """Regenerate the cached encodings if the input got longer."""
        seq_len = x.size(1)
        # self.pe holds both positive and negative relative positions,
        # so its length is 2 * input_len - 1
        if self.pe is not None and self.pe.size(1) >= seq_len * 2 - 1:
            if self.pe.dtype != x.dtype or self.pe.device != x.device:
                self.pe = self.pe.to(dtype=x.dtype, device=x.device)
            return
        # `i` is the query position and `j` the key position; positive
        # relative positions are used when i > j, negative ones when i < j
        pos_part = torch.zeros(seq_len, self.d_model, device=x.device)
        neg_part = torch.zeros(seq_len, self.d_model, device=x.device)
        position = torch.arange(0, seq_len, dtype=torch.float32, device=x.device).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, self.d_model, 2, dtype=torch.float32, device=x.device) * -(math.log(10000.0) / self.d_model))
        pos_part[:, 0::2] = torch.sin(position * div_term)
        pos_part[:, 1::2] = torch.cos(position * div_term)
        neg_part[:, 0::2] = torch.sin(-1 * position * div_term)
        neg_part[:, 1::2] = torch.cos(-1 * position * div_term)

        # flip the positive half so the center of the table is relative
        # position 0, then append the strictly negative positions; this
        # supports the shifting trick from https://arxiv.org/abs/1901.02860
        pos_part = torch.flip(pos_part, [0]).unsqueeze(0)
        neg_part = neg_part[1:].unsqueeze(0)
        self.pe = torch.cat([pos_part, neg_part], dim=1).to(dtype=x.dtype)

    def forward(self, x):
        """
        Add positional encoding.
        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).
        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
        """
        self.extend_pe(x)
        x = x * self.xscale
        center = self.pe.size(1) // 2
        pos_emb = self.pe[:, center - x.size(1) + 1: center + x.size(1), ]
        return self.dropout(x), self.dropout(pos_emb)
131
+
132
+
133
class ScaledPositionalEncoding(PositionalEncoding):
    """
    Scaled positional encoding module.

    See Sec. 3.2 https://arxiv.org/abs/1809.08895

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.

    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)
        # learnable scale applied to the positional table instead of
        # scaling the input embeddings
        self.alpha = torch.nn.Parameter(torch.tensor(1.0))

    def reset_parameters(self):
        """Reset the learnable scale back to its initial value of 1.0."""
        self.alpha.data = torch.tensor(1.0)

    def forward(self, x):
        """
        Add positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).

        """
        self.extend_pe(x)
        return self.dropout(x + self.alpha * self.pe[:, : x.size(1)])
+ return self.dropout(x)
IMSToucan/Layers/PositionwiseFeedForward.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Written by Shigeki Karita, 2019
2
+ # Published under Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
3
+ # Adapted by Florian Lux, 2021
4
+
5
+
6
+ import torch
7
+
8
+
9
class PositionwiseFeedForward(torch.nn.Module):
    """
    Position-wise feed forward layer: two linear projections with an
    activation and dropout in between.

    Args:
        idim (int): Input dimenstion.
        hidden_units (int): The number of hidden units.
        dropout_rate (float): Dropout rate.

    """

    def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = torch.nn.Linear(idim, hidden_units)
        self.w_2 = torch.nn.Linear(hidden_units, idim)
        self.dropout = torch.nn.Dropout(dropout_rate)
        self.activation = activation

    def forward(self, x):
        """Project up to hidden_units, activate, drop out, and project back."""
        hidden = self.activation(self.w_1(x))
        return self.w_2(self.dropout(hidden))
IMSToucan/Layers/PostNet.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Taken from ESPNet
3
+ """
4
+
5
+ import torch
6
+
7
+
8
class PostNet(torch.nn.Module):
    """
    From Tacotron2

    Postnet module for Spectrogram prediction network.

    This is a module of Postnet in Spectrogram prediction network,
    which described in `Natural TTS Synthesis by
    Conditioning WaveNet on Mel Spectrogram Predictions`_.
    The Postnet refines the predicted Mel-filterbank of the decoder,
    which helps to compensate the detail structure of spectrogram.

    .. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
        https://arxiv.org/abs/1712.05884
    """

    def __init__(self, idim, odim, n_layers=5, n_chans=512, n_filts=5, dropout_rate=0.5, use_batch_norm=True):
        """
        Initialize postnet module.

        Args:
            idim (int): Dimension of the inputs.
            odim (int): Dimension of the outputs.
            n_layers (int, optional): The number of layers.
            n_filts (int, optional): The number of filter size.
            n_chans (int, optional): The number of filter channels.
            use_batch_norm (bool, optional): Whether to use normalization.
            dropout_rate (float, optional): Dropout rate.
        """
        super(PostNet, self).__init__()
        self.postnet = torch.nn.ModuleList()
        same_pad = (n_filts - 1) // 2
        for layer in range(n_layers - 1):
            in_ch = odim if layer == 0 else n_chans
            out_ch = odim if layer == n_layers - 1 else n_chans
            conv = torch.nn.Conv1d(in_ch, out_ch, n_filts, stride=1, padding=same_pad, bias=False, )
            if use_batch_norm:
                # NOTE: despite the flag name, GroupNorm is used for normalization here
                stage = torch.nn.Sequential(conv,
                                            torch.nn.GroupNorm(num_groups=32, num_channels=out_ch),
                                            torch.nn.Tanh(),
                                            torch.nn.Dropout(dropout_rate), )
            else:
                stage = torch.nn.Sequential(conv, torch.nn.Tanh(), torch.nn.Dropout(dropout_rate), )
            self.postnet += [stage]
        # final stage maps back to odim and has no Tanh
        last_in = n_chans if n_layers != 1 else odim
        final_conv = torch.nn.Conv1d(last_in, odim, n_filts, stride=1, padding=same_pad, bias=False, )
        if use_batch_norm:
            self.postnet += [torch.nn.Sequential(final_conv,
                                                 torch.nn.GroupNorm(num_groups=20, num_channels=odim),
                                                 torch.nn.Dropout(dropout_rate), )]
        else:
            self.postnet += [torch.nn.Sequential(final_conv, torch.nn.Dropout(dropout_rate), )]

    def forward(self, xs):
        """
        Calculate forward propagation.

        Args:
            xs (Tensor): Batch of the sequences of padded input tensors (B, idim, Tmax).

        Returns:
            Tensor: Batch of padded output tensor. (B, odim, Tmax).
        """
        for stage in self.postnet:
            xs = stage(xs)
        return xs
IMSToucan/Layers/RNNAttention.py ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Published under Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
2
+ # Adapted by Florian Lux, 2021
3
+
4
+ import torch
5
+ import torch.nn.functional as F
6
+
7
+ from ..Utility.utils import make_pad_mask
8
+ from ..Utility.utils import to_device
9
+
10
+
11
+ def _apply_attention_constraint(e, last_attended_idx, backward_window=1, forward_window=3):
12
+ """
13
+ Apply monotonic attention constraint.
14
+
15
+ This function apply the monotonic attention constraint
16
+ introduced in `Deep Voice 3: Scaling
17
+ Text-to-Speech with Convolutional Sequence Learning`_.
18
+
19
+ Args:
20
+ e (Tensor): Attention energy before applying softmax (1, T).
21
+ last_attended_idx (int): The index of the inputs of the last attended [0, T].
22
+ backward_window (int, optional): Backward window size in attention constraint.
23
+ forward_window (int, optional): Forward window size in attetion constraint.
24
+
25
+ Returns:
26
+ Tensor: Monotonic constrained attention energy (1, T).
27
+
28
+ .. _`Deep Voice 3: Scaling Text-to-Speech with Convolutional Sequence Learning`:
29
+ https://arxiv.org/abs/1710.07654
30
+ """
31
+ if e.size(0) != 1:
32
+ raise NotImplementedError("Batch attention constraining is not yet supported.")
33
+ backward_idx = last_attended_idx - backward_window
34
+ forward_idx = last_attended_idx + forward_window
35
+ if backward_idx > 0:
36
+ e[:, :backward_idx] = -float("inf")
37
+ if forward_idx < e.size(1):
38
+ e[:, forward_idx:] = -float("inf")
39
+ return e
40
+
41
+
42
class AttLoc(torch.nn.Module):
    """
    location-aware attention module.

    Reference: Attention-Based Models for Speech Recognition
    (https://arxiv.org/pdf/1506.07503.pdf)

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param bool han_mode: flag to switch on mode of hierarchical attention
        and not store pre_compute_enc_h
    """

    def __init__(self, eprojs, dunits, att_dim, aconv_chans, aconv_filts, han_mode=False):
        super(AttLoc, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
        # 1-D convolution over the previous attention weights (location feature)
        self.loc_conv = torch.nn.Conv2d(1, aconv_chans, (1, 2 * aconv_filts + 1), padding=(0, aconv_filts), bias=False, )
        self.gvec = torch.nn.Linear(att_dim, 1)

        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        # cached encoder state, its projection, and the padding mask;
        # these are reused across decoder steps and cleared via reset()
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """reset states"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None

    def forward(self,
                enc_hs_pad,
                enc_hs_len,
                dec_z,
                att_prev,
                scaling=2.0,
                last_attended_idx=None,
                backward_window=1,
                forward_window=3):
        """
        Calculate AttLoc forward propagation.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: previous attention weight (B x T_max)
        :param float scaling: scaling parameter before applying softmax
        :param int last_attended_idx: index of the inputs of the last attended
        :param int backward_window: backward window size in attention constraint
        :param int forward_window: forward window size in attention constraint
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: previous attention weights (B x T_max)
        :rtype: torch.Tensor
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        # (in han_mode the projection is recomputed every call instead of cached)
        if self.pre_compute_enc_h is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)
        if dec_z is None:
            # first decoder step: use a zero decoder state
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)

        # initialize attention weight with uniform dist.
        if att_prev is None:
            # if no bias, 0 0-pad goes 0
            att_prev = 1.0 - make_pad_mask(enc_hs_len, device=dec_z.device).to(dtype=dec_z.dtype)
            # normalize so each utterance's weights sum to one over its true length
            att_prev = att_prev / att_prev.new(enc_hs_len).unsqueeze(-1)

        # att_prev: utt x frame -> utt x 1 x 1 x frame
        # -> utt x att_conv_chans x 1 x frame
        att_conv = self.loc_conv(att_prev.view(batch, 1, 1, self.h_length))
        # att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
        att_conv = att_conv.squeeze(2).transpose(1, 2)
        # att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
        att_conv = self.mlp_att(att_conv)

        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)

        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(torch.tanh(att_conv + self.pre_compute_enc_h + dec_z_tiled)).squeeze(2)

        # NOTE: consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        # padded frames get -inf energy so softmax assigns them zero weight
        e.masked_fill_(self.mask, -float("inf"))

        # apply monotonic attention constraint (mainly for TTS)
        if last_attended_idx is not None:
            e = _apply_attention_constraint(e, last_attended_idx, backward_window, forward_window)

        w = F.softmax(scaling * e, dim=1)

        # weighted sum over frames
        # utt x hdim
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)

        return c, w
158
+
159
+
160
class AttForwardTA(torch.nn.Module):
    """Forward attention with transition agent module.
    Reference:
        Forward attention in sequence-to-sequence acoustic modeling for speech synthesis
        (https://arxiv.org/pdf/1807.06736.pdf)
    :param int eunits: # units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param int odim: output dimension
    """

    def __init__(self, eunits, dunits, att_dim, aconv_chans, aconv_filts, odim):
        super(AttForwardTA, self).__init__()
        self.mlp_enc = torch.nn.Linear(eunits, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        # transition agent: predicts the probability of advancing the attention
        self.mlp_ta = torch.nn.Linear(eunits + dunits + odim, 1)
        self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
        # convolution over previous attention weights (location feature)
        self.loc_conv = torch.nn.Conv2d(1, aconv_chans, (1, 2 * aconv_filts + 1), padding=(0, aconv_filts), bias=False, )
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eunits = eunits
        self.att_dim = att_dim
        # cached encoder state / projection / mask, cleared via reset()
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
        self.trans_agent_prob = 0.5

    def reset(self):
        # clear cached encoder projections and restart the transition agent
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
        self.trans_agent_prob = 0.5

    def forward(self,
                enc_hs_pad,
                enc_hs_len,
                dec_z,
                att_prev,
                out_prev,
                scaling=1.0,
                last_attended_idx=None,
                backward_window=1,
                forward_window=3):
        """
        Calculate AttForwardTA forward propagation.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B, Tmax, eunits)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B, dunits)
        :param torch.Tensor att_prev: attention weights of previous step
        :param torch.Tensor out_prev: decoder outputs of previous step (B, odim)
        :param float scaling: scaling parameter before applying softmax
        :param int last_attended_idx: index of the inputs of the last attended
        :param int backward_window: backward window size in attention constraint
        :param int forward_window: forward window size in attention constraint
        :return: attention weighted encoder state (B, dunits)
        :rtype: torch.Tensor
        :return: previous attention weights (B, Tmax)
        :rtype: torch.Tensor
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)

        if dec_z is None:
            # first decoder step: use a zero decoder state
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)

        if att_prev is None:
            # initial attention will be [1, 0, 0, ...]
            att_prev = enc_hs_pad.new_zeros(*enc_hs_pad.size()[:2])
            att_prev[:, 0] = 1.0

        # att_prev: utt x frame -> utt x 1 x 1 x frame
        # -> utt x att_conv_chans x 1 x frame
        att_conv = self.loc_conv(att_prev.view(batch, 1, 1, self.h_length))
        # att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
        att_conv = att_conv.squeeze(2).transpose(1, 2)
        # att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
        att_conv = self.mlp_att(att_conv)

        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)

        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(torch.tanh(att_conv + self.pre_compute_enc_h + dec_z_tiled)).squeeze(2)

        # NOTE consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        # padded frames get -inf energy so softmax assigns them zero weight
        e.masked_fill_(self.mask, -float("inf"))

        # apply monotonic attention constraint (mainly for TTS)
        if last_attended_idx is not None:
            e = _apply_attention_constraint(e, last_attended_idx, backward_window, forward_window)

        w = F.softmax(scaling * e, dim=1)

        # forward attention: blend staying at the same position with moving
        # one step forward, weighted by the transition agent probability
        att_prev_shift = F.pad(att_prev, (1, 0))[:, :-1]
        w = (self.trans_agent_prob * att_prev + (1 - self.trans_agent_prob) * att_prev_shift) * w
        # NOTE: clamp is needed to avoid nan gradient
        w = F.normalize(torch.clamp(w, 1e-6), p=1, dim=1)

        # weighted sum over frames
        # utt x hdim
        # NOTE use bmm instead of sum(*)
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)

        # update transition agent prob
        self.trans_agent_prob = torch.sigmoid(self.mlp_ta(torch.cat([c, out_prev, dec_z], dim=1)))

        return c, w
IMSToucan/Layers/ResidualBlock.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ """
4
+ References:
5
+ - https://github.com/jik876/hifi-gan
6
+ - https://github.com/kan-bayashi/ParallelWaveGAN
7
+ """
8
+
9
+ import torch
10
+
11
+
12
class Conv1d(torch.nn.Conv1d):
    """
    Conv1d module with customized initialization.
    """

    def __init__(self, *args, **kwargs):
        super(Conv1d, self).__init__(*args, **kwargs)

    def reset_parameters(self):
        """He-initialize the kernel (ReLU gain) and zero the bias."""
        torch.nn.init.kaiming_normal_(self.weight, nonlinearity="relu")
        if self.bias is not None:
            torch.nn.init.zeros_(self.bias)


class Conv1d1x1(Conv1d):
    """
    1x1 Conv1d with customized initialization.
    """

    def __init__(self, in_channels, out_channels, bias):
        # pointwise convolution: kernel size 1, no padding, no dilation
        super(Conv1d1x1, self).__init__(in_channels, out_channels, kernel_size=1, padding=0, dilation=1, bias=bias)
+
34
+
35
+ class HiFiGANResidualBlock(torch.nn.Module):
36
+ """Residual block module in HiFiGAN."""
37
+
38
+ def __init__(self,
39
+ kernel_size=3,
40
+ channels=512,
41
+ dilations=(1, 3, 5),
42
+ bias=True,
43
+ use_additional_convs=True,
44
+ nonlinear_activation="LeakyReLU",
45
+ nonlinear_activation_params={"negative_slope": 0.1}, ):
46
+ """
47
+ Initialize HiFiGANResidualBlock module.
48
+
49
+ Args:
50
+ kernel_size (int): Kernel size of dilation convolution layer.
51
+ channels (int): Number of channels for convolution layer.
52
+ dilations (List[int]): List of dilation factors.
53
+ use_additional_convs (bool): Whether to use additional convolution layers.
54
+ bias (bool): Whether to add bias parameter in convolution layers.
55
+ nonlinear_activation (str): Activation function module name.
56
+ nonlinear_activation_params (dict): Hyperparameters for activation function.
57
+ """
58
+ super().__init__()
59
+ self.use_additional_convs = use_additional_convs
60
+ self.convs1 = torch.nn.ModuleList()
61
+ if use_additional_convs:
62
+ self.convs2 = torch.nn.ModuleList()
63
+ assert kernel_size % 2 == 1, "Kernel size must be odd number."
64
+ for dilation in dilations:
65
+ self.convs1 += [torch.nn.Sequential(getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
66
+ torch.nn.Conv1d(channels,
67
+ channels,
68
+ kernel_size,
69
+ 1,
70
+ dilation=dilation,
71
+ bias=bias,
72
+ padding=(kernel_size - 1) // 2 * dilation, ), )]
73
+ if use_additional_convs:
74
+ self.convs2 += [torch.nn.Sequential(getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
75
+ torch.nn.Conv1d(channels,
76
+ channels,
77
+ kernel_size,
78
+ 1,
79
+ dilation=1,
80
+ bias=bias,
81
+ padding=(kernel_size - 1) // 2, ), )]
82
+
83
+ def forward(self, x):
84
+ """
85
+ Calculate forward propagation.
86
+
87
+ Args:
88
+ x (Tensor): Input tensor (B, channels, T).
89
+
90
+ Returns:
91
+ Tensor: Output tensor (B, channels, T).
92
+ """
93
+ for idx in range(len(self.convs1)):
94
+ xt = self.convs1[idx](x)
95
+ if self.use_additional_convs:
96
+ xt = self.convs2[idx](xt)
97
+ x = xt + x
98
+ return x
IMSToucan/Layers/ResidualStack.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2019 Tomoki Hayashi
2
+ # MIT License (https://opensource.org/licenses/MIT)
3
+ # Adapted by Florian Lux 2021
4
+
5
+
6
+ import torch
7
+
8
+
9
+ class ResidualStack(torch.nn.Module):
10
+
11
+ def __init__(self, kernel_size=3, channels=32, dilation=1, bias=True, nonlinear_activation="LeakyReLU", nonlinear_activation_params={"negative_slope": 0.2},
12
+ pad="ReflectionPad1d", pad_params={}, ):
13
+ """
14
+ Initialize ResidualStack module.
15
+
16
+ Args:
17
+ kernel_size (int): Kernel size of dilation convolution layer.
18
+ channels (int): Number of channels of convolution layers.
19
+ dilation (int): Dilation factor.
20
+ bias (bool): Whether to add bias parameter in convolution layers.
21
+ nonlinear_activation (str): Activation function module name.
22
+ nonlinear_activation_params (dict): Hyperparameters for activation function.
23
+ pad (str): Padding function module name before dilated convolution layer.
24
+ pad_params (dict): Hyperparameters for padding function.
25
+
26
+ """
27
+ super(ResidualStack, self).__init__()
28
+
29
+ # defile residual stack part
30
+ assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."
31
+ self.stack = torch.nn.Sequential(getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
32
+ getattr(torch.nn, pad)((kernel_size - 1) // 2 * dilation, **pad_params),
33
+ torch.nn.Conv1d(channels, channels, kernel_size, dilation=dilation, bias=bias),
34
+ getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
35
+ torch.nn.Conv1d(channels, channels, 1, bias=bias), )
36
+
37
+ # defile extra layer for skip connection
38
+ self.skip_layer = torch.nn.Conv1d(channels, channels, 1, bias=bias)
39
+
40
+ def forward(self, c):
41
+ """
42
+ Calculate forward propagation.
43
+
44
+ Args:
45
+ c (Tensor): Input tensor (B, channels, T).
46
+
47
+ Returns:
48
+ Tensor: Output tensor (B, chennels, T).
49
+
50
+ """
51
+ return self.stack(c) + self.skip_layer(c)
IMSToucan/Layers/STFT.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Taken from ESPNet
3
+ """
4
+
5
+ import torch
6
+ from torch.functional import stft as torch_stft
7
+ from torch_complex.tensor import ComplexTensor
8
+
9
+ from ..Utility.utils import make_pad_mask
10
+
11
+
12
class STFT(torch.nn.Module):
    """
    Short-Time Fourier Transform as a torch module (taken from ESPnet).

    Wraps torch.functional.stft so that window creation, multi-channel
    handling and output-length bookkeeping happen in one place.
    """

    def __init__(self, n_fft=512, win_length=None, hop_length=128, window="hann", center=True, normalized=False,
                 onesided=True):
        super().__init__()
        self.n_fft = n_fft
        # the window length defaults to the FFT size when not given explicitly
        if win_length is None:
            self.win_length = n_fft
        else:
            self.win_length = win_length
        self.hop_length = hop_length
        self.center = center
        self.normalized = normalized
        self.onesided = onesided
        # name of a torch window factory (e.g. "hann" -> torch.hann_window); may be None
        self.window = window

    def extra_repr(self):
        # shown when the module is printed, helps debugging configurations
        return (f"n_fft={self.n_fft}, "
                f"win_length={self.win_length}, "
                f"hop_length={self.hop_length}, "
                f"center={self.center}, "
                f"normalized={self.normalized}, "
                f"onesided={self.onesided}")

    def forward(self, input_wave, ilens=None):
        """
        STFT forward function.
        Args:
            input_wave: (Batch, Nsamples) or (Batch, Nsample, Channels)
            ilens: (Batch) lengths of the unpadded signals; optional
        Returns:
            output: (Batch, Frames, Freq, 2) or (Batch, Frames, Channels, Freq, 2)
            olens: frame counts per batch entry, or None when ilens was None
        """
        bs = input_wave.size(0)

        if input_wave.dim() == 3:
            multi_channel = True
            # input: (Batch, Nsample, Channels) -> (Batch * Channels, Nsample)
            input_wave = input_wave.transpose(1, 2).reshape(-1, input_wave.size(1))
        else:
            multi_channel = False

        # output: (Batch, Freq, Frames, 2=real_imag)
        # or (Batch, Channel, Freq, Frames, 2=real_imag)
        if self.window is not None:
            window_func = getattr(torch, f"{self.window}_window")
            window = window_func(self.win_length, dtype=input_wave.dtype, device=input_wave.device)
        else:
            window = None

        complex_output = torch_stft(input=input_wave,
                                    n_fft=self.n_fft,
                                    win_length=self.win_length,
                                    hop_length=self.hop_length,
                                    center=self.center,
                                    window=window,
                                    normalized=self.normalized,
                                    onesided=self.onesided,
                                    return_complex=True)
        output = torch.view_as_real(complex_output)
        # output: (Batch, Freq, Frames, 2=real_imag)
        # -> (Batch, Frames, Freq, 2=real_imag)
        output = output.transpose(1, 2)
        if multi_channel:
            # output: (Batch * Channel, Frames, Freq, 2=real_imag)
            # -> (Batch, Frame, Channel, Freq, 2=real_imag)
            output = output.view(bs, -1, output.size(1), output.size(2), 2).transpose(1, 2)

        if ilens is not None:
            if self.center:
                # NOTE(review): torch.stft pads with n_fft // 2 on each side when
                # center=True; using win_length here assumes win_length == n_fft — confirm
                pad = self.win_length // 2
                ilens = ilens + 2 * pad

            olens = (ilens - self.win_length) // self.hop_length + 1
            # zero out frames that only exist because of batch padding
            output.masked_fill_(make_pad_mask(olens, output, 1), 0.0)
        else:
            olens = None

        return output, olens

    def inverse(self, input, ilens=None):
        """
        Inverse STFT.
        Args:
            input: Tensor(batch, T, F, 2) or ComplexTensor(batch, T, F)
            ilens: (batch,)
        Returns:
            wavs: (batch, samples)
            ilens: (batch,)
        """
        istft = torch.functional.istft

        if self.window is not None:
            window_func = getattr(torch, f"{self.window}_window")
            window = window_func(self.win_length, dtype=input.dtype, device=input.device)
        else:
            window = None

        # unify both accepted input types to a real/imag-stacked tensor
        if isinstance(input, ComplexTensor):
            input = torch.stack([input.real, input.imag], dim=-1)
        assert input.shape[-1] == 2
        # (batch, T, F, 2) -> (batch, F, T, 2) as expected by istft
        input = input.transpose(1, 2)

        wavs = istft(input, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=window, center=self.center,
                     normalized=self.normalized, onesided=self.onesided, length=ilens.max() if ilens is not None else ilens)

        return wavs, ilens
IMSToucan/Layers/Swish.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 Johns Hopkins University (Shinji Watanabe)
2
+ # Northwestern Polytechnical University (Pengcheng Guo)
3
+ # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
4
+ # Adapted by Florian Lux 2021
5
+
6
+ import torch
7
+
8
+
9
class Swish(torch.nn.Module):
    """
    Swish activation function (x * sigmoid(x)) as used in Conformer.
    """

    def forward(self, x):
        """
        Apply the Swish function elementwise to the input tensor.
        """
        return torch.sigmoid(x) * x
IMSToucan/Layers/VariancePredictor.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2019 Tomoki Hayashi
2
+ # MIT License (https://opensource.org/licenses/MIT)
3
+ # Adapted by Florian Lux 2021
4
+
5
+ from abc import ABC
6
+
7
+ import torch
8
+
9
+ from .LayerNorm import LayerNorm
10
+
11
+
12
class VariancePredictor(torch.nn.Module, ABC):
    """
    Variance predictor module.

    This is a module of variance predictor described in `FastSpeech 2:
    Fast and High-Quality End-to-End Text to Speech`_.

    .. _`FastSpeech 2: Fast and High-Quality End-to-End Text to Speech`:
        https://arxiv.org/abs/2006.04558

    """

    def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, bias=True, dropout_rate=0.5, ):
        """
        Set up the convolutional tower and the scalar projection.

        Args:
            idim (int): Input dimension.
            n_layers (int, optional): Number of convolutional layers.
            n_chans (int, optional): Number of channels of convolutional layers.
            kernel_size (int, optional): Kernel size of convolutional layers.
            bias (bool, optional): Whether the convolutions carry a bias term.
            dropout_rate (float, optional): Dropout rate.
        """
        super().__init__()
        self.conv = torch.nn.ModuleList()
        for layer_index in range(n_layers):
            # first layer maps from the input dimension, the rest stay at n_chans
            input_channels = idim if layer_index == 0 else n_chans
            conv_block = torch.nn.Sequential(
                torch.nn.Conv1d(input_channels, n_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2, bias=bias, ),
                torch.nn.ReLU(),
                LayerNorm(n_chans, dim=1),
                torch.nn.Dropout(dropout_rate), )
            self.conv.append(conv_block)
        # projects every frame down to a single predicted variance value
        self.linear = torch.nn.Linear(n_chans, 1)

    def forward(self, xs, x_masks=None):
        """
        Calculate forward propagation.

        Args:
            xs (Tensor): Batch of input sequences (B, Tmax, idim).
            x_masks (ByteTensor, optional):
                Batch of masks indicating padded part (B, Tmax).

        Returns:
            Tensor: Batch of predicted sequences (B, Tmax, 1).
        """
        hidden = xs.transpose(1, -1)  # (B, idim, Tmax)
        for conv_block in self.conv:
            hidden = conv_block(hidden)  # (B, C, Tmax)

        predictions = self.linear(hidden.transpose(1, 2))  # (B, Tmax, 1)

        if x_masks is not None:
            # zero out predictions for padded frames
            predictions = predictions.masked_fill(x_masks, 0.0)

        return predictions
IMSToucan/Layers/__init__.py ADDED
File without changes
IMSToucan/Preprocessing/ArticulatoryCombinedTextFrontend.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ import sys
3
+
4
+ import panphon
5
+ import phonemizer
6
+ import torch
7
+
8
+ from .papercup_features import generate_feature_table
9
+
10
+
11
class ArticulatoryCombinedTextFrontend:
    """
    Text frontend that maps raw text to per-phoneme articulatory feature
    vectors (papercup features concatenated with panphon features).
    """

    def __init__(self,
                 language,
                 use_word_boundaries=False,  # goes together well with
                 # parallel models and a aligner. Doesn't go together
                 # well with autoregressive models.
                 use_explicit_eos=True,
                 use_prosody=False,  # unfortunately the non-segmental
                 # nature of prosodic markers mixed with the sequential
                 # phonemes hurts the performance of end-to-end models a
                 # lot, even though one might think enriching the input
                 # with such information would help.
                 use_lexical_stress=False,
                 silent=True,
                 allow_unknown=False,
                 add_silence_to_end=True,
                 strip_silence=True):
        """
        Mostly preparing ID lookups
        """
        self.strip_silence = strip_silence
        self.use_word_boundaries = use_word_boundaries
        self.allow_unknown = allow_unknown
        self.use_explicit_eos = use_explicit_eos
        self.use_prosody = use_prosody
        self.use_stress = use_lexical_stress
        self.add_silence_to_end = add_silence_to_end
        # panphon provides language-independent articulatory feature vectors per phone
        self.feature_table = panphon.FeatureTable()

        # pick the espeak g2p language code and text cleaner per supported language
        if language == "en":
            self.g2p_lang = "en-us"
            self.expand_abbreviations = english_text_expansion
            if not silent:
                print("Created an English Text-Frontend")

        elif language == "de":
            self.g2p_lang = "de"
            self.expand_abbreviations = lambda x: x
            if not silent:
                print("Created a German Text-Frontend")

        elif language == "el":
            self.g2p_lang = "el"
            self.expand_abbreviations = lambda x: x
            if not silent:
                print("Created a Greek Text-Frontend")

        elif language == "es":
            self.g2p_lang = "es"
            self.expand_abbreviations = lambda x: x
            if not silent:
                print("Created a Spanish Text-Frontend")

        elif language == "fi":
            self.g2p_lang = "fi"
            self.expand_abbreviations = lambda x: x
            if not silent:
                print("Created a Finnish Text-Frontend")

        elif language == "ru":
            self.g2p_lang = "ru"
            self.expand_abbreviations = lambda x: x
            if not silent:
                print("Created a Russian Text-Frontend")

        elif language == "hu":
            self.g2p_lang = "hu"
            self.expand_abbreviations = lambda x: x
            if not silent:
                print("Created a Hungarian Text-Frontend")

        elif language == "nl":
            self.g2p_lang = "nl"
            self.expand_abbreviations = lambda x: x
            if not silent:
                print("Created a Dutch Text-Frontend")

        elif language == "fr":
            self.g2p_lang = "fr-fr"
            self.expand_abbreviations = lambda x: x
            if not silent:
                print("Created a French Text-Frontend")

        elif language == "it":
            self.g2p_lang = "it"
            self.expand_abbreviations = lambda x: x
            if not silent:
                print("Created a Italian Text-Frontend")

        elif language == "pt":
            self.g2p_lang = "pt"
            self.expand_abbreviations = lambda x: x
            if not silent:
                print("Created a Portuguese Text-Frontend")

        elif language == "pl":
            self.g2p_lang = "pl"
            self.expand_abbreviations = lambda x: x
            if not silent:
                print("Created a Polish Text-Frontend")

        # remember to also update get_language_id() when adding something here

        else:
            print("Language not supported yet")
            sys.exit()

        self.phone_to_vector_papercup = generate_feature_table()

        # combine the papercup features with the first panphon vector per phone;
        # phones unknown to panphon get a zero vector of its 24 feature dims
        self.phone_to_vector = dict()
        for phone in self.phone_to_vector_papercup:
            panphon_features = self.feature_table.word_to_vector_list(phone, numeric=True)
            if panphon_features == []:
                panphon_features = [[0] * 24]
            papercup_features = self.phone_to_vector_papercup[phone]
            self.phone_to_vector[phone] = papercup_features + panphon_features[0]

        self.phone_to_id = {  # this lookup must be updated manually, because the only
            # other way would be extracting them from a set, which can be non-deterministic
            '~': 0,
            '#': 1,
            '?': 2,
            '!': 3,
            '.': 4,
            'ɜ': 5,
            'ɫ': 6,
            'ə': 7,
            'ɚ': 8,
            'a': 9,
            'ð': 10,
            'ɛ': 11,
            'ɪ': 12,
            'ᵻ': 13,
            'ŋ': 14,
            'ɔ': 15,
            'ɒ': 16,
            'ɾ': 17,
            'ʃ': 18,
            'θ': 19,
            'ʊ': 20,
            'ʌ': 21,
            'ʒ': 22,
            'æ': 23,
            'b': 24,
            'ʔ': 25,
            'd': 26,
            'e': 27,
            'f': 28,
            'g': 29,
            'h': 30,
            'i': 31,
            'j': 32,
            'k': 33,
            'l': 34,
            'm': 35,
            'n': 36,
            'ɳ': 37,
            'o': 38,
            'p': 39,
            'ɡ': 40,
            'ɹ': 41,
            'r': 42,
            's': 43,
            't': 44,
            'u': 45,
            'v': 46,
            'w': 47,
            'x': 48,
            'z': 49,
            'ʀ': 50,
            'ø': 51,
            'ç': 52,
            'ɐ': 53,
            'œ': 54,
            'y': 55,
            'ʏ': 56,
            'ɑ': 57,
            'c': 58,
            'ɲ': 59,
            'ɣ': 60,
            'ʎ': 61,
            'β': 62,
            'ʝ': 63,
            'ɟ': 64,
            'q': 65,
            'ɕ': 66,
            'ʲ': 67,
            'ɭ': 68,
            'ɵ': 69,
            'ʑ': 70,
            'ʋ': 71,
            'ʁ': 72,
            'ɨ': 73,
            'ʂ': 74,
            'ɬ': 75,
            }  # for the states of the ctc loss and dijkstra/mas in the aligner

        # reverse lookup for decoding ID sequences back to phone symbols
        self.id_to_phone = {v: k for k, v in self.phone_to_id.items()}

    def string_to_tensor(self, text, view=False, device="cpu", handle_missing=True, input_phonemes=False):
        """
        Fixes unicode errors, expands some abbreviations,
        turns graphemes into phonemes and then vectorizes
        the sequence as articulatory features
        """
        if input_phonemes:
            # text is already a phone string, skip g2p
            phones = text
        else:
            phones = self.get_phone_string(text=text, include_eos_symbol=True)
        if view:
            print("Phonemes: \n{}\n".format(phones))
        phones_vector = list()
        # turn into numeric vectors
        for char in phones:
            if handle_missing:
                try:
                    phones_vector.append(self.phone_to_vector[char])
                except KeyError:
                    # unknown phones are skipped (only reported, not vectorized)
                    print("unknown phoneme: {}".format(char))
            else:
                phones_vector.append(self.phone_to_vector[char])  # leave error handling to elsewhere

        # NOTE(review): the legacy torch.Tensor constructor only accepts device="cpu";
        # a non-cpu device argument here would raise — confirm intended usage
        return torch.Tensor(phones_vector, device=device)

    def get_phone_string(self, text, include_eos_symbol=True):
        # expand abbreviations
        utt = self.expand_abbreviations(text)
        # phonemize, then collapse all punctuation variants into either
        # pause markers (~) or nothing; sentence-final . ? ! are kept
        phones = phonemizer.phonemize(utt,
                                      language_switch='remove-flags',
                                      backend="espeak",
                                      language=self.g2p_lang,
                                      preserve_punctuation=True,
                                      strip=True,
                                      punctuation_marks=';:,.!?¡¿—…"«»“”~/',
                                      with_stress=self.use_stress).replace(";", ",").replace("/", " ").replace("—", "") \
            .replace(":", ",").replace('"', ",").replace("-", ",").replace("...", ",").replace("-", ",").replace("\n", " ") \
            .replace("\t", " ").replace("¡", "").replace("¿", "").replace(",", "~").replace(" ̃", "").replace('̩', "").replace("̃", "").replace("̪", "")
        # less than 1 wide characters hidden here
        # (NOTE(review): the second .replace("-", ",") above is redundant — the first
        # call already removed every "-"; harmless but worth cleaning up)
        phones = re.sub("~+", "~", phones)
        if not self.use_prosody:
            # retain ~ as heuristic pause marker, even though all other symbols are removed with this option.
            # also retain . ? and ! since they can be indicators for the stop token
            phones = phones.replace("ˌ", "").replace("ː", "").replace("ˑ", "") \
                .replace("˘", "").replace("|", "").replace("‖", "")
        if not self.use_word_boundaries:
            phones = phones.replace(" ", "")
        else:
            phones = re.sub(r"\s+", " ", phones)
            phones = re.sub(" ", "~", phones)
        if self.strip_silence:
            phones = phones.lstrip("~").rstrip("~")
        if self.add_silence_to_end:
            phones += "~"  # adding a silence in the end during add_silence_to_end produces more natural sounding prosody
        if include_eos_symbol:
            phones += "#"

        # always start with a silence marker, then deduplicate consecutive silences
        phones = "~" + phones
        phones = re.sub("~+", "~", phones)

        return phones
273
+
274
+
275
def english_text_expansion(text):
    """
    Apply as small part of the tacotron style text cleaning pipeline, suitable for e.g. LJSpeech.
    See https://github.com/keithito/tacotron/
    Careful: Only apply to english datasets. Different languages need different cleaners.

    Args:
        text (str): Raw English text.

    Returns:
        str: Text with common abbreviations expanded (e.g. "Mr." -> "mister").
    """
    # The abbreviation list keeps the trailing dot for readability, but it must be
    # stripped before interpolation because the template appends an escaped dot itself.
    # (Interpolating "Mr." directly produced the pattern r"\bMr.\." — an unescaped
    # wildcard dot plus a required second literal dot — which never matches normal
    # text, so no abbreviation was ever actually expanded.)
    _abbreviations = [(re.compile('\\b%s\\.' % abbreviation.rstrip('.'), re.IGNORECASE), expansion) for abbreviation, expansion in
                      [('Mrs.', 'misess'), ('Mr.', 'mister'), ('Dr.', 'doctor'), ('St.', 'saint'), ('Co.', 'company'), ('Jr.', 'junior'), ('Maj.', 'major'),
                       ('Gen.', 'general'), ('Drs.', 'doctors'), ('Rev.', 'reverend'), ('Lt.', 'lieutenant'), ('Hon.', 'honorable'), ('Sgt.', 'sergeant'),
                       ('Capt.', 'captain'), ('Esq.', 'esquire'), ('Ltd.', 'limited'), ('Col.', 'colonel'), ('Ft.', 'fort')]]
    for regex, replacement in _abbreviations:
        text = re.sub(regex, replacement, text)
    return text
288
+
289
+
290
def get_language_id(language):
    """
    Map an ISO-639-1 style language code to the integer ID used by the
    multilingual models, wrapped in a LongTensor.

    Returns None (implicitly, like the original elif chain) for codes
    that are not supported.
    """
    language_to_id = {"en": 0,
                      "de": 1,
                      "el": 2,
                      "es": 3,
                      "fi": 4,
                      "ru": 5,
                      "hu": 6,
                      "nl": 7,
                      "fr": 8,
                      "pt": 9,
                      "pl": 10,
                      "it": 11}
    if language in language_to_id:
        return torch.LongTensor([language_to_id[language]])
    return None
316
+
317
if __name__ == '__main__':
    # test an English utterance
    tfr_en = ArticulatoryCombinedTextFrontend(language="en")
    print(tfr_en.string_to_tensor("This is a complex sentence, it even has a pause! But can it do this? Nice.", view=True))

    # test a German utterance (NOTE(review): the tfr_en variable is reused for the German frontend)
    tfr_en = ArticulatoryCombinedTextFrontend(language="de")
    print(tfr_en.string_to_tensor("Alles klar, jetzt testen wir einen deutschen Satz. Ich hoffe es gibt nicht mehr viele unspezifizierte Phoneme.", view=True))
IMSToucan/Preprocessing/AudioPreprocessor.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import librosa
2
+ import librosa.core as lb
3
+ import librosa.display as lbd
4
+ import matplotlib.pyplot as plt
5
+ import numpy
6
+ import numpy as np
7
+ import pyloudnorm as pyln
8
+ import torch
9
+ from torchaudio.transforms import Resample
10
+
11
+
12
class AudioPreprocessor:
    """
    Bundles the audio cleaning pipeline: mono conversion, loudness
    normalization, optional resampling, optional VAD-based silence
    removal, and log-Mel spectrogram extraction.
    """

    def __init__(self, input_sr, output_sr=None, melspec_buckets=80, hop_length=256, n_fft=1024, cut_silence=False, device="cpu"):
        """
        The parameters are by default set up to do well
        on a 16kHz signal. A different sampling rate may
        require different hop_length and n_fft (e.g.
        doubling frequency --> doubling hop_length and
        doubling n_fft)
        """
        self.cut_silence = cut_silence
        self.device = device
        self.sr = input_sr
        self.new_sr = output_sr
        self.hop_length = hop_length
        self.n_fft = n_fft
        self.mel_buckets = melspec_buckets
        # loudness meter operates at the ORIGINAL sampling rate
        self.meter = pyln.Meter(input_sr)
        self.final_sr = input_sr
        if cut_silence:
            torch.hub._validate_not_a_forked_repo = lambda a, b, c: True  # torch 1.9 has a bug in the hub loading, this is a workaround
            # careful: assumes 16kHz or 8kHz audio
            self.silero_model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',
                                                      model='silero_vad',
                                                      force_reload=False,
                                                      onnx=False,
                                                      verbose=False)
            (self.get_speech_timestamps,
             self.save_audio,
             self.read_audio,
             self.VADIterator,
             self.collect_chunks) = utils
            self.silero_model = self.silero_model.to(self.device)
        if output_sr is not None and output_sr != input_sr:
            self.resample = Resample(orig_freq=input_sr, new_freq=output_sr).to(self.device)
            self.final_sr = output_sr
        else:
            # identity "resampler" keeps the call sites uniform
            self.resample = lambda x: x

    def cut_silence_from_audio(self, audio):
        """
        https://github.com/snakers4/silero-vad
        """
        return self.collect_chunks(self.get_speech_timestamps(audio, self.silero_model, sampling_rate=self.final_sr), audio)

    def to_mono(self, x):
        """
        make sure we deal with a 1D array
        """
        if len(x.shape) == 2:
            # librosa expects (channels, samples), soundfile delivers (samples, channels)
            return lb.to_mono(numpy.transpose(x))
        else:
            return x

    def normalize_loudness(self, audio):
        """
        normalize the amplitudes according to
        their decibels, so this should turn any
        signal with different magnitudes into
        the same magnitude by analysing loudness
        """
        loudness = self.meter.integrated_loudness(audio)
        loud_normed = pyln.normalize.loudness(audio, loudness, -30.0)
        # rescale so that the absolute peak is exactly 1.0
        peak = numpy.amax(numpy.abs(loud_normed))
        peak_normed = numpy.divide(loud_normed, peak)
        return peak_normed

    def logmelfilterbank(self, audio, sampling_rate, fmin=40, fmax=8000, eps=1e-10):
        """
        Compute log-Mel filterbank

        one day this could be replaced by torchaudio's internal log10(melspec(audio)), but
        for some reason it gives slightly different results, so in order not to break backwards
        compatibility, this is kept for now. If there is ever a reason to completely re-train
        all models, this would be a good opportunity to make the switch.
        """
        if isinstance(audio, torch.Tensor):
            audio = audio.numpy()
        # get amplitude spectrogram
        x_stft = librosa.stft(audio, n_fft=self.n_fft, hop_length=self.hop_length, win_length=None, window="hann", pad_mode="reflect")
        spc = np.abs(x_stft).T
        # get mel basis
        fmin = 0 if fmin is None else fmin
        fmax = sampling_rate / 2 if fmax is None else fmax
        # NOTE(review): positional arguments here require librosa < 0.10;
        # newer librosa made these keyword-only — confirm the pinned version
        mel_basis = librosa.filters.mel(sampling_rate, self.n_fft, self.mel_buckets, fmin, fmax)
        # apply log and return
        return torch.Tensor(np.log10(np.maximum(eps, np.dot(spc, mel_basis.T)))).transpose(0, 1)

    def normalize_audio(self, audio):
        """
        one function to apply them all in an
        order that makes sense.
        """
        audio = self.to_mono(audio)
        audio = self.normalize_loudness(audio)
        audio = torch.Tensor(audio).to(self.device)
        audio = self.resample(audio)
        if self.cut_silence:
            audio = self.cut_silence_from_audio(audio)
        return audio.to("cpu")

    def visualize_cleaning(self, unclean_audio):
        """
        displays Mel Spectrogram of unclean audio
        and then displays Mel Spectrogram of the
        cleaned version.
        """
        fig, ax = plt.subplots(nrows=2, ncols=1)
        unclean_audio_mono = self.to_mono(unclean_audio)
        unclean_spec = self.audio_to_mel_spec_tensor(unclean_audio_mono, normalize=False).numpy()
        clean_spec = self.audio_to_mel_spec_tensor(unclean_audio_mono, normalize=True).numpy()
        lbd.specshow(unclean_spec, sr=self.sr, cmap='GnBu', y_axis='mel', ax=ax[0], x_axis='time')
        ax[0].set(title='Uncleaned Audio')
        ax[0].label_outer()
        # the cleaned version may have been resampled, so pick the matching rate for display
        if self.new_sr is not None:
            lbd.specshow(clean_spec, sr=self.new_sr, cmap='GnBu', y_axis='mel', ax=ax[1], x_axis='time')
        else:
            lbd.specshow(clean_spec, sr=self.sr, cmap='GnBu', y_axis='mel', ax=ax[1], x_axis='time')
        ax[1].set(title='Cleaned Audio')
        ax[1].label_outer()
        plt.show()

    def audio_to_wave_tensor(self, audio, normalize=True):
        # returns the (optionally cleaned) waveform as a torch Tensor
        if normalize:
            return self.normalize_audio(audio)
        else:
            if isinstance(audio, torch.Tensor):
                return audio
            else:
                return torch.Tensor(audio)

    def audio_to_mel_spec_tensor(self, audio, normalize=True, explicit_sampling_rate=None):
        """
        explicit_sampling_rate is for when
        normalization has already been applied
        and that included resampling. No way
        to detect the current sr of the incoming
        audio
        """
        if explicit_sampling_rate is None:
            if normalize:
                audio = self.normalize_audio(audio)
                return self.logmelfilterbank(audio=audio, sampling_rate=self.final_sr)
            return self.logmelfilterbank(audio=audio, sampling_rate=self.sr)
        if normalize:
            audio = self.normalize_audio(audio)
        return self.logmelfilterbank(audio=audio, sampling_rate=explicit_sampling_rate)
+ return self.logmelfilterbank(audio=audio, sampling_rate=explicit_sampling_rate)
159
+
160
+
161
if __name__ == '__main__':
    import soundfile

    # quick manual sanity check: load a file, resample to 16 kHz and
    # compare the spectrograms before and after cleaning
    wav, sr = soundfile.read("../audios/test.wav")
    ap = AudioPreprocessor(input_sr=sr, output_sr=16000)
    ap.visualize_cleaning(wav)
+ ap.visualize_cleaning(wav)
IMSToucan/Preprocessing/ProsodicConditionExtractor.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import soundfile as sf
2
+ import torch
3
+ import torch.multiprocessing
4
+ import torch.multiprocessing
5
+ from numpy import trim_zeros
6
+ from speechbrain.pretrained import EncoderClassifier
7
+
8
+ from .AudioPreprocessor import AudioPreprocessor
9
+
10
+
11
class ProsodicConditionExtractor:
    """
    Derives a fixed-size utterance-level condition vector from a reference
    waveform by concatenating two pretrained speaker embeddings
    (ECAPA-TDNN and x-vector, both trained on VoxCeleb).
    """

    def __init__(self, sr, device=torch.device("cpu")):
        self.ap = AudioPreprocessor(input_sr=sr, output_sr=16000, melspec_buckets=80, hop_length=256, n_fft=1024, cut_silence=False)
        device_string = str(device)
        # https://huggingface.co/speechbrain/spkrec-ecapa-voxceleb
        self.speaker_embedding_func_ecapa = EncoderClassifier.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb",
                                                                           run_opts={"device": device_string},
                                                                           savedir="Models/SpeakerEmbedding/speechbrain_speaker_embedding_ecapa")
        # https://huggingface.co/speechbrain/spkrec-xvect-voxceleb
        self.speaker_embedding_func_xvector = EncoderClassifier.from_hparams(source="speechbrain/spkrec-xvect-voxceleb",
                                                                             run_opts={"device": device_string},
                                                                             savedir="Models/SpeakerEmbedding/speechbrain_speaker_embedding_xvector")

    def extract_condition_from_reference_wave(self, wave, already_normalized=False):
        """
        Return the concatenated (ecapa + xvector) embedding for one utterance.
        """
        if already_normalized:
            norm_wave = wave
        else:
            norm_wave = self.ap.audio_to_wave_tensor(normalize=True, audio=wave)
        # drop leading/trailing all-zero samples so padding does not skew the embedding
        norm_wave = torch.tensor(trim_zeros(norm_wave.numpy()))
        batched_wave = norm_wave.unsqueeze(0)
        ecapa_embedding = self.speaker_embedding_func_ecapa.encode_batch(wavs=batched_wave).squeeze()
        xvector_embedding = self.speaker_embedding_func_xvector.encode_batch(wavs=batched_wave).squeeze()
        return torch.cat([ecapa_embedding.cpu(), xvector_embedding.cpu()], dim=0)
35
+
36
+
37
if __name__ == '__main__':
    # quick manual sanity check: embed a reference wave and print the embedding's shape
    wave, sr = sf.read("../audios/1.wav")
    ext = ProsodicConditionExtractor(sr=sr)
    print(ext.extract_condition_from_reference_wave(wave=wave).shape)
IMSToucan/Preprocessing/__init__.py ADDED
File without changes
IMSToucan/Preprocessing/papercup_features.py ADDED
@@ -0,0 +1,637 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Derived from an open-source resource provided by Papercup Technologies Limited
2
+ # Resource-Author: Marlene Staib
3
+ # Modified by Florian Lux, 2021
4
+
5
+ def generate_feature_lookup():
6
+ return {
7
+ '~': {'symbol_type': 'silence'},
8
+ '#': {'symbol_type': 'end of sentence'},
9
+ '?': {'symbol_type': 'questionmark'},
10
+ '!': {'symbol_type': 'exclamationmark'},
11
+ '.': {'symbol_type': 'fullstop'},
12
+ 'ɜ': {
13
+ 'symbol_type' : 'phoneme',
14
+ 'vowel_consonant' : 'vowel',
15
+ 'VUV' : 'voiced',
16
+ 'vowel_frontness' : 'central',
17
+ 'vowel_openness' : 'open-mid',
18
+ 'vowel_roundedness': 'unrounded',
19
+ },
20
+ 'ɫ': {
21
+ 'symbol_type' : 'phoneme',
22
+ 'vowel_consonant' : 'consonant',
23
+ 'VUV' : 'voiced',
24
+ 'consonant_place' : 'alveolar',
25
+ 'consonant_manner': 'lateral-approximant',
26
+ },
27
+ 'ə': {
28
+ 'symbol_type' : 'phoneme',
29
+ 'vowel_consonant' : 'vowel',
30
+ 'VUV' : 'voiced',
31
+ 'vowel_frontness' : 'central',
32
+ 'vowel_openness' : 'mid',
33
+ 'vowel_roundedness': 'unrounded',
34
+ },
35
+ 'ɚ': {
36
+ 'symbol_type' : 'phoneme',
37
+ 'vowel_consonant' : 'vowel',
38
+ 'VUV' : 'voiced',
39
+ 'vowel_frontness' : 'central',
40
+ 'vowel_openness' : 'mid',
41
+ 'vowel_roundedness': 'unrounded',
42
+ },
43
+ 'a': {
44
+ 'symbol_type' : 'phoneme',
45
+ 'vowel_consonant' : 'vowel',
46
+ 'VUV' : 'voiced',
47
+ 'vowel_frontness' : 'front',
48
+ 'vowel_openness' : 'open',
49
+ 'vowel_roundedness': 'unrounded',
50
+ },
51
+ 'ð': {
52
+ 'symbol_type' : 'phoneme',
53
+ 'vowel_consonant' : 'consonant',
54
+ 'VUV' : 'voiced',
55
+ 'consonant_place' : 'dental',
56
+ 'consonant_manner': 'fricative'
57
+ },
58
+ 'ɛ': {
59
+ 'symbol_type' : 'phoneme',
60
+ 'vowel_consonant' : 'vowel',
61
+ 'VUV' : 'voiced',
62
+ 'vowel_frontness' : 'front',
63
+ 'vowel_openness' : 'open-mid',
64
+ 'vowel_roundedness': 'unrounded',
65
+ },
66
+ 'ɪ': {
67
+ 'symbol_type' : 'phoneme',
68
+ 'vowel_consonant' : 'vowel',
69
+ 'VUV' : 'voiced',
70
+ 'vowel_frontness' : 'front_central',
71
+ 'vowel_openness' : 'close_close-mid',
72
+ 'vowel_roundedness': 'unrounded',
73
+ },
74
+ 'ᵻ': {
75
+ 'symbol_type' : 'phoneme',
76
+ 'vowel_consonant' : 'vowel',
77
+ 'VUV' : 'voiced',
78
+ 'vowel_frontness' : 'central',
79
+ 'vowel_openness' : 'close',
80
+ 'vowel_roundedness': 'unrounded',
81
+ },
82
+ 'ŋ': {
83
+ 'symbol_type' : 'phoneme',
84
+ 'vowel_consonant' : 'consonant',
85
+ 'VUV' : 'voiced',
86
+ 'consonant_place' : 'velar',
87
+ 'consonant_manner': 'nasal'
88
+ },
89
+ 'ɔ': {
90
+ 'symbol_type' : 'phoneme',
91
+ 'vowel_consonant' : 'vowel',
92
+ 'VUV' : 'voiced',
93
+ 'vowel_frontness' : 'back',
94
+ 'vowel_openness' : 'open-mid',
95
+ 'vowel_roundedness': 'rounded',
96
+ },
97
+ 'ɒ': {
98
+ 'symbol_type' : 'phoneme',
99
+ 'vowel_consonant' : 'vowel',
100
+ 'VUV' : 'voiced',
101
+ 'vowel_frontness' : 'back',
102
+ 'vowel_openness' : 'open',
103
+ 'vowel_roundedness': 'rounded',
104
+ },
105
+ 'ɾ': {
106
+ 'symbol_type' : 'phoneme',
107
+ 'vowel_consonant' : 'consonant',
108
+ 'VUV' : 'voiced',
109
+ 'consonant_place' : 'alveolar',
110
+ 'consonant_manner': 'tap'
111
+ },
112
+ 'ʃ': {
113
+ 'symbol_type' : 'phoneme',
114
+ 'vowel_consonant' : 'consonant',
115
+ 'VUV' : 'unvoiced',
116
+ 'consonant_place' : 'postalveolar',
117
+ 'consonant_manner': 'fricative'
118
+ },
119
+ 'θ': {
120
+ 'symbol_type' : 'phoneme',
121
+ 'vowel_consonant' : 'consonant',
122
+ 'VUV' : 'unvoiced',
123
+ 'consonant_place' : 'dental',
124
+ 'consonant_manner': 'fricative'
125
+ },
126
+ 'ʊ': {
127
+ 'symbol_type' : 'phoneme',
128
+ 'vowel_consonant' : 'vowel',
129
+ 'VUV' : 'voiced',
130
+ 'vowel_frontness' : 'central_back',
131
+ 'vowel_openness' : 'close_close-mid',
132
+ 'vowel_roundedness': 'unrounded'
133
+ },
134
+ 'ʌ': {
135
+ 'symbol_type' : 'phoneme',
136
+ 'vowel_consonant' : 'vowel',
137
+ 'VUV' : 'voiced',
138
+ 'vowel_frontness' : 'back',
139
+ 'vowel_openness' : 'open-mid',
140
+ 'vowel_roundedness': 'unrounded'
141
+ },
142
+ 'ʒ': {
143
+ 'symbol_type' : 'phoneme',
144
+ 'vowel_consonant' : 'consonant',
145
+ 'VUV' : 'voiced',
146
+ 'consonant_place' : 'postalveolar',
147
+ 'consonant_manner': 'fricative'
148
+ },
149
+ 'æ': {
150
+ 'symbol_type' : 'phoneme',
151
+ 'vowel_consonant' : 'vowel',
152
+ 'VUV' : 'voiced',
153
+ 'vowel_frontness' : 'front',
154
+ 'vowel_openness' : 'open-mid_open',
155
+ 'vowel_roundedness': 'unrounded'
156
+ },
157
+ 'b': {
158
+ 'symbol_type' : 'phoneme',
159
+ 'vowel_consonant' : 'consonant',
160
+ 'VUV' : 'voiced',
161
+ 'consonant_place' : 'bilabial',
162
+ 'consonant_manner': 'stop'
163
+ },
164
+ 'ʔ': {
165
+ 'symbol_type' : 'phoneme',
166
+ 'vowel_consonant' : 'consonant',
167
+ 'VUV' : 'unvoiced',
168
+ 'consonant_place' : 'glottal',
169
+ 'consonant_manner': 'stop'
170
+ },
171
+ 'd': {
172
+ 'symbol_type' : 'phoneme',
173
+ 'vowel_consonant' : 'consonant',
174
+ 'VUV' : 'voiced',
175
+ 'consonant_place' : 'alveolar',
176
+ 'consonant_manner': 'stop'
177
+ },
178
+ 'e': {
179
+ 'symbol_type' : 'phoneme',
180
+ 'vowel_consonant' : 'vowel',
181
+ 'VUV' : 'voiced',
182
+ 'vowel_frontness' : 'front',
183
+ 'vowel_openness' : 'close-mid',
184
+ 'vowel_roundedness': 'unrounded'
185
+ },
186
+ 'f': {
187
+ 'symbol_type' : 'phoneme',
188
+ 'vowel_consonant' : 'consonant',
189
+ 'VUV' : 'unvoiced',
190
+ 'consonant_place' : 'labiodental',
191
+ 'consonant_manner': 'fricative'
192
+ },
193
+ 'g': {
194
+ 'symbol_type' : 'phoneme',
195
+ 'vowel_consonant' : 'consonant',
196
+ 'VUV' : 'voiced',
197
+ 'consonant_place' : 'velar',
198
+ 'consonant_manner': 'stop'
199
+ },
200
+ 'h': {
201
+ 'symbol_type' : 'phoneme',
202
+ 'vowel_consonant' : 'consonant',
203
+ 'VUV' : 'unvoiced',
204
+ 'consonant_place' : 'glottal',
205
+ 'consonant_manner': 'fricative'
206
+ },
207
+ 'i': {
208
+ 'symbol_type' : 'phoneme',
209
+ 'vowel_consonant' : 'vowel',
210
+ 'VUV' : 'voiced',
211
+ 'vowel_frontness' : 'front',
212
+ 'vowel_openness' : 'close',
213
+ 'vowel_roundedness': 'unrounded'
214
+ },
215
+ 'j': {
216
+ 'symbol_type' : 'phoneme',
217
+ 'vowel_consonant' : 'consonant',
218
+ 'VUV' : 'voiced',
219
+ 'consonant_place' : 'palatal',
220
+ 'consonant_manner': 'approximant'
221
+ },
222
+ 'k': {
223
+ 'symbol_type' : 'phoneme',
224
+ 'vowel_consonant' : 'consonant',
225
+ 'VUV' : 'unvoiced',
226
+ 'consonant_place' : 'velar',
227
+ 'consonant_manner': 'stop'
228
+ },
229
+ 'l': {
230
+ 'symbol_type' : 'phoneme',
231
+ 'vowel_consonant' : 'consonant',
232
+ 'VUV' : 'voiced',
233
+ 'consonant_place' : 'alveolar',
234
+ 'consonant_manner': 'lateral-approximant'
235
+ },
236
+ 'm': {
237
+ 'symbol_type' : 'phoneme',
238
+ 'vowel_consonant' : 'consonant',
239
+ 'VUV' : 'voiced',
240
+ 'consonant_place' : 'bilabial',
241
+ 'consonant_manner': 'nasal'
242
+ },
243
+ 'n': {
244
+ 'symbol_type' : 'phoneme',
245
+ 'vowel_consonant' : 'consonant',
246
+ 'VUV' : 'voiced',
247
+ 'consonant_place' : 'alveolar',
248
+ 'consonant_manner': 'nasal'
249
+ },
250
+ 'ɳ': {
251
+ 'symbol_type' : 'phoneme',
252
+ 'vowel_consonant' : 'consonant',
253
+ 'VUV' : 'voiced',
254
+ 'consonant_place' : 'palatal',
255
+ 'consonant_manner': 'nasal'
256
+ },
257
+ 'o': {
258
+ 'symbol_type' : 'phoneme',
259
+ 'vowel_consonant' : 'vowel',
260
+ 'VUV' : 'voiced',
261
+ 'vowel_frontness' : 'back',
262
+ 'vowel_openness' : 'close-mid',
263
+ 'vowel_roundedness': 'rounded'
264
+ },
265
+ 'p': {
266
+ 'symbol_type' : 'phoneme',
267
+ 'vowel_consonant' : 'consonant',
268
+ 'VUV' : 'unvoiced',
269
+ 'consonant_place' : 'bilabial',
270
+ 'consonant_manner': 'stop'
271
+ },
272
+ 'ɡ': {
273
+ 'symbol_type' : 'phoneme',
274
+ 'vowel_consonant' : 'consonant',
275
+ 'VUV' : 'voiced',
276
+ 'consonant_place' : 'velar',
277
+ 'consonant_manner': 'stop'
278
+ },
279
+ 'ɹ': {
280
+ 'symbol_type' : 'phoneme',
281
+ 'vowel_consonant' : 'consonant',
282
+ 'VUV' : 'voiced',
283
+ 'consonant_place' : 'alveolar',
284
+ 'consonant_manner': 'approximant'
285
+ },
286
+ 'r': {
287
+ 'symbol_type' : 'phoneme',
288
+ 'vowel_consonant' : 'consonant',
289
+ 'VUV' : 'voiced',
290
+ 'consonant_place' : 'alveolar',
291
+ 'consonant_manner': 'trill'
292
+ },
293
+ 's': {
294
+ 'symbol_type' : 'phoneme',
295
+ 'vowel_consonant' : 'consonant',
296
+ 'VUV' : 'unvoiced',
297
+ 'consonant_place' : 'alveolar',
298
+ 'consonant_manner': 'fricative'
299
+ },
300
+ 't': {
301
+ 'symbol_type' : 'phoneme',
302
+ 'vowel_consonant' : 'consonant',
303
+ 'VUV' : 'unvoiced',
304
+ 'consonant_place' : 'alveolar',
305
+ 'consonant_manner': 'stop'
306
+ },
307
+ 'u': {
308
+ 'symbol_type' : 'phoneme',
309
+ 'vowel_consonant' : 'vowel',
310
+ 'VUV' : 'voiced',
311
+ 'vowel_frontness' : 'back',
312
+ 'vowel_openness' : 'close',
313
+ 'vowel_roundedness': 'rounded',
314
+ },
315
+ 'v': {
316
+ 'symbol_type' : 'phoneme',
317
+ 'vowel_consonant' : 'consonant',
318
+ 'VUV' : 'voiced',
319
+ 'consonant_place' : 'labiodental',
320
+ 'consonant_manner': 'fricative'
321
+ },
322
+ 'w': {
323
+ 'symbol_type' : 'phoneme',
324
+ 'vowel_consonant' : 'consonant',
325
+ 'VUV' : 'voiced',
326
+ 'consonant_place' : 'labial-velar',
327
+ 'consonant_manner': 'approximant'
328
+ },
329
+ 'x': {
330
+ 'symbol_type' : 'phoneme',
331
+ 'vowel_consonant' : 'consonant',
332
+ 'VUV' : 'unvoiced',
333
+ 'consonant_place' : 'velar',
334
+ 'consonant_manner': 'fricative'
335
+ },
336
+ 'z': {
337
+ 'symbol_type' : 'phoneme',
338
+ 'vowel_consonant' : 'consonant',
339
+ 'VUV' : 'voiced',
340
+ 'consonant_place' : 'alveolar',
341
+ 'consonant_manner': 'fricative'
342
+ },
343
+ 'ʀ': {
344
+ 'symbol_type' : 'phoneme',
345
+ 'vowel_consonant' : 'consonant',
346
+ 'VUV' : 'voiced',
347
+ 'consonant_place' : 'uvular',
348
+ 'consonant_manner': 'trill'
349
+ },
350
+ 'ø': {
351
+ 'symbol_type' : 'phoneme',
352
+ 'vowel_consonant' : 'vowel',
353
+ 'VUV' : 'voiced',
354
+ 'vowel_frontness' : 'front',
355
+ 'vowel_openness' : 'close-mid',
356
+ 'vowel_roundedness': 'rounded'
357
+ },
358
+ 'ç': {
359
+ 'symbol_type' : 'phoneme',
360
+ 'vowel_consonant' : 'consonant',
361
+ 'VUV' : 'unvoiced',
362
+ 'consonant_place' : 'palatal',
363
+ 'consonant_manner': 'fricative'
364
+ },
365
+ 'ɐ': {
366
+ 'symbol_type' : 'phoneme',
367
+ 'vowel_consonant' : 'vowel',
368
+ 'VUV' : 'voiced',
369
+ 'vowel_frontness' : 'central',
370
+ 'vowel_openness' : 'open',
371
+ 'vowel_roundedness': 'unrounded'
372
+ },
373
+ 'œ': {
374
+ 'symbol_type' : 'phoneme',
375
+ 'vowel_consonant' : 'vowel',
376
+ 'VUV' : 'voiced',
377
+ 'vowel_frontness' : 'front',
378
+ 'vowel_openness' : 'open-mid',
379
+ 'vowel_roundedness': 'rounded'
380
+ },
381
+ 'y': {
382
+ 'symbol_type' : 'phoneme',
383
+ 'vowel_consonant' : 'vowel',
384
+ 'VUV' : 'voiced',
385
+ 'vowel_frontness' : 'front',
386
+ 'vowel_openness' : 'close',
387
+ 'vowel_roundedness': 'rounded'
388
+ },
389
+ 'ʏ': {
390
+ 'symbol_type' : 'phoneme',
391
+ 'vowel_consonant' : 'vowel',
392
+ 'VUV' : 'voiced',
393
+ 'vowel_frontness' : 'front_central',
394
+ 'vowel_openness' : 'close_close-mid',
395
+ 'vowel_roundedness': 'rounded'
396
+ },
397
+ 'ɑ': {
398
+ 'symbol_type' : 'phoneme',
399
+ 'vowel_consonant' : 'vowel',
400
+ 'VUV' : 'voiced',
401
+ 'vowel_frontness' : 'back',
402
+ 'vowel_openness' : 'open',
403
+ 'vowel_roundedness': 'unrounded'
404
+ },
405
+ 'c': {
406
+ 'symbol_type' : 'phoneme',
407
+ 'vowel_consonant' : 'consonant',
408
+ 'VUV' : 'unvoiced',
409
+ 'consonant_place' : 'palatal',
410
+ 'consonant_manner': 'stop'
411
+ },
412
+ 'ɲ': {
413
+ 'symbol_type' : 'phoneme',
414
+ 'vowel_consonant' : 'consonant',
415
+ 'VUV' : 'voiced',
416
+ 'consonant_place' : 'palatal',
417
+ 'consonant_manner': 'nasal'
418
+ },
419
+ 'ɣ': {
420
+ 'symbol_type' : 'phoneme',
421
+ 'vowel_consonant' : 'consonant',
422
+ 'VUV' : 'voiced',
423
+ 'consonant_place' : 'velar',
424
+ 'consonant_manner': 'fricative'
425
+ },
426
+ 'ʎ': {
427
+ 'symbol_type' : 'phoneme',
428
+ 'vowel_consonant' : 'consonant',
429
+ 'VUV' : 'voiced',
430
+ 'consonant_place' : 'palatal',
431
+ 'consonant_manner': 'lateral-approximant'
432
+ },
433
+ 'β': {
434
+ 'symbol_type' : 'phoneme',
435
+ 'vowel_consonant' : 'consonant',
436
+ 'VUV' : 'voiced',
437
+ 'consonant_place' : 'bilabial',
438
+ 'consonant_manner': 'fricative'
439
+ },
440
+ 'ʝ': {
441
+ 'symbol_type' : 'phoneme',
442
+ 'vowel_consonant' : 'consonant',
443
+ 'VUV' : 'voiced',
444
+ 'consonant_place' : 'palatal',
445
+ 'consonant_manner': 'fricative'
446
+ },
447
+ 'ɟ': {
448
+ 'symbol_type' : 'phoneme',
449
+ 'vowel_consonant' : 'consonant',
450
+ 'VUV' : 'voiced',
451
+ 'consonant_place' : 'palatal',
452
+ 'consonant_manner': 'stop'
453
+ },
454
+ 'q': {
455
+ 'symbol_type' : 'phoneme',
456
+ 'vowel_consonant' : 'consonant',
457
+ 'VUV' : 'unvoiced',
458
+ 'consonant_place' : 'uvular',
459
+ 'consonant_manner': 'stop'
460
+ },
461
+ 'ɕ': {
462
+ 'symbol_type' : 'phoneme',
463
+ 'vowel_consonant' : 'consonant',
464
+ 'VUV' : 'unvoiced',
465
+ 'consonant_place' : 'alveolopalatal',
466
+ 'consonant_manner': 'fricative'
467
+ },
468
+ 'ʲ': {
469
+ 'symbol_type' : 'phoneme',
470
+ 'vowel_consonant' : 'consonant',
471
+ 'VUV' : 'voiced',
472
+ 'consonant_place' : 'palatal',
473
+ 'consonant_manner': 'approximant'
474
+ },
475
+ 'ɭ': {
476
+ 'symbol_type' : 'phoneme',
477
+ 'vowel_consonant' : 'consonant',
478
+ 'VUV' : 'voiced',
479
+ 'consonant_place' : 'palatal', # should be retroflex, but palatal should be close enough
480
+ 'consonant_manner': 'lateral-approximant'
481
+ },
482
+ 'ɵ': {
483
+ 'symbol_type' : 'phoneme',
484
+ 'vowel_consonant' : 'vowel',
485
+ 'VUV' : 'voiced',
486
+ 'vowel_frontness' : 'central',
487
+ 'vowel_openness' : 'open-mid',
488
+ 'vowel_roundedness': 'rounded'
489
+ },
490
+ 'ʑ': {
491
+ 'symbol_type' : 'phoneme',
492
+ 'vowel_consonant' : 'consonant',
493
+ 'VUV' : 'voiced',
494
+ 'consonant_place' : 'alveolopalatal',
495
+ 'consonant_manner': 'fricative'
496
+ },
497
+ 'ʋ': {
498
+ 'symbol_type' : 'phoneme',
499
+ 'vowel_consonant' : 'consonant',
500
+ 'VUV' : 'voiced',
501
+ 'consonant_place' : 'labiodental',
502
+ 'consonant_manner': 'approximant'
503
+ },
504
+ 'ʁ': {
505
+ 'symbol_type' : 'phoneme',
506
+ 'vowel_consonant' : 'consonant',
507
+ 'VUV' : 'voiced',
508
+ 'consonant_place' : 'uvular',
509
+ 'consonant_manner': 'fricative'
510
+ },
511
+ 'ɨ': {
512
+ 'symbol_type' : 'phoneme',
513
+ 'vowel_consonant' : 'vowel',
514
+ 'VUV' : 'voiced',
515
+ 'vowel_frontness' : 'central',
516
+ 'vowel_openness' : 'close',
517
+ 'vowel_roundedness': 'unrounded'
518
+ },
519
+ 'ʂ': {
520
+ 'symbol_type' : 'phoneme',
521
+ 'vowel_consonant' : 'consonant',
522
+ 'VUV' : 'unvoiced',
523
+ 'consonant_place' : 'palatal', # should be retroflex, but palatal should be close enough
524
+ 'consonant_manner': 'fricative'
525
+ },
526
+ 'ɬ': {
527
+ 'symbol_type' : 'phoneme',
528
+ 'vowel_consonant' : 'consonant',
529
+ 'VUV' : 'unvoiced',
530
+ 'consonant_place' : 'alveolar', # should be noted it's also lateral, but should be close enough
531
+ 'consonant_manner': 'fricative'
532
+ },
533
+ } # REMEMBER to also add the phonemes added here to the ID lookup table in the TextFrontend as the new highest ID
534
+
535
+
536
+ def generate_feature_table():
537
+ ipa_to_phonemefeats = generate_feature_lookup()
538
+
539
+ feat_types = set()
540
+ for ipa in ipa_to_phonemefeats:
541
+ if len(ipa) == 1:
542
+ [feat_types.add(feat) for feat in ipa_to_phonemefeats[ipa].keys()]
543
+
544
+ feat_to_val_set = dict()
545
+ for feat in feat_types:
546
+ feat_to_val_set[feat] = set()
547
+ for ipa in ipa_to_phonemefeats:
548
+ if len(ipa) == 1:
549
+ for feat in ipa_to_phonemefeats[ipa]:
550
+ feat_to_val_set[feat].add(ipa_to_phonemefeats[ipa][feat])
551
+
552
+ # print(feat_to_val_set)
553
+
554
+ value_list = set()
555
+ for val_set in [feat_to_val_set[feat] for feat in feat_to_val_set]:
556
+ for value in val_set:
557
+ value_list.add(value)
558
+ # print("{")
559
+ # for index, value in enumerate(list(value_list)):
560
+ # print('"{}":{},'.format(value,index))
561
+ # print("}")
562
+
563
+ value_to_index = {
564
+ "dental" : 0,
565
+ "postalveolar" : 1,
566
+ "mid" : 2,
567
+ "close-mid" : 3,
568
+ "vowel" : 4,
569
+ "silence" : 5,
570
+ "consonant" : 6,
571
+ "close" : 7,
572
+ "velar" : 8,
573
+ "stop" : 9,
574
+ "palatal" : 10,
575
+ "nasal" : 11,
576
+ "glottal" : 12,
577
+ "central" : 13,
578
+ "back" : 14,
579
+ "approximant" : 15,
580
+ "uvular" : 16,
581
+ "open-mid" : 17,
582
+ "front_central" : 18,
583
+ "front" : 19,
584
+ "end of sentence" : 20,
585
+ "labiodental" : 21,
586
+ "close_close-mid" : 22,
587
+ "labial-velar" : 23,
588
+ "unvoiced" : 24,
589
+ "central_back" : 25,
590
+ "trill" : 26,
591
+ "rounded" : 27,
592
+ "open-mid_open" : 28,
593
+ "tap" : 29,
594
+ "alveolar" : 30,
595
+ "bilabial" : 31,
596
+ "phoneme" : 32,
597
+ "open" : 33,
598
+ "fricative" : 34,
599
+ "unrounded" : 35,
600
+ "lateral-approximant": 36,
601
+ "voiced" : 37,
602
+ "questionmark" : 38,
603
+ "exclamationmark" : 39,
604
+ "fullstop" : 40,
605
+ "alveolopalatal" : 41
606
+ }
607
+
608
+ phone_to_vector = dict()
609
+ for ipa in ipa_to_phonemefeats:
610
+ if len(ipa) == 1:
611
+ phone_to_vector[ipa] = [0] * sum([len(values) for values in [feat_to_val_set[feat] for feat in feat_to_val_set]])
612
+ for feat in ipa_to_phonemefeats[ipa]:
613
+ if ipa_to_phonemefeats[ipa][feat] in value_to_index:
614
+ phone_to_vector[ipa][value_to_index[ipa_to_phonemefeats[ipa][feat]]] = 1
615
+
616
+ for feat in feat_to_val_set:
617
+ for value in feat_to_val_set[feat]:
618
+ if value not in value_to_index:
619
+ print(f"Unknown feature value in featureset! {value}")
620
+
621
+ # print(f"{sum([len(values) for values in [feat_to_val_set[feat] for feat in feat_to_val_set]])} should be 42")
622
+
623
+ return phone_to_vector
624
+
625
+
626
+ def generate_phone_to_id_lookup():
627
+ ipa_to_phonemefeats = generate_feature_lookup()
628
+ count = 0
629
+ phone_to_id = dict()
630
+ for key in sorted(list(ipa_to_phonemefeats)): # careful: non-deterministic
631
+ phone_to_id[key] = count
632
+ count += 1
633
+ return phone_to_id
634
+
635
+
636
+ if __name__ == '__main__':
637
+ print(generate_phone_to_id_lookup())
IMSToucan/README.md ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ![image](Utility/toucan.png)
2
+
3
+ IMS Toucan is a toolkit for teaching, training and using state-of-the-art Speech Synthesis models, developed at the
4
+ **Institute for Natural Language Processing (IMS), University of Stuttgart, Germany**. Everything is pure Python and
5
+ PyTorch based to keep it as simple and beginner-friendly, yet powerful as possible.
6
+
7
+ The PyTorch Modules of [Tacotron 2](https://arxiv.org/abs/1712.05884)
8
+ and [FastSpeech 2](https://arxiv.org/abs/2006.04558) are taken from
9
+ [ESPnet](https://github.com/espnet/espnet), the PyTorch Modules of [HiFiGAN](https://arxiv.org/abs/2010.05646) are taken
10
+ from the [ParallelWaveGAN repository](https://github.com/kan-bayashi/ParallelWaveGAN)
11
+ which are also authored by the brilliant [Tomoki Hayashi](https://github.com/kan-bayashi).
12
+
13
+ For a version of the toolkit that includes TransformerTTS instead of Tacotron 2 and MelGAN instead of HiFiGAN, check out
14
+ the TransformerTTS and MelGAN branch. They are separated to keep the code clean, simple and minimal.
15
+
16
+ ---
17
+
18
+ ## Contents
19
+
20
+ - [New Features](#new-features)
21
+ - [Demonstration](#demonstration)
22
+ - [Installation](#installation)
23
+ + [Basic Requirements](#basic-requirements)
24
+ + [Speaker Embedding](#speaker-embedding)
25
+ + [espeak-ng](#espeak-ng)
26
+ - [Creating a new Pipeline](#creating-a-new-pipeline)
27
+ * [Build a HiFi-GAN Pipeline](#build-a-hifi-gan-pipeline)
28
+ * [Build a FastSpeech 2 Pipeline](#build-a-fastspeech-2-pipeline)
29
+ - [Training a Model](#training-a-model)
30
+ - [Creating a new InferenceInterface](#creating-a-new-inferenceinterface)
31
+ - [Using a trained Model for Inference](#using-a-trained-model-for-inference)
32
+ - [FAQ](#faq)
33
+ - [Citation](#citation)
34
+
35
+ ---
36
+
37
+ ## New Features
38
+
39
+ - [As shown in this paper](http://festvox.org/blizzard/bc2021/BC21_DelightfulTTS.pdf) vocoders can be used to perform
40
+ super-resolution and spectrogram inversion simultaneously. We added this to our HiFi-GAN vocoder. It now takes 16kHz
41
+ spectrograms as input, but produces 48kHz waveforms.
42
+ - We officially introduced IMS Toucan in
43
+ [our contribution to the Blizzard Challenge 2021](http://festvox.org/blizzard/bc2021/BC21_IMS.pdf). Check out the
44
+ bottom of the readme for a bibtex entry.
45
+ - We now use articulatory representations of phonemes as the input for all models. This allows us to easily use
46
+ multilingual data.
47
+ - We provide a checkpoint trained with [model agnostic meta learning](https://arxiv.org/abs/1703.03400) from which you
48
+ should be able to fine-tune a model with very little data in almost any language.
49
+ - We now use a small self-contained Aligner that is trained with CTC, inspired by
50
+ [this implementation](https://github.com/as-ideas/DeepForcedAligner). This allows us to get rid of the dependence on
51
+ autoregressive models. Tacotron 2 is thus now also no longer in this branch, but still present in other branches,
52
+ similar to TransformerTTS.
53
+
54
+ ---
55
+
56
+ ## Demonstration
57
+
58
+ [Here are two sentences](https://drive.google.com/file/d/1ltAyR2EwAbmDo2hgkx1mvUny4FuxYmru/view?usp=sharing)
59
+ produced by Tacotron 2 combined with HiFi-GAN, trained on
60
+ [Nancy Krebs](https://www.cstr.ed.ac.uk/projects/blizzard/2011/lessac_blizzard2011/) using this toolkit.
61
+
62
+ [Here is some speech](https://drive.google.com/file/d/1mZ1LvTlY6pJ5ZQ4UXZ9jbzB651mufBrB/view?usp=sharing)
63
+ produced by FastSpeech 2 and MelGAN trained on [LJSpeech](https://keithito.com/LJ-Speech-Dataset/)
64
+ using this toolkit.
65
+
66
+ And [here is a sentence](https://drive.google.com/file/d/1FT49Jf0yyibwMDbsEJEO9mjwHkHRIGXc/view?usp=sharing)
67
+ produced by TransformerTTS and MelGAN trained on [Thorsten](https://github.com/thorstenMueller/deep-learning-german-tts)
68
+ using this toolkit.
69
+
70
+ [Here is some speech](https://drive.google.com/file/d/14nPo2o1VKtWLPGF7e_0TxL8XGI3n7tAs/view?usp=sharing)
71
+ produced by a multi-speaker FastSpeech 2 with MelGAN trained on
72
+ [LibriTTS](https://research.google/tools/datasets/libri-tts/) using this toolkit. Fans of the videogame Portal may
73
+ recognize who was used as the reference speaker for this utterance.
74
+
75
+ [Interactive Demo of our entry to the Blizzard Challenge 2021.](https://colab.research.google.com/drive/1bRaySf8U55MRPaxqBr8huWrzCOzlxVqw)
76
+ This is based on an older version of the toolkit though. It uses FastSpeech2 and MelGAN as vocoder and is trained on 5
77
+ hours of Spanish.
78
+
79
+ ---
80
+
81
+ ## Installation
82
+
83
+ #### Basic Requirements
84
+
85
+ To install this toolkit, clone it onto the machine you want to use it on
86
+ (should have at least one GPU if you intend to train models on that machine. For inference, you can get by without GPU).
87
+ Navigate to the directory you have cloned. We are going to create and activate a
88
+ [conda virtual environment](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html)
89
+ to install the basic requirements into. After creating the environment, the command you need to use to activate the
90
+ virtual environment is displayed. The commands below show everything you need to do.
91
+
92
+ ```
93
+ conda create --prefix ./toucan_conda_venv --no-default-packages python=3.8
94
+
95
+ pip install --no-cache-dir -r requirements.txt
96
+
97
+ pip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html
98
+ ```
99
+
100
+ #### Speaker Embedding
101
+
102
+ As [NVIDIA has shown](https://arxiv.org/pdf/2110.05798.pdf), you get better results by fine-tuning a pretrained model on
103
+ a new speaker, rather than training a multispeaker model. We have thus dropped support for zero-shot multispeaker models
104
+ using speaker embeddings. However, we still
105
+ use [Speechbrain's ECAPA-TDNN](https://huggingface.co/speechbrain/spkrec-ecapa-voxceleb) for a cycle consistency loss to
106
+ make adapting to new speakers a bit faster.
107
+
108
+ In the current version of the toolkit no further action should be required. When you are using multispeaker for the
109
+ first time, it requires an internet connection to download the pretrained models though.
110
+
111
+ #### espeak-ng
112
+
113
+ And finally you need to have espeak-ng installed on your system, because it is used as backend for the phonemizer. If
114
+ you replace the phonemizer, you don't need it. On most Linux environments it will be installed already, and if it is
115
+ not, and you have the sufficient rights, you can install it by simply running
116
+
117
+ ```
118
+ apt-get install espeak-ng
119
+ ```
120
+
121
+ ---
122
+
123
+ ## Creating a new Pipeline
124
+
125
+ To create a new pipeline to train a HiFiGAN vocoder, you only need a set of audio files. To create a new pipeline for a
126
+ FastSpeech 2, you need audio files, corresponding text labels, and an already trained Aligner model to estimate the
127
+ duration information that FastSpeech 2 needs as input. Let's go through them in order of increasing complexity.
128
+
129
+ ### Build a HiFi-GAN Pipeline
130
+
131
+ In the directory called
132
+ *Utility* there is a file called
133
+ *file_lists.py*. In this file you should write a function that returns a list of all the absolute paths to each of the
134
+ audio files in your dataset as strings.
135
+
136
+ Then go to the directory
137
+ *TrainingInterfaces/TrainingPipelines*. In there, make a copy of any existing pipeline that has HiFiGAN in its name. We
138
+ will use this as reference and only make the necessary changes to use the new dataset. Import the function you have just
139
+ written as
140
+ *get_file_list*. Now look out for a variable called
141
+ *model_save_dir*. This is the default directory that checkpoints will be saved into, unless you specify another one when
142
+ calling the training script. Change it to whatever you like.
143
+
144
+ Now you need to add your newly created pipeline to the pipeline dictionary in the file
145
+ *run_training_pipeline.py* in the top level of the toolkit. In this file, import the
146
+ *run* function from the pipeline you just created and give it a speaking name. Now in the
147
+ *pipeline_dict*, add your imported function as value and use as key a shorthand that makes sense. And just like that
148
+ you're done.
149
+
150
+ ### Build a FastSpeech 2 Pipeline
151
+
152
+ In the directory called
153
+ *Utility* there is a file called
154
+ *path_to_transcript_dicts.py*. In this file you should write a function that returns a dictionary that has all the
155
+ absolute paths to each of the audio files in your dataset as strings as the keys and the textual transcriptions of the
156
+ corresponding audios as the values.
157
+
158
+ Then go to the directory
159
+ *TrainingInterfaces/TrainingPipelines*. In there, make a copy of any existing pipeline that has FastSpeech 2 in its
160
+ name. We will use this copy as reference and only make the necessary changes to use the new dataset. Import the function
161
+ you have just written as
162
+ *build_path_to_transcript_dict*. Since the data will be processed a considerable amount, a cache will be built and saved
163
+ as file for quick and easy restarts. So find the variable
164
+ *cache_dir* and adapt it to your needs. The same goes for the variable
165
+ *save_dir*, which is where the checkpoints will be saved to. This is a default value, you can overwrite it when calling
166
+ the pipeline later using a command line argument, in case you want to fine-tune from a checkpoint and thus save into a
167
+ different directory.
168
+
169
+ In your new pipeline file, look out for the line in which the
170
+ *acoustic_model* is loaded. Change the path to the checkpoint of an Aligner model. It can either be the one that is
171
+ supplied with the toolkit in the download script, or one that you trained yourself. In the example pipelines, the one
172
+ that we provide is finetuned to the dataset it is applied to before it is used to extract durations.
173
+
174
+ Since we are using text here, we have to make sure that the text processing is adequate for the language. So check in
175
+ *Preprocessing/TextFrontend* whether the TextFrontend already has a language ID (e.g. 'en' and 'de') for the language of
176
+ your dataset. If not, you'll have to implement handling for that, but it should be pretty simple by just doing it
177
+ analogous to what is there already. Now back in the pipeline, change the
178
+ *lang* argument in the creation of the dataset and in the call to the train loop function to the language ID that
179
+ matches your data.
180
+
181
+ Now navigate to the implementation of the
182
+ *train_loop* that is called in the pipeline. In this file, find the function called
183
+ *plot_progress_spec*. This function will produce spectrogram plots during training, which is the most important way to
184
+ monitor the progress of the training. In there, you may need to add an example sentence for the language of the data you
185
+ are using. It should all be pretty clear from looking at it.
186
+
187
+ Once this is done, we are almost done, now we just need to make it available to the
188
+ *run_training_pipeline.py* file in the top level. In said file, import the
189
+ *run* function from the pipeline you just created and give it a speaking name. Now in the
190
+ *pipeline_dict*, add your imported function as value and use as key a shorthand that makes sense. And that's it.
191
+
192
+ ---
193
+
194
+ ## Training a Model
195
+
196
+ Once you have a pipeline built, training is super easy. Just activate your virtual environment and run the command
197
+ below. You might want to use something like nohup to keep it running after you log out from the server (then you should
198
+ also add -u as option to python) and add an & to start it in the background. Also, you might want to direct the std:out
199
+ and std:err into a file using > but all of that is just standard shell use and has nothing to do with the toolkit.
200
+
201
+ ```
202
+ python run_training_pipeline.py <shorthand of the pipeline>
203
+ ```
204
+
205
+ You can supply any of the following arguments, but don't have to (although for training you should definitely specify at
206
+ least a GPU ID).
207
+
208
+ ```
209
+ --gpu_id <ID of the GPU you wish to use, as displayed with nvidia-smi, default is cpu>
210
+
211
+ --resume_checkpoint <path to a checkpoint to load>
212
+
213
+ --resume (if this is present, the furthest checkpoint available will be loaded automatically)
214
+
215
+ --finetune (if this is present, the provided checkpoint will be fine-tuned on the data from this pipeline)
216
+
217
+ --model_save_dir <path to a directory where the checkpoints should be saved>
218
+ ```
219
+
220
+ After every epoch, some logs will be written to the console. If the loss becomes NaN, you'll need to use a smaller
221
+ learning rate or more warmup steps in the arguments of the call to the training_loop in the pipeline you are running.
222
+
223
+ If you get cuda out of memory errors, you need to decrease the batchsize in the arguments of the call to the
224
+ training_loop in the pipeline you are running. Try decreasing the batchsize in small steps until you get no more out of
225
+ cuda memory errors. Decreasing the batchsize may also require you to use a smaller learning rate. The use of GroupNorm
226
+ should make it so that the training remains mostly stable.
227
+
228
+ Speaking of plots: in the directory you specified for saving models, checkpoint files and self-explanatory visualization
229
+ data will appear. Since the checkpoints are quite big, only the five most recent ones will be kept. Training will stop
230
+ after 500,000 steps for FastSpeech 2, and after 2,500,000 steps for HiFiGAN. Depending on the machine and configuration you
231
+ are using this will take multiple days, so verify that everything works on small tests before running the big thing. If
232
+ you want to stop earlier, just kill the process, since everything is daemonic all the child-processes should die with
233
+ it. In case there are some ghost-processes left behind, you can use the following command to find them and kill them
234
+ manually.
235
+
236
+ ```
237
+ fuser -v /dev/nvidia*
238
+ ```
239
+
240
+ After training is complete, it is recommended to run
241
+ *run_weight_averaging.py*. If you made no changes to the architectures and stuck to the default directory layout, it
242
+ will automatically load any models you produced with one pipeline, average their parameters to get a slightly more
243
+ robust model and save the result as
244
+ *best.pt* in the same directory where all the corresponding checkpoints lie. This also compresses the file size
245
+ significantly, so you should do this and then use the
246
+ *best.pt* model for inference.
247
+
248
+ ---
249
+
250
+ ## Creating a new InferenceInterface
251
+
252
+ To build a new
253
+ *InferenceInterface*, which you can then use for super simple inference, we're going to use an existing one as template
254
+ again. Make a copy of the
255
+ *InferenceInterface*. Change the name of the class in the copy and change the paths to the models to use the trained
256
+ models of your choice. Instantiate the model with the same hyperparameters that you used when you created it in the
257
+ corresponding training pipeline. The last thing to check is the language that you supply to the text frontend. Make sure
258
+ it matches what you used during training.
259
+
260
+ With your newly created
261
+ *InferenceInterface*, you can use your trained models pretty much anywhere, e.g. in other projects. All you need is the
262
+ *Utility* directory, the
263
+ *Layers*
264
+ directory, the
265
+ *Preprocessing* directory and the
266
+ *InferenceInterfaces* directory (and of course your model checkpoint). That's all the code you need, it works
267
+ standalone.
268
+
269
+ ---
270
+
271
+ ## Using a trained Model for Inference
272
+
273
+ An
274
+ *InferenceInterface* contains two useful methods. They are
275
+ *read_to_file* and
276
+ *read_aloud*.
277
+
278
+ - *read_to_file* takes as input a list of strings and a filename. It will synthesize the sentences in the list and
279
+ concatenate them with a short pause in between and write them to the filepath you supply as the other argument.
280
+
281
+ - *read_aloud* takes just a string, which it will then convert to speech and immediately play using the system's
282
+ speakers. If you set the optional argument
283
+ *view* to
284
+ *True* when calling it, it will also show a plot of the phonemes it produced, the spectrogram it came up with, and the
285
+ wave it created from that spectrogram. So all the representations can be seen, text to phoneme, phoneme to spectrogram
286
+ and finally spectrogram to wave.
287
+
288
+ Those methods are used in demo code in the toolkit. In
289
+ *run_interactive_demo.py* and
290
+ *run_text_to_file_reader.py*, you can import
291
+ *InferenceInterfaces* that you created and add them to the dictionary in each of the files with a shorthand that makes
292
+ sense. In the interactive demo, you can just call the python script, then type in the shorthand when prompted and
293
+ immediately listen to your synthesis saying whatever you put in next (be wary of out of memory errors for too long
294
+ inputs). In the text reader demo script you have to call the function that wraps around the
295
+ *InferenceInterface* and supply the shorthand of your choice. It should be pretty clear from looking at it.
296
+
297
+ ---
298
+
299
+ ## FAQ
300
+
301
+ Here are a few points that were brought up by users:
302
+
303
+ - My error message shows GPU0, even though I specified a different GPU - The way GPU selection works is that the
304
+ specified GPU is set as the only visible device, in order to avoid backend stuff running accidentally on different
305
+ GPUs. So internally the program will name the device GPU0, because it is the only GPU it can see. It is actually
306
+ running on the GPU you specified.
307
+
308
+ ---
309
+
310
+ This toolkit has been written by Florian Lux (except for the pytorch modules taken
311
+ from [ESPnet](https://github.com/espnet/espnet) and
312
+ [ParallelWaveGAN](https://github.com/kan-bayashi/ParallelWaveGAN), as mentioned above), so if you come across problems
313
+ or questions, feel free to [write a mail](mailto:[email protected]). Also let me know if you do something
314
+ cool with it. Thank you for reading.
315
+
316
+ ## Citation
317
+
318
+ ```
319
+ @inproceedings{lux2021toucan,
320
+ title={{The IMS Toucan system for the Blizzard Challenge 2021}},
321
+ author={Florian Lux and Julia Koch and Antje Schweitzer and Ngoc Thang Vu},
322
+ year={2021},
323
+ booktitle={Proc. Blizzard Challenge Workshop},
324
+ volume={2021},
325
+ publisher={{Speech Synthesis SIG}}
326
+ }
327
+ ```
IMSToucan/Utility/SpeakerVisualization.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import matplotlib
2
+ import numpy
3
+ import soundfile as sf
4
+ from matplotlib import pyplot as plt
5
+ from matplotlib import cm
6
+ matplotlib.use("tkAgg")
7
+ from sklearn.manifold import TSNE
8
+ from sklearn.decomposition import PCA
9
+
10
+ from tqdm import tqdm
11
+
12
+ from Preprocessing.ProsodicConditionExtractor import ProsodicConditionExtractor
13
+
14
+
15
class Visualizer:

    def __init__(self, sr=48000, device="cpu"):
        """
        Visualizes speaker embeddings of audio files as 2D scatter plots.

        Args:
            sr: The sampling rate of the audios you want to visualize.
            device: Device on which the prosodic condition extractor runs.
        """
        self.tsne = TSNE(n_jobs=-1)
        self.pca = PCA(n_components=2)
        self.pros_cond_ext = ProsodicConditionExtractor(sr=sr, device=device)
        self.sr = sr
        # remember the device so that an extractor rebuilt after a sampling
        # rate change ends up on the same device (previously it silently fell
        # back to the extractor's default device)
        self.device = device

    def visualize_speaker_embeddings(self, label_to_filepaths, title_of_plot, save_file_path=None, include_pca=True, legend=True):
        """
        Extract a speaker embedding for every audio file and plot a t-SNE
        projection (and optionally also a PCA projection) of all embeddings,
        colored by label.

        Args:
            label_to_filepaths: dict mapping a label to a list of audio file paths.
            title_of_plot: title displayed above the scatter plot.
            save_file_path: if given, the plot is written there instead of being shown.
            include_pca: whether to additionally produce a PCA projection.
            legend: whether to draw a legend of the labels.
        """
        label_list = list()
        embedding_list = list()
        for label in tqdm(label_to_filepaths):
            for filepath in tqdm(label_to_filepaths[label]):
                wave, sr = sf.read(filepath)
                # clips shorter than one second are skipped; presumably too
                # short for a reliable embedding
                if len(wave) / sr < 1:
                    continue
                if self.sr != sr:
                    print("One of the Audios you included doesn't match the sampling rate of this visualizer object, "
                          "creating a new condition extractor. Results will be correct, but if there are too many cases "
                          "of changing samplingrate, this will run very slowly.")
                    # bug fix: pass the remembered device along; the rebuilt
                    # extractor used to ignore the device chosen in __init__
                    self.pros_cond_ext = ProsodicConditionExtractor(sr=sr, device=self.device)
                    self.sr = sr
                embedding_list.append(self.pros_cond_ext.extract_condition_from_reference_wave(wave).squeeze().numpy())
                label_list.append(label)
        embeddings_as_array = numpy.array(embedding_list)

        dimensionality_reduced_embeddings_tsne = self.tsne.fit_transform(embeddings_as_array)
        self._plot_embeddings(projected_data=dimensionality_reduced_embeddings_tsne,
                              labels=label_list,
                              title=title_of_plot + " t-SNE" if include_pca else title_of_plot,
                              save_file_path=save_file_path,
                              legend=legend)

        if include_pca:
            dimensionality_reduced_embeddings_pca = self.pca.fit_transform(embeddings_as_array)
            self._plot_embeddings(projected_data=dimensionality_reduced_embeddings_pca,
                                  labels=label_list,
                                  title=title_of_plot + " PCA",
                                  save_file_path=save_file_path,
                                  legend=legend)

    def _plot_embeddings(self, projected_data, labels, title, save_file_path, legend):
        """Scatter-plot 2D projected embeddings, one color per distinct label."""
        colors = cm.gist_rainbow(numpy.linspace(0, 1, len(set(labels))))
        label_to_color = dict()
        for index, label in enumerate(list(set(labels))):
            label_to_color[label] = colors[index]

        labels_to_points_x = dict()
        labels_to_points_y = dict()
        for label in labels:
            labels_to_points_x[label] = list()
            labels_to_points_y[label] = list()
        for index, label in enumerate(labels):
            labels_to_points_x[label].append(projected_data[index][0])
            labels_to_points_y[label].append(projected_data[index][1])

        fig, ax = plt.subplots()
        for label in set(labels):
            x = numpy.array(labels_to_points_x[label])
            y = numpy.array(labels_to_points_y[label])
            ax.scatter(x=x,
                       y=y,
                       c=label_to_color[label],
                       label=label,
                       alpha=0.9)
        if legend:
            ax.legend()
        fig.tight_layout()
        ax.axis('off')
        fig.subplots_adjust(top=0.9, bottom=0.0, right=1.0, left=0.0)
        ax.set_title(title)
        if save_file_path is not None:
            plt.savefig(save_file_path)
        else:
            plt.show()
        plt.close()
IMSToucan/Utility/__init__.py ADDED
File without changes
IMSToucan/Utility/utils.py ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Taken from ESPNet, modified by Florian Lux
3
+ """
4
+
5
+ import os
6
+ from abc import ABC
7
+
8
+ import torch
9
+
10
+
11
def cumsum_durations(durations):
    """
    Turn a sequence of durations into cumulative boundary positions and the
    midpoints between consecutive boundaries.

    Args:
        durations: iterable of numeric durations.

    Returns:
        tuple: (boundaries, midpoints), where boundaries starts at 0 and has
        one more element than durations, and midpoints[i] is the center of
        the i-th duration span.
    """
    boundaries = [0]
    running_total = 0
    for duration in durations:
        running_total += duration
        boundaries.append(running_total)
    midpoints = [(left + right) / 2 for left, right in zip(boundaries, boundaries[1:])]
    return boundaries, midpoints
20
+
21
+
22
def delete_old_checkpoints(checkpoint_dir, keep=5):
    """
    Remove all but the `keep` most recent checkpoint files from a directory.

    Files are expected to be named "checkpoint_<step>.pt"; "best.pt" and
    non-.pt files are never touched.

    Args:
        checkpoint_dir: directory containing the checkpoint files.
        keep: number of most recent checkpoints to retain.
    """
    steps = [int(name.split(".")[0].split("_")[1])
             for name in os.listdir(checkpoint_dir)
             if name.endswith(".pt") and name != "best.pt"]
    if len(steps) <= keep:
        # nothing to prune yet
        return
    # everything except the `keep` largest step numbers gets deleted
    for step in sorted(steps)[:-keep]:
        os.remove(os.path.join(checkpoint_dir, "checkpoint_{}.pt".format(step)))
34
+
35
+
36
def get_most_recent_checkpoint(checkpoint_dir, verbose=True):
    """
    Find the checkpoint file with the highest step number in a directory.

    Files are expected to be named "checkpoint_<step>.pt"; "best.pt" is ignored.

    Args:
        checkpoint_dir: directory containing the checkpoint files.
        verbose: whether to print which checkpoint will be reloaded.

    Returns:
        Full path to the most recent checkpoint, or None if there is none.
    """
    steps = [int(name.split(".")[0].split("_")[1])
             for name in os.listdir(checkpoint_dir)
             if name.endswith(".pt") and name != "best.pt"]
    if not steps:
        print("No previous checkpoints found, cannot reload.")
        return None
    newest_step = max(steps)
    if verbose:
        print("Reloading checkpoint_{}.pt".format(newest_step))
    return os.path.join(checkpoint_dir, "checkpoint_{}.pt".format(newest_step))
48
+
49
+
50
def make_pad_mask(lengths, xs=None, length_dim=-1, device=None):
    """
    Make mask tensor marking the padded positions with True.

    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): Reference tensor.
            If set, the mask is broadcast to the same shape as this tensor.
        length_dim (int, optional): Dimension of xs that corresponds to
            sequence length. Must not be 0 (the batch dimension).
        device: Device on which the mask should be created.

    Returns:
        Tensor: Boolean mask, True at padded positions.
    """
    if length_dim == 0:
        raise ValueError("length_dim cannot be 0: {}".format(length_dim))

    if not isinstance(lengths, list):
        lengths = lengths.tolist()
    batch_size = int(len(lengths))
    # the mask is as wide as the longest sequence, unless a reference tensor fixes the width
    max_length = int(max(lengths)) if xs is None else xs.size(length_dim)

    if device is None:
        positions = torch.arange(0, max_length, dtype=torch.int64)
    else:
        positions = torch.arange(0, max_length, dtype=torch.int64, device=device)
    position_grid = positions.unsqueeze(0).expand(batch_size, max_length)
    length_column = position_grid.new(lengths).unsqueeze(-1)
    # a position is padding when its index is at or beyond the sequence's length
    mask = position_grid >= length_column

    if xs is not None:
        assert xs.size(0) == batch_size, (xs.size(0), batch_size)

        if length_dim < 0:
            length_dim = xs.dim() + length_dim
        # insert singleton dims everywhere except batch and length, then broadcast
        broadcast_index = tuple(slice(None) if axis in (0, length_dim) else None
                                for axis in range(xs.dim()))
        mask = mask[broadcast_index].expand_as(xs).to(xs.device)
    return mask
95
+
96
+
97
def make_non_pad_mask(lengths, xs=None, length_dim=-1, device=None):
    """
    Make mask tensor marking the non-padded positions with True.

    This is simply the logical inverse of make_pad_mask.

    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): Reference tensor.
            If set, the mask is broadcast to the same shape as this tensor.
        length_dim (int, optional): Dimension of xs that corresponds to
            sequence length.
        device: Device on which the mask should be created.

    Returns:
        Tensor: Boolean mask, True at positions holding real content.
    """
    padded_positions = make_pad_mask(lengths, xs, length_dim, device=device)
    return ~padded_positions
115
+
116
+
117
def initialize(model, init):
    """
    Initialize weights of a neural network module.

    Weight tensors (dim > 1) are initialized with the requested scheme, bias
    vectors (dim == 1) are zeroed, and Embedding/LayerNorm modules are reset
    to their library defaults afterwards.

    Args:
        model: Target module.
        init: Name of the initialization scheme; one of "xavier_uniform",
            "xavier_normal", "kaiming_uniform" or "kaiming_normal".

    Raises:
        ValueError: If `init` names an unknown scheme (only checked when a
            weight tensor is actually encountered).
    """
    weight_initializers = {
        "xavier_uniform":  lambda w: torch.nn.init.xavier_uniform_(w),
        "xavier_normal":   lambda w: torch.nn.init.xavier_normal_(w),
        "kaiming_uniform": lambda w: torch.nn.init.kaiming_uniform_(w, nonlinearity="relu"),
        "kaiming_normal":  lambda w: torch.nn.init.kaiming_normal_(w, nonlinearity="relu"),
        }

    # phase 1: weights
    for param in model.parameters():
        if param.dim() > 1:
            if init not in weight_initializers:
                raise ValueError("Unknown initialization: " + init)
            weight_initializers[init](param.data)

    # phase 2: biases
    for param in model.parameters():
        if param.dim() == 1:
            param.data.zero_()

    # phase 3: modules that should keep their default initialization
    # (runs last on purpose, so it overrides phases 1 and 2)
    for module in model.modules():
        if isinstance(module, (torch.nn.Embedding, torch.nn.LayerNorm)):
            module.reset_parameters()
150
+
151
+
152
def pad_list(xs, pad_value):
    """
    Perform padding for the list of tensors.

    Args:
        xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
        pad_value (float): Value for padding.

    Returns:
        Tensor: Padded tensor (B, Tmax, `*`).
    """
    longest = max(tensor.size(0) for tensor in xs)
    # allocate the result on the same device/dtype as the inputs, pre-filled with the pad value
    padded = xs[0].new(len(xs), longest, *xs[0].size()[1:]).fill_(pad_value)

    for index, tensor in enumerate(xs):
        padded[index, :tensor.size(0)] = tensor

    return padded
172
+
173
+
174
def subsequent_mask(size, device="cpu", dtype=torch.bool):
    """
    Create a lower-triangular mask for subsequent steps (size, size).

    Position (i, j) is True iff j <= i, i.e. step i may attend to step j.

    :param int size: size of mask
    :param str device: "cpu" or "cuda" or torch.Tensor.device
    :param torch.dtype dtype: result dtype
    :rtype
    """
    full = torch.ones(size, size, device=device, dtype=dtype)
    return torch.tril(full)
185
+
186
+
187
class ScorerInterface:
    """
    Scorer interface for beam search.

    A scorer assigns a score to every token in the vocabulary given a prefix.

    Examples:
        * Search heuristics
            * :class:`espnet.nets.scorers.length_bonus.LengthBonus`
        * Decoder networks of the sequence-to-sequence models
            * :class:`espnet.nets.pytorch_backend.nets.transformer.decoder.Decoder`
            * :class:`espnet.nets.pytorch_backend.nets.rnn.decoders.Decoder`
        * Neural language models
            * :class:`espnet.nets.pytorch_backend.lm.transformer.TransformerLM`
            * :class:`espnet.nets.pytorch_backend.lm.default.DefaultRNNLM`
            * :class:`espnet.nets.pytorch_backend.lm.seq_rnn.SequentialRNNLM`

    """

    def init_state(self, x):
        """
        Get an initial state for decoding (optional).

        Args:
            x (torch.Tensor): The encoded feature tensor

        Returns: initial state

        """
        # stateless by default
        return None

    def select_state(self, state, i, new_id=None):
        """
        Select state with relative ids in the main beam search.

        Args:
            state: Decoder state for prefix tokens
            i (int): Index to select a state in the main beam search
            new_id (int): New label index to select a state if necessary

        Returns:
            state: pruned state

        """
        if state is None:
            return None
        return state[i]

    def score(self, y, state, x):
        """
        Score new token (required).

        Args:
            y (torch.Tensor): 1D torch.int64 prefix tokens.
            state: Scorer state for prefix tokens
            x (torch.Tensor): The encoder feature that generates ys.

        Returns:
            tuple[torch.Tensor, Any]: Tuple of
                scores for next token that has a shape of `(n_vocab)`
                and next state for ys

        """
        raise NotImplementedError

    def final_score(self, state):
        """
        Score eos (optional).

        Args:
            state: Scorer state for prefix tokens

        Returns:
            float: final score

        """
        # neutral contribution by default
        return 0.0
262
+
263
+
264
class BatchScorerInterface(ScorerInterface, ABC):
    """Batched variant of ScorerInterface: scores a whole beam at once."""

    def batch_init_state(self, x):
        """
        Get an initial state for decoding (optional).

        Args:
            x (torch.Tensor): The encoded feature tensor

        Returns: initial state

        """
        # default implementation simply delegates to the non-batched version
        return self.init_state(x)

    def batch_score(self, ys, states, xs):
        """
        Score new token batch (required).

        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.

        """
        # fall back to scoring each hypothesis individually, then stack
        per_hyp_scores = list()
        next_states = list()
        for y, state, x in zip(ys, states, xs):
            token_scores, next_state = self.score(y, state, x)
            per_hyp_scores.append(token_scores)
            next_states.append(next_state)
        stacked = torch.cat(per_hyp_scores, 0).view(ys.shape[0], -1)
        return stacked, next_states
302
+
303
+
304
def to_device(m, x):
    """Send tensor into the device of the module.

    Args:
        m (torch.nn.Module): Torch module (or a reference tensor).
        x (Tensor): Torch tensor.

    Returns:
        Tensor: Torch tensor located in the same place as torch module.

    Raises:
        TypeError: If ``m`` is neither a torch.nn.Module nor a torch.Tensor.
    """
    if isinstance(m, torch.nn.Module):
        # take the device from the module's first parameter
        device = next(m.parameters()).device
    elif isinstance(m, torch.Tensor):
        device = m.device
    else:
        # fixed message: was "torch.tensor, bot got"
        raise TypeError(f"Expected torch.nn.Module or torch.Tensor, but got: {type(m)}")
    return x.to(device)
IMSToucan/requirements.txt ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ appdirs~=1.4.4
2
+ attrs~=21.2.0
3
+ audioread~=2.1.9
4
+ auraloss~=0.2.1
5
+ certifi~=2021.5.30
6
+ cffi~=1.14.6
7
+ charset-normalizer~=2.0.4
8
+ clldutils~=3.9.0
9
+ colorama~=0.4.4
10
+ colorlog~=6.4.1
11
+ commonmark~=0.9.1
12
+ csvw~=1.11.0
13
+ cycler~=0.10.0
14
+ Cython~=0.29.24
15
+ decorator~=5.0.9
16
+ editdistance~=0.5.3
17
+ emoji~=1.4.2
18
+ filelock~=3.0.12
19
+ ftfy~=6.0.3
20
+ future~=0.18.2
21
+ gdown~=3.13.0
22
+ graphviz~=0.17
23
+ huggingface-hub~=0.0.16
24
+ HyperPyYAML~=1.0.0
25
+ idna~=3.2
26
+ isodate~=0.6.0
27
+ jiwer~=2.2.1
28
+ joblib~=1.0.1
29
+ kiwisolver~=1.3.2
30
+ librosa~=0.8.1
31
+ llvmlite~=0.37.0
32
+ matplotlib~=3.4.3
33
+ munkres~=1.1.4
34
+ numba~=0.54.0
35
+ numpy~=1.20.3
36
+ packaging~=21.0
37
+ panphon~=0.19
38
+ pedalboard~=0.3.11
39
+ phonemizer~=2.2.2
40
+ Pillow~=8.3.2
41
+ pooch~=1.5.1
42
+ pycparser~=2.20
43
+ Pygments~=2.10.0
44
+ pyloudnorm~=0.1.0
45
+ pyparsing~=2.4.7
46
+ pysndfx~=0.3.6
47
+ PySocks~=1.7.1
48
+ python-dateutil~=2.8.2
49
+ python-Levenshtein~=0.12.2
50
+ pyworld~=0.3.0
51
+ PyYAML~=5.4.1
52
+ regex~=2021.8.28
53
+ requests~=2.26.0
54
+ resampy~=0.2.2
55
+ rfc3986~=1.5.0
56
+ rich~=9.10.0
57
+ ruamel.yaml~=0.17.16
58
+ ruamel.yaml.clib~=0.2.6
59
+ scikit-learn~=0.24.2
60
+ scipy~=1.7.1
61
+ segments~=2.2.0
62
+ sentencepiece~=0.1.96
63
+ six~=1.16.0
64
+ sklearn~=0.0
65
+ sounddevice~=0.4.2
66
+ SoundFile~=0.10.3.post1
67
+ speechbrain~=0.5.10
68
+ tabulate~=0.8.9
69
+ threadpoolctl~=2.2.0
70
+ torch~=1.10.1
71
+ torch-complex~=0.2.1
72
+ torchaudio~=0.10.1
73
+ torchvision~=0.11.2
74
+ torchviz~=0.0.2
75
+ tqdm~=4.62.2
76
+ typing-extensions~=3.10.0.2
77
+ unicodecsv~=0.14.1
78
+ Unidecode~=1.2.0
79
+ unsilence~=1.0.8
80
+ uritemplate~=3.0.1
81
+ urllib3~=1.26.6
82
+ wcwidth~=0.2.5
83
+ wincertstore~=0.2