Model Introduction
Highlights
- The model is based on wav2vec2-base and fine-tuned on IEMOCAP and the Emotional Speech Dataset (ESD), so it supports both Chinese and English audio.
- The classification accuracy reaches 92.9%.
- The model card shows only part of the source code; see Files and Versions for the complete scripts.
- The model predicts whether an audio clip is angry or neutral.
Some details of the inference code are as follows:
import logging
import pathlib
import re
import sys
import time
import csv
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Set, Union
import datasets
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from packaging import version
from torch.cuda.amp import GradScaler, autocast
import librosa
from lang_trans import arabic
from datasets import Dataset
import soundfile as sf
from model import Wav2Vec2ForCTCnCLS
from transformers.trainer_utils import get_last_checkpoint
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    is_apex_available,
    trainer_utils,
)
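# Local directory containing the fine-tuned checkpoint (see Files and Versions).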
local_model_path = "local_model"
if is_apex_available():
    from apex import amp

if version.parse(torch.__version__) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
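# Subclass transformers.TrainingArguments with prediction-only defaults (do_predict=True, do_eval=False).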
@dataclass
class TrainingArguments(TrainingArguments):
    output_dir: str = field(
        default="output/angry_tmp", metadata={"help": "The output directory for checkpoints and predictions."})
    do_predict: bool = field(
        default=True, metadata={"help": "Whether to run prediction on the input audio."})
    do_eval: bool = field(
        default=False, metadata={"help": "Whether to run evaluation on a dev set."})
    overwrite_output_dir: bool = field(
        default=True, metadata={"help": "Overwrite the content of the output directory."})
    per_device_eval_batch_size: int = field(
        default=2, metadata={"help": "Batch size per device used for prediction/evaluation."})
    warmup_ratio: float = field(
        default=0.1, metadata={"help": "Linear warmup over warmup_ratio fraction of total steps."}
    )
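# Collator that dynamically pads the raw waveform features in a batch so they can be stacked into one tensor.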
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor (:class:`~transformers.Wav2Vec2Processor`)
            The processor used for processing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (:obj:`int`, `optional`):
            Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
        max_length_labels (:obj:`int`, `optional`):
            Maximum length of the ``labels`` returned list and optionally padding length (see above).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
    audio_only = False
    duration = 6
    sample_rate = 16000

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.duration * self.sample_rate,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        return batch
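# Minimal Trainer subclass: _prepare_inputs moves every tensor in the batch onto the target device
# (AMP, Apex and DeepSpeed are explicitly disabled for this prediction-only use case).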
class CTCTrainer(Trainer):
    def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
        self.use_amp = False
        self.use_apex = False
        self.deepspeed = False
        self.scaler = GradScaler()
        for k, v in inputs.items():
            if isinstance(v, torch.Tensor):
                kwargs = dict(device=self.args.device)
                if self.deepspeed and inputs[k].dtype != torch.int64:
                    kwargs.update(dict(dtype=self.args.hf_deepspeed_config.dtype()))
                inputs[k] = v.to(**kwargs)
        if self.args.past_index >= 0 and self._past is not None:
            inputs["mems"] = self._past
        return inputs
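# Wrap a single audio file path in a datasets.Dataset so it can go through the same preprocessing pipeline.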
def create_dataset(audio_path):
    data = {
        'file': [audio_path]
    }
    dataset = Dataset.from_dict(data)
    return dataset
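# End-to-end inference for a single audio file: parse arguments, build the processor,
# load the fine-tuned checkpoint, preprocess the audio, and run Trainer.predict.
# Note: ModelArguments, DataTrainingArguments, Orthography and configure_logger are defined
# in the full source code (see Files and Versions); they are not shown in this excerpt.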
def execute_angry_predict(audio_path):
    target_sr = 16000
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    orthography = Orthography.from_name(data_args.orthography.lower())
    orthography.tokenizer = model_args.tokenizer
    processor = orthography.create_processor(model_args)

    if data_args.dataset_name == 'emotion':
        val_dataset = create_dataset(audio_path)
        cls_label_map = {"neutral": 0, "angry": 1}
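    # Load the fine-tuned wav2vec2 model with a CTC head and a 2-way (neutral/angry) classification head.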
    model = Wav2Vec2ForCTCnCLS.from_pretrained(
        local_model_path,
        gradient_checkpointing=True,
        cls_len=len(cls_label_map),
    )
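    # Read the audio file and resample it to 16 kHz mono before feature extraction.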
    def prepare_example(example, audio_only=False):
        example["speech"], example["sampling_rate"] = librosa.load(example[data_args.speech_file_column], sr=target_sr)
        orig_sample_rate = example["sampling_rate"]
        target_sample_rate = target_sr
        if orig_sample_rate != target_sample_rate:
            example["speech"] = librosa.resample(example["speech"], orig_sr=orig_sample_rate, target_sr=target_sample_rate)
        if data_args.max_duration_in_seconds is not None:
            example["duration_in_seconds"] = len(example["speech"]) / example["sampling_rate"]
        return example
    if training_args.do_predict:
        val_dataset = val_dataset.map(prepare_example, fn_kwargs={'audio_only': True})
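    # Turn the loaded waveforms into model input_values with the Wav2Vec2 processor.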
    def prepare_dataset(batch, audio_only=False):
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        batch["input_values"] = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0]).input_values
        return batch
    if training_args.do_predict:
        val_dataset = val_dataset.map(
            prepare_dataset,
            fn_kwargs={'audio_only': True},
            batch_size=training_args.per_device_eval_batch_size,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
        )
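    # Build the padding collator, optionally freeze the convolutional feature extractor,
    # and wrap everything in the custom CTCTrainer.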
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    trainer = CTCTrainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        eval_dataset=val_dataset,
        tokenizer=processor.feature_extractor,
    )
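    # Trainer.predict returns (CTC logits, classification logits); the argmax over the
    # classification logits selects the predicted emotion label.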
    if training_args.do_predict:
        logger.info('******* Predict ********')
        data_collator.audio_only = True
        results = {}
        result = ''
        predictions, labels, metrics = trainer.predict(val_dataset, metric_key_prefix="predict")
        logits_ctc, logits_cls = predictions
        pred_ids = np.argmax(logits_cls, axis=-1)
        if pred_ids[0] == 0:
            result = "neutral"
        if pred_ids[0] == 1:
            result = "angry"
        results[audio_path] = result
        print("results", results)
if __name__ == "__main__":
    audio_path = 'audio.mp3'
    execute_angry_predict(audio_path)
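Running the script prints a dictionary that maps the input path to the predicted label, e.g. results {'audio.mp3': 'angry'} (illustrative output). Any audio clip readable by librosa can be substituted for audio_path; it is converted to mono and resampled to 16 kHz during preprocessing, and loading MP3 files may require an additional audio backend such as ffmpeg.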