Still uploading

This script merges tokenized speech datasets stored in memmap format; the input datasets are concatenated to form a larger training dataset.
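
Each input dataset directory is expected to contain, for every split, a flat int32 token file ({split}_input_ids.memmap) plus a companion shape file ({split}_input_ids_shape.npy). The snippet below is a minimal, hypothetical sketch of how such a pair could be written for one dataset; the helper name write_split, the directory name, and the dummy tokens are placeholders, not part of this repository.

import numpy as np
import os

def write_split(dataset_dir, split, token_ids):
    # Write a (num_samples, seq_len) int32 array in the memmap + shape-file
    # layout that the merge script below expects.
    os.makedirs(dataset_dir, exist_ok=True)
    token_ids = np.asarray(token_ids, dtype='int32')
    out = np.memmap(
        os.path.join(dataset_dir, f'{split}_input_ids.memmap'),
        dtype='int32', mode='w+', shape=token_ids.shape
    )
    out[:] = token_ids
    out.flush()
    del out
    np.save(os.path.join(dataset_dir, f'{split}_input_ids_shape.npy'), token_ids.shape)

# Example: 8 dummy sequences of length 16
write_split('libriheavy_tts_example', 'train', np.zeros((8, 16), dtype='int32'))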

import numpy as np
import os

def merge_memmap_datasets(dataset_dirs, output_dir):
    # Ensure the output directory exists
    os.makedirs(output_dir, exist_ok=True)

    # Dataset splits to be merged
    splits = ['train', 'val']

    for split in splits:
        shapes = []
        seq_len = None
        total_samples = 0

        # Collect shapes of all datasets and check sequence length consistency
        for dataset_dir in dataset_dirs:
            shape_path = os.path.join(dataset_dir, f'{split}_input_ids_shape.npy')
            if not os.path.exists(shape_path):
                print(f"Warning: {split}_input_ids_shape.npy not found in {dataset_dir}, skipping this dataset.")
                continue
            shape = np.load(shape_path)
            print(f"Loaded shape of {split} data from {dataset_dir}: {shape}")
            shape = tuple(shape)
            shapes.append((dataset_dir, shape))
            total_samples += shape[0]
            if seq_len is None:
                seq_len = shape[1]
            elif seq_len != shape[1]:
                print(f"Error: Sequence length mismatch in {split} data from {dataset_dir}.")
                return

        if total_samples == 0:
            print(f"Error: No valid {split} data found for merging.")
            continue

        new_shape = (total_samples, seq_len)

        # Create new memmap file
        output_memmap_path = os.path.join(output_dir, f'{split}_input_ids.memmap')
        output_memmap = np.memmap(
            output_memmap_path, dtype='int32', mode='w+', shape=new_shape
        )
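        # mode='w+' creates (or overwrites) the file on disk at the full merged
        # size and memory-maps it for writing.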

        # Copy data from each dataset to the new memmap file
        start_idx = 0
        for dataset_dir, shape in shapes:
            memmap_path = os.path.join(dataset_dir, f'{split}_input_ids.memmap')
            data = np.memmap(
                memmap_path, dtype='int32', mode='r', shape=shape
            )
            end_idx = start_idx + shape[0]
            output_memmap[start_idx:end_idx, :] = data[:]
            print(f"Merged {split} data from {dataset_dir} into positions {start_idx}:{end_idx}")
            start_idx = end_idx
            del data  # Free memory

        # Flush data to disk and release the memmap
        output_memmap.flush()
        del output_memmap

        # Save the new shape file
        np.save(os.path.join(output_dir, f'{split}_input_ids_shape.npy'), new_shape)

        print(f"Completed merging {split} data. New shape: {new_shape}")

if __name__ == "__main__":
    dataset_dirs = [
        'libriheavy_tts_1',
        'libriheavy_tts_2',
        'libriheavy_tts_3',
        'libriheavy_tts_4'
    ]
    output_dir = 'libriheavy_tts_all'
    merge_memmap_datasets(dataset_dirs, output_dir)
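
After merging, the result can be spot-checked by reopening the merged memmap with the saved shape file. A minimal sketch, assuming the default output directory above and the int32 dtype used by the script:

import numpy as np
import os

def load_split(dataset_dir, split):
    # Read the saved shape, then map the merged token file read-only.
    shape = tuple(np.load(os.path.join(dataset_dir, f'{split}_input_ids_shape.npy')))
    return np.memmap(
        os.path.join(dataset_dir, f'{split}_input_ids.memmap'),
        dtype='int32', mode='r', shape=shape
    )

train = load_split('libriheavy_tts_all', 'train')
print(train.shape)     # (total_samples, seq_len)
print(train[0, :10])   # first ten tokens of the first sequence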