import os

import datasets

_HOMEPAGE = ""

_CITATION = ""

_DESCRIPTION = """\
This is a dataset of particle samples to be classified for the ancient mortars project.
"""

_URLS = {
    "train": "https://huggingface.co./datasets/apetulante/mortars_test/resolve/main/data/train.zip",
    "validation": "https://huggingface.co./datasets/apetulante/mortars_test/resolve/main/data/valid.zip",
    "test": "https://huggingface.co./datasets/apetulante/mortars_test/resolve/main/data/test.zip",
}

# names_list = open("https://huggingface.co./datasets/apetulante/mortars_test/resolve/main/data/particle_names.txt", "r").read().split("\n")
_NAMES = [
    "kurkar",
    "sand",
    "soil",
    "chert",
    "obsidian",
    "arch_18",
    "kurkar_nahal",
    "sand_beach",
    "volcanicash_pozzuoli",
    "arch",
    "volcanicash",
]


class MortarsTest(datasets.GeneratorBasedBuilder):
    """Ancient particles dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "labels": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "labels"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download and extract the three split archives, then hand each split
        # generator an iterator over its extracted files.
        data_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["train"]]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["validation"]]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["test"]]),
                },
            ),
        ]

    def _generate_examples(self, files):
        for i, path in enumerate(files):
            # `path` already points at an extracted file; keep only .bmp images.
            file_name = os.path.basename(path)
            if file_name.endswith(".bmp"):
                yield i, {
                    "image_file_path": path,
                    "image": path,
                    # The class name is the lowercased text before the first "-"
                    # and the first "_" in the file name. Note that truncating at
                    # the first underscore means multi-part names in _NAMES
                    # (e.g. "kurkar_nahal") are never produced by this rule.
                    "labels": file_name.lower().split("-")[0].split("_")[0],
                }
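
# --- Usage sketch (illustrative, not part of the original script) ---
# A minimal example of loading this builder with the `datasets` library,
# assuming the script above is saved locally as `mortars_test.py`. The split
# names and feature columns come from the builder itself; the file name and
# the printed fields are assumptions for demonstration. Depending on your
# `datasets` version, loading a local script may require passing
# trust_remote_code=True.
if __name__ == "__main__":
    from datasets import load_dataset

    # Pointing load_dataset at the script downloads and extracts the
    # train/validation/test zips declared in _URLS, then runs
    # _generate_examples over each split.
    ds = load_dataset("mortars_test.py")

    # Inspect one training example: the image path plus its class label,
    # decoded back from the ClassLabel integer to its string name.
    example = ds["train"][0]
    print(example["image_file_path"])
    print(ds["train"].features["labels"].int2str(example["labels"]))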