Search is not available for this dataset
image
array 3D
segmentation
array 2D
depth
array 3D
normal
array 3D
noise
array 3D
[[[0.32549020648002625,0.3803921639919281,0.42352941632270813,0.4588235318660736,0.4627451002597809,(...TRUNCATED)
[[11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11(...TRUNCATED)
[[[2.0542805194854736,2.067620038986206,2.072299003601074,2.079108476638794,2.0875754356384277,2.094(...TRUNCATED)
[[[0.9586489796638489,0.9638221859931946,0.9632613658905029,0.962428629398346,0.9634701609611511,0.9(...TRUNCATED)
[[[0.9806107878684998,0.053858935832977295,0.10456126928329468,0.6988880038261414,0.729923665523529,(...TRUNCATED)
[[[0.5529412031173706,0.5529412031173706,0.5529412031173706,0.5490196347236633,0.5529412031173706,0.(...TRUNCATED)
[[11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11(...TRUNCATED)
[[[2.093186616897583,2.0968432426452637,2.101078748703003,2.106825828552246,2.1095168590545654,2.109(...TRUNCATED)
[[[0.6945553421974182,0.6677089929580688,0.6210795640945435,0.592814028263092,0.5494623184204102,0.5(...TRUNCATED)
[[[0.9457264542579651,0.44787901639938354,0.6487457156181335,0.07645350694656372,0.5587204098701477,(...TRUNCATED)
[[[0.2862745225429535,0.3529411852359772,0.3960784375667572,0.4156862795352936,0.43529412150382996,0(...TRUNCATED)
[[-1,-1,6,6,6,6,6,-1,-1,-1,-1,-1,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,-1,-1(...TRUNCATED)
[[[3.3896234035491943,3.38931941986084,3.4007084369659424,3.4028170108795166,3.522275447845459,3.533(...TRUNCATED)
[[[0.7046963572502136,0.6834113001823425,0.7042268514633179,0.7047390937805176,0.6782957911491394,0.(...TRUNCATED)
[[[0.7984963059425354,0.7351861596107483,0.49925166368484497,0.6089344024658203,0.673566460609436,0.(...TRUNCATED)
[[[0.9529411792755127,0.9529411792755127,0.9490196108818054,0.9529411792755127,0.9607843160629272,0.(...TRUNCATED)
[[6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,(...TRUNCATED)
[[[3.6774911880493164,3.6792519092559814,3.6835148334503174,3.7075164318084717,3.7222025394439697,3.(...TRUNCATED)
[[[0.9100942015647888,0.9086946249008179,0.9157149195671082,0.912836492061615,0.901278555393219,0.89(...TRUNCATED)
[[[0.03872889280319214,0.053709983825683594,0.16429954767227173,0.3676280975341797,0.243040204048156(...TRUNCATED)
[[[0.6313725709915161,0.5490196347236633,0.5333333611488342,0.6274510025978088,0.7019608020782471,0.(...TRUNCATED)
[[-1,-1,-1,-1,-1,-1,6,6,6,6,6,6,6,6,-1,-1,-1,-1,6,6,6,6,6,6,6,6,6,6,-1,-1,6,6,6,6,6,6,6,6,6,6,6,6,6,(...TRUNCATED)
[[[3.7695064544677734,3.753194808959961,3.7319133281707764,3.7480568885803223,3.7964982986450195,3.8(...TRUNCATED)
[[[0.8245018720626831,0.8480510115623474,0.8614233136177063,0.8903467059135437,0.9021579027175903,0.(...TRUNCATED)
[[[0.5399834513664246,0.38637256622314453,0.43011564016342163,0.2925761938095093,0.9405155777931213,(...TRUNCATED)
[[[0.47058823704719543,0.41960784792900085,0.3921568691730499,0.3686274588108063,0.33725491166114807(...TRUNCATED)
[[11,11,11,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,6,6,6,6,6,6,6,6,6,6,6,-1,-1,-1,11,11,11,11,11,11,11,11,1(...TRUNCATED)
[[[3.3236899375915527,3.308816909790039,3.3366148471832275,3.3609752655029297,3.429319381713867,3.42(...TRUNCATED)
[[[0.8634723424911499,0.858651876449585,0.8588695526123047,0.8528505563735962,0.8489331603050232,0.8(...TRUNCATED)
[[[0.3317156434059143,0.9091740846633911,0.9729962944984436,0.9875734448432922,0.20794522762298584,0(...TRUNCATED)
[[[0.3529411852359772,0.3529411852359772,0.3529411852359772,0.35686275362968445,0.3607843220233917,0(...TRUNCATED)
[[11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11(...TRUNCATED)
[[[3.480231523513794,3.4966070652008057,3.529115915298462,3.534325361251831,3.560516119003296,3.5456(...TRUNCATED)
[[[0.7335928678512573,0.7163592576980591,0.7262836694717407,0.718122124671936,0.7384446263313293,0.7(...TRUNCATED)
[[[0.7425082921981812,0.6010449528694153,0.19122952222824097,0.4366301894187927,0.2927890419960022,0(...TRUNCATED)
[[[0.38823530077934265,0.3490196168422699,0.3137255012989044,0.3019607961177826,0.3019607961177826,0(...TRUNCATED)
[[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1(...TRUNCATED)
[[[2.2736029624938965,2.2829623222351074,2.28863787651062,2.289372205734253,2.2952826023101807,2.305(...TRUNCATED)
[[[0.9663563370704651,0.9645861983299255,0.9693266749382019,0.969973623752594,0.9809253811836243,0.9(...TRUNCATED)
[[[0.4210195541381836,0.8602367639541626,0.1153443455696106,0.0556071400642395,0.5062122344970703,0.(...TRUNCATED)
[[[1.0,1.0,1.0,0.9921568632125854,0.9921568632125854,0.9764705896377563,0.8627451062202454,0.8039215(...TRUNCATED)
[[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,(...TRUNCATED)
[[[0.0,0.0,3.0612640380859375,3.060121774673462,3.076970338821411,3.0779712200164795,3.0753152370452(...TRUNCATED)
[[[0.0,0.0,0.2451324760913849,0.29221394658088684,0.3089829683303833,0.3623013198375702,0.4091989398(...TRUNCATED)
[[[0.6180070638656616,0.016381144523620605,0.7287148833274841,0.5976073145866394,0.8199144601821899,(...TRUNCATED)
[[[0.32156863808631897,0.32156863808631897,0.3019607961177826,0.2823529541492462,0.2705882489681244,(...TRUNCATED)
[[2,2,2,2,2,2,2,2,2,2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,(...TRUNCATED)
[[[2.6640377044677734,2.6722207069396973,2.6731247901916504,2.685142755508423,2.7037830352783203,2.7(...TRUNCATED)
[[[0.6172354817390442,0.6022921800613403,0.6072736978530884,0.6086580157279968,0.603678286075592,0.5(...TRUNCATED)
[[[0.8295018076896667,0.30207592248916626,0.04866224527359009,0.05302554368972778,0.4153133034706116(...TRUNCATED)

This is the NYUv2 dataset for scene-understanding tasks. I downloaded the original data from the Tsinghua Cloud and converted it into a Hugging Face Dataset. Credit to the paper "ForkMerge: Mitigating Negative Transfer in Auxiliary-Task Learning".

Dataset Information

This dataset contains two splits: 'train' and 'val' (the latter is used as the test set). Each sample has five fields: 'image', 'segmentation', 'depth', 'normal', and 'noise'. The 'noise' field is random data generated with torch.rand().

Usage

dataset = load_dataset('tanganke/nyuv2')
dataset = dataset.with_format('torch') # this will convert the items into `torch.Tensor` objects

This returns a DatasetDict:

DatasetDict({
    train: Dataset({
        features: ['image', 'segmentation', 'depth', 'normal', 'noise'],
        num_rows: 795
    })
    val: Dataset({
        features: ['image', 'segmentation', 'depth', 'normal', 'noise'],
        num_rows: 654
    })
})

The features:

{'image': Array3D(shape=(3, 288, 384), dtype='float32', id=None),
 'segmentation': Array2D(shape=(288, 384), dtype='int64', id=None),
 'depth': Array3D(shape=(1, 288, 384), dtype='float32', id=None),
 'normal': Array3D(shape=(3, 288, 384), dtype='float32', id=None),
 'noise': Array3D(shape=(1, 288, 384), dtype='float32', id=None)}
Downloads last month
77