```json
{
  "_name_or_path": "model/patchtst_etth_pretraining",
  "activation_function": "gelu",
  "architectures": [
    "PatchTSTForMaskPretraining"
  ],
  "attention_dropout": 0.0,
  "bias": true,
  "channel_attention": false,
  "channel_consistent_masking": false,
  "context_length": 512,
  "d_model": 128,
  "dropout": 0.3,
  "dropout_path": 0.0,
  "encoder_attention_heads": 16,
  "encoder_ffn_dim": 512,
  "encoder_layers": 6,
  "ff_dropout": 0.0,
  "head_dropout": 0.2,
  "init_std": 0.02,
  "learn_pe": false,
  "mask_input": true,
  "mask_patch_ratios": [
    1,
    1
  ],
  "mask_patches": [
    2,
    3
  ],
  "mask_ratio": 0.2,
  "mask_type": "random",
  "mask_value": 0,
  "model_type": "patchtst",
  "norm": "BatchNorm",
  "num_classes": 0,
  "num_input_channels": 7,
  "num_output_channels": 1,
  "num_patches": 42,
  "patch_length": 12,
  "pooling": null,
  "positional_dropout": 0.0,
  "positional_encoding": "sincos",
  "pre_norm": false,
  "prediction_length": 24,
  "prediction_range": null,
  "scaling": "mean",
  "seed_number": null,
  "shared_embedding": true,
  "shared_projection": true,
  "stride": 12,
  "torch_dtype": "float32",
  "transformers_version": "4.33.0.dev0",
  "unmasked_channel_indices": null,
  "use_cls_token": false
}
```
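As a minimal sketch (not part of the original config), the block below shows how such a config could be loaded and turned into a masked-pretraining model. Note the hedges: this `config.json` was saved with a transformers development build (4.33.0.dev0), where the architecture was named `PatchTSTForMaskPretraining`; in released transformers (>= 4.36) the class is `PatchTSTForPretraining` and some field names differ (for example `patch_stride` rather than `stride`), so unrecognized keys may simply be ignored or fall back to defaults. The path `model/patchtst_etth_pretraining` is taken from `_name_or_path` above and is assumed to contain this file.

```python
# Minimal sketch, assuming released transformers (>= 4.36) and that the directory
# "model/patchtst_etth_pretraining" holds the config.json shown above.
# Caveat: the config was written by a dev build; released versions rename the
# pretraining class to PatchTSTForPretraining and may not recognize every key.
import torch
from transformers import PatchTSTConfig, PatchTSTForPretraining

# Load the saved configuration (unknown legacy keys are tolerated).
config = PatchTSTConfig.from_pretrained("model/patchtst_etth_pretraining")

# Build a randomly initialized model for masked pretraining.
model = PatchTSTForPretraining(config)

# Dummy batch shaped (batch, context_length, num_input_channels) = (8, 512, 7).
past_values = torch.randn(8, config.context_length, config.num_input_channels)

# The forward pass masks patches internally and returns a reconstruction (MSE) loss.
outputs = model(past_values=past_values)
print(outputs.loss)
```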