daking committed
Commit
3096448
1 Parent(s): 0fdcdcc

Upload model

Files changed (4)
  1. config.json +26 -0
  2. configuration_resnet.py +35 -0
  3. modeling_resnet.py +56 -0
  4. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "architectures": [
+     "ResnetModelForImageClassification"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_resnet.ResnetConfig",
+     "AutoModelForImageClassification": "modeling_resnet.ResnetModelForImageClassification"
+   },
+   "avg_down": true,
+   "base_width": 64,
+   "block_type": "bottleneck",
+   "cardinality": 1,
+   "input_channels": 3,
+   "layers": [
+     3,
+     4,
+     6,
+     3
+   ],
+   "model_type": "resnet",
+   "num_classes": 1000,
+   "stem_type": "deep",
+   "stem_width": 32,
+   "torch_dtype": "float32",
+   "transformers_version": "4.26.0"
+ }
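
Note: once this commit is on the Hub, the config above can be loaded directly. A minimal sketch, assuming a hypothetical repo id "daking/custom-resnet50d" (the actual repository name is not shown in this commit); trust_remote_code=True is required because auto_map points at code files stored in the repo itself:

from transformers import AutoConfig

# "daking/custom-resnet50d" is a placeholder repo id, not confirmed by this commit.
# trust_remote_code=True lets Transformers import configuration_resnet.py from the repo.
config = AutoConfig.from_pretrained("daking/custom-resnet50d", trust_remote_code=True)
print(config.block_type, config.layers)  # bottleneck [3, 4, 6, 3]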
configuration_resnet.py ADDED
@@ -0,0 +1,35 @@
+ from transformers import PretrainedConfig
+ from typing import List
+
+
+ class ResnetConfig(PretrainedConfig):
+     model_type = "resnet"
+
+     def __init__(
+         self,
+         block_type="bottleneck",
+         layers: List[int] = [3, 4, 6, 3],
+         num_classes: int = 1000,
+         input_channels: int = 3,
+         cardinality: int = 1,
+         base_width: int = 64,
+         stem_width: int = 64,
+         stem_type: str = "",
+         avg_down: bool = False,
+         **kwargs,
+     ):
+         if block_type not in ["basic", "bottleneck"]:
+             raise ValueError(f"`block_type` must be 'basic' or 'bottleneck', got {block_type}.")
+         if stem_type not in ["", "deep", "deep-tiered"]:
+             raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.")
+
+         self.block_type = block_type
+         self.layers = layers
+         self.num_classes = num_classes
+         self.input_channels = input_channels
+         self.cardinality = cardinality
+         self.base_width = base_width
+         self.stem_width = stem_width
+         self.stem_type = stem_type
+         self.avg_down = avg_down
+         super().__init__(**kwargs)
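
Note: a config.json like the one above can be produced from this class directly; the non-default arguments correspond to a ResNet-50d. A sketch, assuming configuration_resnet.py is on the Python path and using an illustrative output directory name:

from configuration_resnet import ResnetConfig

# Non-default values below reproduce the config.json in this commit (ResNet-50d).
resnet50d_config = ResnetConfig(
    block_type="bottleneck",
    stem_width=32,
    stem_type="deep",
    avg_down=True,
)
resnet50d_config.save_pretrained("custom-resnet")  # writes custom-resnet/config.json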
modeling_resnet.py ADDED
@@ -0,0 +1,56 @@
+ import torch
+ from transformers import PreTrainedModel
+ from timm.models.resnet import BasicBlock, Bottleneck, ResNet
+ from .configuration_resnet import ResnetConfig
+
+
+ # Map config.block_type values to the corresponding timm block classes.
+ BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck}
+
+
+ class ResnetModel(PreTrainedModel):
+     config_class = ResnetConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         block_layer = BLOCK_MAPPING[config.block_type]
+         self.model = ResNet(
+             block_layer,
+             config.layers,
+             num_classes=config.num_classes,
+             in_chans=config.input_channels,
+             cardinality=config.cardinality,
+             base_width=config.base_width,
+             stem_width=config.stem_width,
+             stem_type=config.stem_type,
+             avg_down=config.avg_down,
+         )
+
+     def forward(self, tensor):
+         return self.model.forward_features(tensor)
+
+
+ class ResnetModelForImageClassification(PreTrainedModel):
+     config_class = ResnetConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         block_layer = BLOCK_MAPPING[config.block_type]
+         self.model = ResNet(
+             block_layer,
+             config.layers,
+             num_classes=config.num_classes,
+             in_chans=config.input_channels,
+             cardinality=config.cardinality,
+             base_width=config.base_width,
+             stem_width=config.stem_width,
+             stem_type=config.stem_type,
+             avg_down=config.avg_down,
+         )
+
+     def forward(self, tensor, labels=None):
+         logits = self.model(tensor)
+         if labels is not None:
+             loss = torch.nn.functional.cross_entropy(logits, labels)
+             return {"loss": loss, "logits": logits}
+         return {"logits": logits}
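
Note: because modeling_resnet.py uses a relative import, the class is most easily exercised through the auto classes, which resolve auto_map from config.json. A smoke-test sketch with random inputs, again using the placeholder repo id "daking/custom-resnet50d"; the 224x224 input size is the usual ImageNet convention, not something fixed by this code:

import torch
from transformers import AutoModelForImageClassification

# Placeholder repo id; trust_remote_code=True builds ResnetModelForImageClassification
# from the modeling_resnet.py shipped in the repository.
model = AutoModelForImageClassification.from_pretrained(
    "daking/custom-resnet50d", trust_remote_code=True
)

pixel_values = torch.randn(1, 3, 224, 224)  # one dummy RGB image
outputs = model(pixel_values)
print(outputs["logits"].shape)  # torch.Size([1, 1000])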
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a945a1d4bcb0c80f4a944dd77dc32441d39c2c06fa67d650e9a9090fde8934b
+ size 102620157
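
Note: the weights are stored with Git LFS, so the commit records only the three-line pointer above, not the 102 MB binary. A sketch of verifying a downloaded copy against the pointer's oid (the local filename is an assumption about where the real weights were saved):

import hashlib

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:  # assumed path to the downloaded weights
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

# True if the download matches the "oid sha256:..." line in the LFS pointer.
print(h.hexdigest() == "4a945a1d4bcb0c80f4a944dd77dc32441d39c2c06fa67d650e9a9090fde8934b")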