2ms committed on
Commit
f1e29ff
·
1 Parent(s): 121039d

remove unused option

Files changed (1):
  1. models/aggregator.py +0 -12
models/aggregator.py CHANGED
@@ -12,14 +12,9 @@ class CLIPVisionCfg:
     width: int = 512
     head_width: int = 64
     mlp_ratio: float = 4.0
-    patch_size: int = 16
-    image_size: Union[Tuple[int, int], int] = 224
 
     ls_init_value: Optional[float] = None  # layer scale initial value
     patch_dropout: float = 0.  # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results
-    attentional_pool: bool = False  # whether to use attentional pooler in the last embedding layer (overrides pool_type)
-    attn_pooler_queries: int = 256  # n_queries for attentional pooler
-    attn_pooler_heads: int = 8  # n heads for attentional_pooling
     no_ln_pre: bool = False  # disable pre transformer LayerNorm
     pos_embed_type: str = 'none'
     final_ln_after_pool: bool = True  # apply final LayerNorm after pooling
@@ -28,13 +23,6 @@ class CLIPVisionCfg:
     act_kwargs: Optional[dict] = None
     norm_kwargs: Optional[dict] = None
 
-    timm_model_name: Optional[str] = None  # a valid model name overrides layers, width, patch_size
-    timm_model_pretrained: bool = False  # use (imagenet) pretrained weights for named model
-    timm_pool: str = 'avg'  # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
-    timm_proj: str = 'linear'  # linear projection for timm model output ('linear', 'mlp', '')
-    timm_proj_bias: bool = False  # enable bias final projection
-    timm_drop: float = 0.  # head dropout
-    timm_drop_path: Optional[float] = None  # backbone stochastic depth
     img_embed: bool = False
     cls_embed: bool = False
     projection = False
 
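For reference, here is a minimal sketch of CLIPVisionCfg as it stands after this commit, reconstructed from the diff context. The @dataclass decorator and the imports are assumptions (the diff only shows the class body from line 12 onward), and any fields defined before that point are omitted.

from dataclasses import dataclass
from typing import Optional


@dataclass
class CLIPVisionCfg:
    # Fields above the first diff hunk are not visible in this commit
    # and are omitted here.
    width: int = 512
    head_width: int = 64
    mlp_ratio: float = 4.0

    ls_init_value: Optional[float] = None  # layer scale initial value
    patch_dropout: float = 0.  # fraction of patches dropped during training (0 = disabled)
    no_ln_pre: bool = False  # disable pre-transformer LayerNorm
    pos_embed_type: str = 'none'
    final_ln_after_pool: bool = True  # apply final LayerNorm after pooling

    act_kwargs: Optional[dict] = None
    norm_kwargs: Optional[dict] = None

    img_embed: bool = False
    cls_embed: bool = False
    # No type annotation, so @dataclass treats this as a class attribute
    # rather than an instance field; it will not appear in __init__.
    projection = False

One practical consequence of the cleanup: since dataclass fields define the generated __init__, any caller still passing one of the removed options (e.g. CLIPVisionCfg(patch_size=16) or any timm_* keyword) will now fail with a TypeError rather than silently carrying an unused value.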