YannQi committed on
Commit
187b607
·
verified ·
1 Parent(s): 753c5a3

Upload configuration_r.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. configuration_r.py +5 -2
configuration_r.py CHANGED
@@ -17,7 +17,6 @@ from transformers.configuration_utils import PretrainedConfig
17
  from transformers.utils import (
18
  logging,
19
  )
20
- from transformers.models.auto import CONFIG_MAPPING, AutoConfig
21
 
22
 
23
  logger = logging.get_logger(__name__)
@@ -28,7 +27,9 @@ class RConfig(PretrainedConfig):
28
  attribute_map = {
29
  "image_token_id": "image_token_index",
30
  }
31
- sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
 
 
32
 
33
  def __init__(
34
  self,
@@ -45,6 +46,8 @@ class RConfig(PretrainedConfig):
45
  max_position_embeddings=32768,
46
  **kwargs,
47
  ):
 
 
48
  self.image_token_index = image_token_index
49
  self.projector_hidden_act = projector_hidden_act
50
  self.multimodal_projector_bias = multimodal_projector_bias
 
17
  from transformers.utils import (
18
  logging,
19
  )
 
20
 
21
 
22
  logger = logging.get_logger(__name__)
 
27
  attribute_map = {
28
  "image_token_id": "image_token_index",
29
  }
30
+
31
+
32
+ # sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
33
 
34
  def __init__(
35
  self,
 
46
  max_position_embeddings=32768,
47
  **kwargs,
48
  ):
49
+
50
+ from transformers.models.auto import CONFIG_MAPPING, AutoConfig # for vllm
51
  self.image_token_index = image_token_index
52
  self.projector_hidden_act = projector_hidden_act
53
  self.multimodal_projector_bias = multimodal_projector_bias