fix: same assertions in other models
modeling_for_glue.py  (+10 -10)
@@ -143,16 +143,16 @@ class BertForQuestionAnswering(BertPreTrainedModel):
             return_dict if return_dict is not None else self.config.use_return_dict
         )
 
+        assert head_mask is None
+        assert inputs_embeds is None
+        assert output_attentions is None
+        assert output_hidden_states is None
+        assert return_dict is None
         outputs = self.bert(
             input_ids,
             attention_mask=attention_mask,
             token_type_ids=token_type_ids,
             position_ids=position_ids,
-            head_mask=head_mask,
-            inputs_embeds=inputs_embeds,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
         )
 
         sequence_output = outputs[0]
@@ -230,16 +230,16 @@ class BertForTokenClassification(BertPreTrainedModel):
             return_dict if return_dict is not None else self.config.use_return_dict
         )
 
+        assert head_mask is None
+        assert inputs_embeds is None
+        assert output_attentions is None
+        assert output_hidden_states is None
+        assert return_dict is None
         outputs = self.bert(
             input_ids,
             attention_mask=attention_mask,
             token_type_ids=token_type_ids,
             position_ids=position_ids,
-            head_mask=head_mask,
-            inputs_embeds=inputs_embeds,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
        )
 
         sequence_output = outputs[0]
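For callers, the effect in both classes is the same: the forward pass stops forwarding head_mask, inputs_embeds, output_attentions, output_hidden_states, and return_dict to self.bert and instead requires each of them to stay None. A minimal sketch of the new contract is below; the checkpoint name and tensors are illustrative assumptions, not taken from this commit, and only the guaranteed failure path is shown (passing any of the removed kwargs trips the corresponding assertion before self.bert is ever called).

import torch
from modeling_for_glue import BertForQuestionAnswering

# Illustrative checkpoint name, not part of this commit; any BERT
# checkpoint compatible with the class would do for the sketch.
model = BertForQuestionAnswering.from_pretrained("bert-base-uncased")
model.eval()

input_ids = torch.tensor([[101, 2054, 2003, 102]])  # toy token ids
attention_mask = torch.ones_like(input_ids)

# After this commit, passing any of the removed kwargs raises an
# AssertionError inside forward, before self.bert is called.
try:
    model(input_ids, attention_mask=attention_mask, head_mask=torch.ones(12))
except AssertionError:
    print("head_mask is no longer forwarded to self.bert")

The same contract applies to BertForTokenClassification, since the second hunk makes an identical change there.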