From d0f71c1efa693203ef033b90053f2b73985745ed Mon Sep 17 00:00:00 2001
From: vasqu
Date: Fri, 5 Dec 2025 17:15:45 +0100
Subject: [PATCH] fix warning

---
 src/transformers/models/glm4v/configuration_glm4v.py       | 4 +++-
 src/transformers/models/glm4v/modular_glm4v.py             | 4 +++-
 .../models/glm4v_moe/configuration_glm4v_moe.py             | 4 +++-
 src/transformers/models/glm4v_moe/modular_glm4v_moe.py      | 2 +-
 .../models/qwen2_5_omni/configuration_qwen2_5_omni.py       | 6 ++++--
 .../models/qwen2_5_omni/modular_qwen2_5_omni.py             | 6 ++++--
 .../models/qwen2_5_vl/configuration_qwen2_5_vl.py           | 2 +-
 src/transformers/models/qwen2_vl/configuration_qwen2_vl.py  | 2 +-
 8 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/src/transformers/models/glm4v/configuration_glm4v.py b/src/transformers/models/glm4v/configuration_glm4v.py
index 35c29f07246d..f707ab291a8c 100644
--- a/src/transformers/models/glm4v/configuration_glm4v.py
+++ b/src/transformers/models/glm4v/configuration_glm4v.py
@@ -234,7 +234,9 @@ def __init__(
         self.attention_dropout = attention_dropout
         self.rope_parameters = rope_parameters
 
-        super().__init__(tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope"}, **kwargs)
+        super().__init__(
+            tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope_section"}, **kwargs
+        )
 
 
 class Glm4vConfig(PreTrainedConfig):
diff --git a/src/transformers/models/glm4v/modular_glm4v.py b/src/transformers/models/glm4v/modular_glm4v.py
index 2cd6c5d0fd06..7f81d03f8ac9 100644
--- a/src/transformers/models/glm4v/modular_glm4v.py
+++ b/src/transformers/models/glm4v/modular_glm4v.py
@@ -271,7 +271,9 @@ def __init__(
         self.attention_dropout = attention_dropout
         self.rope_parameters = rope_parameters
 
-        super().__init__(tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope"}, **kwargs)
+        super().__init__(
+            tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope_section"}, **kwargs
+        )
 
 
 class Glm4vConfig(PreTrainedConfig):
diff --git a/src/transformers/models/glm4v_moe/configuration_glm4v_moe.py b/src/transformers/models/glm4v_moe/configuration_glm4v_moe.py
index 20e4f3ad492c..fdfb96f75294 100644
--- a/src/transformers/models/glm4v_moe/configuration_glm4v_moe.py
+++ b/src/transformers/models/glm4v_moe/configuration_glm4v_moe.py
@@ -280,7 +280,9 @@ def __init__(
         self.first_k_dense_replace = first_k_dense_replace
         self.norm_topk_prob = norm_topk_prob
         self.router_aux_loss_coef = router_aux_loss_coef
-        super().__init__(tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope"}, **kwargs)
+        super().__init__(
+            tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope_section"}, **kwargs
+        )
 
 
 class Glm4vMoeConfig(PreTrainedConfig):
diff --git a/src/transformers/models/glm4v_moe/modular_glm4v_moe.py b/src/transformers/models/glm4v_moe/modular_glm4v_moe.py
index 06967fb07642..71c213f940d1 100644
--- a/src/transformers/models/glm4v_moe/modular_glm4v_moe.py
+++ b/src/transformers/models/glm4v_moe/modular_glm4v_moe.py
@@ -227,7 +227,7 @@ def __init__(
         self.norm_topk_prob = norm_topk_prob
         self.router_aux_loss_coef = router_aux_loss_coef
         PreTrainedConfig.__init__(
-            self, tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope"}, **kwargs
+            self, tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope_section"}, **kwargs
         )
 
 
diff --git a/src/transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py b/src/transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py
index 6a23e0668083..8ae45c5104f3 100644
--- a/src/transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py
+++ b/src/transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py
@@ -365,7 +365,7 @@ def __init__(
         self.rope_parameters = rope_parameters
         super().__init__(
             tie_word_embeddings=tie_word_embeddings,
-            ignore_keys_at_rope_validation={"mrope"},
+            ignore_keys_at_rope_validation={"mrope_section"},
             **kwargs,
         )
 
@@ -713,7 +713,9 @@ def __init__(
         layer_type_validation(self.layer_types, self.num_hidden_layers)
         self.rope_parameters = rope_parameters
 
-        super().__init__(tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope"}, **kwargs)
+        super().__init__(
+            tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope_section"}, **kwargs
+        )
 
 
 class Qwen2_5OmniDiTConfig(PreTrainedConfig):
diff --git a/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py b/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py
index 3f0b62102644..2bad5d01d7bb 100644
--- a/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py
+++ b/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py
@@ -399,7 +399,7 @@ def __init__(
         self.rope_parameters = rope_parameters
         super().__init__(
             tie_word_embeddings=tie_word_embeddings,
-            ignore_keys_at_rope_validation={"mrope"},
+            ignore_keys_at_rope_validation={"mrope_section"},
             **kwargs,
         )
 
@@ -747,7 +747,9 @@ def __init__(
         layer_type_validation(self.layer_types, self.num_hidden_layers)
         self.rope_parameters = rope_parameters
 
-        super().__init__(tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope"}, **kwargs)
+        super().__init__(
+            tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope_section"}, **kwargs
+        )
 
 
 class Qwen2_5OmniDiTConfig(PreTrainedConfig):
diff --git a/src/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py b/src/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py
index 084b4d8c9ce6..8832400df55d 100644
--- a/src/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py
+++ b/src/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py
@@ -230,7 +230,7 @@ def __init__(
             bos_token_id=bos_token_id,
             eos_token_id=eos_token_id,
             pad_token_id=pad_token_id,
-            ignore_keys_at_rope_validation={"mrope"},
+            ignore_keys_at_rope_validation={"mrope_section"},
             **kwargs,
         )
 
diff --git a/src/transformers/models/qwen2_vl/configuration_qwen2_vl.py b/src/transformers/models/qwen2_vl/configuration_qwen2_vl.py
index e4578375036f..8372690ef471 100644
--- a/src/transformers/models/qwen2_vl/configuration_qwen2_vl.py
+++ b/src/transformers/models/qwen2_vl/configuration_qwen2_vl.py
@@ -218,7 +218,7 @@ def __init__(
             bos_token_id=bos_token_id,
             eos_token_id=eos_token_id,
             pad_token_id=pad_token_id,
-            ignore_keys_at_rope_validation={"mrope"},
+            ignore_keys_at_rope_validation={"mrope_section"},
             **kwargs,
        )
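
Outside the patch itself, a minimal sketch of the constructor pattern the hunks converge on, for reference while reviewing. It assumes a transformers build in which `PreTrainedConfig.__init__` accepts the `ignore_keys_at_rope_validation` keyword, as in the files touched above; the subclass name and the example `rope_parameters` values are hypothetical and used only for illustration.

```python
# Illustrative sketch only (not part of the patch). Assumes a transformers
# build whose PreTrainedConfig.__init__ accepts `ignore_keys_at_rope_validation`,
# as in the configs modified above; class name and values are hypothetical.
from transformers import PreTrainedConfig  # exact import path may differ by version


class MyMRopeTextConfig(PreTrainedConfig):
    def __init__(self, rope_parameters=None, tie_word_embeddings=False, **kwargs):
        self.rope_parameters = rope_parameters
        # Pass the key that actually appears inside `rope_parameters`
        # ("mrope_section", not "mrope") so rope validation skips it
        # instead of warning about an unrecognized entry.
        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            ignore_keys_at_rope_validation={"mrope_section"},
            **kwargs,
        )
```

For multimodal rope configs of this family, `rope_parameters` typically carries an `"mrope_section"` entry (e.g. a list such as `[16, 24, 24]`), which is why ignoring that key, rather than the literal `"mrope"`, is what silences the validation warning.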