vllm.model_executor.models.mistral_large_3_eagle

logger module-attribute

logger = init_logger(__name__)

EagleMistralLarge3ForCausalLM

Bases: MistralLarge3ForCausalLM

Source code in vllm/model_executor/models/mistral_large_3_eagle.py
class EagleMistralLarge3ForCausalLM(MistralLarge3ForCausalLM):
    remapping = MistralLarge3ForCausalLM.remapping | {
        r"eagle_linear\.weight": r"model.fc.weight",
        r"eagle_linear\.qscale_act": r"model.fc.input_scale",
        r"eagle_linear\.qscale_weight": r"model.fc.weight_scale",
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        target_layer_num = vllm_config.model_config.get_num_layers(
            vllm_config.parallel_config
        )
        vllm_config.model_config = vllm_config.speculative_config.draft_model_config
        # draft model quantization config may differ from target model
        self.quant_config = VllmConfig.get_quantization_config(
            vllm_config.speculative_config.draft_model_config, vllm_config.load_config
        )
        vllm_config.quant_config = self.quant_config
        self.model_cls = partial(
            EagleMistralLarge3Model, start_layer_id=target_layer_num
        )
        super().__init__(vllm_config=vllm_config, prefix=prefix)

    def get_input_embeddings(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: MultiModalEmbeddings | None = None,
        *,
        is_multimodal: torch.Tensor | None = None,
        handle_oov_mm_token: bool = False,
    ) -> torch.Tensor:
        inputs_embeds = super().embed_input_ids(input_ids)

        if multimodal_embeddings is None or len(multimodal_embeddings) == 0:
            return inputs_embeds

        assert is_multimodal is not None

        return _merge_multimodal_embeddings(
            inputs_embeds=inputs_embeds,
            multimodal_embeddings=multimodal_embeddings,
            is_multimodal=is_multimodal,
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        inputs_embeds: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        hidden_states = self.model(input_ids, positions, hidden_states, inputs_embeds)
        return hidden_states, hidden_states

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        # Pretend we've loaded the embedding and lm_head weights
        # (later copied from target model)
        return super().load_weights(weights) | {
            "model.embed_tokens.weight",
            "lm_head.weight",
        }

    def embed_input_ids(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: NestedTensors | None = None,
        is_multimodal: torch.Tensor | None = None,
    ) -> torch.Tensor:
        return self.model.embed_input_ids(input_ids)

model_cls instance-attribute

model_cls = partial(
    EagleMistralLarge3Model, start_layer_id=target_layer_num
)

quant_config instance-attribute

quant_config = get_quantization_config(
    draft_model_config, load_config
)

remapping class-attribute instance-attribute

remapping = remapping | {
    "eagle_linear\\.weight": "model.fc.weight",
    "eagle_linear\\.qscale_act": "model.fc.input_scale",
    "eagle_linear\\.qscale_weight": "model.fc.weight_scale",
}
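
This table extends the base class's remapping so that the EAGLE projection's checkpoint names (eagle_linear.*) are renamed onto the draft model's model.fc.* parameters (the weight plus its activation- and weight-quantization scales) at load time. Below is a minimal standalone sketch of how such a regex table could be applied to checkpoint parameter names; remap_name is a hypothetical helper for illustration only, not part of vLLM's loader.

# Minimal sketch (not vLLM's loader): applying a regex remapping table
# like the one above to checkpoint parameter names.
import re

REMAPPING = {
    r"eagle_linear\.weight": r"model.fc.weight",
    r"eagle_linear\.qscale_act": r"model.fc.input_scale",
    r"eagle_linear\.qscale_weight": r"model.fc.weight_scale",
}

def remap_name(name: str) -> str:  # hypothetical helper, illustration only
    for pattern, replacement in REMAPPING.items():
        new_name, n = re.subn(pattern, replacement, name)
        if n:
            return new_name
    return name

assert remap_name("eagle_linear.weight") == "model.fc.weight"
name = "model.layers.0.input_layernorm.weight"
assert remap_name(name) == name  # non-EAGLE names pass through unchanged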

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/mistral_large_3_eagle.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    target_layer_num = vllm_config.model_config.get_num_layers(
        vllm_config.parallel_config
    )
    vllm_config.model_config = vllm_config.speculative_config.draft_model_config
    # draft model quantization config may differ from target model
    self.quant_config = VllmConfig.get_quantization_config(
        vllm_config.speculative_config.draft_model_config, vllm_config.load_config
    )
    vllm_config.quant_config = self.quant_config
    self.model_cls = partial(
        EagleMistralLarge3Model, start_layer_id=target_layer_num
    )
    super().__init__(vllm_config=vllm_config, prefix=prefix)

embed_input_ids

embed_input_ids(
    input_ids: Tensor,
    multimodal_embeddings: NestedTensors | None = None,
    is_multimodal: Tensor | None = None,
) -> Tensor
Source code in vllm/model_executor/models/mistral_large_3_eagle.py
def embed_input_ids(
    self,
    input_ids: torch.Tensor,
    multimodal_embeddings: NestedTensors | None = None,
    is_multimodal: torch.Tensor | None = None,
) -> torch.Tensor:
    return self.model.embed_input_ids(input_ids)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    hidden_states: Tensor,
    inputs_embeds: Tensor | None = None,
) -> tuple[Tensor, Tensor]
Source code in vllm/model_executor/models/mistral_large_3_eagle.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
    inputs_embeds: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    hidden_states = self.model(input_ids, positions, hidden_states, inputs_embeds)
    return hidden_states, hidden_states

get_input_embeddings

get_input_embeddings(
    input_ids: Tensor,
    multimodal_embeddings: MultiModalEmbeddings
    | None = None,
    *,
    is_multimodal: Tensor | None = None,
    handle_oov_mm_token: bool = False,
) -> Tensor
Source code in vllm/model_executor/models/mistral_large_3_eagle.py
def get_input_embeddings(
    self,
    input_ids: torch.Tensor,
    multimodal_embeddings: MultiModalEmbeddings | None = None,
    *,
    is_multimodal: torch.Tensor | None = None,
    handle_oov_mm_token: bool = False,
) -> torch.Tensor:
    inputs_embeds = super().embed_input_ids(input_ids)

    if multimodal_embeddings is None or len(multimodal_embeddings) == 0:
        return inputs_embeds

    assert is_multimodal is not None

    return _merge_multimodal_embeddings(
        inputs_embeds=inputs_embeds,
        multimodal_embeddings=multimodal_embeddings,
        is_multimodal=is_multimodal,
    )
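
When multimodal embeddings are present, the token embeddings produced by embed_input_ids are overwritten at the positions flagged by the is_multimodal mask via vLLM's _merge_multimodal_embeddings helper. The sketch below illustrates only that masking idea in plain PyTorch; it is not the helper's implementation, and the shapes are made up for the example.

# Plain-PyTorch sketch of the masked merge (illustration only,
# not the _merge_multimodal_embeddings implementation).
import torch

hidden = 8
inputs_embeds = torch.zeros(6, hidden)      # text embeddings for 6 tokens
mm_embeds = torch.randn(2, hidden)          # embeddings for 2 placeholder tokens
is_multimodal = torch.tensor([0, 0, 1, 1, 0, 0], dtype=torch.bool)

# Overwrite the placeholder rows with the multimodal embeddings, in order.
merged = inputs_embeds.clone()
merged[is_multimodal] = mm_embeds
assert merged[2].equal(mm_embeds[0]) and merged[5].equal(inputs_embeds[5])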

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/mistral_large_3_eagle.py
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
    # Pretend we've loaded the embedding and lm_head weights
    # (later copied from target model)
    return super().load_weights(weights) | {
        "model.embed_tokens.weight",
        "lm_head.weight",
    }
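
The set returned by load_weights tells the loader which parameters were satisfied by the checkpoint. model.embed_tokens.weight and lm_head.weight are added even though they are not read here: the draft shares them with the target model and they are copied over afterwards, so reporting them as loaded keeps them from being treated as missing. A toy sketch of that bookkeeping (illustration only; the real check is performed by vLLM's weight loader):

# Toy sketch: why the shared weights are reported as loaded.
expected = {
    "model.layers.0.self_attn.q_proj.weight",
    "model.fc.weight",
    "model.embed_tokens.weight",   # shared with the target model
    "lm_head.weight",              # shared with the target model
}
loaded_from_checkpoint = {
    "model.layers.0.self_attn.q_proj.weight",
    "model.fc.weight",
}
# The draft reports the shared weights as loaded even though they are
# copied from the target model later.
reported = loaded_from_checkpoint | {"model.embed_tokens.weight", "lm_head.weight"}
assert not (expected - reported)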

EagleMistralLarge3Model

Bases: DeepseekV2Model

Source code in vllm/model_executor/models/mistral_large_3_eagle.py
@support_torch_compile
class EagleMistralLarge3Model(DeepseekV2Model):
    def __init__(
        self, *, vllm_config: VllmConfig, prefix: str = "", start_layer_id: int = 0
    ):
        nn.Module.__init__(self)

        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.vllm_config = vllm_config

        self.vocab_size = config.vocab_size

        assert get_pp_group().world_size == 1
        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
            quant_config=quant_config,
            prefix=f"{prefix}.embed_tokens",
        )

        self.layers = nn.ModuleList(
            [
                DeepseekV2DecoderLayer(
                    vllm_config=vllm_config,
                    prefix=maybe_prefix(prefix, f"layers.{i + start_layer_id}"),
                )
                for i in range(self.config.num_hidden_layers)
            ]
        )
        self.start_layer = 0
        self.end_layer = self.config.num_hidden_layers

        self.fc = RowParallelLinear(
            self.config.hidden_size * 2,
            self.config.hidden_size,
            bias=False,
            input_is_parallel=False,
            quant_config=quant_config,
            return_bias=False,
        )
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor:
        if inputs_embeds is None:
            inputs_embeds = self.embed_input_ids(input_ids)
        inputs_embeds = self.fc(torch.cat((inputs_embeds, hidden_states), dim=-1))
        output = super().forward(
            input_ids, positions, intermediate_tensors=None, inputs_embeds=inputs_embeds
        )
        assert isinstance(output, torch.Tensor)
        return output

config instance-attribute

config = config

embed_tokens instance-attribute

embed_tokens = VocabParallelEmbedding(
    vocab_size,
    hidden_size,
    quant_config=quant_config,
    prefix=f"{prefix}.embed_tokens",
)

end_layer instance-attribute

end_layer = num_hidden_layers

fc instance-attribute

fc = RowParallelLinear(
    hidden_size * 2,
    hidden_size,
    bias=False,
    input_is_parallel=False,
    quant_config=quant_config,
    return_bias=False,
)

layers instance-attribute

layers = ModuleList(
    [
        (
            DeepseekV2DecoderLayer(
                vllm_config=vllm_config,
                prefix=maybe_prefix(
                    prefix, f"layers.{i + start_layer_id}"
                ),
            )
        )
        for i in (range(num_hidden_layers))
    ]
)

make_empty_intermediate_tensors instance-attribute

make_empty_intermediate_tensors = (
    make_empty_intermediate_tensors_factory(
        ["hidden_states", "residual"], hidden_size
    )
)

norm instance-attribute

norm = RMSNorm(hidden_size, eps=rms_norm_eps)

start_layer instance-attribute

start_layer = 0

vllm_config instance-attribute

vllm_config = vllm_config

vocab_size instance-attribute

vocab_size = vocab_size

__init__

__init__(
    *,
    vllm_config: VllmConfig,
    prefix: str = "",
    start_layer_id: int = 0,
)
Source code in vllm/model_executor/models/mistral_large_3_eagle.py
def __init__(
    self, *, vllm_config: VllmConfig, prefix: str = "", start_layer_id: int = 0
):
    nn.Module.__init__(self)

    config = vllm_config.model_config.hf_config
    quant_config = vllm_config.quant_config
    self.config = config
    self.vllm_config = vllm_config

    self.vocab_size = config.vocab_size

    assert get_pp_group().world_size == 1
    self.embed_tokens = VocabParallelEmbedding(
        config.vocab_size,
        config.hidden_size,
        quant_config=quant_config,
        prefix=f"{prefix}.embed_tokens",
    )

    self.layers = nn.ModuleList(
        [
            DeepseekV2DecoderLayer(
                vllm_config=vllm_config,
                prefix=maybe_prefix(prefix, f"layers.{i + start_layer_id}"),
            )
            for i in range(self.config.num_hidden_layers)
        ]
    )
    self.start_layer = 0
    self.end_layer = self.config.num_hidden_layers

    self.fc = RowParallelLinear(
        self.config.hidden_size * 2,
        self.config.hidden_size,
        bias=False,
        input_is_parallel=False,
        quant_config=quant_config,
        return_bias=False,
    )
    self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
    self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
        ["hidden_states", "residual"], config.hidden_size
    )
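
The decoder layers are built with indices offset by start_layer_id (set to the target model's layer count in EagleMistralLarge3ForCausalLM.__init__), so the draft's layer prefixes continue the target model's numbering rather than overlapping it. A small sketch of the resulting prefixes, assuming maybe_prefix(prefix, name) simply joins the two with a dot when prefix is non-empty (an assumption about that helper, and hypothetical layer counts):

# Sketch of the draft layer prefixes, assuming maybe_prefix joins with "."
# when prefix is non-empty (assumption made for illustration).
def maybe_prefix(prefix: str, name: str) -> str:
    return f"{prefix}.{name}" if prefix else name

target_layer_num = 61            # hypothetical target depth
num_draft_layers = 1             # hypothetical draft depth
prefix = "model"

prefixes = [
    maybe_prefix(prefix, f"layers.{i + target_layer_num}")
    for i in range(num_draft_layers)
]
print(prefixes)                  # ['model.layers.61']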

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    hidden_states: Tensor,
    inputs_embeds: Tensor | None = None,
) -> Tensor
Source code in vllm/model_executor/models/mistral_large_3_eagle.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
    inputs_embeds: torch.Tensor | None = None,
) -> torch.Tensor:
    if inputs_embeds is None:
        inputs_embeds = self.embed_input_ids(input_ids)
    inputs_embeds = self.fc(torch.cat((inputs_embeds, hidden_states), dim=-1))
    output = super().forward(
        input_ids, positions, intermediate_tensors=None, inputs_embeds=inputs_embeds
    )
    assert isinstance(output, torch.Tensor)
    return output
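
This is the EAGLE fusion step: the token embeddings are concatenated with the target model's hidden states along the last dimension, projected from twice the hidden size back to the hidden size by fc, and the result is fed through the decoder layers as inputs_embeds. A minimal plain-PyTorch sketch of that dataflow, with nn.Linear standing in for RowParallelLinear and an identity placeholder for the decoder stack:

# Minimal plain-PyTorch sketch of the fusion in forward():
# concat(embeddings, target hidden states) -> 2h->h projection -> decoder.
import torch
import torch.nn as nn

hidden_size, num_tokens = 16, 4
fc = nn.Linear(2 * hidden_size, hidden_size, bias=False)  # stand-in for RowParallelLinear
decoder = nn.Identity()                                   # placeholder for the decoder layers

inputs_embeds = torch.randn(num_tokens, hidden_size)      # draft token embeddings
target_hidden = torch.randn(num_tokens, hidden_size)      # hidden states from the target model

fused = fc(torch.cat((inputs_embeds, target_hidden), dim=-1))
draft_hidden = decoder(fused)
assert draft_hidden.shape == (num_tokens, hidden_size)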