
vllm.tokenizers.deepseekv32

DeepseekV32Tokenizer

Bases: HfTokenizer

Source code in vllm/tokenizers/deepseekv32.py
@TokenizerRegistry.register("deepseek_v32")
class DeepseekV32Tokenizer(HfTokenizer):
    def __init__(self, tokenizer: TokenizerLike):
        self.tokenizer = tokenizer
        self.name_or_path = (
            tokenizer.name_or_path if hasattr(tokenizer, "name_or_path") else ""
        )

    @classmethod
    def from_pretrained(
        cls,
        path_or_repo_id: str | Path,
        *args,
        trust_remote_code: bool = False,
        revision: str | None = None,
        download_dir: str | None = None,
        **kwargs,
    ) -> "TokenizerLike":
        tokenizer = super().from_pretrained(
            path_or_repo_id,
            *args,
            trust_remote_code=trust_remote_code,
            revision=revision,
            download_dir=download_dir,
            **kwargs,
        )
        return DeepseekV32Tokenizer(tokenizer)

    def apply_chat_template(self, messages, tools=None, **kwargs):
        thinking = kwargs.get("thinking", False)
        thinking_mode = "thinking"
        if not thinking:
            thinking_mode = "chat"
        messages = messages.copy()
        drop_thinking = True
        if tools is not None and len(tools) > 0:
            messages.insert(0, {"role": "system"})
            messages[0]["tools"] = tools
            drop_thinking = False
        encode_config = dict(thinking_mode=thinking_mode, drop_thinking=drop_thinking)
        prompt_str = encode_messages(messages, **encode_config)  # type: ignore
        return prompt_str

    @property
    def all_special_tokens(self) -> list[str]:
        return self.tokenizer.all_special_tokens

    @property
    def all_special_ids(self) -> list[int]:
        return self.tokenizer.all_special_ids

    @property
    def bos_token_id(self) -> int:
        return self.tokenizer.bos_token_id

    @property
    def eos_token_id(self) -> int:
        return self.tokenizer.eos_token_id

    @property
    def pad_token_id(self) -> int:
        return self.tokenizer.pad_token_id

    @property
    def is_fast(self) -> bool:
        return self.tokenizer.is_fast

    @property
    def vocab_size(self) -> int:
        return self.tokenizer.vocab_size

    @property
    def max_token_id(self) -> int:
        return self.tokenizer.max_token_id

    @property
    def truncation_side(self) -> str:
        return self.tokenizer.truncation_side

    def __hash__(self) -> int:
        return hash(id(self))

    def __len__(self) -> int:
        # </think> is an added token in DeepseekV32 tokenizer
        return self.vocab_size + len(self.get_added_vocab())

    def __call__(
        self,
        text: str | list[str],
        text_pair: str | None = None,
        add_special_tokens: bool = True,
        truncation: bool = False,
        max_length: int | None = None,
    ) -> "BatchEncoding":
        return self.tokenizer(
            text,
            text_pair=text_pair,
            add_special_tokens=add_special_tokens,
            truncation=truncation,
            max_length=max_length,
        )

    def get_vocab(self) -> dict[str, int]:
        return self.tokenizer.get_vocab()

    def get_added_vocab(self) -> dict[str, int]:
        return self.tokenizer.get_added_vocab()

    def encode(
        self,
        text: str,
        truncation: bool | None = None,
        max_length: int | None = None,
        add_special_tokens: bool = True,
    ) -> list[int]:
        return self.tokenizer.encode(
            text,
            truncation=truncation,
            max_length=max_length,
            add_special_tokens=add_special_tokens,
        )

    def convert_tokens_to_string(self, tokens: list[str]) -> str:
        return self.tokenizer.convert_tokens_to_string(tokens)

    def decode(self, ids: list[int] | int, skip_special_tokens: bool = False) -> str:
        return self.tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)

    def convert_ids_to_tokens(
        self,
        ids: list[int],
        skip_special_tokens: bool = False,
    ) -> list[str]:
        return self.tokenizer.convert_ids_to_tokens(
            ids, skip_special_tokens=skip_special_tokens
        )
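
A minimal end-to-end sketch of how the wrapper is typically used. The repository id below is a placeholder assumption; any checkpoint whose tokenizer is registered under "deepseek_v32" follows the same flow.

from vllm.tokenizers.deepseekv32 import DeepseekV32Tokenizer

# Hypothetical repo id, shown only for illustration.
tok = DeepseekV32Tokenizer.from_pretrained("deepseek-ai/DeepSeek-V3.2")

messages = [{"role": "user", "content": "Hello!"}]
prompt = tok.apply_chat_template(messages, thinking=True)

token_ids = tok.encode(prompt, add_special_tokens=False)
text = tok.decode(token_ids, skip_special_tokens=True)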

all_special_ids property

all_special_ids: list[int]

all_special_tokens property

all_special_tokens: list[str]

bos_token_id property

bos_token_id: int

eos_token_id property

eos_token_id: int

is_fast property

is_fast: bool

max_token_id property

max_token_id: int

name_or_path instance-attribute

name_or_path = (
    tokenizer.name_or_path
    if hasattr(tokenizer, "name_or_path")
    else ""
)

pad_token_id property

pad_token_id: int

tokenizer instance-attribute

tokenizer = tokenizer

truncation_side property

truncation_side: str

vocab_size property

vocab_size: int

__call__

__call__(
    text: str | list[str],
    text_pair: str | None = None,
    add_special_tokens: bool = True,
    truncation: bool = False,
    max_length: int | None = None,
) -> BatchEncoding
Source code in vllm/tokenizers/deepseekv32.py
def __call__(
    self,
    text: str | list[str],
    text_pair: str | None = None,
    add_special_tokens: bool = True,
    truncation: bool = False,
    max_length: int | None = None,
) -> "BatchEncoding":
    return self.tokenizer(
        text,
        text_pair=text_pair,
        add_special_tokens=add_special_tokens,
        truncation=truncation,
        max_length=max_length,
    )
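
Calling the instance directly forwards to the wrapped Hugging Face tokenizer and returns its BatchEncoding. A sketch, assuming the tok instance from the earlier example:

# Either a single string or a batch of strings is accepted; truncation is opt-in.
single = tok("Hello, world")
batch = tok(["Hello", "world"], truncation=True, max_length=8)
print(single["input_ids"])
print(batch["input_ids"])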

__hash__

__hash__() -> int
Source code in vllm/tokenizers/deepseekv32.py
def __hash__(self) -> int:
    return hash(id(self))

__init__

__init__(tokenizer: TokenizerLike)
Source code in vllm/tokenizers/deepseekv32.py
def __init__(self, tokenizer: TokenizerLike):
    self.tokenizer = tokenizer
    self.name_or_path = (
        tokenizer.name_or_path if hasattr(tokenizer, "name_or_path") else ""
    )
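
The constructor only stores an already-built tokenizer; it does no loading itself. A sketch assuming the inner object comes from transformers.AutoTokenizer (an assumption, not a requirement of the class):

from transformers import AutoTokenizer

from vllm.tokenizers.deepseekv32 import DeepseekV32Tokenizer

# Hypothetical repo id; any tokenizer exposing the delegated attributes works.
inner = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-V3.2")
wrapped = DeepseekV32Tokenizer(inner)
print(wrapped.name_or_path)  # falls back to "" when the inner tokenizer lacks name_or_path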

__len__

__len__() -> int
Source code in vllm/tokenizers/deepseekv32.py
def __len__(self) -> int:
    # </think> is an added token in DeepseekV32 tokenizer
    return self.vocab_size + len(self.get_added_vocab())
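
len() therefore counts the base vocabulary plus the added tokens (such as </think> noted in the comment above). A quick check, assuming the tok instance from the earlier sketch:

assert len(tok) == tok.vocab_size + len(tok.get_added_vocab())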

apply_chat_template

apply_chat_template(messages, tools=None, **kwargs)
Source code in vllm/tokenizers/deepseekv32.py
def apply_chat_template(self, messages, tools=None, **kwargs):
    thinking = kwargs.get("thinking", False)
    thinking_mode = "thinking"
    if not thinking:
        thinking_mode = "chat"
    messages = messages.copy()
    drop_thinking = True
    if tools is not None and len(tools) > 0:
        messages.insert(0, {"role": "system"})
        messages[0]["tools"] = tools
        drop_thinking = False
    encode_config = dict(thinking_mode=thinking_mode, drop_thinking=drop_thinking)
    prompt_str = encode_messages(messages, **encode_config)  # type: ignore
    return prompt_str
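
In other words: thinking=True selects the "thinking" mode, otherwise "chat"; when tools are supplied, a {"role": "system", "tools": [...]} message is inserted at position 0 and earlier thinking content is preserved (drop_thinking=False), otherwise it is dropped. A hedged sketch, assuming the tok instance from the earlier example; the tool schema is illustrative only:

messages = [{"role": "user", "content": "What is the weather in Paris?"}]
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
        },
    },
}]

# With tools: a system message carrying the tool definitions is prepended.
prompt_with_tools = tok.apply_chat_template(messages, tools=tools, thinking=True)

# Without tools: thinking_mode="chat" and prior thinking content is dropped.
prompt_plain = tok.apply_chat_template(messages)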

convert_ids_to_tokens

convert_ids_to_tokens(
    ids: list[int], skip_special_tokens: bool = False
) -> list[str]
Source code in vllm/tokenizers/deepseekv32.py
def convert_ids_to_tokens(
    self,
    ids: list[int],
    skip_special_tokens: bool = False,
) -> list[str]:
    return self.tokenizer.convert_ids_to_tokens(
        ids, skip_special_tokens=skip_special_tokens
    )

convert_tokens_to_string

convert_tokens_to_string(tokens: list[str]) -> str
Source code in vllm/tokenizers/deepseekv32.py
def convert_tokens_to_string(self, tokens: list[str]) -> str:
    return self.tokenizer.convert_tokens_to_string(tokens)

decode

decode(
    ids: list[int] | int, skip_special_tokens: bool = False
) -> str
Source code in vllm/tokenizers/deepseekv32.py
def decode(self, ids: list[int] | int, skip_special_tokens: bool = False) -> str:
    return self.tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)

encode

encode(
    text: str,
    truncation: bool | None = None,
    max_length: int | None = None,
    add_special_tokens: bool = True,
) -> list[int]
Source code in vllm/tokenizers/deepseekv32.py
def encode(
    self,
    text: str,
    truncation: bool | None = None,
    max_length: int | None = None,
    add_special_tokens: bool = True,
) -> list[int]:
    return self.tokenizer.encode(
        text,
        truncation=truncation,
        max_length=max_length,
        add_special_tokens=add_special_tokens,
    )
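
A round-trip sketch with encode and decode, assuming the tok instance from the earlier example:

ids = tok.encode("Hello, world", add_special_tokens=False)
print(ids)
print(tok.decode(ids, skip_special_tokens=True))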

from_pretrained classmethod

from_pretrained(
    path_or_repo_id: str | Path,
    *args,
    trust_remote_code: bool = False,
    revision: str | None = None,
    download_dir: str | None = None,
    **kwargs,
) -> TokenizerLike
Source code in vllm/tokenizers/deepseekv32.py
@classmethod
def from_pretrained(
    cls,
    path_or_repo_id: str | Path,
    *args,
    trust_remote_code: bool = False,
    revision: str | None = None,
    download_dir: str | None = None,
    **kwargs,
) -> "TokenizerLike":
    tokenizer = super().from_pretrained(
        path_or_repo_id,
        *args,
        trust_remote_code=trust_remote_code,
        revision=revision,
        download_dir=download_dir,
        **kwargs,
    )
    return DeepseekV32Tokenizer(tokenizer)

get_added_vocab

get_added_vocab() -> dict[str, int]
Source code in vllm/tokenizers/deepseekv32.py
def get_added_vocab(self) -> dict[str, int]:
    return self.tokenizer.get_added_vocab()

get_vocab

get_vocab() -> dict[str, int]
Source code in vllm/tokenizers/deepseekv32.py
def get_vocab(self) -> dict[str, int]:
    return self.tokenizer.get_vocab()
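
get_vocab returns the full token-to-id mapping, while get_added_vocab returns only the tokens added on top of the base vocabulary. A small sketch, assuming the tok instance from the earlier example:

vocab = tok.get_vocab()
added = tok.get_added_vocab()
print(len(vocab), len(added))
print(added)  # includes added tokens such as </think>, per the comment in __len__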