How to run DeepSeek-OCR 2 in vLLM

I tried to run the DeepSeek-OCR model in Colab on a T4 GPU:

from vllm import LLM, SamplingParams
from vllm.model_executor.models.deepseek_ocr import NGramPerReqLogitsProcessor
from PIL import Image


# Create the vLLM engine for DeepSeek-OCR.
#
# NOTE: loading "unsloth/DeepSeek-OCR-2" fails at engine construction because
# that checkpoint declares the architecture 'DeepseekOCR2ForCausalLM', which is
# not in vLLM's model registry (see the ValidationError below — the supported
# list contains only 'DeepseekOCR*'-less names plus the official OCR entry).
# The official "deepseek-ai/DeepSeek-OCR" checkpoint declares
# 'DeepseekOCRForCausalLM', which vLLM does support, so load that instead.
llm = LLM(
    model="deepseek-ai/DeepSeek-OCR",
    # Presumably recommended for this multimodal OCR model — confirm against
    # the DeepSeek-OCR usage notes in the vLLM docs.
    enable_prefix_caching=False,
    mm_processor_cache_gb=0,
    # Per-request n-gram logits processor shipped with the DeepSeek-OCR
    # implementation in vLLM.
    logits_processors=[NGramPerReqLogitsProcessor],
)

And here is what I see:

----> 7 llm = LLM(
      8     model="unsloth/DeepSeek-OCR-2",
      9     enable_prefix_caching=False,

4 frames
/usr/local/lib/python3.12/dist-packages/vllm/entrypoints/llm.py in __init__(self, model, runner, convert, tokenizer, tokenizer_mode, skip_tokenizer_init, trust_remote_code, allowed_local_media_path, allowed_media_domains, tensor_parallel_size, dtype, quantization, revision, tokenizer_revision, seed, gpu_memory_utilization, swap_space, cpu_offload_gb, enforce_eager, enable_return_routed_experts, disable_custom_all_reduce, hf_token, hf_overrides, mm_processor_kwargs, pooler_config, structured_outputs_config, profiler_config, attention_config, kv_cache_memory_bytes, compilation_config, logits_processors, **kwargs)
    332         log_non_default_args(engine_args)
    333 
--> 334         self.llm_engine = LLMEngine.from_engine_args(
    335             engine_args=engine_args, usage_context=UsageContext.LLM_CLASS
    336         )

/usr/local/lib/python3.12/dist-packages/vllm/v1/engine/llm_engine.py in from_engine_args(cls, engine_args, usage_context, stat_loggers, enable_multiprocessing)
    162 
    163         # Create the engine configs.
--> 164         vllm_config = engine_args.create_engine_config(usage_context)
    165         executor_class = Executor.get_class(vllm_config)
    166 

/usr/local/lib/python3.12/dist-packages/vllm/engine/arg_utils.py in create_engine_config(self, usage_context, headless)
   1372             )
   1373 
-> 1374         model_config = self.create_model_config()
   1375         self.model = model_config.model
   1376         self.model_weights = model_config.model_weights

/usr/local/lib/python3.12/dist-packages/vllm/engine/arg_utils.py in create_model_config(self)
   1226             )
   1227 
-> 1228         return ModelConfig(
   1229             model=self.model,
   1230             model_weights=self.model_weights,

/usr/local/lib/python3.12/dist-packages/pydantic/_internal/_dataclasses.py in __init__(__dataclass_self__, *args, **kwargs)
    119         __tracebackhide__ = True
    120         s = __dataclass_self__
--> 121         s.__pydantic_validator__.validate_python(ArgsKwargs(args, kwargs), self_instance=s)
    122 
    123     __init__.__qualname__ = f'{cls.__qualname__}.__init__'

ValidationError: 1 validation error for ModelConfig
  Value error, Model architectures ['DeepseekOCR2ForCausalLM'] are not supported for now. Supported architectures: dict_keys(['AfmoeForCausalLM', 'ApertusForCausalLM', 'AquilaModel', 'AquilaForCausalLM', 'ArceeForCausalLM', 'ArcticForCausalLM', 'BaiChuanForCausalLM', 'BaichuanForCausalLM', 'BailingMoeForCausalLM', 'BailingMoeV2ForCausalLM', 'BambaForCausalLM', 'BloomForCausalLM', 'ChatGLMModel', 'ChatGLMForConditionalGeneration', 'CohereForCausalLM', 'Cohere2ForCausalLM', 'CwmForCausalLM', 'DbrxForCausalLM', 'DeciLMForCausalLM', 'DeepseekForCausalLM', 'DeepseekV2ForCausalLM', 'DeepseekV3ForCausalLM', 'DeepseekV32ForCausalLM', 'Dots1ForCausalLM', 'Ernie4_5ForCausalLM', 'Ernie4_5_MoeForCausalLM', 'ExaoneForCausalLM', 'Exaone4ForCausalLM', 'ExaoneMoEForCausalLM', 'Fairseq2LlamaForCausalLM', 'FalconForCausalLM', 'FalconMambaForCausalLM', 'FalconH1ForCausalLM', 'FlexOlmoForCausalLM', 'GemmaForCausalLM', 'Gemma2ForCausalLM', 'Gemma3ForCausalLM', 'Gemma3nForCausalLM', 'Qwen3NextForCausalLM', 'GlmForCausalLM', 'Glm4ForCausalLM', 'Glm4MoeForCausalLM', 'Glm4MoeLiteForCausalLM', 'GptOssForCausalLM', 'GPT2LMHeadModel', 'GPTBigCodeForCausalLM', 'GPTJForCausalLM', 'GPTNeoXForCausalLM', 'GraniteForCausalLM', 'GraniteMoeForCausalLM', 'GraniteMoeHybridForCausalLM', 'GraniteMoeSharedForCausalLM', 'GritLM', 'Grok1ModelForCausalLM', 'Grok1ForCausalLM', 'HunYuanMoEV1ForCausalLM', 'HunYuanDenseV1ForCausalLM', 'HCXVisionForCausalLM', 'InternLMForCausalLM', 'InternLM2ForCausalLM', 'InternLM2VEFor...
    For further information visit https://errors.pydantic.dev/2.12/v/value_error

The error occurs because the model architecture `DeepseekOCR2ForCausalLM` (declared by "unsloth/DeepSeek-OCR-2") is not currently supported by vLLM. Only `DeepseekOCRForCausalLM` (declared by "deepseek-ai/DeepSeek-OCR") is listed in vLLM's model registry and documentation. Use the official "deepseek-ai/DeepSeek-OCR" model, not the "unsloth/DeepSeek-OCR-2" variant, for compatibility with vLLM's multimodal OCR pipeline.

Would you like more detail on how to run DeepSeek-OCR with vLLM, including example code?

Sources:

Hint: Mention @RunLLM in the post for followups.