vLLM
=== Description ===
Install Docker normally.
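The exact steps depend on the distribution; a minimal sketch for Debian/Ubuntu using Docker's official convenience script (the script URL is the upstream default, not specific to this setup):
<syntaxhighlight lang="bash">
# Install Docker via the official convenience script (Debian/Ubuntu).
curl -fsSL https://get.docker.com | sudo sh
# Optional: allow the current user to run docker without sudo
# (takes effect after re-login).
sudo usermod -aG docker "$USER"
</syntaxhighlight>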
=== Download ===
==== Normal (ROCm) ====
<syntaxhighlight lang="bash">
docker pull rocm/vllm-dev:nightly
</syntaxhighlight>
==== gfx906 ====
<syntaxhighlight lang="bash">
docker pull nalanzeyu/vllm-gfx906
</syntaxhighlight>
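Before starting one of the run variants below, it can be worth confirming that a container actually sees the GPU; a quick check, assuming the rocm-smi tool is present in the pulled image:
<syntaxhighlight lang="bash">
# List visible AMD GPUs from inside the container.
docker run --rm --device=/dev/kfd --device=/dev/dri --group-add video \
  rocm/vllm-dev:nightly rocm-smi
</syntaxhighlight>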
=== Run ===
Variant 1:
<syntaxhighlight lang="bash">
docker run -it --rm --shm-size=8g --device=/dev/kfd --device=/dev/dri \
--group-add video -p 8086:8000 \
-v /mnt/share/models:/models \
nalanzeyu/vllm-gfx906 \
vllm serve /models/Qwen3-Coder-30B-A3B-Instruct-AWQ-4bit --served-model-name Homelab --max-model-len 30000 --enable-auto-tool-choice --tool-call-parser hermes
</syntaxhighlight>
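Once the server is up, the OpenAI-compatible API is reachable on the mapped host port (8086 in Variant 1); a simple liveness check:
<syntaxhighlight lang="bash">
# List the models the server exposes; the id should be "Homelab".
curl http://localhost:8086/v1/models
</syntaxhighlight>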
Variant 2, tested 18 December 2025:
<syntaxhighlight lang="bash">
sudo docker run -it --rm --network=host \
--group-add=video --ipc=host --cap-add=SYS_PTRACE \
--security-opt seccomp=unconfined --device /dev/kfd \
--device /dev/dri \
-v /home/hendrik/.lmstudio/models/:/app/models \
-e HF_HOME="/app/models" \
-e HF_TOKEN="<TOKEN>" \
-e NCCL_P2P_DISABLE=1 \
-e VLLM_CUSTOM_OPS=all \
-e VLLM_ROCM_USE_AITER=0 \
-e SAFETENSORS_FAST_GPU=1 \
-e PYTORCH_TUNABLEOP_ENABLED=1 \
rocm/vllm-dev:nightly
</syntaxhighlight>
For gfx1201:
<syntaxhighlight lang="bash">
sudo docker run -it --rm --network=host \
--group-add=video --ipc=host --cap-add=SYS_PTRACE \
--security-opt seccomp=unconfined --device /dev/kfd \
--device /dev/dri \
-v /home/hendrik/.lmstudio/models/:/app/models \
-e HF_HOME="/app/models" \
-e HF_TOKEN="<TOKEN>" \
-e NCCL_P2P_DISABLE=1 \
-e VLLM_CUSTOM_OPS=all \
-e VLLM_ROCM_USE_AITER=0 \
-e SAFETENSORS_FAST_GPU=1 \
-e PYTORCH_TUNABLEOP_ENABLED=1 \
kyuz0/vllm-therock-gfx1201
</syntaxhighlight>
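Both containers mount the local model directory at /app/models and point HF_HOME there, so vllm serve downloads weights into that cache on first start. Optionally, a model can be pre-fetched inside the container; a sketch assuming the huggingface-cli tool is available in the image:
<syntaxhighlight lang="bash">
# Pre-download model weights into the mounted HF cache (optional;
# vllm serve fetches the model automatically if it is missing).
huggingface-cli download Qwen/Qwen3-VL-8B-Thinking
</syntaxhighlight>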
Without tensor parallelism:
<syntaxhighlight lang="bash">
vllm serve Qwen/Qwen3-VL-8B-Thinking --served-model-name Homelab --max_model_len 4096 --enable-auto-tool-choice --tool-call-parser hermes --reasoning-parser qwen3
</syntaxhighlight>
With tensor parallelism (--tp 2):
<syntaxhighlight lang="bash">
vllm serve Qwen/Qwen3-VL-8B-Thinking --served-model-name Homelab --tp 2 --max_model_len 4096 --enable-auto-tool-choice --tool-call-parser hermes --reasoning-parser qwen3
</syntaxhighlight>
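With --network=host the server listens on vLLM's default port 8000; a sample request against the OpenAI-compatible chat endpoint, using the served model name from above:
<syntaxhighlight lang="bash">
# Send a minimal chat completion request to the running server.
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "Homelab",
    "messages": [{"role": "user", "content": "Hello, who are you?"}],
    "max_tokens": 64
  }'
</syntaxhighlight>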
Benchmark:
<syntaxhighlight lang="bash">
vllm bench serve --num-prompts 1 --dataset-name=random --input-len 512 --output-len 128 --model Qwen/Qwen3-4B-Instruct-2507-FP8
</syntaxhighlight>

=== Test ===