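# Dockerfile for building vLLM on Intel XPU (Intel GPU) devices.
# Illustrative build command (path and tag are assumptions; adjust to your checkout):
#   docker build -f docker/Dockerfile.xpu --target vllm-openai -t vllm-xpu-env .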
FROM intel/deep-learning-essentials:2025.1.3-0-devel-ubuntu24.04 AS vllm-base
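# Drop the Intel graphics apt source preconfigured in the base image so the
# apt-get update below does not pull from it (assumption about intent).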
RUN rm /etc/apt/sources.list.d/intel-graphics.list
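# Ubuntu 24.04 ships Python 3.12, so install Python 3.10 (plus pip) from the
# deadsnakes PPA, along with the system packages listed below.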
RUN apt clean && apt-get update -y && \
    apt-get install -y software-properties-common && \
    add-apt-repository ppa:deadsnakes/ppa && \
    apt-get install -y python3.10 python3.10-distutils && \
    curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 && \
    apt-get install -y --no-install-recommends --fix-missing \
        curl \
        ffmpeg \
        git \
        libsndfile1 \
        libsm6 \
        libxext6 \
        libgl1 \
        lsb-release \
        numactl \
        python3.10-dev \
        wget
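# Point python3/python at the freshly installed Python 3.10.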
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.10 1
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.10 1
WORKDIR /workspace/vllm
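# Copy only the requirement files first so the dependency layer is cached
# across source-only changes.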
COPY requirements/xpu.txt /workspace/vllm/requirements/xpu.txt
COPY requirements/common.txt /workspace/vllm/requirements/common.txt
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install --no-cache-dir \
    -r requirements/xpu.txt
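# Make sure libraries installed under /usr/local/lib are found at runtime.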
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/"
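# Copy the full source tree; optionally sanity-check the git checkout
# (enable with --build-arg GIT_REPO_CHECK=1).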
COPY . .
ARG GIT_REPO_CHECK=0
RUN --mount=type=bind,source=.git,target=.git \
    if [ "$GIT_REPO_CHECK" != 0 ]; then bash tools/check_repo.sh; fi
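# Target the Intel XPU backend and use spawn (not fork) for worker processes.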
ENV VLLM_TARGET_DEVICE=xpu
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
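# Build and install vLLM itself with the XPU target selected above.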
RUN --mount=type=cache,target=/root/.cache/pip \
    --mount=type=bind,source=.git,target=.git \
    python3 setup.py install
CMD ["/bin/bash"]
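# OpenAI-compatible API server image, layered on top of the base build stage.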
FROM vllm-base AS vllm-openai
# install additional dependencies for openai api server
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install accelerate hf_transfer pytest pytest_asyncio lm_eval[api] modelscope
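# Tag usage reporting as a production docker image and enable Triton XPU profiling.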
ENV VLLM_USAGE_SOURCE=production-docker-image \
    TRITON_XPU_PROFILE=1
# install development dependencies (for testing)
RUN python3 -m pip install -e tests/vllm_test_utils
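# Arguments given to `docker run` are appended to this entrypoint
# (e.g. --model <model id>).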
ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"]