### Checklist Before Starting

- [x] Search for similar PR(s).

### What does this PR do?

Always use verl instead of veRL in the codebase, and add a CI check for this.

### Specific Changes

Mostly doc changes.

### Test

Added to sanity tests.

### Additional Info.

- **Issue Number**: Fixes issue # or discussion # if any.
- **Training**: [Note which backend this PR will affect: FSDP, Megatron, both, or none]
- **Inference**: [Note which backend this PR will affect: vLLM, SGLang, both, or none]

### Checklist Before Submitting

- [x] Read the [Contribute Guide](https://github.com/volcengine/verl?tab=readme-ov-file#contribution-guide).
- [x] Apply [pre-commit checks](https://github.com/volcengine/verl?tab=readme-ov-file#code-linting-and-formatting).
- [x] Add `[BREAKING]` to the PR title if it breaks any API.
- [x] Update the documentation about your changes in the [docs](https://github.com/volcengine/verl/tree/main/docs).
- [x] Add CI test(s) if necessary.

cc @ShaohonChen
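For reference, such a naming check can be as simple as a case-sensitive scan of the repository for the banned spelling. The snippet below is a minimal sketch of that idea, not the actual test added in this PR; the script path, exclusions, and exact implementation in the sanity tests may differ.

```bash
#!/usr/bin/env bash
# Minimal sketch of a naming check: fail if the spelling "veRL" appears in any
# tracked file. The excluded path is illustrative and only keeps this script
# from matching itself.
set -euo pipefail

if git grep -n "veRL" -- ':!tests/sanity/check_naming.sh'; then
    echo "Found banned spelling 'veRL'; please use 'verl' instead." >&2
    exit 1
fi
echo "Naming check passed: no occurrences of 'veRL'."
```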
# docker buildx build --platform linux/x86_64 -t "verlai/verl:$TAG" -f docker/$FILE .
# the image on docker.io is an alias of the one in the veturbo registry
# FROM vemlp-cn-beijing.cr.volces.com/veturbo/pytorch:2.4-cu124
FROM docker.io/haibinlin/verl:v0.0.5-th2.4.0-cu124-base
# only configure the pip index to https://pypi.tuna.tsinghua.edu.cn/simple if needed
# unset for now
RUN pip3 config unset global.index-url
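# to enable the mirror instead, one could run the following (example only; not part of this image):
# RUN pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple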
# transformers 4.47.0 contains the following bug:
# AttributeError: 'Gemma2Attention' object has no attribute '_flash_attn_uses_top_left_mask'
RUN pip3 install --no-cache-dir \
    torch==2.4.0 \
    accelerate \
    codetiming \
    dill \
    hydra-core \
    numpy \
    pybind11 \
    tensordict \
    "transformers <= 4.46.0"
RUN pip3 install --no-cache-dir flash-attn==2.7.0.post2 --no-build-isolation
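# note: flash-attn is re-pinned to 2.5.3 further down for TransformerEngine compatibility,
# so 2.5.3 is the version that ends up in the final image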
# vllm depends on ray
RUN pip3 install --no-cache-dir vllm==0.6.3 ray==2.10
# install apex
RUN MAX_JOBS=4 pip3 install -v --disable-pip-version-check --no-cache-dir --no-build-isolation \
    --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" \
    git+https://github.com/NVIDIA/apex
# install Transformer Engine
# - flash-attn pinned to 2.5.3 by TransformerEngine, switch to eric-haibin-lin/TransformerEngine.git@v1.7.0 to relax version req
# - install with: MAX_JOBS=1 NINJA_FLAGS="-j1" TE_BUILD_WITH_NINJA=0 to avoid OOM
# - cudnn is required by TransformerEngine
# RUN CUDNN_PATH=/opt/conda/lib/python3.11/site-packages/nvidia/cudnn \
#     pip3 install git+https://github.com/eric-haibin-lin/TransformerEngine.git@v1.7.0
RUN MAX_JOBS=1 NINJA_FLAGS="-j1" pip3 install flash-attn==2.5.3 --no-cache-dir --no-build-isolation
RUN MAX_JOBS=1 NINJA_FLAGS="-j1" pip3 install git+https://github.com/NVIDIA/TransformerEngine.git@v1.7