Bump alibabacloud-credentials from 0.3.6 to 1.0.2 #101

Workflow file for this run

name: e2e_sft

on:
  # Trigger the workflow on push or pull request,
  # but only for the main and v0.* branches
  push:
  pull_request:
    branches:
      - main
      - v0.*
    paths:
      - "**/*.py"
      # Entrypoints
      - ".github/workflows/e2e_sft.yml"
      - "examples/data_preprocess/gsm8k.py"
      - "tests/e2e/sft"
      - "verl/trainer/fsdp_sft_trainer.py"
      - "verl/trainer/config/sft_trainer.yaml"
      - "!examples"
      - "!verl/trainer/main_*.py"
      # Recipes
      - "!recipe"
      # Megatron
      - "!verl/workers/**/megatron_*.py"

# Cancel jobs on the same ref if a new one is triggered
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
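  # Example: for a pull request, github.ref is "refs/pull/<number>/merge", so a
  # second push to the PR cancels the in-flight run; runs on main are never
  # cancelled because cancel-in-progress evaluates to false on refs/heads/main.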

# Declare read-only permissions for the GITHUB_TOKEN.
permissions:
  contents: read

jobs:
  e2e_sft:
    runs-on: [L20x8]
    timeout-minutes: 20 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
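      # Setting it to "0" disables the hf_transfer Rust download backend and
      # falls back to the Hugging Face Hub's default Python downloader.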
    container:
      image: whatcanyousee/verl:ngc-th2.6.0-cu124-vllm0.8.2-mcore0.11.0-te2.0
      options: --gpus all --shm-size=10g
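      # --shm-size raises /dev/shm above Docker's 64 MB default; PyTorch
      # dataloader workers and NCCL rely on shared memory for IPC.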
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
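          # fetch-depth: 0 clones the full git history instead of a shallow checkout.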
      - name: Install the current repository
        run: |
          pip3 install peft
          pip3 install -e .[test,gpu]
      - name: Prepare gsm8k dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/gsm8k.py
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm
        run: |
          ray stop --force
          bash tests/e2e/sft/run_sft.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs w/o rmpad using function rm
        run: |
          ray stop --force
          RM_PAD=False bash tests/e2e/sft/run_sft.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with sequence parallelism
        run: |
          ray stop --force
          SP_SIZE=2 bash tests/e2e/sft/run_sft.sh
      - name: Check loss difference between sequence parallel vs. default implementation
        run: |
          ray stop --force
          ENTRYPOINT="tests/e2e/sft/test_sp_loss_match.py" SP_SIZE=2 bash tests/e2e/sft/run_sft.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with sequence parallelism and liger
        run: |
          ray stop --force
          SP_SIZE=2 LIGER=True bash tests/e2e/sft/run_sft.sh
      - name: Running GSM8K E2E training tests with LoRA
        run: |
          ray stop --force
          LORA_RANK=32 bash tests/e2e/sft/run_sft.sh
      # TODO: multiturn
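
The test matrix above is driven entirely by environment variables read by
tests/e2e/sft/run_sft.sh, so it can be reproduced outside CI. A minimal sketch,
assuming an 8-GPU host (CI uses 8 L20s and the container image above) and a
checkout of the repository; run "ray stop --force" between variants, as the
workflow does, to clear any lingering Ray cluster:

    pip3 install peft && pip3 install -e .[test,gpu]
    python3 examples/data_preprocess/gsm8k.py                  # prepare GSM8K
    bash tests/e2e/sft/run_sft.sh                              # default (rmpad on)
    RM_PAD=False bash tests/e2e/sft/run_sft.sh                 # without rmpad
    SP_SIZE=2 bash tests/e2e/sft/run_sft.sh                    # sequence parallelism
    ENTRYPOINT="tests/e2e/sft/test_sp_loss_match.py" SP_SIZE=2 bash tests/e2e/sft/run_sft.sh  # SP vs. default loss check
    SP_SIZE=2 LIGER=True bash tests/e2e/sft/run_sft.sh         # SP + Liger kernels
    LORA_RANK=32 bash tests/e2e/sft/run_sft.sh                 # LoRA (rank 32)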