Skip to content

Commit 7f4b76a

Browse files
[rollout] fix: remove dtype cast (#5117)
### What does this PR do? - As title and comment ### Checklist Before Starting - [ ] Search for similar PRs. Paste at least one query link here: ... - [ ] Format the PR title as `[{modules}] {type}: {description}` (This will be checked by the CI) - `{modules}` include `fsdp`, `megatron`, `veomni`, `sglang`, `vllm`, `rollout`, `trainer`, `ci`, `training_utils`, `recipe`, `hardware`, `deployment`, `ray`, `worker`, `single_controller`, `misc`, `perf`, `model`, `algo`, `env`, `tool`, `ckpt`, `doc`, `data`, `cfg`, `reward` - If this PR involves multiple modules, separate them with `,` like `[megatron, fsdp, doc]` - `{type}` is in `feat`, `fix`, `refactor`, `chore`, `test` - If this PR breaks any API (CLI arguments, config, function signature, etc.), add `[BREAKING]` to the beginning of the title. - Example: `[BREAKING][fsdp, megatron] feat: dynamic batching` ### Test > For changes that can not be tested by CI (e.g., algorithm implementation, new model support), validate by experiment(s) and show results like training curve plots, evaluation results, etc. ### API and Usage Example > Demonstrate how the API changes if any, and provide usage example(s) if possible. ```python # Add code snippet or script demonstrating how to use this ``` ### Design & Code Changes > Demonstrate the high-level design if this PR is complex, and list the specific changes. ### Checklist Before Submitting > [!IMPORTANT] > Please check all the following items before requesting a review, otherwise the reviewer might deprioritize this PR for review. - [ ] Read the [Contribute Guide](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md). - [ ] Apply [pre-commit checks](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md#code-linting-and-formatting): `pre-commit install && pre-commit run --all-files --show-diff-on-failure --color=always` - [ ] Add / Update [the documentation](https://github.com/volcengine/verl/tree/main/docs). 
- [ ] Add unit or end-to-end test(s) to [the CI workflow](https://github.com/volcengine/verl/tree/main/.github/workflows) to cover all the code. If not feasible, explain why: ... - [ ] Once your PR is ready for CI, send a message in [the `ci-request` channel](https://verl-project.slack.com/archives/C091TCESWB1) in [the `verl` Slack workspace](https://join.slack.com/t/verl-project/shared_invite/zt-3855yhg8g-CTkqXu~hKojPCmo7k_yXTQ). (If not accessible, please try [the Feishu group (飞书群)](https://applink.larkoffice.com/client/chat/chatter/add_by_link?link_token=772jd4f1-cd91-441e-a820-498c6614126a).) - [ ] If your PR is related to the `recipe` submodule, please also update the reference to the submodule commit via `git submodule update --remote` or `cd recipe && git pull origin main`. --------- Co-authored-by: wuxibin <wuxibin@bytedance.com>
1 parent de9880d commit 7f4b76a

File tree

5 files changed

+13
-13
lines changed

5 files changed

+13
-13
lines changed

verl/checkpoint_engine/hccl_checkpoint_engine.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -246,9 +246,6 @@ async def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None,
246246
bucket_meta: dict[str, TensorMeta] = {}
247247
offset = 0
248248
for name, weight in weights:
249-
# model parameters are in fp32 full precision
250-
weight = weight.to(self.rollout_dtype)
251-
252249
# fill the tensor bucket
253250
if offset + weight.nbytes > self.bucket_size:
254251
torch.npu.synchronize()

verl/checkpoint_engine/nccl_checkpoint_engine.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -240,9 +240,6 @@ async def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None,
240240
bucket_meta: dict[str, TensorMeta] = {}
241241
offset = 0
242242
for name, weight in weights:
243-
# model parameters are in fp32 full precision
244-
weight = weight.to(self.rollout_dtype)
245-
246243
# fill the tensor bucket
247244
if offset + weight.nbytes > self.bucket_size:
248245
torch.cuda.synchronize()

verl/checkpoint_engine/nixl_checkpoint_engine.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -385,9 +385,6 @@ async def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None,
385385
bucket_meta: dict[str, TensorMeta] = {}
386386
offset = 0
387387
for name, weight in weights:
388-
# model parameters are in fp32 full precision
389-
weight = weight.to(self.rollout_dtype)
390-
391388
# fill the tensor bucket
392389
if offset + weight.nbytes > self.bucket_size:
393390
torch.cuda.synchronize()

verl/workers/engine/fsdp/transformer_impl.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -664,8 +664,14 @@ def get_per_tensor_param(self, layered_summon=False, base_sync_done=False):
664664
per_tensor_param = params
665665
else:
666666
device = get_device_id() # used when fsdp2 set cpu_offload_policy
667+
# TODO: cast fp32 to bf16 to reduce weight sync overhead, need more fine-grained control, e.g. MoE gate
667668
per_tensor_param = (
668-
(name, param.to(device, non_blocking=True).full_tensor() if isinstance(param, DTensor) else param)
669+
(
670+
name,
671+
param.to(device, non_blocking=True).full_tensor().to(torch.bfloat16, non_blocking=True)
672+
if isinstance(param, DTensor)
673+
else param,
674+
)
669675
for name, param in params.items()
670676
)
671677
return per_tensor_param, peft_config

verl/workers/rollout/vllm_rollout/vllm_rollout.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,6 @@
4242
from verl import DataProto
4343
from verl.third_party.vllm import VLLM_SLEEP_LEVEL, get_version
4444
from verl.utils.device import get_device_id, get_device_name, get_torch_device, is_support_ipc
45-
from verl.utils.torch_dtypes import PrecisionType
4645
from verl.workers.config import HFModelConfig, RolloutConfig
4746
from verl.workers.rollout.base import BaseRollout
4847
from verl.workers.rollout.utils import ensure_async_iterator
@@ -190,10 +189,14 @@ async def update_weights(self, weights: Generator[tuple[str, torch.Tensor], None
190189
# send bucket weights
191190
offset = 0
192191
bucket_meta: dict[str, TensorMetadata] = {}
193-
dtype = PrecisionType.to_dtype(self.config.dtype)
192+
# dtype = PrecisionType.to_dtype(self.config.dtype)
194193
async for name, weight in ensure_async_iterator(weights):
195194
# model parameters are in fp32 full precision
196-
weight = weight.to(dtype, non_blocking=True)
195+
# (vermouth1992) we should not force cast weight here because some parameters
196+
# (such as moe gate) have to keep fp32 precision. If a weight is bf16 in the rollout side,
197+
# the rollout should automatically cast on demand. However, this would incur a higher weight
198+
# transfer volume.
199+
# weight = weight.to(dtype, non_blocking=True)
197200

198201
# fill the tensor bucket
199202
if offset + weight.nbytes > bucket_size:

0 commit comments

Comments
 (0)