
Commit 081ffbf

Add gdpo reward and reward_score
1 parent 09fa81d commit 081ffbf

File tree

2 files changed: +424 -0 lines changed
  • verl
    • experimental/reward_loop/reward_manager
    • utils/reward_score

Lines changed: 94 additions & 0 deletions
@@ -0,0 +1,94 @@
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect

from verl import DataProto
from verl.experimental.reward_loop.reward_manager import register
from verl.experimental.reward_loop.reward_manager.base import RewardManagerBase
from verl.utils.reward_score import default_compute_score


@register("gdpo")
class GDPOdRewardManager(RewardManagerBase):
    """GDPO Reward Manager."""

    def __init__(self, config, tokenizer, compute_score, reward_router_address=None, reward_model_tokenizer=None):
        super().__init__(config, tokenizer, compute_score)
        self.compute_score = compute_score or default_compute_score
        self.is_async_reward_score = inspect.iscoroutinefunction(self.compute_score)

        # GDPO Reward Config
        self.max_resp_len = config.reward.get("reward_kwargs", {}).get("max_resp_len", None)

        self.reward_router_address = reward_router_address
        self.reward_model_tokenizer = reward_model_tokenizer

    async def run_single(self, data: DataProto) -> dict:
        assert len(data) == 1, "Only support single data item"
        data_item = data[0]
        response_ids = data_item.batch["responses"]
        response_length = response_ids.shape[-1]
        valid_response_length = data_item.batch["attention_mask"][-response_length:].sum()
        valid_response_ids = response_ids[:valid_response_length]

        data_source = data_item.non_tensor_batch["data_source"]
        ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"]
        extra_info = data_item.non_tensor_batch.get("extra_info", {})

        response_str = await self.loop.run_in_executor(
            None, lambda: self.tokenizer.decode(valid_response_ids, skip_special_tokens=True)
        )
        extra_reward_kwargs = (
            {
                "reward_router_address": self.reward_router_address,
                "reward_model_tokenizer": self.reward_model_tokenizer,
            }
            if self.reward_router_address is not None
            else {}
        )
        if self.is_async_reward_score:
            result = await self.compute_score(
                data_source=data_source,
                solution_str=response_str,
                ground_truth=ground_truth,
                extra_info=extra_info,
                **extra_reward_kwargs,
            )
        else:
            result = await self.loop.run_in_executor(
                None,
                lambda: self.compute_score(
                    data_source=data_source,
                    solution_str=response_str,
                    ground_truth=ground_truth,
                    extra_info=extra_info,
                    **extra_reward_kwargs,
                ),
            )

        reward_extra_info = {}

        score: float
        if isinstance(result, dict):
            score = result["score"]
            for key, value in result.items():
                reward_extra_info[key] = value
        else:
            score = result
            reward_extra_info["acc"] = score

        reward = score

        return {"reward_score": reward, "reward_extra_info": reward_extra_info}
