Commit 8cf5adf

[BugFix] Fix ascend config check
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
1 parent 973f993 · commit 8cf5adf

File tree: 5 files changed (+135, -41 lines)


docs/source/user_guide/additional_config.md

Lines changed: 7 additions & 3 deletions
@@ -28,7 +28,8 @@ The following table lists the additional configuration options available in vLLM
 | ---- | ---- | ------- | ----------- |
 | `torchair_graph_config` | dict | `{}` | The config options for torchair graph mode |
 | `ascend_scheduler_config` | dict | `{}` | The config options for ascend scheduler |
-| `expert_tensor_parallel_size` | str | `1` | Expert tensor parallel size the model to use. |
+| `expert_tensor_parallel_size` | str | `0` | Expert tensor parallel size the model to use. |
+| `refresh` | bool | `false` | Whether to refresh global ascend config content. This value is usually used by rlhf case. |

 The details of each config option are as follows:

@@ -40,6 +41,7 @@ The details of each config option are as follows:
 | `use_cached_graph` | bool | `False` | Whether to use cached graph |
 | `graph_batch_sizes` | list[int] | `[]` | The batch size for torchair graph cache |
 | `graph_batch_sizes_init` | bool | `False` | Init graph batch size dynamically if `graph_batch_sizes` is empty |
+| `enable_multistream_shared_expert`| bool | `False` | Whether to enable multistream shared expert |

 **ascend_scheduler_config**

@@ -59,12 +61,14 @@ A full example of additional configuration is as follows:
         "enabled": true,
         "use_cached_graph": true,
         "graph_batch_sizes": [1, 2, 4, 8],
-        "graph_batch_sizes_init": true
+        "graph_batch_sizes_init": false,
+        "enable_multistream_shared_expert": false
     },
     "ascend_scheduler_config": {
         "enabled": true,
         "chunked_prefill_enabled": true,
     },
-    "expert_tensor_parallel_size": 1
+    "expert_tensor_parallel_size": 1,
+    "refresh": false,
 }
 ```
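
For context, these options are consumed by vllm-ascend through vLLM's `additional_config` engine argument. Below is a minimal, hypothetical usage sketch in Python, assuming the `vllm.LLM` entry point forwards `additional_config` as the documentation above implies; the model name is only a placeholder, and torchair graph mode is left disabled since, per the updated check in `ascend_config.py` further down, it currently requires a DeepSeek model.

```python
from vllm import LLM

# Hypothetical sketch: pass the Ascend-specific options documented above via
# vLLM's additional_config. Values mirror the updated docs example, except
# torchair graph mode stays disabled (it only works with DeepSeek models).
llm = LLM(
    model="Qwen/Qwen2.5-7B-Instruct",  # placeholder model, not from this commit
    additional_config={
        "torchair_graph_config": {
            "enabled": False,
            "use_cached_graph": True,
            "graph_batch_sizes": [1, 2, 4, 8],
            "graph_batch_sizes_init": False,
            "enable_multistream_shared_expert": False,
        },
        "ascend_scheduler_config": {
            "enabled": True,
            "chunked_prefill_enabled": True,
        },
        "expert_tensor_parallel_size": 1,
        "refresh": False,  # only needed when re-initializing, e.g. in RLHF flows
    },
)
print(llm.generate(["Hello, my name is"]))
```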

tests/singlecard/test_ascend_config.py

Lines changed: 73 additions & 3 deletions
@@ -13,10 +13,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import os
+
 import pytest

 from tests.conftest import VllmRunner
-from vllm_ascend.ascend_config import clear_ascend_config, get_ascend_config
+from vllm_ascend.ascend_config import (clear_ascend_config, get_ascend_config,
+                                       init_ascend_config)


 def _clean_up_ascend_config(func):
@@ -44,23 +47,30 @@ def test_run_without_ascend_config():

 @_clean_up_ascend_config
 def test_run_with_ascend_config():
-    input_additional_config = {
+    if os.getenv("VLLM_USE_V1") == "0":
+        pytest.skip("graph only works on v1")
+
+    input_additional_config_1 = {
         "torchair_graph_config": {
             # torchair graph only works with deepseek. The e2e test should be added
             # in multicard test with deepseek models.
             "enabled": False,
             "use_cached_graph": True,
             "graph_batch_sizes": [1, 2, 4, 8],
             "graph_batch_sizes_init": False,
+            "enable_multistream_shared_expert": True,
         },
         "ascend_scheduler_config": {
             "enabled": True,
             "enable_chunked_prefill": True,
         },
         "expert_tensor_parallel_size": 1
     }
+
+    # check passed with eager mode
     with VllmRunner("facebook/opt-125m",
-                    additional_config=input_additional_config):
+                    enforce_eager=True,
+                    additional_config=input_additional_config_1):
         ascend_config = get_ascend_config()

         assert not ascend_config.torchair_graph_config.enabled
@@ -69,6 +79,7 @@ def test_run_with_ascend_config():
             1, 2, 4, 8
         ]
         assert not ascend_config.torchair_graph_config.graph_batch_sizes_init
+        assert ascend_config.torchair_graph_config.enable_multistream_shared_expert
         assert ascend_config.ascend_scheduler_config.enabled
         assert ascend_config.ascend_scheduler_config.enable_chunked_prefill
         assert ascend_config.expert_tensor_parallel_size == 1
@@ -83,6 +94,8 @@ def test_ascend_config_init_error():

 @_clean_up_ascend_config
 def test_ascend_config_load_error():
+    if os.getenv("VLLM_USE_V1") == "0":
+        pytest.skip("graph only works on v1")
     # graph_batch_sizes should be list.
     with pytest.raises(TypeError):
         input_additional_config_fake_1 = {
@@ -117,3 +130,60 @@ def test_ascend_config_load_error():
                         enforce_eager=False,
                         additional_config=input_additional_config_fake_2):
             pass
+
+    # torchair graph should not be enabled with eager mode
+    with pytest.raises(RuntimeError):
+        input_additional_config_fake_3 = {
+            "torchair_graph_config": {
+                "enabled": True,
+            },
+        }
+        with VllmRunner("facebook/opt-125m",
+                        enforce_eager=True,
+                        additional_config=input_additional_config_fake_3):
+            pass
+
+
+@_clean_up_ascend_config
+def test_check_ascend_config_v0():
+    if os.getenv("VLLM_USE_V1") == "1":
+        pytest.skip("graph only works on v1, this is the test for v0")
+    with pytest.raises(NotImplementedError):
+        input_additional_config_fake_1 = {
+            "torchair_graph_config": {
+                "enabled": True,
+            },
+        }
+        with VllmRunner("facebook/opt-125m",
+                        additional_config=input_additional_config_fake_1):
+            pass
+
+
+@_clean_up_ascend_config
+def test_ascend_config_refresh():
+    from vllm.config import get_current_vllm_config
+    vllm_config = get_current_vllm_config()
+    # set additional_config with none
+    init_ascend_config(vllm_config)
+
+    input_additional_config = {
+        "torchair_graph_config": {
+            "enabled": False,
+            "use_cached_graph": True,
+            "graph_batch_sizes": [1, 2, 4, 8],
+            "graph_batch_sizes_init": False,
+        },
+        "refresh": True,
+    }
+
+    # refresh ascend config
+    with VllmRunner("facebook/opt-125m",
+                    additional_config=input_additional_config):
+        ascend_config = get_ascend_config()
+
+        assert not ascend_config.torchair_graph_config.enabled
+        assert ascend_config.torchair_graph_config.use_cached_graph
+        assert ascend_config.torchair_graph_config.graph_batch_sizes == [
+            1, 2, 4, 8
+        ]
+        assert not ascend_config.torchair_graph_config.graph_batch_sizes_init

vllm_ascend/ascend_config.py

Lines changed: 53 additions & 33 deletions
@@ -37,7 +37,7 @@ def __init__(self, vllm_config):
             ascend_scheduler_config)

         self.expert_tensor_parallel_size = int(
-            additional_config.get("expert_tensor_parallel_size", 1))
+            additional_config.get("expert_tensor_parallel_size", 0))


 class TorchairGraphConfig:
@@ -82,8 +82,11 @@ def __init__(self, ascend_scheduler_config: dict):


 def init_ascend_config(vllm_config):
+    additional_config = vllm_config.additional_config if vllm_config.additional_config is not None else {}
+    refresh = additional_config.get("refresh",
+                                    False) if additional_config else False
     global _ASCEND_CONFIG
-    if _ASCEND_CONFIG is not None:
+    if _ASCEND_CONFIG is not None and not refresh:
         return _ASCEND_CONFIG
     _ASCEND_CONFIG = AscendConfig(vllm_config)
     return _ASCEND_CONFIG
@@ -106,35 +109,52 @@ def get_ascend_config():
 def check_ascend_config(vllm_config, enforce_eager):
     ascend_config = get_ascend_config()

-    # Both for V0 and V1 Engine, torchair_graph cannot be enabled with eager mode.
-    if ascend_config.torchair_graph_config.enabled and enforce_eager:
-        raise RuntimeError(
-            "Can't enable graph mode and eager mode at the same time. Please set `enforce_eager=False` if you attempt to enable NPU graph mode."
-        )
-
-    # torchair_graph only work with deepseek model and mla enabled.
-    if ascend_config.torchair_graph_config.enabled:
-        if envs.VLLM_MLA_DISABLE:
-            logger.warning(
-                "Torchair graph mode is still experimental and not supported for V1 without mla currently, "
-                "it has been disabled automatically.")
-            ascend_config.ascend_scheduler_config.enabled = False
-        if vllm_config.model_config:
-            model_type = vllm_config.model_config.hf_config.model_type
-            if "deepseek" not in model_type:
-                raise NotImplementedError(
-                    "Torchair graph mode only works with deepseek model.")
-
-    # for V1 Engine, aclgraph doesn't work with deepseek model and only qwen model is well tested.
-    if envs.VLLM_USE_V1 and vllm_config.model_config is not None and not enforce_eager:
-        model_type = vllm_config.model_config.hf_config.model_type
-        if "deepseek" in model_type:
-            raise NotImplementedError(
-                "ACL Graph does not support deepseek. Please "
-                "try torchair graph mode to serve deepseek models on vllm-ascend."
-                " Or set `enforce_eager=True` to use eager mode.")
-        if "qwen" not in model_type:
-            logger.warning(
-                "ACL Graph is currently experimental. Please "
-                "raise an issue on https://github.com/vllm-project/vllm-ascend/issues"
-                " if you encourage any Error")
+    # for v0 engine
+    if not envs.VLLM_USE_V1:
+        if ascend_config.torchair_graph_config.enabled:
+            raise NotImplementedError(
+                "Torchair graph mode is only supported for V1 Engine.")
+        if ascend_config.ascend_scheduler_config.enabled:
+            raise NotImplementedError(
+                "Ascend scheduler is only supported for V1 Engine.")
+    # for v1 engine
+    else:
+        # for eager mode
+        if enforce_eager:
+            # torchair_graph cannot be enabled with eager mode.
+            if ascend_config.torchair_graph_config.enabled:
+                raise RuntimeError(
+                    "Can't enable graph mode and eager mode at the same time. Please set `enforce_eager=False` if you attempt to enable NPU graph mode."
+                )
+        # for graph mode
+        else:
+            # torchair_graph case
+            if ascend_config.torchair_graph_config.enabled:
+                # torchair_graph is not supported for V1 without mla currently.
+                if envs.VLLM_MLA_DISABLE:
+                    logger.warning(
+                        "Torchair graph mode is still experimental and not supported for V1 without mla currently, "
+                        "it has been disabled automatically.")
+                    ascend_config.torchair_graph_config.enabled = False
+                # torchair_graph is supported for deepseek model only currently.
+                if vllm_config.model_config:
+                    model_type = vllm_config.model_config.hf_config.model_type
+                    if "deepseek" not in model_type:
+                        raise NotImplementedError(
+                            "Torchair graph mode only works with deepseek model."
+                        )
+            # aclgraph case
+            else:
+                # aclgraph doesn't work with deepseek model and only qwen model is well tested.
+                if vllm_config.model_config:
+                    model_type = vllm_config.model_config.hf_config.model_type
+                    if "deepseek" in model_type:
+                        raise NotImplementedError(
+                            "ACL Graph does not support deepseek. Please "
+                            "try torchair graph mode to serve deepseek models on vllm-ascend."
+                            " Or set `enforce_eager=True` to use eager mode.")
+                    if "qwen" not in model_type:
+                        logger.warning(
+                            "ACL Graph is currently experimental. Please "
+                            "raise an issue on https://github.com/vllm-project/vllm-ascend/issues"
+                            " if you encourage any Error")

vllm_ascend/platform.py

Lines changed: 1 addition & 1 deletion
@@ -133,7 +133,7 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None:

         # NOTE: When enable_expert_parallel is True, we follow vLLM convention:
         # ep_size = world_size, which means expert_tensor_parallel_size must be 1
-        if ascend_config.expert_tensor_parallel_size > 1 and not parallel_config.enable_expert_parallel:
+        if ascend_config.expert_tensor_parallel_size > 0 and not parallel_config.enable_expert_parallel:
             parallel_config.expert_tensor_parallel_size = ascend_config.expert_tensor_parallel_size

         # Calculate expert parallel size based on world size
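
Reading the two value changes together (default `0` in `ascend_config.py`, `> 0` here), the apparent intent is that the expert tensor parallel size is only propagated when the user sets it explicitly; with the old default of `1` and the `> 1` guard, an explicit value of `1` was silently ignored. A small sketch of the updated condition, written as a standalone helper purely for illustration (`should_override_etp` is not a real function in the repo):

```python
def should_override_etp(expert_tensor_parallel_size: int,
                        enable_expert_parallel: bool) -> bool:
    """Simplified restatement of the updated platform.py condition."""
    return expert_tensor_parallel_size > 0 and not enable_expert_parallel


# Unset default (now 0): nothing is overridden.
assert should_override_etp(0, False) is False
# An explicit value of 1 now takes effect (the old "> 1" check ignored it).
assert should_override_etp(1, False) is True
# With expert parallelism enabled, vLLM's ep_size = world_size convention wins.
assert should_override_etp(8, True) is False
```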

vllm_ascend/worker/model_runner_v1.py

Lines changed: 1 addition & 1 deletion
@@ -323,7 +323,7 @@ def __init__(self, vllm_config: VllmConfig, device: torch.device):

         ascend_config = get_ascend_config()
         self.torchair_graph_enabled = ascend_config.torchair_graph_config.enabled and self.vllm_config.model_config.use_mla
-        self.torchair_graph_use_cached_npu_graph = ascend_config.torchair_graph_config.use_cached_graph
+        self.use_cached_npu_graph = ascend_config.torchair_graph_config.use_cached_graph
         self.torchair_graph_batch_sizes = ascend_config.torchair_graph_config.graph_batch_sizes

         if ascend_config.torchair_graph_config.graph_batch_sizes_init:
