Skip to content

Commit aa22b62

Browse files
authored
feat: add qwen3 support (microsoft#6528)
## Why are these changes needed?
Add support for the Ollama qwen3 model family.
1 parent cc2693b commit aa22b62

File tree

2 files changed

+18
-2
lines changed

2 files changed

+18
-2
lines changed

python/packages/autogen-ext/src/autogen_ext/models/ollama/_model_info.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -258,6 +258,13 @@
258258
"family": ModelFamily.UNKNOWN,
259259
"structured_output": True,
260260
},
261+
"qwen3": {
262+
"vision": False,
263+
"function_calling": True,
264+
"json_output": True,
265+
"family": ModelFamily.UNKNOWN,
266+
"structured_output": True,
267+
},
261268
"snowflake-arctic-embed": {
262269
"vision": False,
263270
"function_calling": False,
@@ -351,6 +358,15 @@
351358
"qwen2.5-coder:0.5b": 32768,
352359
"qwen2.5-coder:1.5b": 32768,
353360
"qwen2.5-coder:3b": 32768,
361+
"qwen3": 40960,
362+
"qwen3:0.6b": 40960,
363+
"qwen3:1.7b": 40960,
364+
"qwen3:4b": 40960,
365+
"qwen3:8b": 40960,
366+
"qwen3:14b": 40960,
367+
"qwen3:30b": 40960,
368+
"qwen3:32b": 40960,
369+
"qwen3:235b": 40960,
354370
"snowflake-arctic-embed": 512,
355371
"starcoder2": 16384,
356372
"tinyllama": 2048,

python/packages/autogen-ext/tests/models/test_ollama_chat_completion_client.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -570,7 +570,7 @@ class ResponseType(BaseModel):
570570

571571

572572
@pytest.mark.asyncio
573-
@pytest.mark.parametrize("model", ["qwen2.5:0.5b", "llama3.2:1b"])
573+
@pytest.mark.parametrize("model", ["qwen2.5:0.5b", "llama3.2:1b", "qwen3:0.6b"])
574574
async def test_ollama_create_tools(model: str, ollama_client: OllamaChatCompletionClient) -> None:
575575
def add(x: int, y: int) -> str:
576576
return str(x + y)
@@ -653,7 +653,7 @@ def add(x: int, y: int) -> str:
653653

654654

655655
@pytest.mark.asyncio
656-
@pytest.mark.parametrize("model", ["qwen2.5:0.5b", "llama3.2:1b"])
656+
@pytest.mark.parametrize("model", ["qwen2.5:0.5b", "llama3.2:1b", "qwen3:0.6b"])
657657
async def test_ollama_create_stream_tools(model: str, ollama_client: OllamaChatCompletionClient) -> None:
658658
def add(x: int, y: int) -> str:
659659
return str(x + y)

0 commit comments

Comments (0)