Skip to content
This repository has been archived by the owner on May 28, 2024. It is now read-only.

Commit

Permalink
Merge pull request #102 from ray-project/shrekris/track_usage_serve_configs
Browse files Browse the repository at this point in the history

Record telemetry when RayLLM is launched using a Serve config
  • Loading branch information
shrekris-anyscale authored Dec 6, 2023
2 parents fa3a766 + d2518d3 commit 78e076e
Showing 1 changed file with 1 addition and 1 deletion.
2 changes: 1 addition & 1 deletion rayllm/backend/server/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,7 @@ def router_deployment(


def router_application(args):
    """Build the Serve router deployment from a raw args mapping.

    Records library-usage telemetry, validates *args* into a
    ``RouterArgs`` model, expands the configured model paths into
    VLLM apps, and returns the resulting router deployment.
    """
    # Telemetry: record that RayLLM was launched via a Serve config.
    ray._private.usage.usage_lib.record_library_usage("ray-llm")
    parsed = RouterArgs.parse_obj(args)
    apps = parse_args(parsed.models, llm_app_cls=VLLMApp)
    return router_deployment(apps, enable_duplicate_models=False)
Expand All @@ -125,7 +126,6 @@ def run(
models: The paths of the model yamls to deploy
"""
ray._private.usage.usage_lib.record_library_usage("aviary")
router_app = router_application({"models": vllm_base_args})

host = "0.0.0.0"
Expand Down

0 comments on commit 78e076e

Please sign in to comment.