Skip to content

Commit a2f57af

Browse files
committed
dgstreams examples refactoring (#2)
1 parent 841e2b4 commit a2f57af

File tree

1 file changed

+66
-135
lines changed

examples/dgstreams/multi_camera_multi_model_detection.ipynb

+66-135
Original file line numberDiff line numberDiff line change
@@ -28,26 +28,9 @@
2828
},
2929
{
3030
"cell_type": "code",
31-
"execution_count": 1,
31+
"execution_count": null,
3232
"metadata": {},
33-
"outputs": [
34-
{
35-
"name": "stdout",
36-
"output_type": "stream",
37-
"text": [
38-
"Name: degirum_tools\n",
39-
"Version: 0.10.1\n",
40-
"Summary: Tools for PySDK\n",
41-
"Home-page: \n",
42-
"Author: DeGirum\n",
43-
"Author-email: \n",
44-
"License: \n",
45-
"Location: c:\\users\\shashichilappagari\\anaconda3\\envs\\supervision\\lib\\site-packages\n",
46-
"Requires: degirum, ipython, numpy, opencv-python, pafy, pillow, psutil, pycocotools, python-dotenv, pyyaml, requests, scipy, youtube-dl\n",
47-
"Required-by: \n"
48-
]
49-
}
50-
],
33+
"outputs": [],
5134
"source": [
5235
"# make sure degirum-tools package is installed\n",
5336
"!pip show degirum-tools || pip install degirum-tools"
@@ -63,7 +46,7 @@
6346
},
6447
{
6548
"cell_type": "code",
66-
"execution_count": 2,
49+
"execution_count": 1,
6750
"metadata": {},
6851
"outputs": [],
6952
"source": [
@@ -81,167 +64,115 @@
8164
"# '': ai server serving models from local folder\n",
8265
"# path to json file: single model zoo in case of @local inference\n",
8366
"# model_names: list of AI models to use for inferences (NOTE: they should have the same input size)\n",
84-
"# allow_frame_drop:\n",
85-
"# when True, we drop video frames in case when AI performance is not enough to work in real time\n",
86-
"# when False, we buffer video frames to keep up with AI performance\n",
8767
"hw_location = \"@cloud\"\n",
8868
"video_sources = [\n",
69+
" \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/WalkingPeople.mp4\",\n",
8970
" \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/Traffic.mp4\",\n",
90-
" \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/TrafficHD.mp4\",\n",
9171
"]\n",
9272
"model_zoo_url = \"degirum/public\"\n",
9373
"model_names = [\n",
9474
" \"yolo_v5s_hand_det--512x512_quant_n2x_orca1_1\",\n",
9575
" \"yolo_v5s_face_det--512x512_quant_n2x_orca1_1\",\n",
9676
" \"yolo_v5n_car_det--512x512_quant_n2x_orca1_1\",\n",
9777
" \"yolo_v5s_person_det--512x512_quant_n2x_orca1_1\",\n",
98-
"]\n",
99-
"allow_frame_drop = False"
78+
"]"
10079
]
10180
},
10281
{
10382
"cell_type": "markdown",
10483
"metadata": {},
10584
"source": [
106-
"#### Specify where do you want to run your inferences"
85+
"#### The rest of the cells below should run without any modifications"
10786
]
10887
},
10988
{
11089
"cell_type": "code",
111-
"execution_count": 3,
90+
"execution_count": null,
11291
"metadata": {},
113-
"outputs": [
114-
{
115-
"ename": "",
116-
"evalue": "",
117-
"output_type": "error",
118-
"traceback": [
119-
"\u001b[1;31mThe Kernel crashed while executing code in the current cell or a previous cell. \n",
120-
"\u001b[1;31mPlease review the code in the cell(s) to identify a possible cause of the failure. \n",
121-
"\u001b[1;31mClick <a href='https://aka.ms/vscodeJupyterKernelCrash'>here</a> for more info. \n",
122-
"\u001b[1;31mView Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details."
123-
]
124-
}
125-
],
126-
"source": [
127-
"import degirum as dg, degirum_tools"
128-
]
129-
},
130-
{
131-
"cell_type": "code",
132-
"execution_count": 4,
133-
"metadata": {},
134-
"outputs": [
135-
{
136-
"name": "stdout",
137-
"output_type": "stream",
138-
"text": [
139-
"Successfully opened video stream 'https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/Traffic.mp4'Successfully opened video stream 'https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/TrafficHD.mp4'\n",
140-
"\n"
141-
]
142-
},
143-
{
144-
"name": "stderr",
145-
"output_type": "stream",
146-
"text": [
147-
"packet queue is empty, aborting\n",
148-
"packet queue is empty, aborting\n",
149-
"packet queue is empty, aborting\n",
150-
"packet queue is empty, aborting\n"
151-
]
152-
}
153-
],
92+
"outputs": [],
15493
"source": [
94+
"import degirum as dg, degirum_tools\n",
15595
"from degirum_tools import streams as dgstreams\n",
15696
"\n",
157-
"c = dgstreams.Composition()\n",
158-
"\n",
159-
"batch_size = len(\n",
160-
" video_sources\n",
161-
") # set AI server batch size equal to the # of video sources for lowest latency\n",
162-
"\n",
16397
"# create PySDK AI model objects\n",
164-
"models = []\n",
165-
"for mi, model_name in enumerate(model_names):\n",
166-
" model = dg.load_model(\n",
98+
"models = [\n",
99+
" dg.load_model(\n",
167100
" model_name=model_name,\n",
168101
" inference_host_address=hw_location,\n",
169102
" zoo_url=model_zoo_url,\n",
170103
" token=degirum_tools.get_token(),\n",
104+
" overlay_line_width=2,\n",
171105
" )\n",
172-
" model.measure_time = True\n",
173-
" model.eager_batch_size = batch_size\n",
174-
" model.frame_queue_depth = batch_size\n",
175-
" models.append(model)\n",
106+
" for model_name in model_names\n",
107+
"]\n",
176108
"\n",
177109
"# check that all models have the same input configuration\n",
178-
"models_have_same_input = True\n",
179-
"for model in models[1:]:\n",
180-
" if (\n",
181-
" type(model._preprocessor) != type(models[0]._preprocessor)\n",
182-
" or model.model_info.InputH != models[0].model_info.InputH\n",
183-
" or model.model_info.InputW != models[0].model_info.InputW\n",
184-
" ):\n",
185-
" models_have_same_input = False\n",
186-
"\n",
187-
"resizers = []\n",
188-
"\n",
189-
"# create video sources and image resizers\n",
190-
"# (we use separate resizers to do resize only once per source when possible, to improve performance),\n",
191-
"# connect each resizer to corresponding video source\n",
192-
"for src in video_sources:\n",
193-
" source = c.add(dgstreams.VideoSourceGizmo(src))\n",
194-
" if models_have_same_input:\n",
195-
" resizer = c.add(\n",
196-
" dgstreams.AiPreprocessGizmo(\n",
197-
" models[0], stream_depth=2, allow_drop=allow_frame_drop\n",
198-
" )\n",
199-
" )\n",
200-
" else:\n",
201-
" resizer = c.add(dgstreams.FanoutGizmo(allow_drop=allow_frame_drop))\n",
110+
"assert all(\n",
111+
" type(model._preprocessor) == type(models[0]._preprocessor)\n",
112+
" and model.model_info.InputH == models[0].model_info.InputH\n",
113+
" and model.model_info.InputW == models[0].model_info.InputW\n",
114+
" for model in models[1:]\n",
115+
")\n",
202116
"\n",
203-
" resizer.connect_to(source) # connect resizer to video source\n",
204-
" resizers.append(resizer)\n",
117+
"# create video source gizmos;\n",
118+
"# stop_composition_on_end=True to stop whole composition when one (shorter) video source ends\n",
119+
"sources = [\n",
120+
" dgstreams.VideoSourceGizmo(src, stop_composition_on_end=True)\n",
121+
" for src in video_sources\n",
122+
"]\n",
205123
"\n",
206-
"# create result combiner\n",
207-
"combiner = c.add(dgstreams.AiResultCombiningGizmo(len(models)))\n",
124+
"# create image resizer gizmos, one per video source\n",
125+
"# (we use separate resizers to do resize only once per source to improve performance)\n",
126+
"resizers = [dgstreams.AiPreprocessGizmo(models[0]) for _ in video_sources]\n",
208127
"\n",
209-
"# create multi-input detector gizmos,\n",
210-
"# connect each detector gizmo to every resizer gizmo,\n",
211-
"# connect result combiner gizmo to each detector gizmo\n",
212-
"for mi, model in enumerate(models):\n",
213-
" # create AI gizmo (aka detector) from the model\n",
214-
" detector = c.add(\n",
215-
" dgstreams.AiSimpleGizmo(model, stream_depth=2, inp_cnt=len(video_sources))\n",
216-
" )\n",
217-
"\n",
218-
" # connect detector gizmo to each resizer gizmo\n",
219-
" for fi, resizer in enumerate(resizers):\n",
220-
" detector.connect_to(resizer, fi)\n",
128+
"# create multi-input detector gizmos, one per model\n",
129+
"detectors = [\n",
130+
" dgstreams.AiSimpleGizmo(model, inp_cnt=len(video_sources)) for model in models\n",
131+
"]\n",
221132
"\n",
222-
" # connect result combiner gizmo to detector gizmo\n",
223-
" combiner.connect_to(detector, mi)\n",
133+
"# create result combiner gizmo to combine results from all detectors into single result\n",
134+
"combiner = dgstreams.AiResultCombiningGizmo(len(models))\n",
224135
"\n",
225136
"# create multi-window video multiplexing display gizmo\n",
226-
"# and connect it to combiner gizmo\n",
227137
"win_captions = [f\"Stream #{i}: {str(src)}\" for i, src in enumerate(video_sources)]\n",
228-
"display = c.add(\n",
229-
" dgstreams.VideoDisplayGizmo(\n",
230-
" win_captions, show_ai_overlay=True, show_fps=True, multiplex=True\n",
231-
" )\n",
138+
"display = dgstreams.VideoDisplayGizmo(\n",
139+
" win_captions, show_ai_overlay=True, show_fps=True, multiplex=True\n",
232140
")\n",
233-
"display.connect_to(combiner)\n",
234141
"\n",
235-
"# start composition\n",
236-
"c.start()"
142+
"# connect all gizmos in the pipeline\n",
143+
"# source[i] -> resizer[i] -> detector[j] -> combiner -> display\n",
144+
"pipeline = (\n",
145+
" # each source is connected to corresponding resizer\n",
146+
" (source >> resizer for source, resizer in zip(sources, resizers)),\n",
147+
" # each resizer is connected to every detector\n",
148+
" (\n",
149+
" resizer >> detector[ri]\n",
150+
" for detector in detectors\n",
151+
" for ri, resizer in enumerate(resizers)\n",
152+
" ),\n",
153+
" # each detector is connected to result combiner\n",
154+
" (detector >> combiner[di] for di, detector in enumerate(detectors)),\n",
155+
" # result combiner is connected to display\n",
156+
" combiner >> display,\n",
157+
")\n",
158+
"\n",
159+
"# create and start composition with given pipeline\n",
160+
"dgstreams.Composition(*pipeline).start()"
237161
]
162+
},
163+
{
164+
"cell_type": "code",
165+
"execution_count": null,
166+
"metadata": {},
167+
"outputs": [],
168+
"source": []
238169
}
239170
],
240171
"metadata": {
241172
"kernelspec": {
242-
"display_name": "Python (supervision)",
173+
"display_name": "base",
243174
"language": "python",
244-
"name": "supervision"
175+
"name": "python3"
245176
},
246177
"language_info": {
247178
"codemirror_mode": {
@@ -253,7 +184,7 @@
253184
"name": "python",
254185
"nbconvert_exporter": "python",
255186
"pygments_lexer": "ipython3",
256-
"version": "3.9.18"
187+
"version": "3.9.16"
257188
},
258189
"orig_nbformat": 4
259190
},

0 commit comments

Comments (0)