 24 |  24 |   },
 25 |  25 |   {
 26 |  26 |   "cell_type": "code",
 27 |     | - "execution_count": null,
    |  27 | + "execution_count": 13,
 28 |  28 |   "id": "0b76d324",
 29 |  29 |   "metadata": {},
 30 |  30 |   "outputs": [],

 34 |  34 |   "# URL of RTSP stream\n",
 35 |  35 |   "# URL of YouTube Video\n",
 36 |  36 |   "# path to video file (mp4 etc)\n",
 37 |     | - "video_source = (\n",
 38 |     | - " \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/example_video.mp4\"\n",
 39 |     | - ")"
    |  37 | + "video_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/example_video.mp4\""
 40 |  38 |   ]
 41 |  39 |   },
 42 |  40 |   {

 61 |  59 |   "\n",
 62 |  60 |   "# create gizmos:\n",
 63 |  61 |   "source = VideoSourceGizmo(video_source) # video source gizmo\n",
 64 |     | - "display = VideoDisplayGizmo(\n",
 65 |     | - " \"press `x` or `q` to stop\", allow_drop=False\n",
 66 |     | - ") # video display gizmo\n",
    |  62 | + "display = VideoDisplayGizmo(\"`q` to exit\", allow_drop=False) # video display gizmo\n",
 67 |  63 |   "\n",
 68 |  64 |   "# Create pipeline: connect display input to source output\n",
 69 |  65 |   "display.connect_to(source)\n",

 95 |  91 |   "source": [
 96 |  92 |   "from degirum_tools.streams import *\n",
 97 |  93 |   "\n",
 98 |     | - "c = Composition()\n",
 99 |     | - "\n",
100 |  94 |   "# Create gizmos and pipeline as a single-liner:\n",
101 |     | - "# we use __call__() operator of Composition class instead of add() method\n",
    |  95 | + "# we construct composition passing gizmo pipeline as a parameter\n",
102 |  96 |   "# and we use `>>` operator of gizmo classes instead of connect_to() method\n",
103 |     | - "c(VideoSourceGizmo(video_source)) >> c(VideoDisplayGizmo())\n",
104 |     | - "\n",
105 |     | - "c.start()"
    |  97 | + "Composition(VideoSourceGizmo(video_source) >> VideoDisplayGizmo()).start()"
106 |  98 |   ]
107 |  99 |   },
108 | 100 |   {

124 | 116 |   "source": [
125 | 117 |   "from degirum_tools.streams import *\n",
126 | 118 |   "\n",
127 |     | - "c = Composition()\n",
128 |     | - "\n",
129 | 119 |   "# create and add to composition all required gizmos\n",
130 |     | - "source = c.add(VideoSourceGizmo(video_source)) # video source gizmo\n",
131 |     | - "display = c.add(\n",
132 |     | - " VideoDisplayGizmo([\"Original\", \"Resized\"])\n",
133 |     | - ") # two-input display gizmo: will show two windows\n",
134 |     | - "resizer = c.add(ResizingGizmo(300, 200)) # resizing gizmo\n",
135 |     | - "\n",
136 |     | - "# Create pipeline: the image source is fed to a display and to the image resizing gizmo,\n",
137 |     | - "# which is then fed to another display.\n",
138 |     | - "\n",
139 |     | - "display.connect_to(source, 0) # display input 0 is \"Original\"\n",
140 |     | - "resizer.connect_to(source)\n",
141 |     | - "display.connect_to(resizer, 1) # display input 1 is \"Resized\"\n",
142 |     | - "\n",
143 |     | - "c.start()"
    | 120 | + "source = VideoSourceGizmo(video_source) # video source gizmo\n",
    | 121 | + "display = VideoDisplayGizmo([\"Original\", \"Resized\"]) # two-input display gizmo\n",
    | 122 | + "resizer = ResizingGizmo(300, 200) # resizing gizmo\n",
    | 123 | + "\n",
    | 124 | + "# Create pipeline: the image source is connected to a display input 0. \n",
    | 125 | + "# Also it is connected to the image resizing gizmo which is then connected to display input 1.\n",
    | 126 | + "# Both pipelines are passed to the Composition object constructor.\n",
    | 127 | + "# Note, how `[]` operator is used to select the input of display gizmo to connect to.\n",
    | 128 | + "Composition(source >> display[0], source >> resizer >> display[1]).start()"
144 | 129 |   ]
145 | 130 |   },
146 | 131 |   {

162 | 147 |   "source": [
163 | 148 |   "from degirum_tools.streams import *\n",
164 | 149 |   "\n",
165 |     | - "c = Composition()\n",
166 |     | - "\n",
167 |     | - "source = c.add(VideoSourceGizmo(video_source))\n",
168 |     | - "display = c.add(VideoDisplayGizmo())\n",
169 |     | - "saver = c.add(VideoSaverGizmo(\"temp/mycapture.mp4\"))\n",
170 |     | - "\n",
171 |     | - "source >> display\n",
172 |     | - "source >> saver\n",
    | 150 | + "# create gizmos\n",
    | 151 | + "source = VideoSourceGizmo(video_source)\n",
    | 152 | + "display = VideoDisplayGizmo()\n",
    | 153 | + "saver = VideoSaverGizmo(\"temp/mycapture.mp4\")\n",
173 | 154 |   "\n",
174 |     | - "c.start()"
    | 155 | + "# create pipeline and composition, then start it\n",
    | 156 | + "Composition(source >> display, source >> saver).start()"
175 | 157 |   ]
176 | 158 |   },
177 | 159 |   {

198 | 180 |   },
199 | 181 |   {
200 | 182 |   "cell_type": "code",
201 |     | - "execution_count": null,
    | 183 | + "execution_count": 18,
202 | 184 |   "id": "193a0c25",
203 | 185 |   "metadata": {},
204 | 186 |   "outputs": [],

228 | 210 |   "from degirum_tools.streams import *\n",
229 | 211 |   "\n",
230 | 212 |   "# load some object detection AI model\n",
231 |     | - "model=dg.load_model(\n",
    | 213 | + "model = dg.load_model(\n",
232 | 214 |   " model_name=model_name,\n",
233 | 215 |   " inference_host_address=hw_location,\n",
234 | 216 |   " zoo_url=model_zoo_url,\n",
235 | 217 |   " token=degirum_tools.get_token(),\n",
236 | 218 |   ")\n",
237 |     | - "c = Composition()\n",
238 | 219 |   "\n",
239 | 220 |   "# create gizmos\n",
240 |     | - "source = c.add(VideoSourceGizmo(video_source)) # video source\n",
241 |     | - "detection = c.add(AiSimpleGizmo(model)) # AI model\n",
242 |     | - "display = c.add(\n",
243 |     | - " VideoDisplayGizmo(\"Detection\", show_ai_overlay=True, show_fps=True)\n",
244 |     | - ") # display\n",
    | 221 | + "source = VideoSourceGizmo(video_source) # video source\n",
    | 222 | + "detection = AiSimpleGizmo(model) # AI model\n",
    | 223 | + "display = VideoDisplayGizmo(\"Detection\", show_ai_overlay=True, show_fps=True) # display\n",
245 | 224 |   "\n",
246 |     | - "# create pipeline\n",
247 |     | - "source >> detection >> display\n",
248 |     | - "\n",
249 |     | - "c.start()"
    | 225 | + "# create pipeline and composition, then start it\n",
    | 226 | + "Composition(source >> detection >> display).start()"
250 | 227 |   ]
251 | 228 |   },
252 | 229 |   {

277 | 254 |   },
278 | 255 |   {
279 | 256 |   "cell_type": "code",
280 |     | - "execution_count": null,
    | 257 | + "execution_count": 20,
281 | 258 |   "id": "2d31645f",
282 | 259 |   "metadata": {},
283 | 260 |   "outputs": [],

307 | 284 |   "from degirum_tools.streams import *\n",
308 | 285 |   "\n",
309 | 286 |   "# load some object detection AI model\n",
310 |     | - "model=dg.load_model(\n",
    | 287 | + "model = dg.load_model(\n",
311 | 288 |   " model_name=model_name,\n",
312 | 289 |   " inference_host_address=hw_location,\n",
313 | 290 |   " zoo_url=model_zoo_url,\n",
314 | 291 |   " token=degirum_tools.get_token(),\n",
315 | 292 |   ")\n",
316 | 293 |   "\n",
317 |     | - "c = Composition()\n",
318 |     | - "\n",
319 | 294 |   "# create gizmos\n",
320 |     | - "source = c.add(VideoSourceGizmo(video_source)) # video source\n",
321 |     | - "preprocessor = c.add(AiPreprocessGizmo(model))\n",
322 |     | - "detection = c.add(AiSimpleGizmo(model))\n",
323 |     | - "display = c.add(\n",
324 |     | - " VideoDisplayGizmo(\"Objects\", show_ai_overlay=True, show_fps=True)\n",
325 |     | - ") # display\n",
326 |     | - "\n",
327 |     | - "# create pipeline\n",
328 |     | - "source >> preprocessor >> detection >> display\n",
    | 295 | + "source = VideoSourceGizmo(video_source) # video source\n",
    | 296 | + "preprocessor = AiPreprocessGizmo(model) # AI model preprocessor\n",
    | 297 | + "detection = AiSimpleGizmo(model) # AI model\n",
    | 298 | + "display = VideoDisplayGizmo(\"Objects\", show_ai_overlay=True, show_fps=True) # display\n",
329 | 299 |   "\n",
330 |     | - "c.start()"
    | 300 | + "# create pipeline and composition, then start it\n",
    | 301 | + "Composition(source >> preprocessor >> detection >> display).start()"
331 | 302 |   ]
    | 303 | + },
    | 304 | + {
    | 305 | + "cell_type": "code",
    | 306 | + "execution_count": null,
    | 307 | + "id": "596abdcf",
    | 308 | + "metadata": {},
    | 309 | + "outputs": [],
    | 310 | + "source": []
332 | 311 |   }
333 | 312 |   ],
334 | 313 |   "metadata": {
335 | 314 |   "kernelspec": {
336 |     | - "display_name": "Python (supervision)",
    | 315 | + "display_name": "base",
337 | 316 |   "language": "python",
338 |     | - "name": "supervision"
    | 317 | + "name": "python3"
339 | 318 |   },
340 | 319 |   "language_info": {
341 | 320 |   "codemirror_mode": {

347 | 326 |   "name": "python",
348 | 327 |   "nbconvert_exporter": "python",
349 | 328 |   "pygments_lexer": "ipython3",
350 |     | - "version": "3.9.18"
    | 329 | + "version": "3.9.16"
351 | 330 |   }
352 | 331 |   },
353 | 332 |   "nbformat": 4,