Reference
A customizable lightweight Python library for real-time multi-object tracking.
Examples:
>>> from norfair import Detection, Tracker, Video, draw_tracked_objects
>>> detector = MyDetector() # Set up a detector
>>> video = Video(input_path="video.mp4")
>>> tracker = Tracker(distance_function="euclidean", distance_threshold=50)
>>> for frame in video:
>>>    detections = detector(frame)
>>>    norfair_detections = [Detection(points) for points in detections]
>>>    tracked_objects = tracker.update(detections=norfair_detections)
>>>    draw_tracked_objects(frame, tracked_objects)
>>>    video.write(frame)
Color
Contains predefined colors.
Colors are defined as tuples of integers between 0 and 255, expressing their values in BGR order; this is the format OpenCV uses.
Source code in norfair/drawing/color.py
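For example, the following quick check illustrates the BGR layout (a minimal sketch; the tuple values in the comments assume the standard hex definitions of red and blue):
>>> from norfair import Color
>>> Color.red   # (0, 0, 255) in BGR
>>> Color.blue  # (255, 0, 0) in BGR
>>> # any predefined color can be passed wherever a drawer expects a color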
Palette
Class to control the color palette used for drawing.
Examples:
Change palette:
>>> from norfair import Palette
>>> Palette.set("colorblind")
>>> # or a custom palette
>>> from norfair import Color
>>> Palette.set([Color.red, Color.blue, "#ffeeff"])
Source code in norfair/drawing/color.py
set(palette) classmethod
Selects a color palette.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
palette | Union[str, Iterable[ColorLike]] | Can be either the name of one of the predefined palettes, or an iterable of ColorLike colors defining a custom palette. | required |
Source code in norfair/drawing/color.py
set_default_color(color) classmethod
Selects the default color of choose_color when hashable is None.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
color | ColorLike | The new default color. | required |
Source code in norfair/drawing/color.py
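A minimal usage sketch (the choice of green here is arbitrary, not a library default):
>>> from norfair import Palette, Color
>>> Palette.set_default_color(Color.green)
>>> # choose_color will now return green whenever its hashable argument is None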
Drawable
Class to standardize drawable objects, such as Detections and TrackedObjects.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
obj | Union[Detection, TrackedObject] | A Detection or a TrackedObject that will be used to initialize the drawable. If this parameter is passed, all other arguments are ignored. | None |
points | ndarray | Points included in the drawable. Ignored if obj is passed. | None |
id | Any | Id of this object. Ignored if obj is passed. | None |
label | Any | Label specifying the class of the object. Ignored if obj is passed. | None |
scores | ndarray | Confidence scores of each point. Ignored if obj is passed. | None |
live_points | ndarray | Boolean array indicating which points are alive. Ignored if obj is passed. | None |
Raises:
Type | Description |
---|---|
ValueError | If obj is not an instance of the supported classes. |
Source code in norfair/drawing/drawer.py
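A brief construction sketch, assuming Drawable can be imported from norfair.drawing.drawer (the module listed as its source above) and using made-up detection points:
>>> import numpy as np
>>> from norfair import Detection
>>> from norfair.drawing.drawer import Drawable
>>> detection = Detection(points=np.array([[10, 20], [30, 40]]))
>>> drawable = Drawable(obj=detection)  # points, id, label and scores are taken from the detection
>>> drawable.points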
FixedCamera
Class used to stabilize video based on the camera motion.
Starts with a larger frame, where the original frame is drawn on top of a black background. As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it.
Useful for debugging or demoing the camera motion.
Warning
This only works with TranslationTransformation; using HomographyTransformation will result in unexpected behaviour.
Warning
If using other drawers, always apply this one last. Using other drawers on the scaled up frame will not work as expected.
Note
Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame. In this case, a warning will be logged and the frames will be cropped to avoid errors.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
scale | float | The resulting video will have a resolution of scale times the original frame size. | 2 |
attenuation | float | Controls how fast the older frames fade to black. | 0.05 |
Examples:
>>> # setup
>>> tracker = Tracker("frobenius", 100)
>>> motion_estimator = MotionEstimator()
>>> video = Video(input_path="video.mp4")
>>> fixed_camera = FixedCamera()
>>> # process video
>>> for frame in video:
>>>    coord_transformations = motion_estimator.update(frame)
>>>    detections = get_detections(frame)
>>>    tracked_objects = tracker.update(detections, coord_transformations)
>>>    draw_tracked_objects(frame, tracked_objects)  # fixed_camera should always be the last drawer
>>>    bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations)
>>>    video.write(bigger_frame)
Source code in norfair/drawing/fixed_camera.py
adjust_frame(frame, coord_transformation)
Render the scaled-up frame.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
frame | ndarray | The OpenCV frame. | required |
coord_transformation | TranslationTransformation | The coordinate transformation as returned by the MotionEstimator. | required |
Returns:
Type | Description |
---|---|
ndarray | The new bigger frame with the original frame drawn on it. |
Source code in norfair/drawing/fixed_camera.py
AbsolutePaths
Class that draws the absolute paths taken by a set of points.
Works just like Paths but supports camera motion.
Warning
This drawer is not optimized, so it can be extremely slow. Performance degrades linearly with max_history * number_of_tracked_objects.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
get_points_to_draw | Optional[Callable[[array], array]] | Function that takes the points of a tracked object (its estimate) and returns the points whose paths should be drawn. By default it is the mean point of all the points in the tracker. | None |
thickness | Optional[int] | Thickness of the circles representing the paths of interest. | None |
color | Optional[Tuple[int, int, int]] | Color of the circles representing the paths of interest. | None |
radius | Optional[int] | Radius of the circles representing the paths of interest. | None |
max_history | int | Number of past points to include in the path. High values make the drawing slower. | 20 |
Examples:
>>> from norfair import Tracker, Video, Paths
>>> video = Video("video.mp4")
>>> tracker = Tracker(...)
>>> path_drawer = Paths()
>>> for frame in video:
>>>    detections = get_detections(frame)  # runs detector and returns Detections
>>>    tracked_objects = tracker.update(detections)
>>>    frame = path_drawer.draw(frame, tracked_objects)
>>>    video.write(frame)
Source code in norfair/drawing/path.py
Paths
Class that draws the paths taken by a set of points of interest defined from the coordinates of each tracker estimation.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
get_points_to_draw | Optional[Callable[[array], array]] | Function that takes the points of a tracked object (its estimate) and returns the points whose paths should be drawn. By default it is the mean point of all the points in the tracker. | None |
thickness | Optional[int] | Thickness of the circles representing the paths of interest. | None |
color | Optional[Tuple[int, int, int]] | Color of the circles representing the paths of interest. | None |
radius | Optional[int] | Radius of the circles representing the paths of interest. | None |
attenuation | float | A float number in [0, 1] that dictates the speed at which the path is erased; if it is 0 the path is never erased. | 0.01 |
Examples:
>>> from norfair import Tracker, Video, Paths
>>> video = Video("video.mp4")
>>> tracker = Tracker(...)
>>> path_drawer = Paths()
>>> for frame in video:
>>>    detections = get_detections(frame)  # runs detector and returns Detections
>>>    tracked_objects = tracker.update(detections)
>>>    frame = path_drawer.draw(frame, tracked_objects)
>>>    video.write(frame)
Source code in norfair/drawing/path.py
draw(frame, tracked_objects)
Draw the paths of the points of interest on a frame.
Warning
This method does not draw the frame in place as other drawers do; the resulting frame is returned.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
frame | ndarray | The OpenCV frame to draw on. | required |
tracked_objects | Sequence[TrackedObject] | List of TrackedObjects to draw. | required |
Returns:
Type | Description |
---|---|
array | The resulting frame. |
Source code in norfair/drawing/path.py
frobenius(detection, tracked_object)
Frobenius norm of the difference between the points in the detection and the estimates in the tracked_object.
The Frobenius distance and norm are given by:
\[
d_f(a, b) = ||a - b||_F
\]
\[
||A||_F = \left[ \sum_{i,j} |a_{i,j}|^2 \right]^{1/2}
\]
Parameters:
Name | Type | Description | Default |
---|---|---|---|
detection | Detection | A detection. | required |
tracked_object | TrackedObject | A tracked object. | required |
Returns:
Type | Description |
---|---|
float | The distance. |
Source code in norfair/distances.py
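A usage sketch: the distance can be passed to the Tracker as this callable, or by name, assuming "frobenius" is among the metrics accepted by get_distance_by_name (documented below):
>>> from norfair import Tracker
>>> from norfair.distances import frobenius
>>> tracker = Tracker(distance_function=frobenius, distance_threshold=100)
>>> # or equivalently, by name
>>> tracker = Tracker(distance_function="frobenius", distance_threshold=100)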
mean_euclidean(detection, tracked_object)
Average Euclidean distance between the points in the detection and the estimates in the tracked_object.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
detection | Detection | A detection. | required |
tracked_object | TrackedObject | A tracked object. | required |
Returns:
Type | Description |
---|---|
float | The distance. |
Source code in norfair/distances.py
mean_manhattan(detection, tracked_object)
Average Manhattan distance between the points in the detection and the estimates in the tracked_object.
Given by:
\[
d(a, b) = \frac{1}{N} \sum_{i=0}^{N-1} ||a_i - b_i||_1
\]
where \(||a||_1\) is the Manhattan norm.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
detection | Detection | A detection. | required |
tracked_object | TrackedObject | A tracked object. | required |
Returns:
Type | Description |
---|---|
float | The distance. |
Source code in norfair/distances.py
iou(candidates, objects)
Calculate IoU between two sets of bounding boxes. Both sets of boxes are expected to be in [x_min, y_min, x_max, y_max] format.
Normal IoU is 1 when the boxes are identical and 0 when they don't overlap; to turn that into a distance that makes sense, we return 1 - iou.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
candidates | ndarray | (N, 4) numpy.ndarray containing the candidates' bounding boxes. | required |
objects | ndarray | (K, 4) numpy.ndarray containing the objects' bounding boxes. | required |
Returns:
Type | Description |
---|---|
ndarray | (N, K) numpy.ndarray of 1 - iou distances between candidates and objects. |
Source code in norfair/distances.py
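The following NumPy sketch re-implements the computation described above for illustration; it is not the library's own code, and the boxes are made up:
>>> import numpy as np
>>> candidates = np.array([[0, 0, 10, 10], [5, 5, 15, 15]])  # (N, 4) candidate boxes
>>> objects = np.array([[0, 0, 10, 10]])                     # (K, 4) object boxes
>>> x1 = np.maximum(candidates[:, None, 0], objects[None, :, 0])
>>> y1 = np.maximum(candidates[:, None, 1], objects[None, :, 1])
>>> x2 = np.minimum(candidates[:, None, 2], objects[None, :, 2])
>>> y2 = np.minimum(candidates[:, None, 3], objects[None, :, 3])
>>> intersection = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
>>> area_c = (candidates[:, 2] - candidates[:, 0]) * (candidates[:, 3] - candidates[:, 1])
>>> area_o = (objects[:, 2] - objects[:, 0]) * (objects[:, 3] - objects[:, 1])
>>> union = area_c[:, None] + area_o[None, :] - intersection
>>> distances = 1 - intersection / union  # (N, K) matrix of 1 - IoU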
get_distance_by_name(name)
Select a distance by name.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
name | str | A string defining the metric to get. | required |
Returns:
Type | Description |
---|---|
Distance | The distance object. |
Source code in norfair/distances.py
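A minimal sketch, assuming "frobenius" is one of the registered names (it is the metric documented above):
>>> from norfair.distances import get_distance_by_name
>>> distance = get_distance_by_name("frobenius")
>>> # the Tracker performs this lookup internally when distance_function is given as a string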
create_keypoints_voting_distance(keypoint_distance_threshold, detection_threshold)
Construct a keypoint voting distance function configured with the given thresholds.
Count how many points in a detection match the points of a tracked_object.
A match is counted when the distance between the points is < keypoint_distance_threshold and the score of the last_detection of the tracked_object is > detection_threshold.
Notice that if multiple points are tracked, the i-th point in the detection can only match the i-th point in the tracked object.
The distance is 1 if no point matches and approaches 0 as more points are matched.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
keypoint_distance_threshold | float | Points closer than this threshold are considered a match. | required |
detection_threshold | float | Detections and objects with score lower than this threshold are ignored. | required |
Returns:
Type | Description |
---|---|
Callable | The distance function that must be passed to the Tracker. |
Source code in norfair/distances.py
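A construction sketch; the threshold values below are arbitrary placeholders, not recommended defaults:
>>> from norfair import Tracker
>>> from norfair.distances import create_keypoints_voting_distance
>>> keypoint_distance = create_keypoints_voting_distance(keypoint_distance_threshold=15, detection_threshold=0.3)
>>> tracker = Tracker(distance_function=keypoint_distance, distance_threshold=0.9)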
create_normalized_mean_euclidean_distance(height, width)
Construct a normalized mean Euclidean distance function configured with the maximum height and width.
The resulting distance is bounded to [0, 1], where 1 corresponds to opposite corners of the image.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
height | int | Height of the image. | required |
width | int | Width of the image. | required |
Returns:
Type | Description |
---|---|
Callable | The distance function that must be passed to the Tracker. |
Source code in norfair/distances.py
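A construction sketch; the frame resolution and threshold below are arbitrary placeholders:
>>> from norfair import Tracker
>>> from norfair.distances import create_normalized_mean_euclidean_distance
>>> frame_height, frame_width = 720, 1280
>>> normalized_distance = create_normalized_mean_euclidean_distance(frame_height, frame_width)
>>> tracker = Tracker(distance_function=normalized_distance, distance_threshold=0.1)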
draw_absolute_grid(frame, coord_transformations, grid_size=20, radius=2, thickness=1, color=Color.black, polar=False)
Draw a grid of points in absolute coordinates.
Useful for debugging camera motion.
The points are drawn as if the camera were in the center of a sphere and points are drawn in the intersection of latitude and longitude lines over the surface of the sphere.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
frame | ndarray | The OpenCV frame to draw on. | required |
coord_transformations | CoordinatesTransformation | The coordinate transformation as returned by the MotionEstimator. | required |
grid_size | int | How many points to draw. | 20 |
radius | int | Size of each point. | 2 |
thickness | int | Thickness of each point. | 1 |
color | ColorType | Color of the points. | black |
polar | Bool | If True, the points on the first frame are drawn as if the camera were pointing to a pole (viewed from the center of the Earth). By default False, meaning the points are drawn as if the camera were pointing to the Equator. | False |
Source code in norfair/drawing/absolute_grid.py
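A usage sketch mirroring the FixedCamera example above; the import paths for MotionEstimator and draw_absolute_grid are assumptions:
>>> from norfair import Video
>>> from norfair.camera_motion import MotionEstimator
>>> from norfair.drawing import draw_absolute_grid
>>> video = Video(input_path="video.mp4")
>>> motion_estimator = MotionEstimator()
>>> for frame in video:
>>>    coord_transformations = motion_estimator.update(frame)
>>>    draw_absolute_grid(frame, coord_transformations)
>>>    video.write(frame)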
draw_tracked_boxes(frame, objects, border_colors=None, border_width=None, id_size=None, id_thickness=None, draw_box=True, color_by_label=False, draw_labels=False, label_size=None, label_width=None)
Deprecated. Use draw_box instead.
Source code in norfair/drawing/draw_boxes.py
draw_tracked_objects(frame, objects, radius=None, color=None, id_size=None, id_thickness=None, draw_points=True, color_by_label=False, draw_labels=False, label_size=None)
Deprecated. Use draw_points instead.
Source code in norfair/drawing/draw_points.py