Reference

A customizable lightweight Python library for real-time multi-object tracking.

Examples:

>>> from norfair import Detection, Tracker, Video, draw_tracked_objects
>>> detector = MyDetector()  # Set up a detector
>>> video = Video(input_path="video.mp4")
>>> tracker = Tracker(distance_function="euclidean", distance_threshold=50)
>>> for frame in video:
>>>    detections = detector(frame)
>>>    norfair_detections = [Detection(points) for points in detections]
>>>    tracked_objects = tracker.update(detections=norfair_detections)
>>>    draw_tracked_objects(frame, tracked_objects)
>>>    video.write(frame)

Color

Contains predefined colors.

Colors are defined as a tuple of integers between 0 and 255, expressing the values in BGR; this is the format OpenCV uses.
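
For instance, a color can be read off the class directly or parsed from a hex string with the `hex_to_bgr` helper used throughout the source below (a minimal sketch; the import path follows the source file listed here):

>>> from norfair.drawing.color import Color, hex_to_bgr
>>> Color.red  # (0, 0, 255) in BGR
>>> hex_to_bgr("#ff0000")  # the same value, parsed from a hex string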

Source code in norfair/drawing/color.py
class Color:
    """
    Contains predefined colors.

    Colors are defined as a tuple of integers between 0 and 255, expressing the values in BGR;
    this is the format OpenCV uses.
    """

    # from PIL.ImageColors.colormap
    aliceblue = hex_to_bgr("#f0f8ff")
    antiquewhite = hex_to_bgr("#faebd7")
    aqua = hex_to_bgr("#00ffff")
    aquamarine = hex_to_bgr("#7fffd4")
    azure = hex_to_bgr("#f0ffff")
    beige = hex_to_bgr("#f5f5dc")
    bisque = hex_to_bgr("#ffe4c4")
    black = hex_to_bgr("#000000")
    blanchedalmond = hex_to_bgr("#ffebcd")
    blue = hex_to_bgr("#0000ff")
    blueviolet = hex_to_bgr("#8a2be2")
    brown = hex_to_bgr("#a52a2a")
    burlywood = hex_to_bgr("#deb887")
    cadetblue = hex_to_bgr("#5f9ea0")
    chartreuse = hex_to_bgr("#7fff00")
    chocolate = hex_to_bgr("#d2691e")
    coral = hex_to_bgr("#ff7f50")
    cornflowerblue = hex_to_bgr("#6495ed")
    cornsilk = hex_to_bgr("#fff8dc")
    crimson = hex_to_bgr("#dc143c")
    cyan = hex_to_bgr("#00ffff")
    darkblue = hex_to_bgr("#00008b")
    darkcyan = hex_to_bgr("#008b8b")
    darkgoldenrod = hex_to_bgr("#b8860b")
    darkgray = hex_to_bgr("#a9a9a9")
    darkgrey = hex_to_bgr("#a9a9a9")
    darkgreen = hex_to_bgr("#006400")
    darkkhaki = hex_to_bgr("#bdb76b")
    darkmagenta = hex_to_bgr("#8b008b")
    darkolivegreen = hex_to_bgr("#556b2f")
    darkorange = hex_to_bgr("#ff8c00")
    darkorchid = hex_to_bgr("#9932cc")
    darkred = hex_to_bgr("#8b0000")
    darksalmon = hex_to_bgr("#e9967a")
    darkseagreen = hex_to_bgr("#8fbc8f")
    darkslateblue = hex_to_bgr("#483d8b")
    darkslategray = hex_to_bgr("#2f4f4f")
    darkslategrey = hex_to_bgr("#2f4f4f")
    darkturquoise = hex_to_bgr("#00ced1")
    darkviolet = hex_to_bgr("#9400d3")
    deeppink = hex_to_bgr("#ff1493")
    deepskyblue = hex_to_bgr("#00bfff")
    dimgray = hex_to_bgr("#696969")
    dimgrey = hex_to_bgr("#696969")
    dodgerblue = hex_to_bgr("#1e90ff")
    firebrick = hex_to_bgr("#b22222")
    floralwhite = hex_to_bgr("#fffaf0")
    forestgreen = hex_to_bgr("#228b22")
    fuchsia = hex_to_bgr("#ff00ff")
    gainsboro = hex_to_bgr("#dcdcdc")
    ghostwhite = hex_to_bgr("#f8f8ff")
    gold = hex_to_bgr("#ffd700")
    goldenrod = hex_to_bgr("#daa520")
    gray = hex_to_bgr("#808080")
    grey = hex_to_bgr("#808080")
    green = (0, 128, 0)
    greenyellow = hex_to_bgr("#adff2f")
    honeydew = hex_to_bgr("#f0fff0")
    hotpink = hex_to_bgr("#ff69b4")
    indianred = hex_to_bgr("#cd5c5c")
    indigo = hex_to_bgr("#4b0082")
    ivory = hex_to_bgr("#fffff0")
    khaki = hex_to_bgr("#f0e68c")
    lavender = hex_to_bgr("#e6e6fa")
    lavenderblush = hex_to_bgr("#fff0f5")
    lawngreen = hex_to_bgr("#7cfc00")
    lemonchiffon = hex_to_bgr("#fffacd")
    lightblue = hex_to_bgr("#add8e6")
    lightcoral = hex_to_bgr("#f08080")
    lightcyan = hex_to_bgr("#e0ffff")
    lightgoldenrodyellow = hex_to_bgr("#fafad2")
    lightgreen = hex_to_bgr("#90ee90")
    lightgray = hex_to_bgr("#d3d3d3")
    lightgrey = hex_to_bgr("#d3d3d3")
    lightpink = hex_to_bgr("#ffb6c1")
    lightsalmon = hex_to_bgr("#ffa07a")
    lightseagreen = hex_to_bgr("#20b2aa")
    lightskyblue = hex_to_bgr("#87cefa")
    lightslategray = hex_to_bgr("#778899")
    lightslategrey = hex_to_bgr("#778899")
    lightsteelblue = hex_to_bgr("#b0c4de")
    lightyellow = hex_to_bgr("#ffffe0")
    lime = hex_to_bgr("#00ff00")
    limegreen = hex_to_bgr("#32cd32")
    linen = hex_to_bgr("#faf0e6")
    magenta = hex_to_bgr("#ff00ff")
    maroon = hex_to_bgr("#800000")
    mediumaquamarine = hex_to_bgr("#66cdaa")
    mediumblue = hex_to_bgr("#0000cd")
    mediumorchid = hex_to_bgr("#ba55d3")
    mediumpurple = hex_to_bgr("#9370db")
    mediumseagreen = hex_to_bgr("#3cb371")
    mediumslateblue = hex_to_bgr("#7b68ee")
    mediumspringgreen = hex_to_bgr("#00fa9a")
    mediumturquoise = hex_to_bgr("#48d1cc")
    mediumvioletred = hex_to_bgr("#c71585")
    midnightblue = hex_to_bgr("#191970")
    mintcream = hex_to_bgr("#f5fffa")
    mistyrose = hex_to_bgr("#ffe4e1")
    moccasin = hex_to_bgr("#ffe4b5")
    navajowhite = hex_to_bgr("#ffdead")
    navy = hex_to_bgr("#000080")
    oldlace = hex_to_bgr("#fdf5e6")
    olive = hex_to_bgr("#808000")
    olivedrab = hex_to_bgr("#6b8e23")
    orange = hex_to_bgr("#ffa500")
    orangered = hex_to_bgr("#ff4500")
    orchid = hex_to_bgr("#da70d6")
    palegoldenrod = hex_to_bgr("#eee8aa")
    palegreen = hex_to_bgr("#98fb98")
    paleturquoise = hex_to_bgr("#afeeee")
    palevioletred = hex_to_bgr("#db7093")
    papayawhip = hex_to_bgr("#ffefd5")
    peachpuff = hex_to_bgr("#ffdab9")
    peru = hex_to_bgr("#cd853f")
    pink = hex_to_bgr("#ffc0cb")
    plum = hex_to_bgr("#dda0dd")
    powderblue = hex_to_bgr("#b0e0e6")
    purple = hex_to_bgr("#800080")
    rebeccapurple = hex_to_bgr("#663399")
    red = hex_to_bgr("#ff0000")
    rosybrown = hex_to_bgr("#bc8f8f")
    royalblue = hex_to_bgr("#4169e1")
    saddlebrown = hex_to_bgr("#8b4513")
    salmon = hex_to_bgr("#fa8072")
    sandybrown = hex_to_bgr("#f4a460")
    seagreen = hex_to_bgr("#2e8b57")
    seashell = hex_to_bgr("#fff5ee")
    sienna = hex_to_bgr("#a0522d")
    silver = hex_to_bgr("#c0c0c0")
    skyblue = hex_to_bgr("#87ceeb")
    slateblue = hex_to_bgr("#6a5acd")
    slategray = hex_to_bgr("#708090")
    slategrey = hex_to_bgr("#708090")
    snow = hex_to_bgr("#fffafa")
    springgreen = hex_to_bgr("#00ff7f")
    steelblue = hex_to_bgr("#4682b4")
    tan = hex_to_bgr("#d2b48c")
    teal = hex_to_bgr("#008080")
    thistle = hex_to_bgr("#d8bfd8")
    tomato = hex_to_bgr("#ff6347")
    turquoise = hex_to_bgr("#40e0d0")
    violet = hex_to_bgr("#ee82ee")
    wheat = hex_to_bgr("#f5deb3")
    white = hex_to_bgr("#ffffff")
    whitesmoke = hex_to_bgr("#f5f5f5")
    yellow = hex_to_bgr("#ffff00")
    yellowgreen = hex_to_bgr("#9acd32")

    # seaborn tab20 colors
    tab1 = hex_to_bgr("#1f77b4")
    tab2 = hex_to_bgr("#aec7e8")
    tab3 = hex_to_bgr("#ff7f0e")
    tab4 = hex_to_bgr("#ffbb78")
    tab5 = hex_to_bgr("#2ca02c")
    tab6 = hex_to_bgr("#98df8a")
    tab7 = hex_to_bgr("#d62728")
    tab8 = hex_to_bgr("#ff9896")
    tab9 = hex_to_bgr("#9467bd")
    tab10 = hex_to_bgr("#c5b0d5")
    tab11 = hex_to_bgr("#8c564b")
    tab12 = hex_to_bgr("#c49c94")
    tab13 = hex_to_bgr("#e377c2")
    tab14 = hex_to_bgr("#f7b6d2")
    tab15 = hex_to_bgr("#7f7f7f")
    tab16 = hex_to_bgr("#c7c7c7")
    tab17 = hex_to_bgr("#bcbd22")
    tab18 = hex_to_bgr("#dbdb8d")
    tab19 = hex_to_bgr("#17becf")
    tab20 = hex_to_bgr("#9edae5")
    # seaborn colorblind
    cb1 = hex_to_bgr("#0173b2")
    cb2 = hex_to_bgr("#de8f05")
    cb3 = hex_to_bgr("#029e73")
    cb4 = hex_to_bgr("#d55e00")
    cb5 = hex_to_bgr("#cc78bc")
    cb6 = hex_to_bgr("#ca9161")
    cb7 = hex_to_bgr("#fbafe4")
    cb8 = hex_to_bgr("#949494")
    cb9 = hex_to_bgr("#ece133")
    cb10 = hex_to_bgr("#56b4e9")

Palette

Class to control the color palette for drawing.

Examples:

Change palette:

>>> from norfair import Palette
>>> Palette.set("colorblind")
>>> # or a custom palette
>>> from norfair import Color
>>> Palette.set([Color.red, Color.blue, "#ffeeff"])
Source code in norfair/drawing/color.py
class Palette:
    """
    Class to control the color palette for drawing.

    Examples
    --------
    Change palette:
    >>> from norfair import Palette
    >>> Palette.set("colorblind")
    >>> # or a custom palette
    >>> from norfair import Color
    >>> Palette.set([Color.red, Color.blue, "#ffeeff"])
    """

    _colors = PALETTES["tab10"]
    _default_color = Color.black

    @classmethod
    def set(cls, palette: Union[str, Iterable[ColorLike]]):
        """
        Selects a color palette.

        Parameters
        ----------
        palette : Union[str, Iterable[ColorLike]]
            can be either
            - the name of one of the predefined palettes `tab10`, `tab20`, or `colorblind`
            - a list of ColorLike objects that can be parsed by [`parse_color`][norfair.drawing.color.parse_color]
        """
        if isinstance(palette, str):
            try:
                cls._colors = PALETTES[palette]
            except KeyError as e:
                raise ValueError(
                    f"Invalid palette name '{palette}', valid values are {PALETTES.keys()}"
                ) from e
        else:
            colors = []
            for c in palette:
                colors.append(parse_color(c))

            cls._colors = colors

    @classmethod
    def set_default_color(cls, color: ColorLike):
        """
        Selects the default color of `choose_color` when hashable is None.

        Parameters
        ----------
        color : ColorLike
            The new default color.
        """
        cls._default_color = parse_color(color)

    @classmethod
    def choose_color(cls, hashable: Hashable) -> ColorType:
        if hashable is None:
            return cls._default_color
        return cls._colors[abs(hash(hashable)) % len(cls._colors)]

set(palette) classmethod

Selects a color palette.

Parameters:

- `palette` (`Union[str, Iterable[ColorLike]]`, required): can be either the name of one of the predefined palettes (`tab10`, `tab20`, or `colorblind`), or a list of ColorLike objects that can be parsed by `parse_color`.
Source code in norfair/drawing/color.py
@classmethod
def set(cls, palette: Union[str, Iterable[ColorLike]]):
    """
    Selects a color palette.

    Parameters
    ----------
    palette : Union[str, Iterable[ColorLike]]
        can be either
        - the name of one of the predefined palettes `tab10`, `tab20`, or `colorblind`
        - a list of ColorLike objects that can be parsed by [`parse_color`][norfair.drawing.color.parse_color]
    """
    if isinstance(palette, str):
        try:
            cls._colors = PALETTES[palette]
        except KeyError as e:
            raise ValueError(
                f"Invalid palette name '{palette}', valid values are {PALETTES.keys()}"
            ) from e
    else:
        colors = []
        for c in palette:
            colors.append(parse_color(c))

        cls._colors = colors

set_default_color(color) classmethod

Selects the default color of choose_color when hashable is None.

Parameters:

- `color` (`ColorLike`, required): The new default color.
Source code in norfair/drawing/color.py
@classmethod
def set_default_color(cls, color: ColorLike):
    """
    Selects the default color of `choose_color` when hashable is None.

    Parameters
    ----------
    color : ColorLike
        The new default color.
    """
    cls._default_color = parse_color(color)
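
`choose_color` (shown at the end of the class source above) deterministically maps any hashable, typically a tracked object's id or label, to one of the palette's colors, and falls back to the default color when the hashable is `None`. A minimal sketch:

>>> from norfair import Palette
>>> Palette.set("tab10")
>>> Palette.choose_color(42)    # the same id always maps to the same color
>>> Palette.choose_color(None)  # returns the default color (black unless changed)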

Drawable

Class to standardize Drawable objects like Detections and TrackedObjects.

Parameters:

- `obj` (`Union[Detection, TrackedObject]`, default `None`): A Detection or a TrackedObject that will be used to initialize the drawable. If this parameter is passed, all other arguments are ignored.
- `points` (`ndarray`, default `None`): Points included in the drawable; shape is `(N_points, N_dimensions)`. Ignored if `obj` is passed.
- `id` (`Any`, default `None`): Id of this object. Ignored if `obj` is passed.
- `label` (`Any`, default `None`): Label specifying the class of the object. Ignored if `obj` is passed.
- `scores` (`ndarray`, default `None`): Confidence scores of each point; shape is `(N_points,)`. Ignored if `obj` is passed.
- `live_points` (`ndarray`, default `None`): Boolean array indicating which points are alive; shape is `(N_points,)`. Ignored if `obj` is passed.

Raises:

- `ValueError`: If `obj` is not an instance of the supported classes.

Source code in norfair/drawing/drawer.py
class Drawable:
    """
    Class to standardize Drawable objects like Detections and TrackedObjects

    Parameters
    ----------
    obj : Union[Detection, TrackedObject], optional
        A [Detection][norfair.tracker.Detection] or a [TrackedObject][norfair.tracker.TrackedObject]
        that will be used to initialize the drawable.
        If this parameter is passed, all other arguments are ignored
    points : np.ndarray, optional
        Points included in the drawable, shape is `(N_points, N_dimensions)`. Ignored if `obj` is passed
    id : Any, optional
        Id of this object. Ignored if `obj` is passed
    label : Any, optional
        Label specifying the class of the object. Ignored if `obj` is passed
    scores : np.ndarray, optional
        Confidence scores of each point, shape is `(N_points,)`. Ignored if `obj` is passed
    live_points : np.ndarray, optional
        Boolean array indicating which points are alive, shape is `(N_points,)`. Ignored if `obj` is passed

    Raises
    ------
    ValueError
        If obj is not an instance of the supported classes.
    """

    def __init__(
        self,
        obj: Union[Detection, TrackedObject] = None,
        points: np.ndarray = None,
        id: Any = None,
        label: Any = None,
        scores: np.ndarray = None,
        live_points: np.ndarray = None,
    ) -> None:
        if isinstance(obj, Detection):
            self.points = obj.points
            self.id = None
            self.label = obj.label
            self.scores = obj.scores
            # TODO: alive points for detections could be the ones over the threshold
            # but that info is not available here
            self.live_points = np.ones(obj.points.shape[0]).astype(bool)

        elif isinstance(obj, TrackedObject):
            self.points = obj.estimate
            self.id = obj.id
            self.label = obj.label
            # TODO: TrackedObject.scores could be an interesting thing to have
            # it could be the scores of the last detection or some kind of moving average
            self.scores = None
            self.live_points = obj.live_points
        elif obj is None:
            self.points = points
            self.id = id
            self.label = label
            self.scores = scores
            self.live_points = live_points
        else:
            raise ValueError(
                f"Extecting a Detection or a TrackedObject but received {type(obj)}"
            )
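
A `Drawable` can wrap an existing object or be built from raw values when neither a Detection nor a TrackedObject is at hand (a minimal sketch based on the signature above; the import path follows the source file listed here):

>>> import numpy as np
>>> from norfair.drawing.drawer import Drawable
>>> drawable = Drawable(points=np.array([[10, 20], [30, 40]]), id=1, label="person")
>>> drawable.points.shape
(2, 2)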

FixedCamera

Class used to stabilize video based on the camera motion.

Starts with a larger frame, where the original frame is drawn on top of a black background. As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it.

Useful for debugging or demoing the camera motion (an example GIF is available in the project documentation).

Warning

This only works with TranslationTransformation, using HomographyTransformation will result in unexpected behaviour.

Warning

If using other drawers, always apply this one last. Using other drawers on the scaled up frame will not work as expected.

Note

Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame. In this case, a warning will be logged and the frames will be cropped to avoid errors.

Parameters:

- `scale` (`float`, default `2`): The resulting video will have a resolution of `scale * (H, W)`, where `HxW` is the resolution of the original video. Use a bigger scale if the camera is moving too much.
- `attenuation` (`float`, default `0.05`): Controls how fast the older frames fade to black.

Examples:

>>> # setup
>>> tracker = Tracker("frobenious", 100)
>>> motion_estimator = MotionEstimator()
>>> video = Video(input_path="video.mp4")
>>> fixed_camera = FixedCamera()
>>> # process video
>>> for frame in video:
>>>     coord_transformations = motion_estimator.update(frame)
>>>     detections = get_detections(frame)
>>>     tracked_objects = tracker.update(detections, coord_transformations)
>>>     draw_tracked_objects(frame, tracked_objects)  # fixed_camera should always be the last drawer
>>>     bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations)
>>>     video.write(bigger_frame)
Source code in norfair/drawing/fixed_camera.py
class FixedCamera:
    """
    Class used to stabilize video based on the camera motion.

    Starts with a larger frame, where the original frame is drawn on top of a black background.
    As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it.

    Useful for debugging or demoing the camera motion.
    ![Example GIF](../../videos/camera_stabilization.gif)

    !!! Warning
        This only works with [`TranslationTransformation`][norfair.camera_motion.TranslationTransformation],
        using [`HomographyTransformation`][norfair.camera_motion.HomographyTransformation] will result in
        unexpected behaviour.

    !!! Warning
        If using other drawers, always apply this one last. Using other drawers on the scaled up frame will not work as expected.

    !!! Note
        Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame.
        In this case, a warning will be logged and the frames will be cropped to avoid errors.

    Parameters
    ----------
    scale : float, optional
        The resulting video will have a resolution of `scale * (H, W)` where HxW is the resolution of the original video.
        Use a bigger scale if the camera is moving too much.
    attenuation : float, optional
        Controls how fast the older frames fade to black.

    Examples
    --------
    >>> # setup
    >>> tracker = Tracker("frobenius", 100)
    >>> motion_estimator = MotionEstimator()
    >>> video = Video(input_path="video.mp4")
    >>> fixed_camera = FixedCamera()
    >>> # process video
    >>> for frame in video:
    >>>     coord_transformations = motion_estimator.update(frame)
    >>>     detections = get_detections(frame)
    >>>     tracked_objects = tracker.update(detections, coord_transformations=coord_transformations)
    >>>     draw_tracked_objects(frame, tracked_objects)  # fixed_camera should always be the last drawer
    >>>     bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations)
    >>>     video.write(bigger_frame)
    """

    def __init__(self, scale: float = 2, attenuation: float = 0.05):
        self.scale = scale
        self._background = None
        self._attenuation_factor = 1 - attenuation

    def adjust_frame(
        self, frame: np.ndarray, coord_transformation: TranslationTransformation
    ) -> np.ndarray:
        """
        Render scaled up frame.

        Parameters
        ----------
        frame : np.ndarray
            The OpenCV frame.
        coord_transformation : TranslationTransformation
            The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]

        Returns
        -------
        np.ndarray
            The new bigger frame with the original frame drawn on it.
        """

        # initialize background if necessary
        if self._background is None:
            original_size = (
                frame.shape[1],
                frame.shape[0],
            )  # OpenCV format is (width, height)

            scaled_size = tuple(
                (np.array(original_size) * np.array(self.scale)).round().astype(int)
            )
            self._background = np.zeros(
                [scaled_size[1], scaled_size[0], frame.shape[-1]],
                frame.dtype,
            )
        else:
            self._background = (self._background * self._attenuation_factor).astype(
                frame.dtype
            )

        # top_left is the anchor coordinate from where we start drawing the frame on top of the background
        # aim to draw it in the center of the background but transformations will move this point
        top_left = (
            np.array(self._background.shape[:2]) // 2 - np.array(frame.shape[:2]) // 2
        )
        top_left = (
            coord_transformation.rel_to_abs(top_left[::-1]).round().astype(int)[::-1]
        )
        # box of the background that will be updated and the limits of it
        background_y0, background_y1 = (top_left[0], top_left[0] + frame.shape[0])
        background_x0, background_x1 = (top_left[1], top_left[1] + frame.shape[1])
        background_size_y, background_size_x = self._background.shape[:2]

        # define box of the frame that will be used
        # if the scale is not enough to support the movement, warn the user but keep drawing
        # cropping the frame so that the operation doesn't fail
        frame_y0, frame_y1, frame_x0, frame_x1 = (0, frame.shape[0], 0, frame.shape[1])
        if (
            background_y0 < 0
            or background_x0 < 0
            or background_y1 > background_size_y
            or background_x1 > background_size_x
        ):
            warn_once(
                "moving_camera_scale is not enough to cover the range of camera movement, frame will be cropped"
            )
            # crop left or top of the frame if necessary
            frame_y0 = max(-background_y0, 0)
            frame_x0 = max(-background_x0, 0)
            # crop right or bottom of the frame if necessary
            frame_y1 = max(
                min(background_size_y - background_y0, background_y1 - background_y0), 0
            )
            frame_x1 = max(
                min(background_size_x - background_x0, background_x1 - background_x0), 0
            )
            # handle cases where the limits of the background become negative which numpy will interpret incorrectly
            background_y0 = max(background_y0, 0)
            background_x0 = max(background_x0, 0)
            background_y1 = max(background_y1, 0)
            background_x1 = max(background_x1, 0)
        self._background[
            background_y0:background_y1, background_x0:background_x1, :
        ] = frame[frame_y0:frame_y1, frame_x0:frame_x1, :]
        return self._background

adjust_frame(frame, coord_transformation)

Render scaled up frame.

Parameters:

- `frame` (`ndarray`, required): The OpenCV frame.
- `coord_transformation` (`TranslationTransformation`, required): The coordinate transformation as returned by the `MotionEstimator`.

Returns:

- `ndarray`: The new bigger frame with the original frame drawn on it.

Source code in norfair/drawing/fixed_camera.py
def adjust_frame(
    self, frame: np.ndarray, coord_transformation: TranslationTransformation
) -> np.ndarray:
    """
    Render scaled up frame.

    Parameters
    ----------
    frame : np.ndarray
        The OpenCV frame.
    coord_transformation : TranslationTransformation
        The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]

    Returns
    -------
    np.ndarray
        The new bigger frame with the original frame drawn on it.
    """

    # initialize background if necessary
    if self._background is None:
        original_size = (
            frame.shape[1],
            frame.shape[0],
        )  # OpenCV format is (width, height)

        scaled_size = tuple(
            (np.array(original_size) * np.array(self.scale)).round().astype(int)
        )
        self._background = np.zeros(
            [scaled_size[1], scaled_size[0], frame.shape[-1]],
            frame.dtype,
        )
    else:
        self._background = (self._background * self._attenuation_factor).astype(
            frame.dtype
        )

    # top_left is the anchor coordinate from where we start drawing the frame on top of the background
    # aim to draw it in the center of the background but transformations will move this point
    top_left = (
        np.array(self._background.shape[:2]) // 2 - np.array(frame.shape[:2]) // 2
    )
    top_left = (
        coord_transformation.rel_to_abs(top_left[::-1]).round().astype(int)[::-1]
    )
    # box of the background that will be updated and the limits of it
    background_y0, background_y1 = (top_left[0], top_left[0] + frame.shape[0])
    background_x0, background_x1 = (top_left[1], top_left[1] + frame.shape[1])
    background_size_y, background_size_x = self._background.shape[:2]

    # define box of the frame that will be used
    # if the scale is not enough to support the movement, warn the user but keep drawing
    # cropping the frame so that the operation doesn't fail
    frame_y0, frame_y1, frame_x0, frame_x1 = (0, frame.shape[0], 0, frame.shape[1])
    if (
        background_y0 < 0
        or background_x0 < 0
        or background_y1 > background_size_y
        or background_x1 > background_size_x
    ):
        warn_once(
            "moving_camera_scale is not enough to cover the range of camera movement, frame will be cropped"
        )
        # crop left or top of the frame if necessary
        frame_y0 = max(-background_y0, 0)
        frame_x0 = max(-background_x0, 0)
        # crop right or bottom of the frame if necessary
        frame_y1 = max(
            min(background_size_y - background_y0, background_y1 - background_y0), 0
        )
        frame_x1 = max(
            min(background_size_x - background_x0, background_x1 - background_x0), 0
        )
        # handle cases where the limits of the background become negative which numpy will interpret incorrectly
        background_y0 = max(background_y0, 0)
        background_x0 = max(background_x0, 0)
        background_y1 = max(background_y1, 0)
        background_x1 = max(background_x1, 0)
    self._background[
        background_y0:background_y1, background_x0:background_x1, :
    ] = frame[frame_y0:frame_y1, frame_x0:frame_x1, :]
    return self._background

AbsolutePaths

Class that draws the absolute paths taken by a set of points.

Works just like Paths but supports camera motion.

Warning

This drawer is not optimized, so it can be extremely slow. Performance degrades linearly with max_history * number_of_tracked_objects.

Parameters:

- `get_points_to_draw` (`Optional[Callable[[array], array]]`, default `None`): Function that takes a list of points (the `.estimate` attribute of a TrackedObject) and returns a list of points for which we want to draw their paths. By default it is the mean point of all the points in the tracker.
- `thickness` (`Optional[int]`, default `None`): Thickness of the circles representing the paths of interest.
- `color` (`Optional[Tuple[int, int, int]]`, default `None`): Color of the circles representing the paths of interest.
- `radius` (`Optional[int]`, default `None`): Radius of the circles representing the paths of interest.
- `max_history` (`int`, default `20`): Number of past points to include in the path. High values make the drawing slower.

Examples:

>>> from norfair import Tracker, Video
>>> from norfair.camera_motion import MotionEstimator
>>> from norfair.drawing import AbsolutePaths
>>> video = Video("video.mp4")
>>> tracker = Tracker(...)
>>> motion_estimator = MotionEstimator()
>>> path_drawer = AbsolutePaths()
>>> for frame in video:
>>>    detections = get_detections(frame)  # runs detector and returns Detections
>>>    coord_transformations = motion_estimator.update(frame)
>>>    tracked_objects = tracker.update(detections, coord_transformations=coord_transformations)
>>>    frame = path_drawer.draw(frame, tracked_objects, coord_transform=coord_transformations)
>>>    video.write(frame)
Source code in norfair/drawing/path.py
class AbsolutePaths:
    """
    Class that draws the absolute paths taken by a set of points.

    Works just like [`Paths`][norfair.drawing.Paths] but supports camera motion.

    !!! warning
        This drawer is not optimized, so it can be extremely slow. Performance degrades linearly with
        `max_history * number_of_tracked_objects`.

    Parameters
    ----------
    get_points_to_draw : Optional[Callable[[np.array], np.array]], optional
        Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject])
        and returns a list of points for which we want to draw their paths.

        By default it is the mean point of all the points in the tracker.
    thickness : Optional[int], optional
        Thickness of the circles representing the paths of interest.
    color : Optional[Tuple[int, int, int]], optional
        [Color][norfair.drawing.Color] of the circles representing the paths of interest.
    radius : Optional[int], optional
        Radius of the circles representing the paths of interest.
    max_history : int, optional
        Number of past points to include in the path. High values make the drawing slower

    Examples
    --------
    >>> from norfair import Tracker, Video
    >>> from norfair.camera_motion import MotionEstimator
    >>> from norfair.drawing import AbsolutePaths
    >>> video = Video("video.mp4")
    >>> tracker = Tracker(...)
    >>> motion_estimator = MotionEstimator()
    >>> path_drawer = AbsolutePaths()
    >>> for frame in video:
    >>>    detections = get_detections(frame)  # runs detector and returns Detections
    >>>    coord_transformations = motion_estimator.update(frame)
    >>>    tracked_objects = tracker.update(detections, coord_transformations=coord_transformations)
    >>>    frame = path_drawer.draw(frame, tracked_objects, coord_transform=coord_transformations)
    >>>    video.write(frame)
    """

    def __init__(
        self,
        get_points_to_draw: Optional[Callable[[np.array], np.array]] = None,
        thickness: Optional[int] = None,
        color: Optional[Tuple[int, int, int]] = None,
        radius: Optional[int] = None,
        max_history=20,
    ):

        if get_points_to_draw is None:

            def get_points_to_draw(points):
                return [np.mean(np.array(points), axis=0)]

        self.get_points_to_draw = get_points_to_draw

        self.radius = radius
        self.thickness = thickness
        self.color = color
        self.past_points = defaultdict(lambda: [])
        self.max_history = max_history
        self.alphas = np.linspace(0.99, 0.01, max_history)

    def draw(self, frame, tracked_objects, coord_transform=None):
        frame_scale = frame.shape[0] / 100

        if self.radius is None:
            self.radius = int(max(frame_scale * 0.7, 1))
        if self.thickness is None:
            self.thickness = int(max(frame_scale / 7, 1))
        for obj in tracked_objects:
            if not obj.live_points.any():
                continue

            if self.color is None:
                color = Palette.choose_color(obj.id)
            else:
                color = self.color

            points_to_draw = self.get_points_to_draw(obj.get_estimate(absolute=True))

            for point in coord_transform.abs_to_rel(points_to_draw):
                Drawer.circle(
                    frame,
                    position=tuple(point.astype(int)),
                    radius=self.radius,
                    color=color,
                    thickness=self.thickness,
                )

            last = points_to_draw
            for i, past_points in enumerate(self.past_points[obj.id]):
                overlay = frame.copy()
                last = coord_transform.abs_to_rel(last)
                for j, point in enumerate(coord_transform.abs_to_rel(past_points)):
                    Drawer.line(
                        overlay,
                        tuple(last[j].astype(int)),
                        tuple(point.astype(int)),
                        color=color,
                        thickness=self.thickness,
                    )
                last = past_points

                alpha = self.alphas[i]
                frame = Drawer.alpha_blend(overlay, frame, alpha=alpha)
            self.past_points[obj.id].insert(0, points_to_draw)
            self.past_points[obj.id] = self.past_points[obj.id][: self.max_history]
        return frame

Paths

Class that draws the paths taken by a set of points of interest defined from the coordinates of each tracker estimation.

Parameters:

- `get_points_to_draw` (`Optional[Callable[[array], array]]`, default `None`): Function that takes a list of points (the `.estimate` attribute of a TrackedObject) and returns a list of points for which we want to draw their paths. By default it is the mean point of all the points in the tracker.
- `thickness` (`Optional[int]`, default `None`): Thickness of the circles representing the paths of interest.
- `color` (`Optional[Tuple[int, int, int]]`, default `None`): Color of the circles representing the paths of interest.
- `radius` (`Optional[int]`, default `None`): Radius of the circles representing the paths of interest.
- `attenuation` (`float`, default `0.01`): A float in [0, 1] that dictates the speed at which the path is erased. If it is `0`, the path is never erased.

Examples:

>>> from norfair import Tracker, Video, Paths
>>> video = Video("video.mp4")
>>> tracker = Tracker(...)
>>> path_drawer = Paths()
>>> for frame in video:
>>>    detections = get_detections(frame)  # runs detector and returns Detections
>>>    tracked_objects = tracker.update(detections)
>>>    frame = path_drawer.draw(frame, tracked_objects)
>>>    video.write(frame)
Source code in norfair/drawing/path.py
class Paths:
    """
    Class that draws the paths taken by a set of points of interest defined from the coordinates of each tracker estimation.

    Parameters
    ----------
    get_points_to_draw : Optional[Callable[[np.array], np.array]], optional
        Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject])
        and returns a list of points for which we want to draw their paths.

        By default it is the mean point of all the points in the tracker.
    thickness : Optional[int], optional
        Thickness of the circles representing the paths of interest.
    color : Optional[Tuple[int, int, int]], optional
        [Color][norfair.drawing.Color] of the circles representing the paths of interest.
    radius : Optional[int], optional
        Radius of the circles representing the paths of interest.
    attenuation : float, optional
        A float number in [0, 1] that dictates the speed at which the path is erased.
        if it is `0` then the path is never erased.

    Examples
    --------
    >>> from norfair import Tracker, Video, Paths
    >>> video = Video("video.mp4")
    >>> tracker = Tracker(...)
    >>> path_drawer = Paths()
    >>> for frame in video:
    >>>    detections = get_detections(frame)  # runs detector and returns Detections
    >>>    tracked_objects = tracker.update(detections)
    >>>    frame = path_drawer.draw(frame, tracked_objects)
    >>>    video.write(frame)
    """

    def __init__(
        self,
        get_points_to_draw: Optional[Callable[[np.array], np.array]] = None,
        thickness: Optional[int] = None,
        color: Optional[Tuple[int, int, int]] = None,
        radius: Optional[int] = None,
        attenuation: float = 0.01,
    ):
        if get_points_to_draw is None:

            def get_points_to_draw(points):
                return [np.mean(np.array(points), axis=0)]

        self.get_points_to_draw = get_points_to_draw

        self.radius = radius
        self.thickness = thickness
        self.color = color
        self.mask = None
        self.attenuation_factor = 1 - attenuation

    def draw(
        self, frame: np.ndarray, tracked_objects: Sequence[TrackedObject]
    ) -> np.array:
        """
        Draw the paths of the points of interest on a frame.

        !!! warning
            This method does **not** draw frames in place as other drawers do, the resulting frame is returned.

        Parameters
        ----------
        frame : np.ndarray
            The OpenCV frame to draw on.
        tracked_objects : Sequence[TrackedObject]
            List of [`TrackedObject`][norfair.tracker.TrackedObject] to get the points of interest in order to update the paths.

        Returns
        -------
        np.array
            The resulting frame.
        """
        if self.mask is None:
            frame_scale = frame.shape[0] / 100

            if self.radius is None:
                self.radius = int(max(frame_scale * 0.7, 1))
            if self.thickness is None:
                self.thickness = int(max(frame_scale / 7, 1))

            self.mask = np.zeros(frame.shape, np.uint8)

        self.mask = (self.mask * self.attenuation_factor).astype("uint8")

        for obj in tracked_objects:
            if obj.abs_to_rel is not None:
                warn_once(
                    "It seems that your using the Path drawer together with MotionEstimator. This is not fully supported and the results will not be what's expected"
                )

            if self.color is None:
                color = Palette.choose_color(obj.id)
            else:
                color = self.color

            points_to_draw = self.get_points_to_draw(obj.estimate)

            for point in points_to_draw:
                self.mask = Drawer.circle(
                    self.mask,
                    position=tuple(point.astype(int)),
                    radius=self.radius,
                    color=color,
                    thickness=self.thickness,
                )

        return Drawer.alpha_blend(self.mask, frame, alpha=1, beta=1)

draw(frame, tracked_objects)

Draw the paths of the points of interest on a frame.

Warning

This method does not draw frames in place as other drawers do, the resulting frame is returned.

Parameters:

- `frame` (`ndarray`, required): The OpenCV frame to draw on.
- `tracked_objects` (`Sequence[TrackedObject]`, required): List of TrackedObject to get the points of interest in order to update the paths.

Returns:

- `np.array`: The resulting frame.

Source code in norfair/drawing/path.py
def draw(
    self, frame: np.ndarray, tracked_objects: Sequence[TrackedObject]
) -> np.array:
    """
    Draw the paths of the points of interest on a frame.

    !!! warning
        This method does **not** draw frames in place as other drawers do, the resulting frame is returned.

    Parameters
    ----------
    frame : np.ndarray
        The OpenCV frame to draw on.
    tracked_objects : Sequence[TrackedObject]
        List of [`TrackedObject`][norfair.tracker.TrackedObject] to get the points of interest in order to update the paths.

    Returns
    -------
    np.array
        The resulting frame.
    """
    if self.mask is None:
        frame_scale = frame.shape[0] / 100

        if self.radius is None:
            self.radius = int(max(frame_scale * 0.7, 1))
        if self.thickness is None:
            self.thickness = int(max(frame_scale / 7, 1))

        self.mask = np.zeros(frame.shape, np.uint8)

    self.mask = (self.mask * self.attenuation_factor).astype("uint8")

    for obj in tracked_objects:
        if obj.abs_to_rel is not None:
            warn_once(
                "It seems that your using the Path drawer together with MotionEstimator. This is not fully supported and the results will not be what's expected"
            )

        if self.color is None:
            color = Palette.choose_color(obj.id)
        else:
            color = self.color

        points_to_draw = self.get_points_to_draw(obj.estimate)

        for point in points_to_draw:
            self.mask = Drawer.circle(
                self.mask,
                position=tuple(point.astype(int)),
                radius=self.radius,
                color=color,
                thickness=self.thickness,
            )

    return Drawer.alpha_blend(self.mask, frame, alpha=1, beta=1)

frobenius(detection, tracked_object)

Frobenius norm of the difference between the points in detection and the estimates in tracked_object.

The Frobenius distance and norm are given by:

\[ d_f(a, b) = ||a - b||_F \]
\[ ||A||_F = [\sum_{i,j} abs(a_{i,j})^2]^{1/2} \]

Parameters:

- `detection` (`Detection`, required): A detection.
- `tracked_object` (`TrackedObject`, required): A tracked object.

Returns:

- `float`: The distance.

See Also

np.linalg.norm
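
Since this is one of the named scalar distances, it is usually selected by name when constructing the tracker (the threshold value here is illustrative):

>>> tracker = Tracker(distance_function="frobenius", distance_threshold=100)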

Source code in norfair/distances.py
def frobenius(detection: "Detection", tracked_object: "TrackedObject") -> float:
    """
    Frobenius norm of the difference between the points in detection and the estimates in tracked_object.

    The Frobenius distance and norm are given by:

    $$
    d_f(a, b) = ||a - b||_F
    $$

    $$
    ||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}
    $$

    Parameters
    ----------
    detection : Detection
        A detection.
    tracked_object : TrackedObject
        A tracked object.

    Returns
    -------
    float
        The distance.

    See Also
    --------
    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)
    """
    return np.linalg.norm(detection.points - tracked_object.estimate)

mean_euclidean(detection, tracked_object)

Average euclidean distance between the points in detection and estimates in tracked_object.

\[ d(a, b) = \frac{\sum_{i=0}^N ||a_i - b_i||_2}{N} \]

Parameters:

- `detection` (`Detection`, required): A detection.
- `tracked_object` (`TrackedObject`, required): A tracked object.

Returns:

- `float`: The distance.

See Also

np.linalg.norm
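
The same quantity can be checked directly with NumPy: with per-point distances 0 and 5, the mean is 2.5 (an illustrative sketch, not library code):

>>> import numpy as np
>>> a = np.array([[0.0, 0.0], [3.0, 4.0]])
>>> b = np.zeros((2, 2))
>>> np.linalg.norm(a - b, axis=1).mean()  # (0 + 5) / 2
2.5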

Source code in norfair/distances.py
def mean_euclidean(detection: "Detection", tracked_object: "TrackedObject") -> float:
    """
    Average euclidean distance between the points in detection and estimates in tracked_object.

    $$
    d(a, b) = \\frac{\\sum_{i=0}^N ||a_i - b_i||_2}{N}
    $$

    Parameters
    ----------
    detection : Detection
        A detection.
    tracked_object : TrackedObject
        A tracked object

    Returns
    -------
    float
        The distance.

    See Also
    --------
    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)
    """
    return np.linalg.norm(detection.points - tracked_object.estimate, axis=1).mean()

mean_manhattan(detection, tracked_object)

Average manhattan distance between the points in detection and the estimates in tracked_object.

Given by:

\[ d(a, b) = \frac{\sum_{i=0}^N ||a_i - b_i||_1}{N} \]

Where \(||a||_1\) is the manhattan norm.

Parameters:

- `detection` (`Detection`, required): A detection.
- `tracked_object` (`TrackedObject`, required): A tracked object.

Returns:

- `float`: The distance.

See Also

np.linalg.norm
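
The only difference from `mean_euclidean` is the norm order. With the same illustrative points as above, the per-point L1 distances are 0 and 7, so the mean is 3.5:

>>> import numpy as np
>>> a = np.array([[0.0, 0.0], [3.0, 4.0]])
>>> b = np.zeros((2, 2))
>>> np.linalg.norm(a - b, ord=1, axis=1).mean()  # (0 + 7) / 2
3.5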

Source code in norfair/distances.py
def mean_manhattan(detection: "Detection", tracked_object: "TrackedObject") -> float:
    """
    Average manhattan distance between the points in detection and the estimates in tracked_object

    Given by:

    $$
    d(a, b) = \\frac{\\sum_{i=0}^N ||a_i - b_i||_1}{N}
    $$

    Where $||a||_1$ is the manhattan norm.

    Parameters
    ----------
    detection : Detection
        A detection.
    tracked_object : TrackedObject
        a tracked object.

    Returns
    -------
    float
        The distance.

    See Also
    --------
    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)
    """
    return np.linalg.norm(
        detection.points - tracked_object.estimate, ord=1, axis=1
    ).mean()

iou(candidates, objects)

Calculate IoU between two sets of bounding boxes. Both sets of boxes are expected to be in [x_min, y_min, x_max, y_max] format.

Normal IoU is 1 when the boxes are the same and 0 when they don't overlap. To turn that into a distance that makes sense, we return 1 - iou.

Parameters:

- `candidates` (`ndarray`, required): (N, 4) numpy.ndarray containing candidates bounding boxes.
- `objects` (`ndarray`, required): (K, 4) numpy.ndarray containing objects bounding boxes.

Returns:

- `ndarray`: (N, K) numpy.ndarray of `1 - iou` between candidates and objects.
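
A quick sanity check with two 2x2 boxes that share half of their area (illustrative values; the import path follows the source file listed below):

>>> import numpy as np
>>> from norfair.distances import iou
>>> candidates = np.array([[0, 0, 2, 2]])
>>> objects = np.array([[1, 0, 3, 2]])
>>> iou(candidates, objects)  # intersection 2, union 6, so 1 - 1/3
array([[0.66666667]])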

Source code in norfair/distances.py
def iou(candidates: np.ndarray, objects: np.ndarray) -> np.ndarray:
    """
    Calculate IoU between two sets of bounding boxes. Both sets of boxes are expected
    to be in `[x_min, y_min, x_max, y_max]` format.

    Normal IoU is 1 when the boxes are the same and 0 when they don't overlap.
    To turn that into a distance that makes sense, we return `1 - iou`.

    Parameters
    ----------
    candidates : numpy.ndarray
        (N, 4) numpy.ndarray containing candidates bounding boxes.
    objects : numpy.ndarray
        (K, 4) numpy.ndarray containing objects bounding boxes.

    Returns
    -------
    numpy.ndarray
        (N, K) numpy.ndarray of `1 - iou` between candidates and objects.
    """
    _validate_bboxes(candidates)

    area_candidates = _boxes_area(candidates.T)
    area_objects = _boxes_area(objects.T)

    top_left = np.maximum(candidates[:, None, :2], objects[:, :2])
    bottom_right = np.minimum(candidates[:, None, 2:], objects[:, 2:])

    area_intersection = np.prod(
        np.clip(bottom_right - top_left, a_min=0, a_max=None), 2
    )
    return 1 - area_intersection / (
        area_candidates[:, None] + area_objects - area_intersection
    )

get_distance_by_name(name)

Select a distance by name.

Parameters:

- `name` (`str`, required): A string defining the metric to get.

Returns:

- `Distance`: The distance object.
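
The names accepted by the `Tracker` examples above resolve through this function (a minimal sketch; the import path follows the source file listed below):

>>> from norfair.distances import get_distance_by_name
>>> distance = get_distance_by_name("euclidean")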

Source code in norfair/distances.py
def get_distance_by_name(name: str) -> Distance:
    """
    Select a distance by name.

    Parameters
    ----------
    name : str
        A string defining the metric to get.

    Returns
    -------
    Distance
        The distance object.
    """

    if name in _SCALAR_DISTANCE_FUNCTIONS:
        warning(
            "You are using a scalar distance function. If you want to speed up the"
            " tracking process please consider using a vectorized distance function"
            f" such as {AVAILABLE_VECTORIZED_DISTANCES}."
        )
        distance = _SCALAR_DISTANCE_FUNCTIONS[name]
        distance_function = ScalarDistance(distance)
    elif name in _SCIPY_DISTANCE_FUNCTIONS:
        distance_function = ScipyDistance(name)
    elif name in _VECTORIZED_DISTANCE_FUNCTIONS:
        if name == "iou_opt":
            warning("iou_opt is deprecated, use iou instead")
        distance = _VECTORIZED_DISTANCE_FUNCTIONS[name]
        distance_function = VectorizedDistance(distance)
    else:
        raise ValueError(
            f"Invalid distance '{name}', expecting one of"
            f" {list(_SCALAR_DISTANCE_FUNCTIONS.keys()) + AVAILABLE_VECTORIZED_DISTANCES}"
        )

    return distance_function

create_keypoints_voting_distance(keypoint_distance_threshold, detection_threshold)

Construct a keypoint voting distance function configured with the given thresholds.

Count how many points in a detection match the ones in a tracked_object. A match is counted when the distance between the points is < keypoint_distance_threshold and the score of the last_detection of the tracked_object is > detection_threshold. Notice that if multiple points are tracked, the ith point in the detection can only match the ith point in the tracked object.

Distance is 1 if no point matches and approaches 0 as more points are matched.

Parameters:

- `keypoint_distance_threshold` (`float`, required): Points closer than this threshold are considered a match.
- `detection_threshold` (`float`, required): Detections and objects with score lower than this threshold are ignored.

Returns:

- `Callable`: The distance function that must be passed to the Tracker.
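
The returned function is passed to the tracker like any other distance. Since the distance is 1 with no matches and approaches 0 as more keypoints match, a threshold between 0.5 and 1 accepts candidates with at least one matching point (values here are illustrative):

>>> from norfair import Tracker
>>> from norfair.distances import create_keypoints_voting_distance
>>> distance = create_keypoints_voting_distance(keypoint_distance_threshold=30, detection_threshold=0.2)
>>> tracker = Tracker(distance_function=distance, distance_threshold=0.8)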

Source code in norfair/distances.py
def create_keypoints_voting_distance(
    keypoint_distance_threshold: float, detection_threshold: float
) -> Callable[["Detection", "TrackedObject"], float]:
    """
    Construct a keypoint voting distance function configured with the thresholds.

    Count how many points in a detection match the ones in a tracked_object.
    A match is counted when the distance between the points is < `keypoint_distance_threshold`
    and the score of the last_detection of the tracked_object is > `detection_threshold`.
    Notice that if multiple points are tracked, the ith point in the detection can only match the ith
    point in the tracked object.

    Distance is 1 if no point matches and approaches 0 as more points are matched.

    Parameters
    ----------
    keypoint_distance_threshold: float
        Points closer than this threshold are considered a match.
    detection_threshold: float
        Detections and objects with score lower than this threshold are ignored.

    Returns
    -------
    Callable
        The distance function that must be passed to the Tracker.
    """

    def keypoints_voting_distance(
        detection: "Detection", tracked_object: "TrackedObject"
    ) -> float:
        distances = np.linalg.norm(detection.points - tracked_object.estimate, axis=1)
        match_num = np.count_nonzero(
            (distances < keypoint_distance_threshold)
            * (detection.scores > detection_threshold)
            * (tracked_object.last_detection.scores > detection_threshold)
        )
        return 1 / (1 + match_num)

    return keypoints_voting_distance

create_normalized_mean_euclidean_distance(height, width)

Construct a normalized mean euclidean distance function configured with the max height and width.

The resulting distance is bounded to [0, 1], where 1 indicates opposite corners of the image.

Parameters:

- `height` (`int`, required): Height of the image.
- `width` (`int`, required): Width of the image.

Returns:

- `Callable`: The distance function that must be passed to the Tracker.
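
Because the distance is normalized by the frame size, the threshold is expressed as a fraction of the image dimensions rather than in pixels (values here are illustrative):

>>> from norfair import Tracker
>>> from norfair.distances import create_normalized_mean_euclidean_distance
>>> distance = create_normalized_mean_euclidean_distance(height=1080, width=1920)
>>> tracker = Tracker(distance_function=distance, distance_threshold=0.05)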

Source code in norfair/distances.py
def create_normalized_mean_euclidean_distance(
    height: int, width: int
) -> Callable[["Detection", "TrackedObject"], float]:
    """
    Construct a normalized mean euclidean distance function configured with the max height and width.

    The resulting distance is bounded to [0, 1], where 1 indicates opposite corners of the image.

    Parameters
    ----------
    height: int
        Height of the image.
    width: int
        Width of the image.

    Returns
    -------
    Callable
        The distance function that must be passed to the Tracker.
    """

    def normalized__mean_euclidean_distance(
        detection: "Detection", tracked_object: "TrackedObject"
    ) -> float:
        """Normalized mean euclidean distance"""
        # calculate the differences and normalize them by width and height
        difference = (detection.points - tracked_object.estimate).astype(float)
        difference[:, 0] /= width
        difference[:, 1] /= height

        # calculate euclidean distance and average
        return np.linalg.norm(difference, axis=1).mean()

    return normalized__mean_euclidean_distance

draw_absolute_grid(frame, coord_transformations, grid_size=20, radius=2, thickness=1, color=Color.black, polar=False)

Draw a grid of points in absolute coordinates.

Useful for debugging camera motion.

The points are drawn as if the camera were in the center of a sphere and points are drawn in the intersection of latitude and longitude lines over the surface of the sphere.

Parameters:

- `frame` (`ndarray`, required): The OpenCV frame to draw on.
- `coord_transformations` (`CoordinatesTransformation`, required): The coordinate transformation as returned by the MotionEstimator.
- `grid_size` (`int`, default `20`): How many points to draw.
- `radius` (`int`, default `2`): Size of each point.
- `thickness` (`int`, default `1`): Thickness of each point.
- `color` (`ColorType`, default `Color.black`): Color of the points.
- `polar` (`bool`, default `False`): If True, the points on the first frame are drawn as if the camera were pointing to a pole (viewed from the center of the earth). By default (False), the points are drawn as if the camera were pointing to the Equator.
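
A typical debugging loop feeds the estimator's transformations straight into the drawer (a minimal sketch; the import path follows the source file listed below):

>>> from norfair import Video
>>> from norfair.camera_motion import MotionEstimator
>>> from norfair.drawing.absolute_grid import draw_absolute_grid
>>> video = Video(input_path="video.mp4")
>>> motion_estimator = MotionEstimator()
>>> for frame in video:
>>>     coord_transformations = motion_estimator.update(frame)
>>>     draw_absolute_grid(frame, coord_transformations)
>>>     video.write(frame)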
Source code in norfair/drawing/absolute_grid.py
def draw_absolute_grid(
    frame: np.ndarray,
    coord_transformations: CoordinatesTransformation,
    grid_size: int = 20,
    radius: int = 2,
    thickness: int = 1,
    color: ColorType = Color.black,
    polar: bool = False,
):
    """
    Draw a grid of points in absolute coordinates.

    Useful for debugging camera motion.

    The points are drawn as if the camera were in the center of a sphere and points are drawn in the intersection
    of latitude and longitude lines over the surface of the sphere.

    Parameters
    ----------
    frame : np.ndarray
        The OpenCV frame to draw on.
    coord_transformations : CoordinatesTransformation
        The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]
    grid_size : int, optional
        How many points to draw.
    radius : int, optional
        Size of each point.
    thickness : int, optional
        Thickness of each point
    color : ColorType, optional
        Color of the points.
    polar : Bool, optional
        If True, the points on the first frame are drawn as if the camera were pointing to a pole (viewed from the center of the earth).
        By default, False is used which means the points are drawn as if the camera were pointing to the Equator.
    """
    h, w, _ = frame.shape

    # get absolute points grid
    points = _get_grid(grid_size, w, h, polar=polar)

    # transform the points to relative coordinates
    if coord_transformations is None:
        points_transformed = points
    else:
        points_transformed = coord_transformations.abs_to_rel(points)

    # filter points that are not visible
    visible_points = points_transformed[
        (points_transformed <= np.array([w, h])).all(axis=1)
        & (points_transformed >= 0).all(axis=1)
    ]
    for point in visible_points:
        Drawer.cross(
            frame, point.astype(int), radius=radius, thickness=thickness, color=color
        )

draw_tracked_boxes(frame, objects, border_colors=None, border_width=None, id_size=None, id_thickness=None, draw_box=True, color_by_label=False, draw_labels=False, label_size=None, label_width=None)

Deprecated. Use draw_boxes.
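
Migrating is mostly a rename; the deprecated wrapper simply forwards to `draw_boxes` (a sketch; the keyword follows the forwarding call in the source below):

>>> from norfair import draw_boxes
>>> draw_boxes(frame, drawables=tracked_objects)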

Source code in norfair/drawing/draw_boxes.py
def draw_tracked_boxes(
    frame: np.ndarray,
    objects: Sequence["TrackedObject"],
    border_colors: Optional[Tuple[int, int, int]] = None,
    border_width: Optional[int] = None,
    id_size: Optional[int] = None,
    id_thickness: Optional[int] = None,
    draw_box: bool = True,
    color_by_label: bool = False,
    draw_labels: bool = False,
    label_size: Optional[int] = None,
    label_width: Optional[int] = None,
) -> np.array:
    "**Deprecated**. Use [`draw_box`][norfair.drawing.draw_boxes.draw_boxes]"
    warn_once("draw_tracked_boxes is deprecated, use draw_box instead")
    return draw_boxes(
        frame=frame,
        drawables=objects,
        color="by_label" if color_by_label else border_colors,
        thickness=border_width,
        text_size=label_size or id_size,
        text_thickness=id_thickness or label_width,
        draw_labels=draw_labels,
        draw_ids=id_size is not None and id_size > 0,
        draw_box=draw_box,
    )

draw_tracked_objects(frame, objects, radius=None, color=None, id_size=None, id_thickness=None, draw_points=True, color_by_label=False, draw_labels=False, label_size=None)

Deprecated. Use draw_points.
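
Likewise, the replacement takes the objects through the `drawables` keyword (a sketch; the keyword follows the forwarding call in the source below):

>>> from norfair import draw_points
>>> draw_points(frame, drawables=tracked_objects)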

Source code in norfair/drawing/draw_points.py
def draw_tracked_objects(
    frame: np.ndarray,
    objects: Sequence["TrackedObject"],
    radius: Optional[int] = None,
    color: Optional[ColorLike] = None,
    id_size: Optional[float] = None,
    id_thickness: Optional[int] = None,
    draw_points: bool = True,  # pylint: disable=redefined-outer-name
    color_by_label: bool = False,
    draw_labels: bool = False,
    label_size: Optional[int] = None,
):
    """
    **Deprecated** use [`draw_points`][norfair.drawing.draw_points.draw_points]
    """
    warn_once("draw_tracked_objects is deprecated, use draw_points instead")

    frame_scale = frame.shape[0] / 100
    if radius is None:
        radius = int(frame_scale * 0.5)
    if id_size is None:
        id_size = frame_scale / 10
    if id_thickness is None:
        id_thickness = int(frame_scale / 5)
    if label_size is None:
        label_size = int(max(frame_scale / 100, 1))

    _draw_points_alias(
        frame=frame,
        drawables=objects,
        color="by_label" if color_by_label else color,
        radius=radius,
        thickness=None,
        draw_labels=draw_labels,
        draw_ids=id_size is not None and id_size > 0,
        draw_points=draw_points,
        text_size=label_size or id_size,
        text_thickness=id_thickness,
        text_color=None,
        hide_dead_points=True,
    )