Skip to content

unext

U-NeXt

Overview

U-NeXt is a modification of U-Net that utilizes techniques from ResNeXt and EfficientNetV2. During the encoding phase, MBConv blocks are used to efficiently process the input.

Classes:

Functions:

Additions

The U-NeXt architecture has been modified to allow the following:

  • MBConv blocks used in the encoding phase.
  • Squeeze and excitation (SE) blocks added within blocks.

Classes

UNextBlockParams

UNext block parameters

Attributes:

  • filters (int) –

    Number of filters

  • depth (int) –

    Layer depth

  • ddepth (int | None) –

    Layer decoder depth

  • kernel (int | tuple[int, int]) –

    Kernel size

  • pool (int | tuple[int, int]) –

    Pool size

  • strides (int | tuple[int, int]) –

    Stride size

  • skip (bool) –

    Add skip connection

  • expand_ratio (float) –

    Expansion ratio

  • se_ratio (float) –

    Squeeze and excite ratio

  • dropout (float | None) –

    Dropout rate

  • norm (Literal['batch', 'layer'] | None) –

    Normalization type

UNextParams

UNext parameters

Attributes:

UNextModel

Helper class to generate model from parameters

Functions

layer_from_params staticmethod
layer_from_params(inputs: keras.Input, params: UNextParams | dict, num_classes: int | None = None)

Create layer from parameters

Source code in neuralspot_edge/models/unext.py
@staticmethod
def layer_from_params(inputs: keras.Input, params: UNextParams | dict, num_classes: int | None = None):
    """Create layer from parameters"""
    # Accept either a ready-made UNextParams or a plain dict of its fields.
    model_params = UNextParams(**params) if isinstance(params, dict) else params
    return unext_layer(x=inputs, params=model_params, num_classes=num_classes)
model_from_params staticmethod
model_from_params(inputs: keras.Input, params: UNextParams | dict, num_classes: int | None = None)

Create model from parameters

Source code in neuralspot_edge/models/unext.py
@staticmethod
def model_from_params(inputs: keras.Input, params: UNextParams | dict, num_classes: int | None = None):
    """Create model from parameters"""
    # Build the functional graph, then wrap it as a keras.Model.
    return keras.Model(
        inputs=inputs,
        outputs=UNextModel.layer_from_params(inputs=inputs, params=params, num_classes=num_classes),
    )

Functions

se_block

se_block(ratio: int = 8, name: str | None = None)

Squeeze and excite block

Source code in neuralspot_edge/models/unext.py
def se_block(ratio: int = 8, name: str | None = None):
    """Squeeze and excite (SE) block.

    Global-average-pools the input, reduces channels by ``ratio`` via a 1x1
    conv + ReLU6 (squeeze), restores them with a second 1x1 conv + hard
    sigmoid (excite), and scales the input channel-wise by the result.

    Args:
        ratio (int): Channel reduction ratio for the squeeze conv.
        name (str | None): Layer name prefix; layers are unnamed when None.

    Returns:
        Callable: Functional layer closure mapping tensor -> tensor.
    """

    def layer(x: keras.KerasTensor) -> keras.KerasTensor:
        num_chan = x.shape[-1]
        # Conv2D requires integer filters, but callers may pass a float ratio
        # (unext_block forwards its float se_ratio here), in which case
        # `num_chan // ratio` is a float. Cast to int, and clamp to >= 1 so the
        # squeeze conv never gets zero filters when num_chan < ratio.
        squeeze_chan = max(1, int(num_chan // ratio))

        # Squeeze: pool spatial dims to 1x1, then reduce channels
        y = keras.layers.GlobalAveragePooling2D(name=f"{name}.pool" if name else None, keepdims=True)(x)

        y = keras.layers.Conv2D(
            squeeze_chan,
            kernel_size=1,
            use_bias=True,
            name=f"{name}.sq" if name else None,
        )(y)

        y = keras.layers.Activation("relu6", name=f"{name}.relu" if name else None)(y)

        # Excite: restore channel count and gate the input channel-wise
        y = keras.layers.Conv2D(num_chan, kernel_size=1, use_bias=True, name=f"{name}.ex" if name else None)(y)
        y = keras.layers.Activation(keras.activations.hard_sigmoid, name=f"{name}.sigg" if name else None)(y)
        y = keras.layers.Multiply(name=f"{name}.mul" if name else None)([x, y])
        return y

    return layer

norm_layer

norm_layer(norm: str, name: str) -> keras.Layer

Normalization layer

Parameters:

  • norm (str) –

    Normalization type

  • name (str) –

    Name

Returns:

  • Layer

    keras.Layer: Layer

Source code in neuralspot_edge/models/unext.py
def norm_layer(norm: str, name: str) -> keras.Layer:
    """Normalization layer

    Args:
        norm (str): Normalization type ("batch", "layer", or anything else for identity)
        name (str): Name prefix for the created layer

    Returns:
        keras.Layer: Functional layer closure
    """

    def layer(x: keras.KerasTensor) -> keras.KerasTensor:
        """Functional normalization layer

        Args:
            x (keras.KerasTensor): Input tensor

        Returns:
            keras.KerasTensor: Output tensor
        """
        if norm == "batch":
            return keras.layers.BatchNormalization(axis=-1, name=f"{name}.BN")(x)
        if norm == "layer":
            # Layer-normalize over the non-singleton spatial axes only.
            if x.shape[1] == 1:
                ln_axis = 2
            elif x.shape[2] == 1:
                ln_axis = 1
            else:
                ln_axis = (1, 2)
            return keras.layers.LayerNormalization(axis=ln_axis, name=f"{name}.LN")(x)
        # Any other norm value is a no-op (identity).
        return x

    return layer

unext_block

unext_block(output_filters: int, expand_ratio: float = 1, kernel_size: int | tuple[int, int] = 3, strides: int | tuple[int, int] = 1, se_ratio: float = 4, dropout: float | None = 0, norm: Literal['batch', 'layer'] | None = 'batch', name: str | None = None) -> keras.Layer

Create UNext block

Source code in neuralspot_edge/models/unext.py
def unext_block(
    output_filters: int,
    expand_ratio: float = 1,
    kernel_size: int | tuple[int, int] = 3,
    strides: int | tuple[int, int] = 1,
    se_ratio: float = 4,
    dropout: float | None = 0,
    norm: Literal["batch", "layer"] | None = "batch",
    name: str | None = None,
) -> keras.Layer:
    """Create UNext block

    MBConv-style block: depthwise conv -> norm -> optional grouped 1x1
    expansion + ReLU6 -> optional squeeze-and-excite -> linear 1x1
    projection, with an additive residual (and optional dropout on the
    residual branch) when input/output filters match and stride is 1.

    Args:
        output_filters (int): Number of output filters
        expand_ratio (float): Inverted-bottleneck expansion ratio (1 disables expansion)
        kernel_size (int | tuple[int, int]): Depthwise kernel size
        strides (int | tuple[int, int]): Stride; NOTE(review): only used to decide
            whether the residual is legal — the convs themselves use strides=1
        se_ratio (float): Squeeze-and-excite reduction ratio (skipped when <= 1)
        dropout (float | None): Dropout rate on the residual branch
        norm (Literal["batch", "layer"] | None): Normalization type (None disables)
        name (str | None): Layer name prefix

    Returns:
        keras.Layer: Functional layer closure
    """

    def layer(x: keras.KerasTensor) -> keras.KerasTensor:
        input_filters: int = x.shape[-1]
        # Average tuple strides so e.g. (1, 2) counts as non-unit and
        # disables the residual path.
        strides_len = strides if isinstance(strides, int) else sum(strides) // len(strides)
        add_residual = input_filters == output_filters and strides_len == 1
        # For layer norm, normalize over the non-singleton spatial axes.
        ln_axis = 2 if x.shape[1] == 1 else 1 if x.shape[2] == 1 else (1, 2)

        # Depthwise conv (groups == channels); bias only when no norm follows
        y = keras.layers.Conv2D(
            input_filters,
            kernel_size=kernel_size,
            groups=input_filters,
            strides=1,
            padding="same",
            use_bias=norm is None,
            kernel_initializer="he_normal",
            kernel_regularizer=keras.regularizers.L2(1e-3),
            name=f"{name}.dwconv" if name else None,
        )(x)
        if norm == "batch":
            # NOTE(review): unlike the other layers here, this name is not
            # guarded with `if name else None` — confirm name is always set.
            y = keras.layers.BatchNormalization(
                name=f"{name}.norm",
            )(y)
        elif norm == "layer":
            y = keras.layers.LayerNormalization(
                axis=ln_axis,
                name=f"{name}.norm" if name else None,
            )(y)
        # END IF

        # Inverted expansion block: grouped 1x1 conv widens channels by expand_ratio
        if expand_ratio != 1:
            y = keras.layers.Conv2D(
                filters=int(expand_ratio * input_filters),
                kernel_size=1,
                strides=1,
                padding="same",
                use_bias=norm is None,
                groups=input_filters,
                kernel_initializer="he_normal",
                kernel_regularizer=keras.regularizers.L2(1e-3),
                name=f"{name}.expand" if name else None,
            )(y)

            y = keras.layers.Activation(
                "relu6",
                name=f"{name}.relu" if name else None,
            )(y)

        # Squeeze and excite (skipped when se_ratio <= 1)
        if se_ratio > 1:
            name_se = f"{name}.se" if name else None
            y = se_block(ratio=se_ratio, name=name_se)(y)

        # Linear 1x1 projection down to output_filters (no activation)
        y = keras.layers.Conv2D(
            filters=output_filters,
            kernel_size=1,
            strides=1,
            padding="same",
            use_bias=norm is None,
            kernel_initializer="he_normal",
            kernel_regularizer=keras.regularizers.L2(1e-3),
            name=f"{name}.project" if name else None,
        )(y)

        if add_residual:
            if dropout and dropout > 0:
                # NOTE(review): noise_shape=y.shape includes the (possibly
                # None) batch dim — confirm this is the intended noise shape.
                y = keras.layers.Dropout(
                    dropout,
                    noise_shape=(y.shape),
                    name=f"{name}.drop" if name else None,
                )(y)
            y = keras.layers.Add(name=f"{name}.res" if name else None)([x, y])
        return y

    # END DEF
    return layer

unext_core

unext_core(x: keras.KerasTensor, params: UNextParams) -> keras.KerasTensor

Create UNext TF functional core

Parameters:

  • x (KerasTensor) –

    Input tensor

  • params (UNextParams) –

    Model parameters.

Returns:

  • KerasTensor

    keras.KerasTensor: Output tensor

Source code in neuralspot_edge/models/unext.py
def unext_core(
    x: keras.KerasTensor,
    params: UNextParams,
) -> keras.KerasTensor:
    """Create UNext TF functional core

    Builds the symmetric encoder/decoder trunk (no classification head):
    each encoder stage runs `depth` UNext blocks then downsamples with a
    strided conv; each decoder stage mirrors it with conv + upsampling,
    an optional additive skip connection from the matching encoder stage,
    and one final UNext block.

    Args:
        x (keras.KerasTensor): Input tensor
        params (UNextParams): Model parameters.

    Returns:
        keras.KerasTensor: Output tensor
    """

    y = x

    #### ENCODER ####
    # One entry per stage: the pre-downsample tensor when the stage requests
    # a skip connection, else None — keeps pop() aligned with the decoder,
    # which walks the stages in reverse.
    skip_layers: list[keras.layers.Layer | None] = []
    for i, block in enumerate(params.blocks):
        name = f"ENC{i+1}"
        for d in range(block.depth):
            y = unext_block(
                output_filters=block.filters,
                expand_ratio=block.expand_ratio,
                kernel_size=block.kernel,
                strides=1,
                se_ratio=block.se_ratio,
                dropout=block.dropout,
                norm=block.norm,
                name=f"{name}.D{d+1}",
            )(y)
        # END FOR
        skip_layers.append(y if block.skip else None)

        # Downsample using strided conv
        y = keras.layers.Conv2D(
            filters=block.filters,
            kernel_size=block.pool,
            strides=block.strides,
            padding="same",
            use_bias=block.norm is None,
            kernel_initializer="he_normal",
            kernel_regularizer=keras.regularizers.L2(1e-3),
            name=f"{name}.pool",
        )(y)
        if block.norm == "batch":
            y = keras.layers.BatchNormalization(
                name=f"{name}.norm",
            )(y)
        elif block.norm == "layer":
            # Normalize over the non-singleton spatial axes only.
            ln_axis = 2 if y.shape[1] == 1 else 1 if y.shape[2] == 1 else (1, 2)
            y = keras.layers.LayerNormalization(
                axis=ln_axis,
                name=f"{name}.norm",
            )(y)
        # END IF
    # END FOR

    #### DECODER ####
    # Walk the encoder stages in reverse; ddepth overrides depth when set.
    for i, block in enumerate(reversed(params.blocks)):
        name = f"DEC{i+1}"
        for d in range(block.ddepth or block.depth):
            y = unext_block(
                output_filters=block.filters,
                expand_ratio=block.expand_ratio,
                kernel_size=block.kernel,
                strides=1,
                se_ratio=block.se_ratio,
                dropout=block.dropout,
                norm=block.norm,
                name=f"{name}.D{d+1}",
            )(y)
        # END FOR

        # Upsample via conv followed by UpSampling2D (a transposed conv was
        # previously used here instead; this pair replaces it).
        y = keras.layers.Conv2D(
            filters=block.filters,
            kernel_size=block.pool,
            strides=1,
            padding="same",
            use_bias=block.norm is None,
            kernel_initializer="he_normal",
            kernel_regularizer=keras.regularizers.L2(1e-3),
            name=f"{name}.conv",
        )(y)
        y = keras.layers.UpSampling2D(size=block.strides, name=f"{name}.unpool")(y)

        # Skip connection: additive merge (a Concatenate was previously used
        # here), then a 1x1 conv + norm + ReLU6 to fuse the branches.
        skip_layer = skip_layers.pop()
        if skip_layer is not None:
            y = keras.layers.Add(name=f"{name}.S1.cat")([y, skip_layer])

            # Use conv to reduce filters
            y = keras.layers.Conv2D(
                block.filters,
                kernel_size=1,  # block.kernel,
                padding="same",
                kernel_initializer="he_normal",
                kernel_regularizer=keras.regularizers.L2(1e-3),
                use_bias=block.norm is None,
                name=f"{name}.S1.conv",
            )(y)

            if block.norm == "batch":
                y = keras.layers.BatchNormalization(
                    name=f"{name}.S1.norm",
                )(y)
            elif block.norm == "layer":
                ln_axis = 2 if y.shape[1] == 1 else 1 if y.shape[2] == 1 else (1, 2)
                y = keras.layers.LayerNormalization(
                    axis=ln_axis,
                    name=f"{name}.S1.norm",
                )(y)
            # END IF

            y = keras.layers.Activation(
                "relu6",
                name=f"{name}.S1.relu" if name else None,
            )(y)
        # END IF

        # One extra block per decoder stage, numbered after the depth blocks.
        y = unext_block(
            output_filters=block.filters,
            expand_ratio=block.expand_ratio,
            kernel_size=block.kernel,
            strides=1,
            se_ratio=block.se_ratio,
            dropout=block.dropout,
            norm=block.norm,
            name=f"{name}.D{block.depth+1}",
        )(y)

    # END FOR
    return y

unext_layer

unext_layer(inputs: keras.KerasTensor, params: UNextParams, num_classes: int | None = None) -> keras.KerasTensor

Create UNext TF functional model

Parameters:

  • inputs (KerasTensor) –

    Input tensor

  • params (UNextParams) –

    Model parameters.

  • num_classes (int, default: None ) –

    Number of classes.

Returns:

  • KerasTensor

    keras.KerasTensor: Output tensor

Source code in neuralspot_edge/models/unext.py
def unext_layer(
    inputs: keras.KerasTensor,
    params: UNextParams,
    num_classes: int | None = None,
) -> keras.KerasTensor:
    """Create UNext TF functional model

    Args:
        inputs (keras.KerasTensor): Input tensor
        params (UNextParams): Model parameters.
        num_classes (int, optional): Number of classes.

    Returns:
        keras.KerasTensor: Output tensor
    """
    # A rank-3 input gets a singleton leading spatial axis so the 2-D core
    # can process it; the axis is removed again before returning.
    requires_reshape = len(inputs.shape) == 3
    y = keras.layers.Reshape((1,) + inputs.shape[1:])(inputs) if requires_reshape else inputs

    y = unext_core(y, params)

    if params.include_top:
        # Add a per-point classification layer
        y = keras.layers.Conv2D(
            num_classes,
            kernel_size=params.output_kernel_size,
            padding="same",
            use_bias=True,
            kernel_initializer="he_normal",
            kernel_regularizer=keras.regularizers.L2(1e-3),
            name="NECK.conv",
        )(y)
        # Emit probabilities unless the caller wants raw logits
        if not params.use_logits:
            y = keras.layers.Softmax()(y)

    # Always reshape back to original shape
    if requires_reshape:
        y = keras.layers.Reshape(y.shape[2:])(y)

    return y