Skip to content

converter

LiteRT Converter API

This module handles converting models to LiteRT format.

Classes

LiteRTKerasConverter

LiteRTKerasConverter(model: keras.Model)

Converts Keras model to LiteRT model content.

Source code in helia_edge/converters/tflite/converter.py
def __init__(
    self,
    model: keras.Model,
):
    """Converts Keras model to TFLite model.

    Args:
        model (keras.Model): Keras model

    Example:

    ```python
    # Create simple dataset
    test_x = np.random.rand(1000, 64).astype(np.float32)
    test_y = np.random.randint(0, 10, 1000).astype(np.int32)

    # Create a dense model and train
    model = keras.Sequential([
        keras.layers.Dense(64, activation="relu", input_shape=(64,)),
        keras.layers.Dense(32, activation="relu"),
        keras.layers.Dense(10, activation="softmax"),
    ])
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
    model.fit(test_x, test_y, epochs=1, validation_split=0.2)

    import helia_edge as helia

    # Create converter and convert to TFLite w/ FP32 quantization
    converter = helia.converters.tflite.TfLiteKerasConverter(model=model)
    tflite_content = converter.convert(
        test_x,
        quantization=helia.converters.tflite.QuantizationType.FP32,
        io_type="float32"
    )
    y_pred_tfl = converter.predict(test_x)
    y_pred_tf = model.predict(test_x)
    print(np.allclose(y_pred_tf, y_pred_tfl, atol=1e-3))
    ```
    """
    self.model = model
    # Optional representative dataset callable, populated later for quantized conversion.
    self.representative_dataset = None
    self._converter: tf.lite.TFLiteConverter | None = None
    # FIX: convert() returns serialized flatbuffer *bytes* (and predict() feeds it
    # to the interpreter as model_content), so the annotation must be bytes, not str.
    self._tflite_content: bytes | None = None
    # Scratch directory for the intermediate TF export; removed when this
    # object (and the TemporaryDirectory it holds) is garbage-collected.
    self.tf_model_path = tempfile.TemporaryDirectory()

Functions

convert
convert(test_x: npt.NDArray | None = None, quantization: QuantizationType = QuantizationType.FP32, io_type: str | None = None, mode: ConversionType = ConversionType.KERAS, strict: bool = True, verbose: int = 2) -> bytes

Convert TF model into LiteRT model content.

Source code in helia_edge/converters/litert/converter.py
def convert(
    self,
    test_x: npt.NDArray | None = None,
    quantization: QuantizationType = QuantizationType.FP32,
    io_type: str | None = None,
    mode: ConversionType = ConversionType.KERAS,
    strict: bool = True,
    verbose: int = 2,
) -> bytes:
    """Convert TF model into LiteRT model content."""
    # Fail early if the LiteRT interpreter backend is unavailable.
    _load_litert_interpreter()
    # Coerce raw values (e.g. strings) into their enum types before validating.
    quantization = QuantizationType(quantization)
    mode = ConversionType(mode)
    # Integer quantization calibrates against real samples, so test_x is mandatory there.
    needs_calibration_data = quantization in (QuantizationType.INT8, QuantizationType.INT16X8)
    if needs_calibration_data and test_x is None:
        raise ValueError("LiteRT quantized conversion requires representative data passed via test_x.")
    return super().convert(
        test_x=test_x,
        quantization=quantization,
        io_type=io_type,
        mode=mode,
        strict=strict,
        verbose=verbose,
    )
predict
predict(x: npt.NDArray, input_name: str | None = None, output_name: str | None = None)

Run LiteRT inference for the converted model.

Source code in helia_edge/converters/litert/converter.py
def predict(
    self,
    x: npt.NDArray,
    input_name: str | None = None,
    output_name: str | None = None,
):
    """Run LiteRT inference for the converted model.

    Args:
        x (npt.NDArray): Input samples; cast to float32 and reshaped to match
            the model's input signature before inference.
        input_name (str | None): Signature input name. Defaults to the first
            input of the signature runner.
        output_name (str | None): Signature output name. Defaults to the first
            output of the signature runner.

    Returns:
        Model outputs concatenated across samples. For quantized models the
        outputs are dequantized back to float32.

    Raises:
        ValueError: If convert() has not been run yet.
    """
    if self._tflite_content is None:
        raise ValueError("No LiteRT content to predict. Run convert() first.")

    interpreter_cls = _load_litert_interpreter()

    # Copy so quantization scaling below never mutates the caller's array.
    inputs = x.copy().astype(np.float32)
    interpreter = interpreter_cls(model_content=self._tflite_content)
    interpreter.allocate_tensors()

    # Branch 1: model exported without named signatures — drive the raw
    # tensor API (set_tensor/invoke/get_tensor) one sample at a time.
    if len(interpreter.get_signature_list()) == 0:
        output_details = interpreter.get_output_details()[0]
        input_details = interpreter.get_input_details()[0]

        # Empty scales/zero_points means the tensor is not quantized.
        input_scale: list[float] = input_details["quantization_parameters"]["scales"]
        input_zero_point: list[int] = input_details["quantization_parameters"]["zero_points"]
        output_scale: list[float] = output_details["quantization_parameters"]["scales"]
        output_zero_point: list[int] = output_details["quantization_parameters"]["zero_points"]

        # Keep the FULL shape_signature (including its leading batch dim):
        # each per-sample slice then carries that batch dim, which is what
        # set_tensor expects for the whole input tensor.
        inputs = inputs.reshape([-1] + input_details["shape_signature"].tolist())
        if len(input_scale) and len(input_zero_point):
            # Quantize: real = (q - zero_point) * scale, solved for q.
            inputs = inputs / input_scale[0] + input_zero_point[0]
            inputs = inputs.astype(input_details["dtype"])

        outputs = []
        for sample in inputs:
            interpreter.set_tensor(input_details["index"], sample)
            interpreter.invoke()
            y = interpreter.get_tensor(output_details["index"])
            outputs.append(y)
        outputs = np.concatenate(outputs, axis=0)

        if len(output_scale) and len(output_zero_point):
            # Dequantize back to float32 for the caller.
            outputs = outputs.astype(np.float32)
            outputs = (outputs - output_zero_point[0]) * output_scale[0]

        return outputs

    # Branch 2: model has signatures — use the signature runner, which takes
    # named keyword tensors and handles tensor indices internally.
    model_sig = interpreter.get_signature_runner()
    inputs_details = model_sig.get_input_details()
    outputs_details = model_sig.get_output_details()
    # Default to the first declared input/output when names are not given.
    if input_name is None:
        input_name = list(inputs_details.keys())[0]
    if output_name is None:
        output_name = list(outputs_details.keys())[0]
    input_details = inputs_details[input_name]
    output_details = outputs_details[output_name]
    input_scale: list[float] = input_details["quantization_parameters"]["scales"]
    input_zero_point: list[int] = input_details["quantization_parameters"]["zero_points"]
    output_scale: list[float] = output_details["quantization_parameters"]["scales"]
    output_zero_point: list[int] = output_details["quantization_parameters"]["zero_points"]

    # Unlike branch 1, DROP the batch dim ([1:]) — the runner is fed
    # one-sample slices (inputs[i:i+1]) which re-add the batch axis.
    inputs = inputs.reshape([-1] + input_details["shape_signature"].tolist()[1:])
    if len(input_scale) and len(input_zero_point):
        inputs = inputs / input_scale[0] + input_zero_point[0]
        inputs = inputs.astype(input_details["dtype"])

    outputs = np.array(
        [model_sig(**{input_name: inputs[i : i + 1]})[output_name][0] for i in range(inputs.shape[0])],
        dtype=output_details["dtype"],
    )

    if len(output_scale) and len(output_zero_point):
        # Dequantize back to float32 for the caller.
        outputs = outputs.astype(np.float32)
        outputs = (outputs - output_zero_point[0]) * output_scale[0]

    return outputs