DPDK 23.03.0
rte_mldev.h File Reference
#include <rte_common.h>
#include <rte_log.h>
#include <rte_mempool.h>


Data Structures

struct  rte_ml_dev_info
 
struct  rte_ml_dev_config
 
struct  rte_ml_dev_qp_conf
 
struct  rte_ml_buff_seg
 
struct  rte_ml_op
 
struct  rte_ml_op_error
 
struct  rte_ml_dev_stats
 
struct  rte_ml_dev_xstats_map
 
struct  rte_ml_model_params
 
struct  rte_ml_io_shape
 
struct  rte_ml_io_info
 
struct  rte_ml_model_info
 

Macros

#define RTE_ML_STR_MAX   128
 

Typedefs

typedef void(* rte_ml_dev_stop_flush_t) (int16_t dev_id, uint16_t qp_id, struct rte_ml_op *op)
 

Enumerations

enum  rte_ml_op_status { RTE_ML_OP_STATUS_SUCCESS = 0, RTE_ML_OP_STATUS_NOT_PROCESSED, RTE_ML_OP_STATUS_ERROR }
 
enum  rte_ml_io_type {
  RTE_ML_IO_TYPE_UNKNOWN = 0, RTE_ML_IO_TYPE_INT8, RTE_ML_IO_TYPE_UINT8, RTE_ML_IO_TYPE_INT16,
  RTE_ML_IO_TYPE_UINT16, RTE_ML_IO_TYPE_INT32, RTE_ML_IO_TYPE_UINT32, RTE_ML_IO_TYPE_FP8,
  RTE_ML_IO_TYPE_FP16, RTE_ML_IO_TYPE_FP32, RTE_ML_IO_TYPE_BFLOAT16
}
 
enum  rte_ml_io_format {
  RTE_ML_IO_FORMAT_NCHW = 1, RTE_ML_IO_FORMAT_NHWC, RTE_ML_IO_FORMAT_CHWN, RTE_ML_IO_FORMAT_3D,
  RTE_ML_IO_FORMAT_2D, RTE_ML_IO_FORMAT_1D, RTE_ML_IO_FORMAT_SCALAR
}
 

Functions

__rte_experimental int rte_ml_dev_init (size_t dev_max)
 
__rte_experimental uint16_t rte_ml_dev_count (void)
 
__rte_experimental int rte_ml_dev_is_valid_dev (int16_t dev_id)
 
__rte_experimental int rte_ml_dev_socket_id (int16_t dev_id)
 
__rte_experimental int rte_ml_dev_info_get (int16_t dev_id, struct rte_ml_dev_info *dev_info)
 
__rte_experimental int rte_ml_dev_configure (int16_t dev_id, const struct rte_ml_dev_config *config)
 
__rte_experimental int rte_ml_dev_queue_pair_setup (int16_t dev_id, uint16_t queue_pair_id, const struct rte_ml_dev_qp_conf *qp_conf, int socket_id)
 
__rte_experimental int rte_ml_dev_start (int16_t dev_id)
 
__rte_experimental int rte_ml_dev_stop (int16_t dev_id)
 
__rte_experimental int rte_ml_dev_close (int16_t dev_id)
 
__rte_experimental uint16_t rte_ml_enqueue_burst (int16_t dev_id, uint16_t qp_id, struct rte_ml_op **ops, uint16_t nb_ops)
 
__rte_experimental uint16_t rte_ml_dequeue_burst (int16_t dev_id, uint16_t qp_id, struct rte_ml_op **ops, uint16_t nb_ops)
 
__rte_experimental int rte_ml_op_error_get (int16_t dev_id, struct rte_ml_op *op, struct rte_ml_op_error *error)
 
__rte_experimental int rte_ml_dev_stats_get (int16_t dev_id, struct rte_ml_dev_stats *stats)
 
__rte_experimental void rte_ml_dev_stats_reset (int16_t dev_id)
 
__rte_experimental int rte_ml_dev_xstats_names_get (int16_t dev_id, struct rte_ml_dev_xstats_map *xstats_map, uint32_t size)
 
__rte_experimental int rte_ml_dev_xstats_by_name_get (int16_t dev_id, const char *name, uint16_t *stat_id, uint64_t *value)
 
__rte_experimental int rte_ml_dev_xstats_get (int16_t dev_id, const uint16_t *stat_ids, uint64_t *values, uint16_t nb_ids)
 
__rte_experimental int rte_ml_dev_xstats_reset (int16_t dev_id, const uint16_t *stat_ids, uint16_t nb_ids)
 
__rte_experimental int rte_ml_dev_dump (int16_t dev_id, FILE *fd)
 
__rte_experimental int rte_ml_dev_selftest (int16_t dev_id)
 
__rte_experimental int rte_ml_model_load (int16_t dev_id, struct rte_ml_model_params *params, uint16_t *model_id)
 
__rte_experimental int rte_ml_model_unload (int16_t dev_id, uint16_t model_id)
 
__rte_experimental int rte_ml_model_start (int16_t dev_id, uint16_t model_id)
 
__rte_experimental int rte_ml_model_stop (int16_t dev_id, uint16_t model_id)
 
__rte_experimental int rte_ml_model_info_get (int16_t dev_id, uint16_t model_id, struct rte_ml_model_info *model_info)
 
__rte_experimental int rte_ml_model_params_update (int16_t dev_id, uint16_t model_id, void *buffer)
 
__rte_experimental int rte_ml_io_input_size_get (int16_t dev_id, uint16_t model_id, uint32_t nb_batches, uint64_t *input_qsize, uint64_t *input_dsize)
 
__rte_experimental int rte_ml_io_output_size_get (int16_t dev_id, uint16_t model_id, uint32_t nb_batches, uint64_t *output_qsize, uint64_t *output_dsize)
 
__rte_experimental int rte_ml_io_quantize (int16_t dev_id, uint16_t model_id, uint16_t nb_batches, void *dbuffer, void *qbuffer)
 
__rte_experimental int rte_ml_io_dequantize (int16_t dev_id, uint16_t model_id, uint16_t nb_batches, void *qbuffer, void *dbuffer)
 
__rte_experimental struct rte_mempool * rte_ml_op_pool_create (const char *name, unsigned int nb_elts, unsigned int cache_size, uint16_t user_size, int socket_id)
 
__rte_experimental void rte_ml_op_pool_free (struct rte_mempool *mempool)
 

Detailed Description

Warning
EXPERIMENTAL: All functions in this file may be changed or removed without prior notice.

ML (Machine Learning) device API.

The ML framework is built on the following model:

+-----------------+               rte_ml_[en|de]queue_burst()
|                 |                          |
|     Machine     o------+     +--------+    |
|     Learning    |      |     | queue  |    |    +------+
|     Inference   o------+-----o        |<===o===>|Core 0|
|     Engine      |      |     | pair 0 |         +------+
|                 o----+ |     +--------+
|                 |    | |
+-----------------+    | |     +--------+
         ^             | |     | queue  |         +------+
         |             | +-----o        |<=======>|Core 1|
         |             |       | pair 1 |         +------+
         |             |       +--------+
+--------+--------+    |
| +-------------+ |    |       +--------+
| |   Model 0   | |    |       | queue  |         +------+
| +-------------+ |    +-------o        |<=======>|Core N|
| +-------------+ |            | pair N |         +------+
| |   Model 1   | |            +--------+
| +-------------+ |
| +-------------+ |<------> rte_ml_model_load()
| |   Model ..  | |-------> rte_ml_model_info_get()
| +-------------+ |<------- rte_ml_model_start()
| +-------------+ |<------- rte_ml_model_stop()
| |   Model N   | |<------- rte_ml_model_params_update()
| +-------------+ |<------- rte_ml_model_unload()
+-----------------+

ML Device: A hardware or software-based implementation of ML device API for running inferences using a pre-trained ML model.

ML Model: An ML model is an algorithm trained over a dataset. A model consists of the procedure/algorithm and the data/pattern required to make predictions on live data. Once the model is created and trained outside of the DPDK scope, it can be loaded via rte_ml_model_load() and started using the rte_ml_model_start() API. rte_ml_model_params_update() can be used to update model parameters such as weights and bias without unloading the model via rte_ml_model_unload().

ML Inference: ML inference is the process of feeding data to the model via the rte_ml_enqueue_burst() API and retrieving the computed outputs/predictions from the started model via the rte_ml_dequeue_burst() API.

In all functions of the ML device API, the ML device is designated by an integer >= 0 called the device identifier dev_id.

The functions exported by the ML device API to set up a device, designated by its device identifier, must be invoked in the following order:

 - rte_ml_dev_configure()
 - rte_ml_dev_queue_pair_setup()
 - rte_ml_dev_start()

A model is required to run inference operations with user-specified inputs. The application must invoke the ML model APIs in the following order before enqueueing inference jobs:

 - rte_ml_model_load()
 - rte_ml_model_start()

A model can be loaded on a device only after the device has been configured and can be started or stopped only after a device has been started.

The rte_ml_model_info_get() API is provided to retrieve information related to the model. The information includes the shape and type of the input and output required for the inference.

Data quantization and dequantization are among the main aspects of the ML domain. This involves conversion of input data from a higher precision to a lower precision data type, and vice versa for the output. APIs are provided for quantization through rte_ml_io_quantize() and dequantization through rte_ml_io_dequantize(). These APIs can handle input and output buffers holding data for multiple batches.

Two utility APIs, rte_ml_io_input_size_get() and rte_ml_io_output_size_get(), can be used to get the size of quantized and dequantized multi-batch input and output buffers.
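
As an illustration, a minimal sketch of preparing a quantized input buffer for a single batch, combining both APIs (assuming dev_id identifies a configured device and model_id a loaded model; error handling and input generation are trimmed):

#include <rte_malloc.h>
#include <rte_mldev.h>

static void *
prepare_quantized_input(int16_t dev_id, uint16_t model_id)
{
	uint64_t qsize, dsize;
	void *dbuf, *qbuf;

	/* Buffer sizes for one batch, including PMD alignment requirements. */
	if (rte_ml_io_input_size_get(dev_id, model_id, 1, &qsize, &dsize) != 0)
		return NULL;

	dbuf = rte_malloc(NULL, dsize, 0);
	qbuf = rte_malloc(NULL, qsize, 0);
	if (dbuf == NULL || qbuf == NULL)
		return NULL;

	/* ... fill dbuf with dequantized (e.g. float) input data ... */

	if (rte_ml_io_quantize(dev_id, model_id, 1, dbuf, qbuf) != 0)
		return NULL;
	return qbuf;
}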

The user can optionally update the model parameters with rte_ml_model_params_update() after invoking the rte_ml_model_stop() API on a given model ID.

The application can invoke, in any order, the functions exported by the ML API to enqueue inference jobs and dequeue inference responses.

If the application wants to change the device configuration (i.e., call rte_ml_dev_configure() or rte_ml_dev_queue_pair_setup()), then it must stop the device using the rte_ml_dev_stop() API. Likewise, if model parameters need to be updated, the application must call rte_ml_model_stop() followed by rte_ml_model_params_update() for the given model. The application does not need to call rte_ml_dev_stop() for any model re-configuration, such as rte_ml_model_params_update(), rte_ml_model_unload(), etc.

Once the device is in the started state after invoking the rte_ml_dev_start() API, and the model is in the started state after invoking the rte_ml_model_start() API, the application can call rte_ml_enqueue_burst() and rte_ml_dequeue_burst() on the designated device and model ID.

Finally, an application can close an ML device by invoking the rte_ml_dev_close() function.

Typical application utilisation of the ML API will follow the programming flow below.

 - rte_ml_dev_configure()
 - rte_ml_dev_queue_pair_setup()
 - rte_ml_model_load()
 - rte_ml_model_start()
 - rte_ml_model_info_get()
 - rte_ml_dev_start()
 - rte_ml_enqueue_burst()
 - rte_ml_dequeue_burst()
 - rte_ml_model_stop()
 - rte_ml_model_unload()
 - rte_ml_dev_stop()
 - rte_ml_dev_close()
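
A condensed sketch of this flow for one device, one queue pair and one model (error handling omitted; the zeroed configuration structures are placeholders the application must fill in):

#include <rte_mldev.h>

static void
ml_run(int16_t dev_id)
{
	struct rte_ml_dev_config dev_conf = { 0 };   /* nb_queue_pairs, nb_models, ... */
	struct rte_ml_dev_qp_conf qp_conf = { 0 };   /* queue pair depth, ... */
	struct rte_ml_model_params params = { 0 };   /* model buffer address and size */
	uint16_t model_id;

	rte_ml_dev_configure(dev_id, &dev_conf);
	rte_ml_dev_queue_pair_setup(dev_id, 0, &qp_conf, SOCKET_ID_ANY);
	rte_ml_model_load(dev_id, &params, &model_id);
	rte_ml_model_start(dev_id, model_id);
	rte_ml_dev_start(dev_id);

	/* ... rte_ml_enqueue_burst() / rte_ml_dequeue_burst() on queue pair 0 ... */

	rte_ml_model_stop(dev_id, model_id);
	rte_ml_model_unload(dev_id, model_id);
	rte_ml_dev_stop(dev_id);
	rte_ml_dev_close(dev_id);
}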

Regarding multi-threading, by default all functions of the ML device API exported by a PMD are lock-free and are assumed not to be invoked in parallel on different logical cores on the same target object. For instance, the dequeue function of a poll mode driver cannot be invoked in parallel on two logical cores to operate on the same queue pair. Of course, this function can be invoked in parallel by different logical cores on different queue pairs. It is the responsibility of the user application to enforce this rule.

Definition in file rte_mldev.h.

Macro Definition Documentation

◆ RTE_ML_STR_MAX

#define RTE_ML_STR_MAX   128

Maximum length of name string

Definition at line 151 of file rte_mldev.h.

Typedef Documentation

◆ rte_ml_dev_stop_flush_t

typedef void(* rte_ml_dev_stop_flush_t) (int16_t dev_id, uint16_t qp_id, struct rte_ml_op *op)

Callback function called during rte_ml_dev_stop(), invoked once per flushed ML op

Definition at line 299 of file rte_mldev.h.
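
A minimal sketch of such a callback, which returns each flushed op to the mempool it was allocated from (assuming ops are allocated from a pool created with rte_ml_op_pool_create(); how the callback is registered with the device is configuration specific and not shown):

#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_mldev.h>

static void
ml_op_flush_cb(int16_t dev_id, uint16_t qp_id, struct rte_ml_op *op)
{
	RTE_SET_USED(dev_id);
	RTE_SET_USED(qp_id);
	/* Return the unprocessed op to its pool so it can be reused. */
	rte_mempool_put(rte_mempool_from_obj(op), op);
}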

Enumeration Type Documentation

◆ rte_ml_op_status

Status of ML operation

Enumerator
RTE_ML_OP_STATUS_SUCCESS 

Operation completed successfully

RTE_ML_OP_STATUS_NOT_PROCESSED 

Operation has not yet been processed by the device.

RTE_ML_OP_STATUS_ERROR 

Operation completed with error. Application can invoke rte_ml_op_error_get() to get PMD specific error code if needed.

Definition at line 388 of file rte_mldev.h.

◆ rte_ml_io_type

Input and output data types. ML models can operate on reduced precision datatypes to achieve better power efficiency, lower network latency and lower memory footprint. This enum is used to represent the lower precision integer and floating point types used by ML models.

Enumerator
RTE_ML_IO_TYPE_UNKNOWN 

Invalid or unknown type

RTE_ML_IO_TYPE_INT8 

8-bit integer

RTE_ML_IO_TYPE_UINT8 

8-bit unsigned integer

RTE_ML_IO_TYPE_INT16 

16-bit integer

RTE_ML_IO_TYPE_UINT16 

16-bit unsigned integer

RTE_ML_IO_TYPE_INT32 

32-bit integer

RTE_ML_IO_TYPE_UINT32 

32-bit unsigned integer

RTE_ML_IO_TYPE_FP8 

8-bit floating point number

RTE_ML_IO_TYPE_FP16 

IEEE 754 16-bit floating point number

RTE_ML_IO_TYPE_FP32 

IEEE 754 32-bit floating point number

RTE_ML_IO_TYPE_BFLOAT16 

16-bit brain floating point number.

Definition at line 819 of file rte_mldev.h.

◆ rte_ml_io_format

Input and output format. This is used to represent the encoding type of multi-dimensional data used by ML models.

Enumerator
RTE_ML_IO_FORMAT_NCHW 

Batch size (N) x channels (C) x height (H) x width (W)

RTE_ML_IO_FORMAT_NHWC 

Batch size (N) x height (H) x width (W) x channels (C)

RTE_ML_IO_FORMAT_CHWN 

Channels (C) x height (H) x width (W) x batch size (N)

RTE_ML_IO_FORMAT_3D 

Format to represent 3-dimensional data

RTE_ML_IO_FORMAT_2D 

Format to represent matrix data

RTE_ML_IO_FORMAT_1D 

Format to represent vector data

RTE_ML_IO_FORMAT_SCALAR 

Format to represent scalar data

Definition at line 848 of file rte_mldev.h.

Function Documentation

◆ rte_ml_dev_init()

__rte_experimental int rte_ml_dev_init ( size_t  dev_max)

Initialize the device array before probing devices. If not called, the first device probed initializes the array to a size of RTE_MLDEV_DEFAULT_MAX.

Parameters
dev_max: Maximum number of devices.
Returns
0 on success, -rte_errno otherwise:
  • ENOMEM if out of memory
  • EINVAL if dev_max is 0
  • EBUSY if already initialized

◆ rte_ml_dev_count()

__rte_experimental uint16_t rte_ml_dev_count ( void  )

Get the total number of ML devices that have been successfully initialised.

Returns
  • The total number of usable ML devices.

◆ rte_ml_dev_is_valid_dev()

__rte_experimental int rte_ml_dev_is_valid_dev ( int16_t  dev_id)

Check if the device is in ready state.

Parameters
dev_id: The identifier of the device.
Returns
  • 0 if the device is not in the ready state.
  • 1 if the device is in the ready state.

◆ rte_ml_dev_socket_id()

__rte_experimental int rte_ml_dev_socket_id ( int16_t  dev_id)

Return the NUMA socket to which a device is connected.

Parameters
dev_id: The identifier of the device.
Returns
  • The NUMA socket id to which the device is connected.
  • 0 if the socket could not be determined.
  • -EINVAL if the dev_id value is not valid.

◆ rte_ml_dev_info_get()

__rte_experimental int rte_ml_dev_info_get ( int16_t  dev_id,
struct rte_ml_dev_info *  dev_info 
)

Retrieve the information of the device.

Parameters
dev_id: The identifier of the device.
dev_info: A pointer to a structure of type rte_ml_dev_info to be filled with the info of the device.
Returns
  • 0: Success, driver updates the information of the ML device
  • < 0: Error code returned by the driver info get function.

◆ rte_ml_dev_configure()

__rte_experimental int rte_ml_dev_configure ( int16_t  dev_id,
const struct rte_ml_dev_config *  config 
)

Configure an ML device.

This function must be invoked first before any other function in the API.

An ML device can be re-configured when in a stopped state. The device cannot be re-configured after rte_ml_dev_close() is called.

The caller may use rte_ml_dev_info_get() to get the capabilities of the resources available for this ML device.

Parameters
dev_id: The identifier of the device to configure.
config: The ML device configuration structure.
Returns
  • 0: Success, device configured.
  • < 0: Error code returned by the driver configuration function.

◆ rte_ml_dev_queue_pair_setup()

__rte_experimental int rte_ml_dev_queue_pair_setup ( int16_t  dev_id,
uint16_t  queue_pair_id,
const struct rte_ml_dev_qp_conf *  qp_conf,
int  socket_id 
)

Set up a queue pair for a device. This should only be called when the device is stopped.

Parameters
dev_id: The identifier of the device.
queue_pair_id: The index of the queue pair to set up. The value must be in the range [0, nb_queue_pairs - 1] previously supplied to rte_ml_dev_configure().
qp_conf: The pointer to the configuration data to be used for the queue pair.
socket_id: The socket identifier in case of NUMA. The value can be SOCKET_ID_ANY if there is no NUMA constraint for the memory allocated for the queue pair.
Returns
  • 0: Success, queue pair correctly set up.
  • < 0: Queue pair configuration failed.

◆ rte_ml_dev_start()

__rte_experimental int rte_ml_dev_start ( int16_t  dev_id)

Start an ML device.

The device start step consists of setting the configured features and enabling the ML device to accept inference jobs.

Parameters
dev_id: The identifier of the device.
Returns
  • 0: Success, device started.
  • <0: Error code of the driver device start function.

◆ rte_ml_dev_stop()

__rte_experimental int rte_ml_dev_stop ( int16_t  dev_id)

Stop an ML device. A stopped device cannot accept inference jobs. The device can be restarted with a call to rte_ml_dev_start().

Parameters
dev_id: The identifier of the device.
Returns
  • 0: Success, device stopped.
  • <0: Error code of the driver device stop function.

◆ rte_ml_dev_close()

__rte_experimental int rte_ml_dev_close ( int16_t  dev_id)

Close an ML device. The device cannot be restarted!

Parameters
dev_id: The identifier of the device.
Returns
  • 0 on successfully closing device.
  • <0 on failure to close device.

◆ rte_ml_enqueue_burst()

__rte_experimental uint16_t rte_ml_enqueue_burst ( int16_t  dev_id,
uint16_t  qp_id,
struct rte_ml_op **  ops,
uint16_t  nb_ops 
)

Enqueue a burst of ML inferences for processing on an ML device.

The rte_ml_enqueue_burst() function is invoked to place ML inference operations on the queue qp_id of the device designated by its dev_id.

The nb_ops parameter is the number of inferences to process which are supplied in the ops array of rte_ml_op structures.

The rte_ml_enqueue_burst() function returns the number of inferences it actually enqueued for processing. A return value equal to nb_ops means that all operations have been enqueued.

Parameters
dev_id: The identifier of the device.
qp_id: The index of the queue pair on which inferences are to be enqueued for processing. The value must be in the range [0, nb_queue_pairs - 1] previously supplied to rte_ml_dev_configure().
ops: The address of an array of nb_ops pointers to rte_ml_op structures which contain the ML inferences to be processed.
nb_ops: The number of operations to process.
Returns
The number of inference operations actually enqueued to the ML device. The return value can be less than the value of the nb_ops parameter when the ML device queue is full or if invalid parameters are specified in a rte_ml_op.
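
For example, an application that must submit all nb prepared inferences can retry on partial enqueue (a sketch; ops is assumed to be an array of nb pointers to filled rte_ml_op structures on a started device):

uint16_t sent = 0;

/* Busy-retry until the queue pair accepts every operation. */
while (sent < nb)
	sent += rte_ml_enqueue_burst(dev_id, qp_id, &ops[sent], nb - sent);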

◆ rte_ml_dequeue_burst()

__rte_experimental uint16_t rte_ml_dequeue_burst ( int16_t  dev_id,
uint16_t  qp_id,
struct rte_ml_op **  ops,
uint16_t  nb_ops 
)

Dequeue a burst of processed ML inference operations from a queue on the ML device. The dequeued operations are stored in rte_ml_op structures whose pointers are supplied in the ops array.

The rte_ml_dequeue_burst() function returns the number of inferences actually dequeued, which is the number of rte_ml_op data structures effectively supplied into the ops array.

A return value equal to nb_ops indicates that the queue contained at least nb_ops operations, and this is likely to signify that other processed operations remain in the device's output queue. An application implementing a "retrieve as many processed operations as possible" policy can check for this specific case and keep invoking rte_ml_dequeue_burst() until a value less than nb_ops is returned.

The rte_ml_dequeue_burst() function does not provide any error notification to avoid the corresponding overhead.

Parameters
dev_id: The identifier of the device.
qp_id: The index of the queue pair from which to retrieve processed operations. The value must be in the range [0, nb_queue_pairs - 1] previously supplied to rte_ml_dev_configure().
ops: The address of an array of pointers to rte_ml_op structures that must be large enough to store nb_ops pointers in it.
nb_ops: The maximum number of inferences to dequeue.
Returns
The number of operations actually dequeued, which is the number of pointers to rte_ml_op structures effectively supplied to the ops array.
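
A sketch of the "retrieve as many processed operations as possible" policy described above (BURST_SZ, deq_ops and process_op() are hypothetical application-side names):

uint16_t i, n;

do {
	n = rte_ml_dequeue_burst(dev_id, qp_id, deq_ops, BURST_SZ);
	for (i = 0; i < n; i++)
		process_op(deq_ops[i]);
	/* A full burst suggests more completions are still queued. */
} while (n == BURST_SZ);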

◆ rte_ml_op_error_get()

__rte_experimental int rte_ml_op_error_get ( int16_t  dev_id,
struct rte_ml_op *  op,
struct rte_ml_op_error *  error 
)

Get PMD specific error information for an ML op.

When an ML operation completes with RTE_ML_OP_STATUS_ERROR as its status, this API can be invoked to get PMD specific error details.

Parameters
[in] dev_id: Device identifier
[in] op: Handle of ML operation
[out] error: Address of structure rte_ml_op_error to be filled
Returns
  • Returns 0 on success
  • Returns negative value on failure
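
For instance, after dequeue an application can check each op's status field and fetch the PMD specific details for failed ones (a sketch; handle_error() is a hypothetical application handler):

if (op->status == RTE_ML_OP_STATUS_ERROR) {
	struct rte_ml_op_error error;

	/* Fetch the PMD specific error code and message for this op. */
	if (rte_ml_op_error_get(dev_id, op, &error) == 0)
		handle_error(&error);
}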

◆ rte_ml_dev_stats_get()

__rte_experimental int rte_ml_dev_stats_get ( int16_t  dev_id,
struct rte_ml_dev_stats *  stats 
)

Retrieve the general I/O statistics of a device.

Parameters
dev_id: The identifier of the device.
stats: Pointer to a structure where statistics will be copied. On error, this location may or may not have been modified.
Returns
  • 0 on success
  • -EINVAL: If invalid parameter pointer is provided.

◆ rte_ml_dev_stats_reset()

__rte_experimental void rte_ml_dev_stats_reset ( int16_t  dev_id)

Reset the statistics of a device.

Parameters
dev_id: The identifier of the device.

◆ rte_ml_dev_xstats_names_get()

__rte_experimental int rte_ml_dev_xstats_names_get ( int16_t  dev_id,
struct rte_ml_dev_xstats_map *  xstats_map,
uint32_t  size 
)

Retrieve names of extended statistics of an ML device.

Parameters
dev_id: The identifier of the device.
[out] xstats_map: Block of memory to insert ids and names into. Must be at least size in capacity. If set to NULL, the function returns the required capacity.
size: Capacity of xstats_map (number of name-id maps).
Returns
  • Positive value on success:
    • The return value is the number of entries filled in the stats map.
    • If xstats_map is set to NULL, the required capacity for xstats_map is returned.
  • Negative value on error:
    • -ENODEV: for invalid dev_id.
    • -ENOTSUP: if the device doesn't support this function.
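
The NULL/size convention supports the usual two-call pattern (a sketch, using the C library calloc() for brevity; it assumes the id and name fields of struct rte_ml_dev_xstats_map):

int n = rte_ml_dev_xstats_names_get(dev_id, NULL, 0);

if (n > 0) {
	struct rte_ml_dev_xstats_map *map = calloc(n, sizeof(*map));

	if (map != NULL && rte_ml_dev_xstats_names_get(dev_id, map, n) > 0) {
		/* map[i].id and map[i].name now describe each xstat. */
	}
}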

◆ rte_ml_dev_xstats_by_name_get()

__rte_experimental int rte_ml_dev_xstats_by_name_get ( int16_t  dev_id,
const char *  name,
uint16_t *  stat_id,
uint64_t *  value 
)

Retrieve the value of a single stat by requesting it by name.

Parameters
dev_id: The identifier of the device.
name: The stat name to retrieve.
stat_id: If non-NULL, the numerical id of the stat will be returned, so that further requests for the stat can be made using rte_ml_dev_xstats_get(), which will be faster as it doesn't need to scan a list of names for the stat.
[out] value: Must be non-NULL; the retrieved xstat value will be stored at this address.
Returns
  • 0: Successfully retrieved xstat value.
  • -EINVAL: invalid parameters.
  • -ENOTSUP: if not supported.
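
For example, an application can resolve a stat's id once by name and then poll it via the faster rte_ml_dev_xstats_get() path (a sketch; the xstat name "nb_ops_completed" is hypothetical and PMD-dependent):

uint16_t stat_id;
uint64_t value;

if (rte_ml_dev_xstats_by_name_get(dev_id, "nb_ops_completed", &stat_id, &value) == 0) {
	/* Later, in the fast path, poll by id instead of by name. */
	rte_ml_dev_xstats_get(dev_id, &stat_id, &value, 1);
}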

◆ rte_ml_dev_xstats_get()

__rte_experimental int rte_ml_dev_xstats_get ( int16_t  dev_id,
const uint16_t *  stat_ids,
uint64_t *  values,
uint16_t  nb_ids 
)

Retrieve extended statistics of an ML device.

Parameters
dev_id: The identifier of the device.
stat_ids: The id numbers of the stats to get. The ids can be taken from the stat position in the stat list from rte_ml_dev_xstats_names_get(), or obtained using rte_ml_dev_xstats_by_name_get().
values: The values for each stat requested, by ID.
nb_ids: The number of stats requested.
Returns
  • Positive value: number of stat entries filled into the values array
  • Negative value on error:
    • -ENODEV: for invalid dev_id.
    • -ENOTSUP: if the device doesn't support this function.

◆ rte_ml_dev_xstats_reset()

__rte_experimental int rte_ml_dev_xstats_reset ( int16_t  dev_id,
const uint16_t *  stat_ids,
uint16_t  nb_ids 
)

Reset the values of the xstats of the selected component in the device.

Parameters
dev_id: The identifier of the device.
stat_ids: Selects specific statistics to be reset. When NULL, all statistics will be reset. If non-NULL, must point to an array of at least nb_ids size.
nb_ids: The number of ids available in the stat_ids array. Ignored when stat_ids is NULL.
Returns
  • 0: Successfully reset the statistics to zero.
  • -EINVAL: invalid parameters.
  • -ENOTSUP: if not supported.

◆ rte_ml_dev_dump()

__rte_experimental int rte_ml_dev_dump ( int16_t  dev_id,
FILE *  fd 
)

Dump internal information about dev_id to the FILE* provided in fd.

Parameters
dev_id: The identifier of the device.
fd: A pointer to a file for output.
Returns
  • 0: on success.
  • <0: on failure.

◆ rte_ml_dev_selftest()

__rte_experimental int rte_ml_dev_selftest ( int16_t  dev_id)

Trigger the ML device self test.

Parameters
dev_id: The identifier of the device.
Returns
  • 0: Selftest successful.
  • -ENOTSUP: if the device doesn't support selftest.
  • other values < 0 on failure.

◆ rte_ml_model_load()

__rte_experimental int rte_ml_model_load ( int16_t  dev_id,
struct rte_ml_model_params *  params,
uint16_t *  model_id 
)

Load an ML model to the device.

Load an ML model to the device with parameters requested in the structure rte_ml_model_params.

Parameters
[in] dev_id: The identifier of the device.
[in] params: Parameters for the model to be loaded.
[out] model_id: Identifier of the model loaded.
Returns
  • 0: Success, Model loaded.
  • < 0: Failure, Error code of the model load driver function.

◆ rte_ml_model_unload()

__rte_experimental int rte_ml_model_unload ( int16_t  dev_id,
uint16_t  model_id 
)

Unload an ML model from the device.

Parameters
[in] dev_id: The identifier of the device.
[in] model_id: Identifier of the model to be unloaded.
Returns
  • 0: Success, Model unloaded.
  • < 0: Failure, Error code of the model unload driver function.

◆ rte_ml_model_start()

__rte_experimental int rte_ml_model_start ( int16_t  dev_id,
uint16_t  model_id 
)

Start an ML model for the given device ID.

Start an ML model to accept inference requests.

Parameters
[in] dev_id: The identifier of the device.
[in] model_id: Identifier of the model to be started.
Returns
  • 0: Success, Model started.
  • < 0: Failure, Error code of the model start driver function.

◆ rte_ml_model_stop()

__rte_experimental int rte_ml_model_stop ( int16_t  dev_id,
uint16_t  model_id 
)

Stop an ML model for the given device ID.

Model stop would disable the ML model to be used for inference jobs. All inference jobs must have been completed before model stop is attempted.

Parameters
[in] dev_id: The identifier of the device.
[in] model_id: Identifier of the model to be stopped.
Returns
  • 0: Success, Model stopped.
  • < 0: Failure, Error code of the model stop driver function.

◆ rte_ml_model_info_get()

__rte_experimental int rte_ml_model_info_get ( int16_t  dev_id,
uint16_t  model_id,
struct rte_ml_model_info *  model_info 
)

Get ML model information.

Parameters
[in] dev_id: The identifier of the device.
[in] model_id: Identifier for the model created
[out] model_info: Pointer to a model info structure
Returns
  • Returns 0 on success
  • Returns negative value on failure

◆ rte_ml_model_params_update()

__rte_experimental int rte_ml_model_params_update ( int16_t  dev_id,
uint16_t  model_id,
void *  buffer 
)

Update the model parameters without unloading the model.

Update model parameters such as weights and bias without unloading the model. rte_ml_model_stop() must be called before invoking this API.

Parameters
[in] dev_id: The identifier of the device.
[in] model_id: Identifier for the model created
[in] buffer: Pointer to the model weights and bias buffer. Size of the buffer is equal to wb_size returned in rte_ml_model_info.
Returns
  • Returns 0 on success
  • Returns negative value on failure
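
A sketch of the required stop/update/start sequence (new_params is assumed to be a buffer of wb_size bytes, as reported in rte_ml_model_info; error handling omitted):

/* The model must be stopped before its parameters can be updated. */
rte_ml_model_stop(dev_id, model_id);
rte_ml_model_params_update(dev_id, model_id, new_params);
rte_ml_model_start(dev_id, model_id);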

◆ rte_ml_io_input_size_get()

__rte_experimental int rte_ml_io_input_size_get ( int16_t  dev_id,
uint16_t  model_id,
uint32_t  nb_batches,
uint64_t *  input_qsize,
uint64_t *  input_dsize 
)

Get size of quantized and dequantized input buffers.

Calculate the size of buffers required for quantized and dequantized input data. This API returns the buffer sizes for the number of batches provided, taking into account the alignment requirements of the PMD. Input sizes computed by this API can be used by the application to allocate buffers.

Parameters
[in] dev_id: The identifier of the device.
[in] model_id: Identifier for the model created
[in] nb_batches: Number of batches of input to be processed in a single inference job
[out] input_qsize: Quantized input size pointer. NULL value is allowed, in which case input_qsize is not calculated by the driver.
[out] input_dsize: Dequantized input size pointer. NULL value is allowed, in which case input_dsize is not calculated by the driver.
Returns
  • Returns 0 on success
  • Returns negative value on failure

◆ rte_ml_io_output_size_get()

__rte_experimental int rte_ml_io_output_size_get ( int16_t  dev_id,
uint16_t  model_id,
uint32_t  nb_batches,
uint64_t *  output_qsize,
uint64_t *  output_dsize 
)

Get size of quantized and dequantized output buffers.

Calculate the size of buffers required for quantized and dequantized output data. This API returns the buffer sizes for the number of batches provided, taking into account the alignment requirements of the PMD. Output sizes computed by this API can be used by the application to allocate buffers.

Parameters
[in] dev_id: The identifier of the device.
[in] model_id: Identifier for the model created
[in] nb_batches: Number of batches of input to be processed in a single inference job
[out] output_qsize: Quantized output size pointer. NULL value is allowed, in which case output_qsize is not calculated by the driver.
[out] output_dsize: Dequantized output size pointer. NULL value is allowed, in which case output_dsize is not calculated by the driver.
Returns
  • Returns 0 on success
  • Returns negative value on failure

◆ rte_ml_io_quantize()

__rte_experimental int rte_ml_io_quantize ( int16_t  dev_id,
uint16_t  model_id,
uint16_t  nb_batches,
void *  dbuffer,
void *  qbuffer 
)

Quantize input data.

Quantization converts data from a higher precision type to a lower precision type to improve the throughput and efficiency of model execution, with minimal loss of accuracy. The types of dequantized and quantized data are specified by the model.

Parameters
[in] dev_id: The identifier of the device.
[in] model_id: Identifier for the model
[in] nb_batches: Number of batches in the dequantized input buffer
[in] dbuffer: Address of dequantized input data
[out] qbuffer: Address of quantized input data
Returns
  • Returns 0 on success
  • Returns negative value on failure

◆ rte_ml_io_dequantize()

__rte_experimental int rte_ml_io_dequantize ( int16_t  dev_id,
uint16_t  model_id,
uint16_t  nb_batches,
void *  qbuffer,
void *  dbuffer 
)

Dequantize output data.

Dequantization converts data from a lower precision type to a higher precision type. The types of quantized and dequantized data are specified by the model.

Parameters
[in] dev_id: The identifier of the device.
[in] model_id: Identifier for the model
[in] nb_batches: Number of batches in the dequantized output buffer
[in] qbuffer: Address of quantized output data
[out] dbuffer: Address of dequantized output data
Returns
  • Returns 0 on success
  • Returns negative value on failure

◆ rte_ml_op_pool_create()

__rte_experimental struct rte_mempool* rte_ml_op_pool_create ( const char *  name,
unsigned int  nb_elts,
unsigned int  cache_size,
uint16_t  user_size,
int  socket_id 
)

Create an ML operation pool

Parameters
name: ML operations pool name
nb_elts: Number of elements in the pool
cache_size: Number of elements to cache on an lcore; see rte_mempool_create() for further details about cache size
user_size: Size of private data to allocate for the user with each operation
socket_id: Socket identifier to allocate memory on
Returns
  • On success pointer to mempool
  • On failure NULL
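
For example (a sketch; the pool name and sizes are illustrative):

/* 4096 ops, a 128-element per-lcore cache and no per-op user area. */
struct rte_mempool *op_pool = rte_ml_op_pool_create("ml_op_pool", 4096,
						    128, 0, SOCKET_ID_ANY);

if (op_pool == NULL)
	return -1;   /* handle allocation failure */

/* ... allocate ops with rte_mempool_get(), enqueue, dequeue ... */

rte_ml_op_pool_free(op_pool);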

◆ rte_ml_op_pool_free()

__rte_experimental void rte_ml_op_pool_free ( struct rte_mempool *  mempool)

Free an ML operation pool

Parameters
mempool: A pointer to the mempool structure. If NULL, the function does nothing.