C++ API minimal examples
Video processing
This example shows how to initialize the `VideoIntelligence` class, process video frames, and retrieve object detection results.
#include <cstdint>
#include <cstdio>
#include <vector>

#include "plumerai/video_intelligence.h"

int main() {
  // Settings, to be changed as needed
  constexpr int width = 1600;   // camera image width in pixels
  constexpr int height = 1200;  // camera image height in pixels
  constexpr auto image_format = plumerai::ImageFormat::PACKED_RGB888;

  // Initialize the video intelligence algorithm
  auto pvi = plumerai::VideoIntelligence(height, width);

  // Loop over frames in a video stream (example: 10 frames)
  for (int t = 0; t < 10; ++t) {
    // Some example input here, normally this is where camera data is acquired
    auto image = std::vector<std::uint8_t>(height * width * 3);  // 3 for RGB

    // The time between two video frames in seconds. In this example we assume
    // a constant frame rate of 30 fps, but variable rates are supported.
    const float delta_t = 1.f / 30.f;

    // Process the frame
    auto error_code = pvi.process_frame(
        plumerai::ImagePointer<image_format>(image.data()), delta_t);
    if (error_code != plumerai::ErrorCode::SUCCESS) {
      printf("Error: %s\n", plumerai::error_code_string(error_code));
      return 1;
    }

    std::vector<BoxPrediction> predictions;
    error_code = pvi.object_detection().get_detections(predictions);
    if (error_code != plumerai::ErrorCode::SUCCESS) {
      printf("Error: %s\n", plumerai::error_code_string(error_code));
      return 1;
    }

    // Display the results to stdout
    for (auto &p : predictions) {
      printf(
          "Box #%d of class %d @ (x,y) -> (%.2f,%.2f) till (%.2f,%.2f)\n",
          p.id, p.class_id, p.x_min, p.y_min, p.x_max, p.y_max);
    }
  }
  return 0;
}
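In the example above the image buffer is left empty for brevity; in a real application this is where the camera frame would be copied in. As a minimal sketch of what that could look like, the helper below reads one packed RGB888 frame (height * width * 3 bytes, matching the layout used above) from a raw file. The file name `frames.rgb` and the assumption that frames are stored back-to-back with no header are illustrative only.

#include <cstdint>
#include <fstream>
#include <vector>

// Reads the next packed RGB888 frame (width * height * 3 bytes) from an
// already opened raw stream into `image`. Returns false once no complete
// frame is left in the stream. The raw-file layout is an assumption made
// for this sketch only.
bool read_rgb_frame(std::ifstream &stream, std::vector<std::uint8_t> &image) {
  stream.read(reinterpret_cast<char *>(image.data()),
              static_cast<std::streamsize>(image.size()));
  return static_cast<std::size_t>(stream.gcount()) == image.size();
}

With a `std::ifstream stream("frames.rgb", std::ios::binary);` opened before the loop, a call to `read_rgb_frame(stream, image)` would then fill `image` before `process_frame` is called.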
Automatic face enrollment
This example extends the code above and shows how to use the automatic face enrollment functionality.
Compared to the minimal example above, it additionally queries the enrolled face IDs and looks up a face ID for each detected person.
#include <cstdint>
#include <cstdio>
#include <vector>

#include "plumerai/video_intelligence.h"

int main() {
  // Settings, to be changed as needed
  constexpr int width = 1600;   // camera image width in pixels
  constexpr int height = 1200;  // camera image height in pixels
  constexpr auto image_format = plumerai::ImageFormat::PACKED_RGB888;

  // Initialize the video intelligence algorithm
  auto pvi = plumerai::VideoIntelligence(height, width);

  // Loop over frames in a video stream (example: 10 frames)
  for (int t = 0; t < 10; ++t) {
    // Some example input here, normally this is where camera data is acquired
    auto image = std::vector<std::uint8_t>(height * width * 3);  // 3 for RGB

    // The time between two video frames in seconds. In this example we assume
    // a constant frame rate of 30 fps, but variable rates are supported.
    const float delta_t = 1.f / 30.f;

    // Process the frame
    auto error_code = pvi.process_frame(
        plumerai::ImagePointer<image_format>(image.data()), delta_t);
    if (error_code != plumerai::ErrorCode::SUCCESS) {
      printf("Error: %s\n", plumerai::error_code_string(error_code));
      return 1;
    }
    // Report the number of faces in the library so far. At first the library
    // will be empty, but as soon as a face is clearly visible for a while, it
    // will be added to the library with a new unique face-ID. The library
    // will grow over time, unless `remove_face_embedding` is called.
    std::vector<int> face_ids;
    error_code = pvi.face_enrollment_automatic().get_face_ids(face_ids);
    if (error_code != plumerai::ErrorCode::SUCCESS) {
      printf("Error: %s\n", plumerai::error_code_string(error_code));
      return 1;
    }
    printf("Total of %zu people in the familiar face-ID library\n",
           face_ids.size());
    std::vector<BoxPrediction> predictions;
    error_code = pvi.object_detection().get_detections(predictions);
    if (error_code != plumerai::ErrorCode::SUCCESS) {
      printf("Error: %s\n", plumerai::error_code_string(error_code));
      return 1;
    }

    // Display the results to stdout
    for (auto &p : predictions) {
      if (p.class_id == CLASS_PERSON) {
        // `face_id` will be one of the values in `face_ids`
        const auto face_id = pvi.face_identification().get_face_id(p);
        printf(
            "Box #%d with face id %d @ (x,y) -> (%.2f,%.2f) till (%.2f,%.2f)\n",
            p.id, face_id, p.x_min, p.y_min, p.x_max, p.y_max);
      }
    }
  }
  return 0;
}
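The comment in the loop above mentions `remove_face_embedding` as the way to shrink the library again. The fragment below is only a sketch of how that might be called from inside the frame loop: the assumption that it is exposed via `face_enrollment_automatic()` and takes a single face ID is not confirmed here, so check the API reference for the actual signature.

    // Sketch only: forget the first enrolled face again. The exact signature
    // of `remove_face_embedding` (one face ID in, an ErrorCode out) is an
    // assumption; consult the API reference.
    if (!face_ids.empty()) {
      const auto remove_error = pvi.face_enrollment_automatic()
                                    .remove_face_embedding(face_ids.front());
      if (remove_error != plumerai::ErrorCode::SUCCESS) {
        printf("Error: %s\n", plumerai::error_code_string(remove_error));
      }
    }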
Manual face enrollment
This example shows how to use the manual face enrollment functionality. It consists of two main loops:
- An example enrollment loop, which runs for a fixed number of frames and computes a face embedding vector to enroll one person in the face library.
- An example video processing loop, similar to the first example.
#include <cstdint>
#include <cstdio>
#include <vector>

#include "plumerai/video_intelligence.h"

int main() {
  // Settings, to be changed as needed
  constexpr int width = 1600;   // camera image width in pixels
  constexpr int height = 1200;  // camera image height in pixels
  constexpr auto image_format = plumerai::ImageFormat::PACKED_RGB888;

  // Initialize the `VideoIntelligence` object
  auto pvi = plumerai::VideoIntelligence(height, width);

  // ---------------------- Enrollment starting ------------------------------
  auto error_code = pvi.face_enrollment_manual().start_enrollment();
  if (error_code != plumerai::ErrorCode::ENROLLMENT_IN_PROGRESS) {
    printf("Error: %s\n", plumerai::error_code_string(error_code));
    return 1;
  }
  // Enroll for 10 frames (just an example, using more frames gives better
  // results)
  for (int t = 0; t < 10; ++t) {
    // Some example input here, normally this is where camera data is acquired
    auto image = std::vector<std::uint8_t>(height * width * 3);  // 3 for RGB

    // Process the frame
    // If the enrollment frames come from a video source, then use
    // `process_frame` instead:
    //   error_code = pvi.process_frame(
    //       plumerai::ImagePointer<image_format>(image.data()), delta_t);
    error_code =
        pvi.single_image(plumerai::ImagePointer<image_format>(image.data()));
    printf("Enrollment status: %s\n", plumerai::error_code_string(error_code));
  }
  // Finish enrollment
  std::vector<std::int8_t> embedding;
  error_code = pvi.face_enrollment_manual().finish_enrollment(embedding);
  if (error_code != plumerai::ErrorCode::SUCCESS) {
    printf("Error: %s\n", plumerai::error_code_string(error_code));
    return 1;
  }

  // Add the embedding to the library with face id '1'.
  error_code = pvi.face_enrollment_manual().add_embedding(embedding, 1);
  if (error_code != plumerai::ErrorCode::SUCCESS) {
    printf("Error: %s\n", plumerai::error_code_string(error_code));
    return 1;
  }
  // ---------------------- Enrollment finished ------------------------------
  // Loop over frames in a video stream (example: 10 frames)
  for (int t = 0; t < 10; ++t) {
    // Some example input here, normally this is where camera data is acquired
    auto image = std::vector<std::uint8_t>(height * width * 3);  // 3 for RGB

    // The time between two video frames in seconds. In this example we assume
    // a constant frame rate of 30 fps, but variable rates are supported.
    const float delta_t = 1.f / 30.f;

    // Process the frame
    error_code = pvi.process_frame(
        plumerai::ImagePointer<image_format>(image.data()), delta_t);
    if (error_code != plumerai::ErrorCode::SUCCESS) {
      printf("Error: %s\n", plumerai::error_code_string(error_code));
      return 1;
    }

    std::vector<BoxPrediction> predictions;
    error_code = pvi.object_detection().get_detections(predictions);
    if (error_code != plumerai::ErrorCode::SUCCESS) {
      printf("Error: %s\n", plumerai::error_code_string(error_code));
      return 1;
    }
    // Display the results to stdout
    for (auto &p : predictions) {
      if (p.class_id == CLASS_PERSON) {
        printf(
            "Box #%d with face id %d @ (x,y) -> (%.2f,%.2f) till (%.2f,%.2f)\n",
            p.id, pvi.face_identification().get_face_id(p), p.x_min, p.y_min,
            p.x_max, p.y_max);
      }
    }
    if (predictions.size() == 0) {
      printf("No bounding boxes found in frame\n");
    }
  }
  return 0;
}
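The embedding computed by `finish_enrollment` only exists in memory, so after a restart the manual enrollment would otherwise have to be repeated. A common pattern is to persist it and re-add it with `add_embedding` on the next start. The sketch below assumes a plain binary file (here `face_1.emb`, a hypothetical name) is an acceptable storage location; on embedded devices you may prefer flash or secure storage instead.

#include <cstdint>
#include <fstream>
#include <vector>

// Store an enrolled face embedding so it can be re-added after a restart.
void save_embedding(const std::vector<std::int8_t> &embedding,
                    const char *path) {
  std::ofstream out(path, std::ios::binary);
  out.write(reinterpret_cast<const char *>(embedding.data()),
            static_cast<std::streamsize>(embedding.size()));
}

// Load a previously stored face embedding; returns an empty vector if the
// file does not exist yet.
std::vector<std::int8_t> load_embedding(const char *path) {
  std::ifstream in(path, std::ios::binary | std::ios::ate);
  if (!in) return {};
  std::vector<std::int8_t> embedding(static_cast<std::size_t>(in.tellg()));
  in.seekg(0);
  in.read(reinterpret_cast<char *>(embedding.data()),
          static_cast<std::streamsize>(embedding.size()));
  return embedding;
}

After enrollment, `save_embedding(embedding, "face_1.emb")` stores the vector; on a later run, `pvi.face_enrollment_manual().add_embedding(load_embedding("face_1.emb"), 1)` restores face ID 1 without running the enrollment loop again.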