Raft (default)
A C++ port of RAFT for Windows, Linux, and macOS, with CUDA and Metal acceleration.
Example Inputs & Outputs
| Inputs | Outputs |
|---|---|
| ![]() ![]() | ![]() |
Demo Code
```cpp
#include "blace_ai.h"
#include <opencv2/opencv.hpp>

#include <cstring>     // std::memcpy
#include <filesystem>  // std::filesystem::path

// include the models you want to use
#include "raft_v1_default_v2_ALL_export_version_v25.h"

using namespace blace;

std::shared_ptr<RawMemoryObject> memory_from_file(std::string file) {
  // read image into memory
  cv::Mat image = cv::imread(file, cv::IMREAD_COLOR);

  // construct a hash from the filename
  ml_core::BlaceHash random_hash(file);

  // construct the memory object. We set copy_memory to true, since image
  // goes out of scope when the method returns, so we need to take ownership
  // of the data
  RawMemoryObject raw_mem((void *)image.data, ml_core::DataTypeEnum::BLACE_BYTE,
                          ml_core::ColorFormatEnum::BGR,
                          std::vector<int64_t>{1, image.rows, image.cols, 3},
                          ml_core::BHWC, ml_core::ZERO_TO_255, ml_core::CPU,
                          random_hash, true);

  return std::make_shared<RawMemoryObject>(raw_mem);
}

int main() {
  ::workload_management::BlaceWorld blace;

  // load images into ops
  auto exe_path = util::getPathToExe();
  std::filesystem::path frame_0 = exe_path / "raft_frame_0.png";
  std::filesystem::path frame_1 = exe_path / "raft_frame_1.png";

  auto frame_0_mem = memory_from_file(frame_0.string());
  auto frame_1_mem = memory_from_file(frame_1.string());

  auto frame_0_op = CONSTRUCT_OP(ops::FromRawMemoryOp(frame_0_mem));
  auto frame_1_op = CONSTRUCT_OP(ops::FromRawMemoryOp(frame_1_mem));

  // number of iterative flow refinement steps RAFT performs
  auto num_flow_updates = CONSTRUCT_OP(ops::FromIntOp(12));

  // construct model inference arguments
  ml_core::InferenceArgsCollection infer_args;
  infer_args.inference_args.backends = {
      ml_core::TORCHSCRIPT_CUDA_FP16, ml_core::TORCHSCRIPT_MPS_FP16,
      ml_core::TORCHSCRIPT_CUDA_FP32, ml_core::TORCHSCRIPT_MPS_FP32,
      ml_core::ONNX_DML_FP32, ml_core::TORCHSCRIPT_CPU_FP32};

  // construct inference operation
  auto infer_op = raft_v1_default_v2_ALL_export_version_v25_run(
      frame_0_op, frame_1_op, num_flow_updates, 0, infer_args,
      util::getPathToExe().string());

  // normalize the optical flow to zero-one range for plotting. The model
  // returns relative offsets in -1 to 1 pixel space, so the raw values are
  // too small to plot
  infer_op = CONSTRUCT_OP(ops::NormalizeToZeroOneOP(infer_op));

  // convert uv color to rgb (by U->R, V->G, 1->B)
  infer_op = CONSTRUCT_OP(ops::ToColorOp(infer_op, ml_core::RGB));

  // prepare the result for a later copy into a cv::Mat. The values set here
  // are based on implicit knowledge of cv::Mat's internal data storage.
  auto normalized_matte = CONSTRUCT_OP(ops::PrepareForHostCopyOP(
      infer_op, ml_core::BLACE_BYTE, ml_core::RGB, ml_core::HWC,
      ml_core::ZERO_TO_255, ml_core::CPU));

  // construct an evaluator and evaluate the graph to a raw memory object
  computation_graph::GraphEvaluator evaluator(normalized_matte);
  auto [return_code, raw_mem] = evaluator.evaluateToRawMemory();

  // get the sizes
  int w = raw_mem->get_memory_sizes()[1];
  int h = raw_mem->get_memory_sizes()[0];

  // initialize an empty cv::Mat
  cv::Mat cv_mat(h, w, CV_8UC3);
  cv_mat.setTo(cv::Scalar(0, 0, 0));

  // and copy the memory
  std::memcpy(cv_mat.data, raw_mem->get_data_ptr(), raw_mem->get_memory_size());

  // save to disk and return
  auto out_file = exe_path / "optical_flow.png";
  cv::imwrite(out_file.string(), cv_mat);

  return 0;
}
```
Tested with version v0.9.96 of the blace.ai SDK. It may also work with newer or older releases (check the blace.ai release notes for breaking changes).
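RAFT refines its flow estimate iteratively, and the demo exposes the iteration count via the `num_flow_updates` op (12 above). As a minimal sketch reusing `frame_0_op`, `frame_1_op`, and `infer_args` from the demo, a lower count trades flow quality for speed (the value 6 is purely illustrative):

```cpp
// Reuses frame_0_op, frame_1_op, and infer_args from the demo above.
// Fewer refinement iterations run faster but may produce a coarser flow
// field; pick the value that fits your quality/latency budget.
auto fewer_updates = CONSTRUCT_OP(ops::FromIntOp(6));
auto fast_infer_op = raft_v1_default_v2_ALL_export_version_v25_run(
    frame_0_op, frame_1_op, fewer_updates, 0, infer_args,
    util::getPathToExe().string());
```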
Quickstart
- Download the blace.ai SDK and unzip it. In the bootstrap script `build_run_demos.ps1` (Windows) or `build_run_demos.sh` (Linux/macOS), set the `BLACE_AI_CMAKE_DIR` environment variable to the `cmake` folder inside the unzipped SDK, e.g. `export BLACE_AI_CMAKE_DIR="<unzip_folder>/package/cmake"`.
- Download the model payload(s) (`.bin` files) from below and place them in the same folder as the bootstrap scripts.
- Then run the bootstrap script with `powershell build_run_demo.ps1` (Windows) or `sh build_run_demo.sh` (Linux/macOS).

This will build and execute the demo (see the payload check sketch below if it fails to start).
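The inference call resolves its model payload relative to the folder passed as its last argument (`util::getPathToExe()` in the demo), so the `.bin` file must end up next to the built executable. If the demo fails right after startup, a quick existence check can rule out a misplaced payload. A minimal sketch; the payload file name below is an assumption derived from the model header name, so adjust it to the file you actually downloaded:

```cpp
#include <filesystem>
#include <iostream>

// Returns true if the model payload sits next to the executable.
// NOTE: the file name is a guess based on the model header name
// (raft_v1_default_v2_ALL_export_version_v25.h); adjust as needed.
bool payload_present(const std::filesystem::path &exe_dir) {
  auto payload = exe_dir / "raft_v1_default_v2_ALL_export_version_v25.bin";
  if (!std::filesystem::exists(payload)) {
    std::cerr << "model payload missing: " << payload << "\n";
    return false;
  }
  return true;
}
```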
Supported Backends
| Torchscript CPU | Torchscript CUDA FP16 * | Torchscript CUDA FP32 * | Torchscript MPS FP16 * | Torchscript MPS FP32 * | ONNX CPU FP32 | ONNX DirectML FP32 * |
|---|---|---|---|---|---|---|
| ✅ | ❌ | ✅ | ❌ | ✅ | ❌ | ❌ |
(*: Hardware Accelerated)
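The demo code requests more backends than this model build supports; the backends field is a preference-ordered list, and unavailable entries appear to be skipped. A minimal sketch, using only enum values that occur in the demo code, restricting the list to the backends marked supported (✅) above:

```cpp
// Assumes blace_ai.h is included and `using namespace blace;`, as in the demo.
// Preference-ordered list containing only the supported backends from the
// table above; the CPU backend acts as the universal fallback.
ml_core::InferenceArgsCollection infer_args;
infer_args.inference_args.backends = {
    ml_core::TORCHSCRIPT_CUDA_FP32,  // hardware accelerated (NVIDIA CUDA)
    ml_core::TORCHSCRIPT_MPS_FP32,   // hardware accelerated (Apple Metal)
    ml_core::TORCHSCRIPT_CPU_FP32};  // CPU fallback
```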
Artifacts
| Torchscript Payload | Demo Project | Header |
|---|---|---|