#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

#include <onnxruntime_cxx_api.h>

int main() {
    // Initialize the ONNX Runtime environment
    Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "providers_info");

    // Print version info
    std::cout << "ONNX Runtime version: " << Ort::GetVersionString() << std::endl;

    // Get available execution providers
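    // (GetAvailableProviders lists the providers compiled into this build of
    // ONNX Runtime; enabling one for inference is configured per session via
    // Ort::SessionOptions, e.g. AppendExecutionProvider_CUDA)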
    auto providers = Ort::GetAvailableProviders();
    std::cout << "Available providers:" << std::endl;
    for (const auto& p : providers) {
        std::cout << " - " << p << std::endl;
    }

    // Device info: the C++ API has no direct equivalent of Python's ort.get_device(),
    // so infer the default device from whether the CUDA provider is available
    if (std::find(providers.begin(), providers.end(), "CUDAExecutionProvider") != providers.end()) {
        std::cout << "Default device: GPU (CUDA)" << std::endl;
    } else {
        std::cout << "Default device: CPU" << std::endl;
    }

    return 0;
}
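
// Build sketch (file name and paths are assumptions; adjust -I/-L flags to point
// at your ONNX Runtime install if the headers and shared library are not on the
// compiler's default search paths):
//   g++ -std=c++17 providers_info.cpp -lonnxruntime -o providers_info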