[NEED HELP] Migration from TensorRT 8 to TensorRT 10
Hi guys, I'm an automotive engineer, and my current task is to migrate a TensorRT 8 project to TensorRT 10.
I have already checked the migration guide at https://docs.nvidia.com/deeplearning/tensorrt/latest/api/migration-guide.html#c, but it seems some functions are not covered there:
- error: no matching function for call to `nvinfer1::IRuntime::deserializeCudaEngine(unsigned char*, std::vector<unsigned char>::size_type, int)` (the guide lists only `IRuntime::deserializeCudaEngine(void const* blob, std::size_t size, IPluginFactory* pluginFactory)` and `IRuntime::destroy()`)
Could anyone please help? Thanks!
You can find the source code of the TensorRT 8.5 project here: https://github.com/NVIDIA-AI-IOT/Lidar_AI_Solution/blob/0a563c722c75b1b8d968a61ed4b62d2f5a78c06b/CUDA-CenterPoint/src/tensorrt.cpp#L116
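To make the mismatch concrete, the shape of the call that the compiler rejects looks roughly like this (`runtime` and `blob` are placeholder names, not the exact variables in tensorrt.cpp):

```cpp
// TensorRT 8-era call: three arguments, the last one filling the removed
// IPluginFactory slot. TensorRT 10 has no overload matching this signature.
nvinfer1::ICudaEngine* engine = runtime->deserializeCudaEngine(blob.data(), blob.size(), 0);
```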
For reference, see the `deserializeCudaEngine` usage in the following code block:
```cpp
namespace samplesCommon
{
// Deleter for TensorRT objects: TensorRT 10 removed the destroy() methods,
// so objects are released with plain delete.
struct InferDeleter
{
    template <typename T>
    void operator()(T* obj) const
    {
        delete obj;
    }
};
} // namespace samplesCommon

bool SampleIOFormats::build(int32_t dataWidth)
{
    auto builder = SampleUniquePtr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(sample::gLogger.getTRTLogger()));
    if (!builder)
    {
        return false;
    }

    auto network = SampleUniquePtr<nvinfer1::INetworkDefinition>(builder->createNetworkV2(0));
    if (!network)
    {
        return false;
    }

    auto config = SampleUniquePtr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig());
    if (!config)
    {
        return false;
    }

    auto parser
        = SampleUniquePtr<nvonnxparser::IParser>(nvonnxparser::createParser(*network, sample::gLogger.getTRTLogger()));
    if (!parser)
    {
        return false;
    }

    auto constructed = constructNetwork(builder, network, config, parser);
    if (!constructed)
    {
        return false;
    }

    network->getInput(0)->setAllowedFormats(static_cast<TensorFormats>(1 << static_cast<int32_t>(mTensorFormat)));
    network->getOutput(0)->setAllowedFormats(1U << static_cast<int32_t>(TensorFormat::kLINEAR));

    mEngine.reset();

    if (dataWidth == 1)
    {
        config->setFlag(BuilderFlag::kINT8);
        network->getInput(0)->setType(DataType::kINT8);
        network->getOutput(0)->setType(DataType::kINT8);
        samplesCommon::setAllDynamicRanges(network.get(), 127.0F, 127.0F);
    }
    if (dataWidth == 2)
    {
        config->setFlag(BuilderFlag::kFP16);
        network->getInput(0)->setType(DataType::kHALF);
        network->getOutput(0)->setType(DataType::kHALF);
    }
    config->setFlag(BuilderFlag::kGPU_FALLBACK);

    // CUDA stream used for profiling by the builder.
    auto profileStream = samplesCommon::makeCudaStream();
    if (!profileStream)
    {
        return false;
    }
    config->setProfileStream(*profileStream);

    SampleUniquePtr<nvinfer1::ITimingCache> timingCache{};

    // Load timing cache
    if (!mParams.timingCacheFile.empty())
    {
        timingCache = samplesCommon::buildTimingCacheFromFile(
            sample::gLogger.getTRTLogger(), *config, mParams.timingCacheFile, sample::gLogError);
    }

    SampleUniquePtr<IHostMemory> plan{builder->buildSerializedNetwork(*network, *config)};
    if (!plan)
    {
        return false;
    }

    if (timingCache != nullptr && !mParams.timingCacheFile.empty())
    {
        samplesCommon::updateTimingCacheFile(
            sample::gLogger.getTRTLogger(), mParams.timingCacheFile, timingCache.get(), *builder);
    }

    if (!mRuntime)
    {
        mRuntime = SampleUniquePtr<IRuntime>(createInferRuntime(sample::gLogger.getTRTLogger()));
    }
    if (!mRuntime)
    {
        return false;
    }

    // <-- here: TensorRT 10 usage, the two-argument deserializeCudaEngine()
    // overload, with the engine released via InferDeleter instead of destroy().
    mEngine = std::shared_ptr<nvinfer1::ICudaEngine>(
        mRuntime->deserializeCudaEngine(plan->data(), plan->size()), samplesCommon::InferDeleter());
    if (!mEngine)
    {
        return false;
    }

    ASSERT(network->getNbInputs() == 1);
    mInputDims = network->getInput(0)->getDimensions();
    ASSERT(mInputDims.nbDims == 4);

    ASSERT(network->getNbOutputs() == 1);
    mOutputDims = network->getOutput(0)->getDimensions();
    ASSERT(mOutputDims.nbDims == 2);

    return true;
}
```
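The key change for the migration is at the marked line: TensorRT 10 only has the two-argument `deserializeCudaEngine(void const*, std::size_t)` overload, and `IRuntime::destroy()` is gone, so objects are released with plain `delete` (which is what `InferDeleter` does). Below is a minimal, self-contained sketch of loading a serialized engine under TensorRT 10; `Logger`, `loadEngine`, and the file handling are illustrative, not part of the sample above:

```cpp
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include "NvInfer.h"

// Minimal logger required by createInferRuntime().
class Logger : public nvinfer1::ILogger
{
    void log(Severity severity, char const* msg) noexcept override
    {
        if (severity <= Severity::kWARNING)
        {
            std::cerr << msg << std::endl;
        }
    }
};

// Hypothetical helper: read a serialized engine from disk and deserialize it.
std::shared_ptr<nvinfer1::ICudaEngine> loadEngine(std::string const& path, nvinfer1::IRuntime& runtime)
{
    std::ifstream file(path, std::ios::binary | std::ios::ate);
    if (!file)
    {
        return nullptr;
    }
    std::vector<unsigned char> blob(static_cast<size_t>(file.tellg()));
    file.seekg(0);
    file.read(reinterpret_cast<char*>(blob.data()), static_cast<std::streamsize>(blob.size()));

    // TensorRT 10: two-argument overload only; the IPluginFactory parameter
    // no longer exists. The default shared_ptr deleter (plain delete) replaces
    // the removed destroy() method.
    return std::shared_ptr<nvinfer1::ICudaEngine>(runtime.deserializeCudaEngine(blob.data(), blob.size()));
}

int main()
{
    Logger logger;
    std::unique_ptr<nvinfer1::IRuntime> runtime{nvinfer1::createInferRuntime(logger)};
    auto engine = loadEngine("model.plan", *runtime); // "model.plan" is a placeholder path
    return engine ? 0 : 1;
}
```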
@lix19937 Can you also run a TensorRT engine with Golang?
@geraldstanje1 No