@@ -18,14 +18,14 @@ static int get_depth(int x, float gd) {
     return std::max<int>(r, 1);
 }
 
-static nvinfer1::IElementWiseLayer* Proto(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights>& weightMap,
+static nvinfer1::IElementWiseLayer* Proto(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights>& weightMap,
                                           nvinfer1::ITensor& input, std::string lname, float gw, int max_channels) {
     int mid_channel = get_width(256, gw, max_channels);
     auto cv1 = convBnSiLU(network, weightMap, input, mid_channel, 3, 1, 1, "model.22.proto.cv1");
     float* convTranpsose_bais = (float*)weightMap["model.22.proto.upsample.bias"].values;
     int convTranpsose_bais_len = weightMap["model.22.proto.upsample.bias"].count;
     nvinfer1::Weights bias{nvinfer1::DataType::kFLOAT, convTranpsose_bais, convTranpsose_bais_len};
-    auto convTranpsose = network->addDeconvolutionNd(*cv1->getOutput(0), mid_channel, nvinfer1::DimsHW{2, 2}, weightMap["model.22.proto.upsample.weight"], bias);
+    auto convTranpsose = network->addDeconvolutionNd(*cv1->getOutput(0), mid_channel, nvinfer1::DimsHW{2, 2}, weightMap["model.22.proto.upsample.weight"], bias);
     assert(convTranpsose);
     convTranpsose->setStrideNd(nvinfer1::DimsHW{2, 2});
     auto cv2 = convBnSiLU(network, weightMap, *convTranpsose->getOutput(0), mid_channel, 3, 1, 1, "model.22.proto.cv2");
@@ -34,9 +34,9 @@ static nvinfer1::IElementWiseLayer* Proto(nvinfer1::INetworkDefinition* network,
     return cv3;
 }
 
-static nvinfer1::IShuffleLayer* ProtoCoef(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights>& weightMap,
+static nvinfer1::IShuffleLayer* ProtoCoef(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights>& weightMap,
                                           nvinfer1::ITensor& input, std::string lname, int grid_shape, float gw) {
-
+
     int mid_channle = 0;
     if (gw == 0.25 || gw == 0.5) {
         mid_channle = 32;
@@ -205,7 +205,7 @@ nvinfer1::IHostMemory* buildEngineYolov8Det(nvinfer1::IBuilder* builder,
     std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
     assert(builder->platformHasFastInt8());
     config->setFlag(nvinfer1::BuilderFlag::kINT8);
-    nvinfer1::IInt8EntropyCalibrator2* calibrator = new Calibrator(1, kInputW, kInputH, "../calibrator/", "int8calib.table", kInputTensorName);
+    auto* calibrator = new Int8EntropyCalibrator2(1, kInputW, kInputH, "../coco_calib/", "int8calib.table", kInputTensorName);
     config->setInt8Calibrator(calibrator);
 #endif
 
@@ -377,7 +377,7 @@ nvinfer1::IHostMemory* buildEngineYolov8Seg(nvinfer1::IBuilder* builder,
     std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
     assert(builder->platformHasFastInt8());
     config->setFlag(nvinfer1::BuilderFlag::kINT8);
-    nvinfer1::IInt8EntropyCalibrator2* calibrator = new Calibrator(1, kInputW, kInputH, "../calibrator/", "int8calib.table", kInputTensorName);
+    auto* calibrator = new Int8EntropyCalibrator2(1, kInputW, kInputH, "../coco_calib/", "int8calib.table", kInputTensorName);
     config->setInt8Calibrator(calibrator);
 #endif
 
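For context, the new call sites assume an Int8EntropyCalibrator2 class that implements nvinfer1::IInt8EntropyCalibrator2, reads calibration images from the given directory, and caches the computed scales in int8calib.table. Below is a minimal declaration sketch of the interface implied by that call; the constructor parameter order follows the call site, while member names, the readCache flag, and the TensorRT 8 noexcept signatures are assumptions rather than the repository's actual header.

// Sketch only: the shape of Int8EntropyCalibrator2 implied by the call above.
#include <cstddef>
#include <string>
#include <vector>
#include <NvInfer.h>

class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2 {
public:
    Int8EntropyCalibrator2(int batchSize, int inputW, int inputH, const char* calibImgDir,
                           const char* calibTableName, const char* inputBlobName, bool readCache = true);
    ~Int8EntropyCalibrator2() override;

    // Number of images fed per getBatch() call (1 at the call sites above).
    int32_t getBatchSize() const noexcept override;
    // Preprocesses the next batch, copies it to the GPU, and fills bindings[0].
    bool getBatch(void* bindings[], const char* names[], int32_t nbBindings) noexcept override;
    // Returns a previously written calibration table so calibration can be skipped.
    const void* readCalibrationCache(std::size_t& length) noexcept override;
    // Persists the scales computed during calibration.
    void writeCalibrationCache(const void* cache, std::size_t length) noexcept override;

private:
    int batchSize_, inputW_, inputH_;
    std::string imgDir_, calibTableName_, inputBlobName_;
    std::vector<std::string> imgFiles_;   // calibration images found under imgDir_
    void* deviceInput_ = nullptr;         // GPU buffer for one preprocessed batch
    std::vector<char> calibCache_;        // contents of the cached table, if read
    bool readCache_;
};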