
Commit adac14f

fix some
1 parent 6fe9ba9

4 files changed: 42 additions & 25 deletions

yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_tensorrt.hpp

Lines changed: 3 additions & 3 deletions
@@ -25,16 +25,16 @@ namespace yolox_cpp{
         auto ret = (status);\
         if (ret != 0)\
         {\
-            std::cerr << "Cuda failure: " << ret << std::endl;\
+            std::cerr << "CUDA Failure: " << ret << std::endl;\
             abort();\
         }\
     } while (0)


    class YoloXTensorRT: public AbcYoloX{
        public:
-            YoloXTensorRT(file_name_t path_to_engine, int device=0,
-                float nms_th=0.45, float conf_th=0.3, std::string model_version="0.1.1rc0",
+            YoloXTensorRT(const file_name_t &path_to_engine, int device=0,
+                float nms_th=0.45, float conf_th=0.3, const std::string &model_version="0.1.1rc0",
                int num_classes=80, bool p6=false);
            ~YoloXTensorRT();
            std::vector<Object> inference(const cv::Mat& frame) override;

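The recurring change in both headers is passing path_to_* and model_version by const reference instead of by value. A minimal standalone sketch (names are illustrative, not from the repo) of why that matters for std::string-like arguments:

// Sketch only: pass-by-value copies the string buffer on every call,
// while a const reference binds to the caller's object without a copy.
#include <iostream>
#include <string>

static void by_value(std::string s) { std::cout << s.size() << std::endl; }        // copies
static void by_cref(const std::string &s) { std::cout << s.size() << std::endl; }  // no copy

int main()
{
    const std::string path = "model.engine";  // hypothetical path
    by_value(path);   // allocates and copies the buffer
    by_cref(path);    // passes a reference only
    return 0;
}
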
yolox_ros_cpp/yolox_cpp/include/yolox_cpp/yolox_tflite.hpp

Lines changed: 2 additions & 2 deletions
@@ -30,8 +30,8 @@ namespace yolox_cpp{

    class YoloXTflite: public AbcYoloX{
        public:
-            YoloXTflite(file_name_t path_to_model, int num_threads,
-                float nms_th=0.45, float conf_th=0.3, std::string model_version="0.1.1rc0",
+            YoloXTflite(const file_name_t &path_to_model, int num_threads,
+                float nms_th=0.45, float conf_th=0.3, const std::string &model_version="0.1.1rc0",
                int num_classes=80, bool p6=false, bool is_nchw=true);
            ~YoloXTflite();
            std::vector<Object> inference(const cv::Mat& frame) override;

yolox_ros_cpp/yolox_cpp/src/yolox_tensorrt.cpp

Lines changed: 19 additions & 7 deletions
@@ -3,8 +3,8 @@
 namespace yolox_cpp
 {

-    YoloXTensorRT::YoloXTensorRT(file_name_t path_to_engine, int device,
-        float nms_th, float conf_th, std::string model_version,
+    YoloXTensorRT::YoloXTensorRT(const file_name_t &path_to_engine, int device,
+        float nms_th, float conf_th, const std::string &model_version,
        int num_classes, bool p6)
    : AbcYoloX(nms_th, conf_th, model_version, num_classes, p6),
      DEVICE_(device)
@@ -27,8 +27,9 @@ namespace yolox_cpp
        }
        else
        {
-            std::cerr << "invalid arguments path_to_engine: " << path_to_engine << std::endl;
-            return;
+            std::string msg = "invalid arguments path_to_engine: ";
+            msg += path_to_engine;
+            throw std::runtime_error(msg.c_str());
        }

        this->runtime_ = std::unique_ptr<IRuntime>(createInferRuntime(this->gLogger_));
@@ -102,13 +103,16 @@ namespace yolox_cpp
        // inference
        this->doInference(input_blob_.data(), output_blob_.data());

+        // postprocess
        const float scale = std::min(
            static_cast<float>(this->input_w_) / static_cast<float>(frame.cols),
            static_cast<float>(this->input_h_) / static_cast<float>(frame.rows)
        );

        std::vector<Object> objects;
-        decode_outputs(output_blob_.data(), this->grid_strides_, objects, this->bbox_conf_thresh_, scale, frame.cols, frame.rows);
+        decode_outputs(
+            output_blob_.data(), this->grid_strides_, objects,
+            this->bbox_conf_thresh_, scale, frame.cols, frame.rows);

        return objects;
    }
@@ -120,13 +124,21 @@ namespace yolox_cpp
        CHECK(cudaStreamCreate(&stream));

        // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
-        CHECK(cudaMemcpyAsync(this->inference_buffers_[this->inputIndex_], input, 3 * this->input_h_ * this->input_w_ * sizeof(float), cudaMemcpyHostToDevice, stream));
+        CHECK(
+            cudaMemcpyAsync(
+                this->inference_buffers_[this->inputIndex_], input,
+                3 * this->input_h_ * this->input_w_ * sizeof(float),
+                cudaMemcpyHostToDevice, stream));

        bool success = context_->enqueueV3(stream);
        if (!success)
            throw std::runtime_error("failed inference");

-        CHECK(cudaMemcpyAsync(output, this->inference_buffers_[this->outputIndex_], this->output_size_ * sizeof(float), cudaMemcpyDeviceToHost, stream));
+        CHECK(
+            cudaMemcpyAsync(
+                output, this->inference_buffers_[this->outputIndex_],
+                this->output_size_ * sizeof(float),
+                cudaMemcpyDeviceToHost, stream));

        CHECK(cudaStreamSynchronize(stream));

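The constructor's error handling changes from printing to std::cerr and returning (or calling exit(1) in the TFLite backend) to throwing std::runtime_error. A simplified sketch, not the library's actual class, of why this helps: returning from a constructor hands the caller a partially initialized object, while an exception propagates the failure.

#include <iostream>
#include <stdexcept>
#include <string>

struct Engine  // stand-in for YoloXTensorRT, illustrative only
{
    explicit Engine(const std::string &path_to_engine)
    {
        if (path_to_engine.empty())  // stand-in for the real argument validation
            throw std::runtime_error("invalid arguments path_to_engine: " + path_to_engine);
        // ... deserialize the engine here ...
    }
};

int main()
{
    try
    {
        Engine engine("");  // construction fails outright instead of yielding a half-built object
    }
    catch (const std::runtime_error &e)
    {
        std::cerr << "construction failed: " << e.what() << std::endl;
    }
    return 0;
}
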
yolox_ros_cpp/yolox_cpp/src/yolox_tflite.cpp

Lines changed: 18 additions & 13 deletions
@@ -3,8 +3,8 @@
 namespace yolox_cpp
 {

-    YoloXTflite::YoloXTflite(file_name_t path_to_model, int num_threads,
-        float nms_th, float conf_th, std::string model_version,
+    YoloXTflite::YoloXTflite(const file_name_t &path_to_model, int num_threads,
+        float nms_th, float conf_th, const std::string &model_version,
        int num_classes, bool p6, bool is_nchw)
    : AbcYoloX(nms_th, conf_th, model_version, num_classes, p6), is_nchw_(is_nchw)
    {
@@ -25,8 +25,8 @@ namespace yolox_cpp
        status = this->interpreter_->SetNumThreads(num_threads);
        if (status != TfLiteStatus::kTfLiteOk)
        {
-            std::cerr << "Failed to SetNumThreads." << std::endl;
-            exit(1);
+            std::string msg = "Failed to SetNumThreads.";
+            throw std::runtime_error(msg.c_str());
        }

        // XNNPACK Delegate
@@ -36,8 +36,8 @@ namespace yolox_cpp
        status = this->interpreter_->ModifyGraphWithDelegate(this->delegate_);
        if (status != TfLiteStatus::kTfLiteOk)
        {
-            std::cerr << "Failed to ModifyGraphWithDelegate." << std::endl;
-            exit(1);
+            std::string msg = "Failed to ModifyGraphWithDelegate.";
+            throw std::runtime_error(msg.c_str());
        }

        // // GPU Delegate
@@ -67,8 +67,8 @@ namespace yolox_cpp

        if (this->interpreter_->AllocateTensors() != TfLiteStatus::kTfLiteOk)
        {
-            std::cerr << "Failed to allocate tensors." << std::endl;
-            exit(1);
+            std::string msg = "Failed to allocate tensors.";
+            throw std::runtime_error(msg.c_str());
        }

        {
@@ -97,7 +97,7 @@ namespace yolox_cpp
        {
            this->input_size_ = sizeof(float);
        }
-        for (size_t i = 0; i < tensor->dims->size; i++)
+        for (int i = 0; i < tensor->dims->size; i++)
        {
            this->input_size_ *= tensor->dims->data[i];
            std::cout << " - " << tensor->dims->data[i] << std::endl;
@@ -120,7 +120,7 @@ namespace yolox_cpp
        {
            this->output_size_ = sizeof(float);
        }
-        for (size_t i = 0; i < tensor->dims->size; i++)
+        for (int i = 0; i < tensor->dims->size; i++)
        {
            this->output_size_ *= tensor->dims->data[i];
            std::cout << " - " << tensor->dims->data[i] << std::endl;
@@ -166,10 +166,15 @@ namespace yolox_cpp
        }

        // postprocess
+        const float scale = std::min(
+            static_cast<float>(this->input_w_) / static_cast<float>(frame.cols),
+            static_cast<float>(this->input_h_) / static_cast<float>(frame.rows)
+        );
        std::vector<Object> objects;
-        float scale = std::min(this->input_w_ / (frame.cols * 1.0), this->input_h_ / (frame.rows * 1.0));
-        float *output_blob = this->interpreter_->typed_output_tensor<float>(0);
-        decode_outputs(output_blob, this->grid_strides_, objects, this->bbox_conf_thresh_, scale, frame.cols, frame.rows);
+        decode_outputs(
+            this->interpreter_->typed_output_tensor<float>(0),
+            this->grid_strides_, objects,
+            this->bbox_conf_thresh_, scale, frame.cols, frame.rows);

        return objects;
    }
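Both backends now compute the letterbox scale the same way, with explicit float casts instead of the previous int/double mixing. A small sketch with made-up frame and input sizes:

#include <algorithm>
#include <iostream>

int main()
{
    const int input_w = 640, input_h = 640;  // example network input size
    const int cols = 1920, rows = 1080;      // hypothetical frame size
    const float scale = std::min(
        static_cast<float>(input_w) / static_cast<float>(cols),
        static_cast<float>(input_h) / static_cast<float>(rows));
    std::cout << "scale = " << scale << std::endl;  // min(0.333..., 0.592...) = 0.333...
    return 0;
}

The size_t to int loop-index change in the same file matches tensor->dims->size, which TensorFlow Lite declares as an int, and so avoids signed/unsigned comparison warnings.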
