From 7e983136035f91a433b3d02085fa30d1e711195c Mon Sep 17 00:00:00 2001 From: jiangjiajun Date: Wed, 10 Aug 2022 15:00:56 +0000 Subject: [PATCH 1/2] Validate all backends for detection models and add demo code and doc --- .../backends/paddle/paddle_backend.cc | 3 + .../backends/paddle/paddle_backend.h | 2 + csrc/fastdeploy/fastdeploy_model.cc | 51 ++++++- csrc/fastdeploy/fastdeploy_runtime.cc | 5 + csrc/fastdeploy/fastdeploy_runtime.h | 6 + csrc/fastdeploy/pybind/fastdeploy_runtime.cc | 2 + .../vision/detection/ppdet/picodet.cc | 4 +- .../vision/detection/ppdet/ppyoloe.cc | 4 +- .../vision/detection/ppdet/yolov3.cc | 4 +- .../vision/detection/ppdet/yolox.cc | 12 +- csrc/fastdeploy/vision/visualize/detection.cc | 6 +- csrc/fastdeploy/vision/visualize/visualize.h | 3 +- .../vision/visualize/visualize_pybind.cc | 4 +- .../detection/paddledetection/.README.md.swp | Bin 0 -> 12288 bytes .../detection/paddledetection/README.md | 45 +++++++ .../paddledetection/cpp/CMakeLists.txt | 28 ++++ .../detection/paddledetection/cpp/README.md | 75 +++++++++++ .../paddledetection/cpp/infer_faster_rcnn.cc | 94 +++++++++++++ .../paddledetection/cpp/infer_picodet.cc | 127 ++++++++++++++++++ .../paddledetection/cpp/infer_ppyolo.cc | 94 +++++++++++++ .../paddledetection/cpp/infer_ppyoloe.cc | 127 ++++++++++++++++++ .../paddledetection/cpp/infer_yolov3.cc | 94 +++++++++++++ .../paddledetection/cpp/infer_yolox.cc | 127 ++++++++++++++++++ .../paddledetection/python/README.md | 72 ++++++++++ .../python/infer_faster_rcnn.py | 61 +++++++++ .../paddledetection/python/infer_picodet.py | 61 +++++++++ .../paddledetection/python/infer_ppyolo.py | 62 +++++++++ .../paddledetection/python/infer_ppyoloe.py | 61 +++++++++ .../paddledetection/python/infer_yolov3.py | 62 +++++++++ .../paddledetection/python/infer_yolox.py | 61 +++++++++ fastdeploy/runtime.py | 6 + fastdeploy/vision/__init__.py | 3 +- fastdeploy/vision/visualize/__init__.py | 10 +- 33 files changed, 1351 insertions(+), 25 deletions(-) create mode 100644 examples/vision/detection/paddledetection/.README.md.swp create mode 100644 examples/vision/detection/paddledetection/README.md create mode 100644 examples/vision/detection/paddledetection/cpp/CMakeLists.txt create mode 100644 examples/vision/detection/paddledetection/cpp/README.md create mode 100644 examples/vision/detection/paddledetection/cpp/infer_faster_rcnn.cc create mode 100644 examples/vision/detection/paddledetection/cpp/infer_picodet.cc create mode 100644 examples/vision/detection/paddledetection/cpp/infer_ppyolo.cc create mode 100644 examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc create mode 100644 examples/vision/detection/paddledetection/cpp/infer_yolov3.cc create mode 100644 examples/vision/detection/paddledetection/cpp/infer_yolox.cc create mode 100644 examples/vision/detection/paddledetection/python/README.md create mode 100644 examples/vision/detection/paddledetection/python/infer_faster_rcnn.py create mode 100644 examples/vision/detection/paddledetection/python/infer_picodet.py create mode 100644 examples/vision/detection/paddledetection/python/infer_ppyolo.py create mode 100644 examples/vision/detection/paddledetection/python/infer_ppyoloe.py create mode 100644 examples/vision/detection/paddledetection/python/infer_yolov3.py create mode 100644 examples/vision/detection/paddledetection/python/infer_yolox.py diff --git a/csrc/fastdeploy/backends/paddle/paddle_backend.cc b/csrc/fastdeploy/backends/paddle/paddle_backend.cc index 2fae38937d8..f1d7605fc29 100644 --- 
a/csrc/fastdeploy/backends/paddle/paddle_backend.cc
+++ b/csrc/fastdeploy/backends/paddle/paddle_backend.cc
@@ -26,6 +26,9 @@ void PaddleBackend::BuildOption(const PaddleBackendOption& option) {
       config_.SetMkldnnCacheCapacity(option.mkldnn_cache_size);
     }
   }
+  if (!option.enable_log_info) {
+    config_.DisableGlogInfo();
+  }
   config_.SetCpuMathLibraryNumThreads(option.cpu_thread_num);
 }
 
diff --git a/csrc/fastdeploy/backends/paddle/paddle_backend.h b/csrc/fastdeploy/backends/paddle/paddle_backend.h
index 99ca5eb1b0b..22078ab14ad 100644
--- a/csrc/fastdeploy/backends/paddle/paddle_backend.h
+++ b/csrc/fastdeploy/backends/paddle/paddle_backend.h
@@ -32,6 +32,8 @@ struct PaddleBackendOption {
 #endif
   bool enable_mkldnn = true;
+  bool enable_log_info = false;
+
   int mkldnn_cache_size = 1;
   int cpu_thread_num = 8;
   // initialize memory size(MB) for GPU
diff --git a/csrc/fastdeploy/fastdeploy_model.cc b/csrc/fastdeploy/fastdeploy_model.cc
index c4dbc70a711..31781ac3adb 100644
--- a/csrc/fastdeploy/fastdeploy_model.cc
+++ b/csrc/fastdeploy/fastdeploy_model.cc
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 #include "fastdeploy/fastdeploy_model.h"
-#include "fastdeploy/utils/unique_ptr.h"
 #include "fastdeploy/utils/utils.h"
 
 namespace fastdeploy {
@@ -54,12 +53,52 @@ bool FastDeployModel::InitRuntime() {
               << std::endl;
     return false;
   }
-  runtime_ = utils::make_unique<Runtime>();
-  if (!runtime_->Init(runtime_option)) {
-    return false;
+
+  bool use_gpu = (runtime_option.device == Device::GPU);
+#ifndef WITH_GPU
+  use_gpu = false;
+#endif
+
+  // whether the model is supported by the specified backend
+  bool is_supported = false;
+  if (use_gpu) {
+    for (auto& item : valid_gpu_backends) {
+      if (item == runtime_option.backend) {
+        is_supported = true;
+        break;
+      }
+    }
+  } else {
+    for (auto& item : valid_cpu_backends) {
+      if (item == runtime_option.backend) {
+        is_supported = true;
+        break;
+      }
+    }
+  }
+
+  if (is_supported) {
+    runtime_ = std::unique_ptr<Runtime>(new Runtime());
+    if (!runtime_->Init(runtime_option)) {
+      return false;
+    }
+    runtime_initialized_ = true;
+    return true;
+  } else {
+    FDWARNING << ModelName() << " is not supported with backend "
+              << Str(runtime_option.backend) << "." << std::endl;
+    if (use_gpu) {
+      FDASSERT(valid_gpu_backends.size() > 0,
+               "There's no valid gpu backend for " + ModelName() + ".");
+      FDWARNING << "FastDeploy will choose " << Str(valid_gpu_backends[0])
+                << " for model inference." << std::endl;
+    } else {
+      FDASSERT(valid_cpu_backends.size() > 0,
+               "There's no valid cpu backend for " + ModelName() + ".");
+      FDWARNING << "FastDeploy will choose " << Str(valid_cpu_backends[0])
+                << " for model inference."
<< std::endl; + } } - runtime_initialized_ = true; - return true; } if (runtime_option.device == Device::CPU) { diff --git a/csrc/fastdeploy/fastdeploy_runtime.cc b/csrc/fastdeploy/fastdeploy_runtime.cc index e5c41a29a34..c2a16b90321 100644 --- a/csrc/fastdeploy/fastdeploy_runtime.cc +++ b/csrc/fastdeploy/fastdeploy_runtime.cc @@ -181,6 +181,10 @@ void RuntimeOption::EnablePaddleMKLDNN() { pd_enable_mkldnn = true; } void RuntimeOption::DisablePaddleMKLDNN() { pd_enable_mkldnn = false; } +void RuntimeOption::EnablePaddleLogInfo() { pd_enable_log_info = true; } + +void RuntimeOption::DisablePaddleLogInfo() { pd_enable_log_info = false; } + void RuntimeOption::SetPaddleMKLDNNCacheSize(int size) { FDASSERT(size > 0, "Parameter size must greater than 0."); pd_mkldnn_cache_size = size; @@ -272,6 +276,7 @@ void Runtime::CreatePaddleBackend() { #ifdef ENABLE_PADDLE_BACKEND auto pd_option = PaddleBackendOption(); pd_option.enable_mkldnn = option.pd_enable_mkldnn; + pd_option.enable_log_info = option.pd_enable_log_info; pd_option.mkldnn_cache_size = option.pd_mkldnn_cache_size; pd_option.use_gpu = (option.device == Device::GPU) ? true : false; pd_option.gpu_id = option.device_id; diff --git a/csrc/fastdeploy/fastdeploy_runtime.h b/csrc/fastdeploy/fastdeploy_runtime.h index 780945458ab..ab6b4a188a7 100644 --- a/csrc/fastdeploy/fastdeploy_runtime.h +++ b/csrc/fastdeploy/fastdeploy_runtime.h @@ -68,6 +68,11 @@ struct FASTDEPLOY_DECL RuntimeOption { // disable mkldnn while use paddle inference in CPU void DisablePaddleMKLDNN(); + // enable debug information of paddle backend + void EnablePaddleLogInfo(); + // disable debug information of paddle backend + void DisablePaddleLogInfo(); + // set size of cached shape while enable mkldnn with paddle inference backend void SetPaddleMKLDNNCacheSize(int size); @@ -108,6 +113,7 @@ struct FASTDEPLOY_DECL RuntimeOption { // ======Only for Paddle Backend===== bool pd_enable_mkldnn = true; + bool pd_enable_log_info = false; int pd_mkldnn_cache_size = 1; // ======Only for Trt Backend======= diff --git a/csrc/fastdeploy/pybind/fastdeploy_runtime.cc b/csrc/fastdeploy/pybind/fastdeploy_runtime.cc index 412b1ccefd3..86e5b69c751 100644 --- a/csrc/fastdeploy/pybind/fastdeploy_runtime.cc +++ b/csrc/fastdeploy/pybind/fastdeploy_runtime.cc @@ -28,6 +28,8 @@ void BindRuntime(pybind11::module& m) { .def("use_trt_backend", &RuntimeOption::UseTrtBackend) .def("enable_paddle_mkldnn", &RuntimeOption::EnablePaddleMKLDNN) .def("disable_paddle_mkldnn", &RuntimeOption::DisablePaddleMKLDNN) + .def("enable_paddle_log_info", &RuntimeOption::EnablePaddleLogInfo) + .def("disable_paddle_log_info", &RuntimeOption::DisablePaddleLogInfo) .def("set_paddle_mkldnn_cache_size", &RuntimeOption::SetPaddleMKLDNNCacheSize) .def("set_trt_input_shape", &RuntimeOption::SetTrtInputShape) diff --git a/csrc/fastdeploy/vision/detection/ppdet/picodet.cc b/csrc/fastdeploy/vision/detection/ppdet/picodet.cc index d89fab2aed7..7c961d1f8c8 100644 --- a/csrc/fastdeploy/vision/detection/ppdet/picodet.cc +++ b/csrc/fastdeploy/vision/detection/ppdet/picodet.cc @@ -24,8 +24,8 @@ PicoDet::PicoDet(const std::string& model_file, const std::string& params_file, const RuntimeOption& custom_option, const Frontend& model_format) { config_file_ = config_file; - valid_cpu_backends = {Backend::PDINFER, Backend::ORT}; - valid_gpu_backends = {Backend::PDINFER, Backend::ORT}; + valid_cpu_backends = {Backend::ORT, Backend::PDINFER}; + valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT}; runtime_option = custom_option; 
runtime_option.model_format = model_format; runtime_option.model_file = model_file; diff --git a/csrc/fastdeploy/vision/detection/ppdet/ppyoloe.cc b/csrc/fastdeploy/vision/detection/ppdet/ppyoloe.cc index 2e4b56ecb8a..12786a08a4a 100644 --- a/csrc/fastdeploy/vision/detection/ppdet/ppyoloe.cc +++ b/csrc/fastdeploy/vision/detection/ppdet/ppyoloe.cc @@ -14,8 +14,8 @@ PPYOLOE::PPYOLOE(const std::string& model_file, const std::string& params_file, const RuntimeOption& custom_option, const Frontend& model_format) { config_file_ = config_file; - valid_cpu_backends = {Backend::PDINFER, Backend::ORT}; - valid_gpu_backends = {Backend::PDINFER, Backend::ORT}; + valid_cpu_backends = {Backend::ORT, Backend::PDINFER}; + valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT}; runtime_option = custom_option; runtime_option.model_format = model_format; runtime_option.model_file = model_file; diff --git a/csrc/fastdeploy/vision/detection/ppdet/yolov3.cc b/csrc/fastdeploy/vision/detection/ppdet/yolov3.cc index 309d65640c3..8de0ec231a5 100644 --- a/csrc/fastdeploy/vision/detection/ppdet/yolov3.cc +++ b/csrc/fastdeploy/vision/detection/ppdet/yolov3.cc @@ -23,8 +23,8 @@ YOLOv3::YOLOv3(const std::string& model_file, const std::string& params_file, const RuntimeOption& custom_option, const Frontend& model_format) { config_file_ = config_file; - valid_cpu_backends = {Backend::PDINFER}; - valid_gpu_backends = {Backend::PDINFER}; + valid_cpu_backends = {Backend::ORT, Backend::PDINFER}; + valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT}; runtime_option = custom_option; runtime_option.model_format = model_format; runtime_option.model_file = model_file; diff --git a/csrc/fastdeploy/vision/detection/ppdet/yolox.cc b/csrc/fastdeploy/vision/detection/ppdet/yolox.cc index a60ebfcc45c..dbf0824ba39 100644 --- a/csrc/fastdeploy/vision/detection/ppdet/yolox.cc +++ b/csrc/fastdeploy/vision/detection/ppdet/yolox.cc @@ -18,12 +18,14 @@ namespace fastdeploy { namespace vision { namespace detection { -PaddleYOLOX::PaddleYOLOX(const std::string& model_file, const std::string& params_file, - const std::string& config_file, const RuntimeOption& custom_option, - const Frontend& model_format) { +PaddleYOLOX::PaddleYOLOX(const std::string& model_file, + const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option, + const Frontend& model_format) { config_file_ = config_file; - valid_cpu_backends = {Backend::PDINFER, Backend::ORT}; - valid_gpu_backends = {Backend::PDINFER, Backend::ORT}; + valid_cpu_backends = {Backend::ORT, Backend::PDINFER}; + valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT}; runtime_option = custom_option; runtime_option.model_format = model_format; runtime_option.model_file = model_file; diff --git a/csrc/fastdeploy/vision/visualize/detection.cc b/csrc/fastdeploy/vision/visualize/detection.cc index 147ef6556b0..693e9da72b4 100644 --- a/csrc/fastdeploy/vision/visualize/detection.cc +++ b/csrc/fastdeploy/vision/visualize/detection.cc @@ -24,13 +24,17 @@ namespace vision { // If need to visualize num_classes > 1000 // Please call Visualize::GetColorMap(num_classes) first cv::Mat Visualize::VisDetection(const cv::Mat& im, - const DetectionResult& result, int line_size, + const DetectionResult& result, + float score_threshold, int line_size, float font_size) { auto color_map = GetColorMap(); int h = im.rows; int w = im.cols; auto vis_im = im.clone(); for (size_t i = 0; i < result.boxes.size(); ++i) { + if (result.scores[i] < 
score_threshold) { + continue; + } cv::Rect rect(result.boxes[i][0], result.boxes[i][1], result.boxes[i][2] - result.boxes[i][0], result.boxes[i][3] - result.boxes[i][1]); diff --git a/csrc/fastdeploy/vision/visualize/visualize.h b/csrc/fastdeploy/vision/visualize/visualize.h index bee62c30122..e8709d73037 100644 --- a/csrc/fastdeploy/vision/visualize/visualize.h +++ b/csrc/fastdeploy/vision/visualize/visualize.h @@ -26,7 +26,8 @@ class FASTDEPLOY_DECL Visualize { static std::vector color_map_; static const std::vector& GetColorMap(int num_classes = 1000); static cv::Mat VisDetection(const cv::Mat& im, const DetectionResult& result, - int line_size = 1, float font_size = 0.5f); + float score_threshold = 0.0, int line_size = 1, + float font_size = 0.5f); static cv::Mat VisFaceDetection(const cv::Mat& im, const FaceDetectionResult& result, int line_size = 1, float font_size = 0.5f); diff --git a/csrc/fastdeploy/vision/visualize/visualize_pybind.cc b/csrc/fastdeploy/vision/visualize/visualize_pybind.cc index 36010acf1f8..508ac84c648 100644 --- a/csrc/fastdeploy/vision/visualize/visualize_pybind.cc +++ b/csrc/fastdeploy/vision/visualize/visualize_pybind.cc @@ -20,10 +20,10 @@ void BindVisualize(pybind11::module& m) { .def(pybind11::init<>()) .def_static("vis_detection", [](pybind11::array& im_data, vision::DetectionResult& result, - int line_size, float font_size) { + float score_threshold, int line_size, float font_size) { auto im = PyArrayToCvMat(im_data); auto vis_im = vision::Visualize::VisDetection( - im, result, line_size, font_size); + im, result, score_threshold, line_size, font_size); FDTensor out; vision::Mat(vis_im).ShareWithTensor(&out); return TensorToPyArray(out); diff --git a/examples/vision/detection/paddledetection/.README.md.swp b/examples/vision/detection/paddledetection/.README.md.swp new file mode 100644 index 0000000000000000000000000000000000000000..eb2e1a7c21247adc3b90dd73dbecba16c5f6a9e3 GIT binary patch literal 12288 zcmeHNTTdHD6dtO!NiRvO^tsZeG!iKCdhO+wytRQ!mBOWzR!LE`@?|kj%`k6^ar$3FW#|Tsmeni`r5NQYcLR!0^yqdT*)jyzs|ekQ{pofb)KH6zP*9dNqxngf`)ucVo7$<`d`i3R zoGkg;{F0Anjb_&E@_XA^kK6`E>@6-;^NLD9?zD4N4$l4#)T>Ed>~OUy0a3Mw{HkA; z>|Rk5J(}@Jae2J~ajo81^Hz0j4cq2@l3TQ&DnJ!jr@#TkI~|t$?N!p^ssT&sfv6S!L0N(yG#eDnJ#W3Qz^80#pI2 z09Al0Koy`0Pz9(0RDpk?0xtL&{f}o6N<9b8!|(t3@BhEPg3vF3?*ZQdS^+-5dw>SO zn}A}#;z5M!0Y4u=r~_~aF!eIz5db~`3w{!alY0wBQe;ByY}1HkZq2|zEZ09Al0@V`*N#x$Qe zdNkJ)&7|&~De)*uDT1bkPOVjz@~w(lD}~*T$c>eU^25M7OLO30+oVR%^SvFt6+JrdZ8) zzLW_wjtZ{!Z79n)w(3Zj!Q_aATD!#0y?m@SG%S<-Fjm+$ray%~<44zS1P;kFkzbaz!zG4LOCNgL8oV zUXLX373Dja^J6XL=kKg?5&@2}m4**nKLBFRhziRq#}0yGD5gnOG|s%ceOi9G%j?7-DvJg&954jW5sZ)3^2ch+!+tWbQ5NiCOYlpYF#C zQQ$*J?|YDr&yh|%+>dYc z!v=+hLKoZ;=dAKyLPeus`>A_b)qLJ+Eh3v{ic51k;_I-nU zG@1K+8h53Daz(35&tT6LockPhtDMXhBh2=Zi;{TKqoM zJin~-&pbg-*PpCEwMSg$nY79h;`fjy#FO`d24g;jJSF@+N#*SKx6?~-V6@l$vb2p1c#Un#AJ}=@86sXsp * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径 +> * **config_file**(str): 配置文件路径,即PaddleDetection导出的部署yaml文件 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为PADDLE格式 + +#### Predict函数 + +> ``` +> PPYOLOE::Predict(cv::Mat* im, DetectionResult* result) +> ``` +> +> 模型预测接口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **im**: 输入图像,注意需为HWC,BGR格式 +> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/) + +- [模型介绍](../../) +- [Python部署](../python) +- [视觉模型预测结果](../../../../../docs/api/vision_results/) diff --git 
a/examples/vision/detection/paddledetection/cpp/infer_faster_rcnn.cc b/examples/vision/detection/paddledetection/cpp/infer_faster_rcnn.cc new file mode 100644 index 00000000000..7bd7fd91c94 --- /dev/null +++ b/examples/vision/detection/paddledetection/cpp/infer_faster_rcnn.cc @@ -0,0 +1,94 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision.h" + +#ifdef WIN32 +const char sep = '\\'; +#else +const char sep = '/'; +#endif + +void CpuInfer(const std::string& model_dir, const std::string& image_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "infer_cfg.yml"; + auto model = fastdeploy::vision::detection::FasterRCNN( + model_file, params_file, config_file); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." << std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +void GpuInfer(const std::string& model_dir, const std::string& image_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "infer_cfg.yml"; + + auto option = fastdeploy::RuntimeOption(); + option.UseGpu(); + auto model = fastdeploy::vision::detection::FasterRCNN( + model_file, params_file, config_file, option); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." << std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +int main(int argc, char* argv[]) { + if (argc < 4) { + std::cout + << "Usage: infer_demo path/to/model_dir path/to/image run_option, " + "e.g ./infer_model ./faster_rcnn_r50_vd_fpn_2x_coco ./test.jpeg 0" + << std::endl; + std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " + "with gpu." 
+ << std::endl; + return -1; + } + + if (std::atoi(argv[3]) == 0) { + CpuInfer(argv[1], argv[2]); + } else if (std::atoi(argv[3]) == 1) { + GpuInfer(argv[1], argv[2]); + } + return 0; +} diff --git a/examples/vision/detection/paddledetection/cpp/infer_picodet.cc b/examples/vision/detection/paddledetection/cpp/infer_picodet.cc new file mode 100644 index 00000000000..19c2a6837ea --- /dev/null +++ b/examples/vision/detection/paddledetection/cpp/infer_picodet.cc @@ -0,0 +1,127 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision.h" + +#ifdef WIN32 +const char sep = '\\'; +#else +const char sep = '/'; +#endif + +void CpuInfer(const std::string& model_dir, const std::string& image_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "infer_cfg.yml"; + auto model = fastdeploy::vision::detection::PicoDet(model_file, params_file, + config_file); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." << std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +void GpuInfer(const std::string& model_dir, const std::string& image_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "infer_cfg.yml"; + + auto option = fastdeploy::RuntimeOption(); + option.UseGpu(); + auto model = fastdeploy::vision::detection::PicoDet(model_file, params_file, + config_file, option); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." 
<< std::endl;
+    return;
+  }
+
+  auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5);
+  cv::imwrite("vis_result.jpg", vis_im);
+  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void TrtInfer(const std::string& model_dir, const std::string& image_file) {
+  auto model_file = model_dir + sep + "model.pdmodel";
+  auto params_file = model_dir + sep + "model.pdiparams";
+  auto config_file = model_dir + sep + "infer_cfg.yml";
+
+  auto option = fastdeploy::RuntimeOption();
+  option.UseGpu();
+  option.UseTrtBackend();
+  option.SetTrtInputShape("image", {1, 3, 320, 320});
+  option.SetTrtInputShape("scale_factor", {1, 2});
+  auto model = fastdeploy::vision::detection::PicoDet(model_file, params_file,
+                                                      config_file, option);
+  if (!model.Initialized()) {
+    std::cerr << "Failed to initialize." << std::endl;
+    return;
+  }
+
+  auto im = cv::imread(image_file);
+  auto im_bak = im.clone();
+
+  fastdeploy::vision::DetectionResult res;
+  if (!model.Predict(&im, &res)) {
+    std::cerr << "Failed to predict." << std::endl;
+    return;
+  }
+
+  auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5);
+  cv::imwrite("vis_result.jpg", vis_im);
+  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+  if (argc < 4) {
+    std::cout
+        << "Usage: infer_demo path/to/model_dir path/to/image run_option, "
+           "e.g ./infer_model ./picodet_model_dir ./test.jpeg 0"
+        << std::endl;
+    std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+                 "with gpu; 2: run with gpu and use tensorrt backend."
+              << std::endl;
+    return -1;
+  }
+
+  if (std::atoi(argv[3]) == 0) {
+    CpuInfer(argv[1], argv[2]);
+  } else if (std::atoi(argv[3]) == 1) {
+    GpuInfer(argv[1], argv[2]);
+  } else if (std::atoi(argv[3]) == 2) {
+    TrtInfer(argv[1], argv[2]);
+  }
+  return 0;
+}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_ppyolo.cc b/examples/vision/detection/paddledetection/cpp/infer_ppyolo.cc
new file mode 100644
index 00000000000..a111e70f560
--- /dev/null
+++ b/examples/vision/detection/paddledetection/cpp/infer_ppyolo.cc
@@ -0,0 +1,94 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void CpuInfer(const std::string& model_dir, const std::string& image_file) {
+  auto model_file = model_dir + sep + "model.pdmodel";
+  auto params_file = model_dir + sep + "model.pdiparams";
+  auto config_file = model_dir + sep + "infer_cfg.yml";
+  auto model = fastdeploy::vision::detection::PPYOLO(model_file, params_file,
+                                                     config_file);
+  if (!model.Initialized()) {
+    std::cerr << "Failed to initialize." << std::endl;
+    return;
+  }
+
+  auto im = cv::imread(image_file);
+  auto im_bak = im.clone();
+
+  fastdeploy::vision::DetectionResult res;
+  if (!model.Predict(&im, &res)) {
+    std::cerr << "Failed to predict."
<< std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +void GpuInfer(const std::string& model_dir, const std::string& image_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "infer_cfg.yml"; + + auto option = fastdeploy::RuntimeOption(); + option.UseGpu(); + auto model = fastdeploy::vision::detection::PPYOLO(model_file, params_file, + config_file, option); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." << std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +int main(int argc, char* argv[]) { + if (argc < 4) { + std::cout + << "Usage: infer_demo path/to/model_dir path/to/image run_option, " + "e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0" + << std::endl; + std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " + "with gpu." + << std::endl; + return -1; + } + + if (std::atoi(argv[3]) == 0) { + CpuInfer(argv[1], argv[2]); + } else if (std::atoi(argv[3]) == 1) { + GpuInfer(argv[1], argv[2]); + } + return 0; +} diff --git a/examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc b/examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc new file mode 100644 index 00000000000..ec01d3914db --- /dev/null +++ b/examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc @@ -0,0 +1,127 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision.h" + +#ifdef WIN32 +const char sep = '\\'; +#else +const char sep = '/'; +#endif + +void CpuInfer(const std::string& model_dir, const std::string& image_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "infer_cfg.yml"; + auto model = fastdeploy::vision::detection::PPYOLOE(model_file, params_file, + config_file); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." 
<< std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +void GpuInfer(const std::string& model_dir, const std::string& image_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "infer_cfg.yml"; + + auto option = fastdeploy::RuntimeOption(); + option.UseGpu(); + auto model = fastdeploy::vision::detection::PPYOLOE(model_file, params_file, + config_file, option); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." << std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +void TrtInfer(const std::string& model_dir, const std::string& image_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "infer_cfg.yml"; + + auto option = fastdeploy::RuntimeOption(); + option.UseGpu(); + option.UseTrtBackend(); + option.SetTrtInputShape("image", {1, 3, 640, 640}); + option.SetTrtInputShape("scale_factor", {1, 2}); + auto model = fastdeploy::vision::detection::PPYOLOE(model_file, params_file, + config_file, option); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." << std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +int main(int argc, char* argv[]) { + if (argc < 4) { + std::cout + << "Usage: infer_demo path/to/model_dir path/to/image run_option, " + "e.g ./infer_model ./ppyoloe_model_dir ./test.jpeg 0" + << std::endl; + std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " + "with gpu; 2: run with gpu and use tensorrt backend." + << std::endl; + return -1; + } + + if (std::atoi(argv[3]) == 0) { + CpuInfer(argv[1], argv[2]); + } else if (std::atoi(argv[3]) == 1) { + GpuInfer(argv[1], argv[2]); + } else if (std::atoi(argv[3]) == 2) { + TrtInfer(argv[1], argv[2]); + } + return 0; +} diff --git a/examples/vision/detection/paddledetection/cpp/infer_yolov3.cc b/examples/vision/detection/paddledetection/cpp/infer_yolov3.cc new file mode 100644 index 00000000000..54571b8d272 --- /dev/null +++ b/examples/vision/detection/paddledetection/cpp/infer_yolov3.cc @@ -0,0 +1,94 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision.h" + +#ifdef WIN32 +const char sep = '\\'; +#else +const char sep = '/'; +#endif + +void CpuInfer(const std::string& model_dir, const std::string& image_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "infer_cfg.yml"; + auto model = fastdeploy::vision::detection::YOLOv3(model_file, params_file, + config_file); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." << std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +void GpuInfer(const std::string& model_dir, const std::string& image_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "infer_cfg.yml"; + + auto option = fastdeploy::RuntimeOption(); + option.UseGpu(); + auto model = fastdeploy::vision::detection::YOLOv3(model_file, params_file, + config_file, option); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." << std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +int main(int argc, char* argv[]) { + if (argc < 4) { + std::cout + << "Usage: infer_demo path/to/model_dir path/to/image run_option, " + "e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0" + << std::endl; + std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " + "with gpu." + << std::endl; + return -1; + } + + if (std::atoi(argv[3]) == 0) { + CpuInfer(argv[1], argv[2]); + } else if (std::atoi(argv[3]) == 1) { + GpuInfer(argv[1], argv[2]); + } + return 0; +} diff --git a/examples/vision/detection/paddledetection/cpp/infer_yolox.cc b/examples/vision/detection/paddledetection/cpp/infer_yolox.cc new file mode 100644 index 00000000000..8eb9b422427 --- /dev/null +++ b/examples/vision/detection/paddledetection/cpp/infer_yolox.cc @@ -0,0 +1,127 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision.h" + +#ifdef WIN32 +const char sep = '\\'; +#else +const char sep = '/'; +#endif + +void CpuInfer(const std::string& model_dir, const std::string& image_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "infer_cfg.yml"; + auto model = fastdeploy::vision::detection::PaddleYOLOX( + model_file, params_file, config_file); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." << std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +void GpuInfer(const std::string& model_dir, const std::string& image_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "infer_cfg.yml"; + + auto option = fastdeploy::RuntimeOption(); + option.UseGpu(); + auto model = fastdeploy::vision::detection::PaddleYOLOX( + model_file, params_file, config_file, option); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." << std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +void TrtInfer(const std::string& model_dir, const std::string& image_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "infer_cfg.yml"; + + auto option = fastdeploy::RuntimeOption(); + option.UseGpu(); + option.UseTrtBackend(); + option.SetTrtInputShape("image", {1, 3, 640, 640}); + option.SetTrtInputShape("scale_factor", {1, 2}); + auto model = fastdeploy::vision::detection::PaddleYOLOX( + model_file, params_file, config_file, option); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." 
<< std::endl;
+    return;
+  }
+
+  auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5);
+  cv::imwrite("vis_result.jpg", vis_im);
+  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+  if (argc < 4) {
+    std::cout
+        << "Usage: infer_demo path/to/model_dir path/to/image run_option, "
+           "e.g ./infer_model ./paddle_yolox_dirname ./test.jpeg 0"
+        << std::endl;
+    std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+                 "with gpu; 2: run with gpu by tensorrt."
+              << std::endl;
+    return -1;
+  }
+
+  if (std::atoi(argv[3]) == 0) {
+    CpuInfer(argv[1], argv[2]);
+  } else if (std::atoi(argv[3]) == 1) {
+    GpuInfer(argv[1], argv[2]);
+  } else if (std::atoi(argv[3]) == 2) {
+    TrtInfer(argv[1], argv[2]);
+  }
+  return 0;
+}
diff --git a/examples/vision/detection/paddledetection/python/README.md b/examples/vision/detection/paddledetection/python/README.md
new file mode 100644
index 00000000000..3863481abff
--- /dev/null
+++ b/examples/vision/detection/paddledetection/python/README.md
@@ -0,0 +1,72 @@
+# PaddleDetection Python部署示例
+
+在部署前,需确认以下两个步骤
+
+- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md)
+- 2. FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/quick_start/install.md)
+
+本目录下提供`infer_xxx.py`快速完成PPYOLOE/PicoDet等模型在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成
+
+```
+#下载PPYOLOE模型文件和测试图片
+wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+tar xvf ppyoloe_crn_l_300e_coco.tgz
+
+#下载部署示例代码
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/paddledetection/python/
+
+# CPU推理
+python infer_ppyoloe.py --model_dir ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device cpu
+# GPU推理
+python infer_ppyoloe.py --model_dir ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device gpu
+# GPU上使用TensorRT推理 (注意:TensorRT推理第一次运行,有序列化模型的操作,有一定耗时,需要耐心等待)
+python infer_ppyoloe.py --model_dir ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device gpu --use_trt True
+```
+
+运行完成可视化结果如下图所示
+
+## PaddleDetection Python接口
+
+```
+fastdeploy.vision.detection.PPYOLOE(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)
+fastdeploy.vision.detection.PicoDet(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)
+fastdeploy.vision.detection.PaddleYOLOX(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)
+fastdeploy.vision.detection.YOLOv3(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)
+fastdeploy.vision.detection.PPYOLO(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)
+fastdeploy.vision.detection.FasterRCNN(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)
+```
+
+PaddleDetection模型加载和初始化,其中model_file, params_file为导出的Paddle部署模型格式, config_file为PaddleDetection同时导出的部署配置yaml文件
+
+**参数**
+
+> * **model_file**(str): 模型文件路径
+> * **params_file**(str): 参数文件路径
+> * **config_file**(str): 推理配置yaml文件路径
+> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置
+> * **model_format**(Frontend): 模型格式,默认为Paddle
+
+### predict函数
+
+PaddleDetection中各个模型,包括PPYOLOE/PicoDet/PaddleYOLOX/YOLOv3/PPYOLO/FasterRCNN,均提供如下同样的成员函数用于进行图像的检测
+> ```
+> PPYOLOE.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
+> ```
+>
+> 模型预测接口,输入图像直接输出检测结果。
+> +> **参数** +> +> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式 + +> **返回** +> +> > 返回`fastdeploy.vision.DetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/) + +## 其它文档 + +- [PaddleDetection 模型介绍](..) +- [PaddleDetection C++部署](../cpp) +- [模型预测结果说明](../../../../../docs/api/vision_results/) diff --git a/examples/vision/detection/paddledetection/python/infer_faster_rcnn.py b/examples/vision/detection/paddledetection/python/infer_faster_rcnn.py new file mode 100644 index 00000000000..1100aa8a602 --- /dev/null +++ b/examples/vision/detection/paddledetection/python/infer_faster_rcnn.py @@ -0,0 +1,61 @@ +import fastdeploy as fd +import cv2 +import os + + +def parse_arguments(): + import argparse + import ast + parser = argparse.ArgumentParser() + parser.add_argument( + "--model_dir", + required=True, + help="Path of PaddleDetection model directory") + parser.add_argument( + "--image", required=True, help="Path of test image file.") + parser.add_argument( + "--device", + type=str, + default='cpu', + help="Type of inference device, support 'cpu' or 'gpu'.") + parser.add_argument( + "--use_trt", + type=ast.literal_eval, + default=False, + help="Wether to use tensorrt.") + return parser.parse_args() + + +def build_option(args): + option = fd.RuntimeOption() + + if args.device.lower() == "gpu": + option.use_gpu() + + if args.use_trt: + option.use_trt_backend() + option.set_trt_input_shape("image", [1, 3, 640, 640]) + option.set_trt_input_shape("scale_factor", [1, 2]) + return option + + +args = parse_arguments() + +model_file = os.path.join(args.model_dir, "model.pdmodel") +params_file = os.path.join(args.model_dir, "model.pdiparams") +config_file = os.path.join(args.model_dir, "infer_cfg.yml") + +# 配置runtime,加载模型 +runtime_option = build_option(args) +model = fd.vision.detection.FasterRCNN( + model_file, params_file, config_file, runtime_option=runtime_option) + +# 预测图片检测结果 +im = cv2.imread(args.image) +result = model.predict(im) +print(result) + +# 预测结果可视化 +vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5) +cv2.imwrite("visualized_result.jpg", vis_im) +print("Visualized result save in ./visualized_result.jpg") diff --git a/examples/vision/detection/paddledetection/python/infer_picodet.py b/examples/vision/detection/paddledetection/python/infer_picodet.py new file mode 100644 index 00000000000..06bfad03c0d --- /dev/null +++ b/examples/vision/detection/paddledetection/python/infer_picodet.py @@ -0,0 +1,61 @@ +import fastdeploy as fd +import cv2 +import os + + +def parse_arguments(): + import argparse + import ast + parser = argparse.ArgumentParser() + parser.add_argument( + "--model_dir", + required=True, + help="Path of PaddleDetection model directory") + parser.add_argument( + "--image", required=True, help="Path of test image file.") + parser.add_argument( + "--device", + type=str, + default='cpu', + help="Type of inference device, support 'cpu' or 'gpu'.") + parser.add_argument( + "--use_trt", + type=ast.literal_eval, + default=False, + help="Wether to use tensorrt.") + return parser.parse_args() + + +def build_option(args): + option = fd.RuntimeOption() + + if args.device.lower() == "gpu": + option.use_gpu() + + if args.use_trt: + option.use_trt_backend() + option.set_trt_input_shape("image", [1, 3, 320, 320]) + option.set_trt_input_shape("scale_factor", [1, 2]) + return option + + +args = parse_arguments() + +model_file = os.path.join(args.model_dir, "model.pdmodel") +params_file = os.path.join(args.model_dir, "model.pdiparams") +config_file = 
os.path.join(args.model_dir, "infer_cfg.yml")
+
+# 配置runtime,加载模型
+runtime_option = build_option(args)
+model = fd.vision.detection.PicoDet(
+    model_file, params_file, config_file, runtime_option=runtime_option)
+
+# 预测图片检测结果
+im = cv2.imread(args.image)
+result = model.predict(im)
+print(result)
+
+# 预测结果可视化
+vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result save in ./visualized_result.jpg")
diff --git a/examples/vision/detection/paddledetection/python/infer_ppyolo.py b/examples/vision/detection/paddledetection/python/infer_ppyolo.py
new file mode 100644
index 00000000000..029f3dc21d5
--- /dev/null
+++ b/examples/vision/detection/paddledetection/python/infer_ppyolo.py
@@ -0,0 +1,62 @@
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+    import argparse
+    import ast
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--model_dir",
+        required=True,
+        help="Path of PaddleDetection model directory")
+    parser.add_argument(
+        "--image", required=True, help="Path of test image file.")
+    parser.add_argument(
+        "--device",
+        type=str,
+        default='cpu',
+        help="Type of inference device, support 'cpu' or 'gpu'.")
+    parser.add_argument(
+        "--use_trt",
+        type=ast.literal_eval,
+        default=False,
+        help="Wether to use tensorrt.")
+    return parser.parse_args()
+
+
+def build_option(args):
+    option = fd.RuntimeOption()
+
+    if args.device.lower() == "gpu":
+        option.use_gpu()
+
+    if args.use_trt:
+        option.use_trt_backend()
+        option.set_trt_input_shape("image", [1, 3, 640, 640])
+        option.set_trt_input_shape("scale_factor", [1, 2])
+    return option
+
+
+args = parse_arguments()
+
+model_file = os.path.join(args.model_dir, "model.pdmodel")
+params_file = os.path.join(args.model_dir, "model.pdiparams")
+config_file = os.path.join(args.model_dir, "infer_cfg.yml")
+
+# 配置runtime,加载模型
+runtime_option = build_option(args)
+model = fd.vision.detection.PPYOLO(
+    model_file, params_file, config_file, runtime_option=runtime_option)
+
+# 预测图片检测结果
+im = cv2.imread(args.image)
+result = model.predict(im)
+print(result)
+
+# 预测结果可视化
+vis_im = fd.vision.vis_detection(
+    im, result, score_threshold=0.5)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result save in ./visualized_result.jpg")
diff --git a/examples/vision/detection/paddledetection/python/infer_ppyoloe.py b/examples/vision/detection/paddledetection/python/infer_ppyoloe.py
new file mode 100644
index 00000000000..ae533a50937
--- /dev/null
+++ b/examples/vision/detection/paddledetection/python/infer_ppyoloe.py
@@ -0,0 +1,61 @@
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+    import argparse
+    import ast
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--model_dir",
+        required=True,
+        help="Path of PaddleDetection model directory")
+    parser.add_argument(
+        "--image", required=True, help="Path of test image file.")
+    parser.add_argument(
+        "--device",
+        type=str,
+        default='cpu',
+        help="Type of inference device, support 'cpu' or 'gpu'.")
+    parser.add_argument(
+        "--use_trt",
+        type=ast.literal_eval,
+        default=False,
+        help="Wether to use tensorrt.")
+    return parser.parse_args()
+
+
+def build_option(args):
+    option = fd.RuntimeOption()
+
+    if args.device.lower() == "gpu":
+        option.use_gpu()
+
+    if args.use_trt:
+        option.use_trt_backend()
+        option.set_trt_input_shape("image", [1, 3, 640, 640])
+        option.set_trt_input_shape("scale_factor", [1, 2])
+    return option
+
+
+args = parse_arguments() + +model_file = os.path.join(args.model_dir, "model.pdmodel") +params_file = os.path.join(args.model_dir, "model.pdiparams") +config_file = os.path.join(args.model_dir, "infer_cfg.yml") + +# 配置runtime,加载模型 +runtime_option = build_option(args) +model = fd.vision.detection.PPYOLOE( + model_file, params_file, config_file, runtime_option=runtime_option) + +# 预测图片检测结果 +im = cv2.imread(args.image) +result = model.predict(im) +print(result) + +# 预测结果可视化 +vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5) +cv2.imwrite("visualized_result.jpg", vis_im) +print("Visualized result save in ./visualized_result.jpg") diff --git a/examples/vision/detection/paddledetection/python/infer_yolov3.py b/examples/vision/detection/paddledetection/python/infer_yolov3.py new file mode 100644 index 00000000000..7ea372ff238 --- /dev/null +++ b/examples/vision/detection/paddledetection/python/infer_yolov3.py @@ -0,0 +1,62 @@ +import fastdeploy as fd +import cv2 +import os + + +def parse_arguments(): + import argparse + import ast + parser = argparse.ArgumentParser() + parser.add_argument( + "--model_dir", + required=True, + help="Path of PaddleDetection model directory") + parser.add_argument( + "--image", required=True, help="Path of test image file.") + parser.add_argument( + "--device", + type=str, + default='cpu', + help="Type of inference device, support 'cpu' or 'gpu'.") + parser.add_argument( + "--use_trt", + type=ast.literal_eval, + default=False, + help="Wether to use tensorrt.") + return parser.parse_args() + + +def build_option(args): + option = fd.RuntimeOption() + + if args.device.lower() == "gpu": + option.use_gpu() + + if args.use_trt: + option.use_trt_backend() + option.set_trt_input_shape("image", [1, 3, 608, 608]) + option.set_trt_input_shape("im_shape", [1, 2]) + option.set_trt_input_shape("scale_factor", [1, 2]) + return option + + +args = parse_arguments() + +model_file = os.path.join(args.model_dir, "model.pdmodel") +params_file = os.path.join(args.model_dir, "model.pdiparams") +config_file = os.path.join(args.model_dir, "infer_cfg.yml") + +# 配置runtime,加载模型 +runtime_option = build_option(args) +model = fd.vision.detection.YOLOv3( + model_file, params_file, config_file, runtime_option=runtime_option) + +# 预测图片检测结果 +im = cv2.imread(args.image) +result = model.predict(im) +print(result) + +# 预测结果可视化 +vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5) +cv2.imwrite("visualized_result.jpg", vis_im) +print("Visualized result save in ./visualized_result.jpg") diff --git a/examples/vision/detection/paddledetection/python/infer_yolox.py b/examples/vision/detection/paddledetection/python/infer_yolox.py new file mode 100644 index 00000000000..f65b1d8b122 --- /dev/null +++ b/examples/vision/detection/paddledetection/python/infer_yolox.py @@ -0,0 +1,61 @@ +import fastdeploy as fd +import cv2 +import os + + +def parse_arguments(): + import argparse + import ast + parser = argparse.ArgumentParser() + parser.add_argument( + "--model_dir", + required=True, + help="Path of PaddleDetection model directory") + parser.add_argument( + "--image", required=True, help="Path of test image file.") + parser.add_argument( + "--device", + type=str, + default='cpu', + help="Type of inference device, support 'cpu' or 'gpu'.") + parser.add_argument( + "--use_trt", + type=ast.literal_eval, + default=False, + help="Wether to use tensorrt.") + return parser.parse_args() + + +def build_option(args): + option = fd.RuntimeOption() + + if args.device.lower() == "gpu": + 
option.use_gpu() + + if args.use_trt: + option.use_trt_backend() + option.set_trt_input_shape("image", [1, 3, 640, 640]) + option.set_trt_input_shape("scale_factor", [1, 2]) + return option + + +args = parse_arguments() + +model_file = os.path.join(args.model_dir, "model.pdmodel") +params_file = os.path.join(args.model_dir, "model.pdiparams") +config_file = os.path.join(args.model_dir, "infer_cfg.yml") + +# 配置runtime,加载模型 +runtime_option = build_option(args) +model = fd.vision.detection.PaddleYOLOX( + model_file, params_file, config_file, runtime_option=runtime_option) + +# 预测图片检测结果 +im = cv2.imread(args.image) +result = model.predict(im) +print(result) + +# 预测结果可视化 +vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5) +cv2.imwrite("visualized_result.jpg", vis_im) +print("Visualized result save in ./visualized_result.jpg") diff --git a/fastdeploy/runtime.py b/fastdeploy/runtime.py index 38c89549805..43936533da0 100644 --- a/fastdeploy/runtime.py +++ b/fastdeploy/runtime.py @@ -81,6 +81,12 @@ def enable_paddle_mkldnn(self): def disable_paddle_mkldnn(self): return self._option.disable_paddle_mkldnn() + def enable_paddle_log_info(self): + return self._option.enable_paddle_log_info() + + def disable_paddle_log_info(self): + return self._option.disable_paddle_log_info() + def set_paddle_mkldnn_cache_size(self, cache_size): return self._option.set_paddle_mkldnn_cache_size(cache_size) diff --git a/fastdeploy/vision/__init__.py b/fastdeploy/vision/__init__.py index e99c019c301..97c81eb5035 100644 --- a/fastdeploy/vision/__init__.py +++ b/fastdeploy/vision/__init__.py @@ -14,11 +14,12 @@ from __future__ import absolute_import from . import detection +from . import classification + from . import matting from . import facedet from . import faceid -from . import ppcls from . import ppseg from . import evaluation from .visualize import * diff --git a/fastdeploy/vision/visualize/__init__.py b/fastdeploy/vision/visualize/__init__.py index b2b8e90add8..faa54f82477 100644 --- a/fastdeploy/vision/visualize/__init__.py +++ b/fastdeploy/vision/visualize/__init__.py @@ -17,9 +17,13 @@ from ... 
import c_lib_wrap as C -def vis_detection(im_data, det_result, line_size=1, font_size=0.5): - return C.vision.Visualize.vis_detection(im_data, det_result, line_size, - font_size) +def vis_detection(im_data, + det_result, + score_threshold=0.0, + line_size=1, + font_size=0.5): + return C.vision.Visualize.vis_detection( + im_data, det_result, score_threshold, line_size, font_size) def vis_face_detection(im_data, face_det_result, line_size=1, font_size=0.5): From 82010c30c52df27d4fa605b496ce6c9834829440 Mon Sep 17 00:00:00 2001 From: Jason Date: Wed, 10 Aug 2022 23:04:10 +0800 Subject: [PATCH 2/2] Delete .README.md.swp --- .../detection/paddledetection/.README.md.swp | Bin 12288 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 examples/vision/detection/paddledetection/.README.md.swp diff --git a/examples/vision/detection/paddledetection/.README.md.swp b/examples/vision/detection/paddledetection/.README.md.swp deleted file mode 100644 index eb2e1a7c21247adc3b90dd73dbecba16c5f6a9e3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12288 zcmeHNTTdHD6dtO!NiRvO^tsZeG!iKCdhO+wytRQ!mBOWzR!LE`@?|kj%`k6^ar$3FW#|Tsmeni`r5NQYcLR!0^yqdT*)jyzs|ekQ{pofb)KH6zP*9dNqxngf`)ucVo7$<`d`i3R zoGkg;{F0Anjb_&E@_XA^kK6`E>@6-;^NLD9?zD4N4$l4#)T>Ed>~OUy0a3Mw{HkA; z>|Rk5J(}@Jae2J~ajo81^Hz0j4cq2@l3TQ&DnJ!jr@#TkI~|t$?N!p^ssT&sfv6S!L0N(yG#eDnJ#W3Qz^80#pI2 z09Al0Koy`0Pz9(0RDpk?0xtL&{f}o6N<9b8!|(t3@BhEPg3vF3?*ZQdS^+-5dw>SO zn}A}#;z5M!0Y4u=r~_~aF!eIz5db~`3w{!alY0wBQe;ByY}1HkZq2|zEZ09Al0@V`*N#x$Qe zdNkJ)&7|&~De)*uDT1bkPOVjz@~w(lD}~*T$c>eU^25M7OLO30+oVR%^SvFt6+JrdZ8) zzLW_wjtZ{!Z79n)w(3Zj!Q_aATD!#0y?m@SG%S<-Fjm+$ray%~<44zS1P;kFkzbaz!zG4LOCNgL8oV zUXLX373Dja^J6XL=kKg?5&@2}m4**nKLBFRhziRq#}0yGD5gnOG|s%ceOi9G%j?7-DvJg&954jW5sZ)3^2ch+!+tWbQ5NiCOYlpYF#C zQQ$*J?|YDr&yh|%+>dYc z!v=+hLKoZ;=dAKyLPeus`>A_b)qLJ+Eh3v{ic51k;_I-nU zG@1K+8h53Daz(35&tT6LockPhtDMXhBh2=Zi;{TKqoM zJin~-&pbg-*PpCEwMSg$nY79h;`fjy#FO`d24g;jJSF@+N#*SKx6?~-V6@l$vb2p1c#Un#AJ}=@86sXsp
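
The Python pieces this series adds (the `enable_paddle_log_info`/`disable_paddle_log_info` runtime switches and the `score_threshold` argument of `fd.vision.vis_detection`) can be exercised together roughly as follows. This is a minimal sketch, not part of the patches themselves: the model directory and image file are placeholders borrowed from the example READMEs above, and it assumes a FastDeploy build with the Paddle Inference backend available.

```python
import fastdeploy as fd
import cv2

# New switch from this series: surface Paddle Inference's glog output while
# debugging; it only takes effect when the Paddle backend is the one selected.
option = fd.RuntimeOption()
option.enable_paddle_log_info()

# Placeholder paths: an exported PaddleDetection PP-YOLOE model directory and
# a test image, as used in the python README above.
model_dir = "ppyoloe_crn_l_300e_coco"
model = fd.vision.detection.PPYOLOE(
    model_dir + "/model.pdmodel",
    model_dir + "/model.pdiparams",
    model_dir + "/infer_cfg.yml",
    runtime_option=option)

im = cv2.imread("000000014439.jpg")
result = model.predict(im)

# vis_detection now accepts score_threshold (added by this series) to drop
# low-confidence boxes before drawing.
vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("vis_result.jpg", vis_im)
```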