diff --git a/.gitattributes b/.gitattributes deleted file mode 100644 index d9728021..00000000 --- a/.gitattributes +++ /dev/null @@ -1,23 +0,0 @@ -# Caffe -*.caffemodel filter=lfs diff=lfs merge=lfs -text - -# Tensorflow -*.pb filter=lfs diff=lfs merge=lfs -text -*.pbtxt filter=lfs diff=lfs merge=lfs -text - -# Torch -*.t7 filter=lfs diff=lfs merge=lfs -text -*.net filter=lfs diff=lfs merge=lfs -text - -# Darknet -*.weights filter=lfs diff=lfs merge=lfs -text - -# ONNX -*.onnx filter=lfs diff=lfs merge=lfs -text - -# Images -*.jpg filter=lfs diff=lfs merge=lfs -text -*.gif filter=lfs diff=lfs merge=lfs -text -*.png filter=lfs diff=lfs merge=lfs -text -*.webp filter=lfs diff=lfs merge=lfs -text - diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 2df6ebfd..00000000 --- a/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -*.pyc -**/__pycache__ -**/__pycache__/** - -.vscode - -build/ -**/build -**/build/** diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/README.md b/README.md deleted file mode 100644 index 9c747cc5..00000000 --- a/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# ***This Project is now hosted in Hugging Face. Check https://huggingface.co/opencv to get models and online demos!*** - -# OpenCV Zoo and Benchmark - -A zoo for models tuned for OpenCV DNN with benchmarks on different platforms. - -Guidelines: - -- Install latest `opencv-python`: - ```shell - python3 -m pip install opencv-python - # Or upgrade to latest version - python3 -m pip install --upgrade opencv-python - ``` -- Clone this repo to download all models and demo scripts: - ```shell - # Install git-lfs from https://git-lfs.github.com/ - git clone https://github.com/opencv/opencv_zoo && cd opencv_zoo - git lfs install - git lfs pull - ``` -- To run benchmarks on your hardware settings, please refer to [benchmark/README](./benchmark/README.md). - -## Models & Benchmark Results - -![](benchmark/color_table.svg?raw=true) - -Hardware Setup: - -x86-64: -- [Intel Core i7-12700K](https://www.intel.com/content/www/us/en/products/sku/134594/intel-core-i712700k-processor-25m-cache-up-to-5-00-ghz/specifications.html): 8 Performance-cores (3.60 GHz, turbo up to 4.90 GHz), 4 Efficient-cores (2.70 GHz, turbo up to 3.80 GHz), 20 threads. 
- -ARM: -- [Khadas VIM3](https://www.khadas.com/vim3): Amlogic A311D SoC with a 2.2GHz Quad core ARM Cortex-A73 + 1.8GHz dual core Cortex-A53 ARM CPU, and a 5 TOPS NPU. Benchmarks are done using **per-tensor quantized** models. Follow [this guide](https://github.com/opencv/opencv/wiki/TIM-VX-Backend-For-Running-OpenCV-On-NPU) to build OpenCV with TIM-VX backend enabled. -- [Khadas VIM4](https://www.khadas.com/vim4): Amlogic A311D2 SoC with 2.2GHz Quad core ARM Cortex-A73 and 2.0GHz Quad core Cortex-A53 CPU, and 3.2 TOPS Build-in NPU. -- [Khadas Edge 2](https://www.khadas.com/edge2): Rockchip RK3588S SoC with a CPU of 2.25 GHz Quad Core ARM Cortex-A76 + 1.8 GHz Quad Core Cortex-A55, and a 6 TOPS NPU. -- [Atlas 200 DK](https://e.huawei.com/en/products/computing/ascend/atlas-200): Ascend 310 NPU with 22 TOPS @ INT8. Follow [this guide](https://github.com/opencv/opencv/wiki/Huawei-CANN-Backend) to build OpenCV with CANN backend enabled. -- [Atlas 200I DK A2](https://www.hiascend.com/hardware/developer-kit-a2): SoC with 1.0GHz Quad-core CPU and Ascend 310B NPU with 8 TOPS @ INT8. -- [NVIDIA Jetson Nano B01](https://developer.nvidia.com/embedded/jetson-nano-developer-kit): a Quad-core ARM A57 @ 1.43 GHz CPU, and a 128-core NVIDIA Maxwell GPU. -- [NVIDIA Jetson Nano Orin](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/jetson-orin/): a 6-core Arm® Cortex®-A78AE v8.2 64-bit CPU, and a 1024-core NVIDIA Ampere architecture GPU with 32 Tensor Cores (max freq 625MHz). -- [Raspberry Pi 4B](https://www.raspberrypi.com/products/raspberry-pi-4-model-b/specifications/): Broadcom BCM2711 SoC with a Quad core Cortex-A72 (ARM v8) 64-bit @ 1.5 GHz. -- [Horizon Sunrise X3](https://developer.horizon.ai/sunrise): an SoC from Horizon Robotics with a quad-core ARM Cortex-A53 1.2 GHz CPU and a 5 TOPS BPU (a.k.a NPU). 
-- [MAIX-III AXera-Pi](https://wiki.sipeed.com/hardware/en/maixIII/ax-pi/axpi.html#Hardware): Axera AX620A SoC with a quad-core ARM Cortex-A7 CPU and a 3.6 TOPS @ int8 NPU. -- [Toybrick RV1126](https://t.rock-chips.com/en/portal.php?mod=view&aid=26): Rockchip RV1126 SoC with a quard-core ARM Cortex-A7 CPU and a 2.0 TOPs NPU. - -RISC-V: -- [StarFive VisionFive 2](https://doc-en.rvspace.org/VisionFive2/Product_Brief/VisionFive_2/specification_pb.html): `StarFive JH7110` SoC with a RISC-V quad-core CPU, which can turbo up to 1.5GHz, and an GPU of model `IMG BXE-4-32 MC1` from Imagination, which has a work freq up to 600MHz. -- [Allwinner Nezha D1](https://d1.docs.aw-ol.com/en): Allwinner D1 SoC with a 1.0 GHz single-core RISC-V [Xuantie C906 CPU](https://www.t-head.cn/product/C906?spm=a2ouz.12986968.0.0.7bfc1384auGNPZ) with RVV 0.7.1 support. YuNet is tested for now. Visit [here](https://github.com/fengyuentau/opencv_zoo_cpp) for more details. - -***Important Notes***: - -- The data under each column of hardware setups on the above table represents the elapsed time of an inference (preprocess, forward and postprocess). -- The time data is the mean of 10 runs after some warmup runs. Different metrics may be applied to some specific models. -- Batch size is 1 for all benchmark results. -- `---` represents the model is not availble to run on the device. -- View [benchmark/config](./benchmark/config) for more details on benchmarking different models. - -## Some Examples - -Some examples are listed below. You can find more in the directory of each model! 
- -### Face Detection with [YuNet](./models/face_detection_yunet/) - -![largest selfie](./models/face_detection_yunet/example_outputs/largest_selfie.jpg) - -### Face Recognition with [SFace](./models/face_recognition_sface/) - -![sface demo](./models/face_recognition_sface/example_outputs/demo.jpg) - -### Facial Expression Recognition with [Progressive Teacher](./models/facial_expression_recognition/) - -![fer demo](./models/facial_expression_recognition/example_outputs/selfie.jpg) - -### Human Segmentation with [PP-HumanSeg](./models/human_segmentation_pphumanseg/) - -![messi](./models/human_segmentation_pphumanseg/example_outputs/messi.jpg) - -### Image Segmentation with [EfficientSAM](./models/image_segmentation_efficientsam/) - -![sam_present](./models/image_segmentation_efficientsam/example_outputs/sam_present.gif) - -### License Plate Detection with [LPD_YuNet](./models/license_plate_detection_yunet/) - -![license plate detection](./models/license_plate_detection_yunet/example_outputs/lpd_yunet_demo.gif) - -### Object Detection with [NanoDet](./models/object_detection_nanodet/) & [YOLOX](./models/object_detection_yolox/) - -![nanodet demo](./models/object_detection_nanodet/example_outputs/1_res.jpg) - -![yolox demo](./models/object_detection_yolox/example_outputs/3_res.jpg) - -### Object Tracking with [VitTrack](./models/object_tracking_vittrack/) - -![webcam demo](./models/object_tracking_vittrack/example_outputs/vittrack_demo.gif) - -### Palm Detection with [MP-PalmDet](./models/palm_detection_mediapipe/) - -![palm det](./models/palm_detection_mediapipe/example_outputs/mppalmdet_demo.gif) - -### Hand Pose Estimation with [MP-HandPose](models/handpose_estimation_mediapipe/) - -![handpose estimation](models/handpose_estimation_mediapipe/example_outputs/mphandpose_demo.webp) - -### Person Detection with [MP-PersonDet](./models/person_detection_mediapipe) - -![person det](./models/person_detection_mediapipe/example_outputs/mppersondet_demo.webp) - -### Pose 
Estimation with [MP-Pose](models/pose_estimation_mediapipe) - -![pose_estimation](models/pose_estimation_mediapipe/example_outputs/mpposeest_demo.webp) - -### QR Code Detection and Parsing with [WeChatQRCode](./models/qrcode_wechatqrcode/) - -![qrcode](./models/qrcode_wechatqrcode/example_outputs/wechat_qrcode_demo.gif) - -### Chinese Text detection [PPOCR-Det](./models/text_detection_ppocr/) - -![mask](./models/text_detection_ppocr/example_outputs/mask.jpg) - -### English Text detection [PPOCR-Det](./models/text_detection_ppocr/) - -![gsoc](./models/text_detection_ppocr/example_outputs/gsoc.jpg) - -### Text Detection with [CRNN](./models/text_recognition_crnn/) - -![crnn_demo](./models/text_recognition_crnn/example_outputs/CRNNCTC.gif) - -## License - -OpenCV Zoo is licensed under the [Apache 2.0 license](./LICENSE). Please refer to licenses of different models. diff --git a/benchmark/README.md b/benchmark/README.md deleted file mode 100644 index a4bb9c8f..00000000 --- a/benchmark/README.md +++ /dev/null @@ -1,987 +0,0 @@ -# OpenCV Zoo Benchmark - -Benchmarking the speed of OpenCV DNN inferring different models in the zoo. Result of each model includes the time of its preprocessing, inference and postprocessing stages. - -Data for benchmarking will be downloaded and loaded in [data](./data) based on given config. - -## Preparation - -1. Install `python >= 3.6`. -2. Install dependencies: `pip install -r requirements.txt`. -3. Download data for benchmarking. - 1. Download all data: `python download_data.py` - 2. Download one or more specified data: `python download_data.py face text`. Available names can be found in `download_data.py`. - 3. You can also download all data from https://pan.baidu.com/s/18sV8D4vXUb2xC9EG45k7bg (code: pvrw). Please place and extract data packages under [./data](./data). - -## Benchmarking - -**Linux**: - -```shell -export PYTHONPATH=$PYTHONPATH:.. 
- -# Single config -python benchmark.py --cfg ./config/face_detection_yunet.yaml - -# All configs -python benchmark.py --all - -# All configs but only fp32 models (--fp32, --fp16, --int8 --int8bq are available for now) -python benchmark.py --all --fp32 - -# All configs but exclude some of them (fill with config name keywords, not sensitive to upper/lower case, seperate with colons) -python benchmark.py --all --cfg_exclude wechat -python benchmark.py --all --cfg_exclude wechat:crnn - -# All configs but exclude some of the models (fill with exact model names, sensitive to upper/lower case, seperate with colons) -python benchmark.py --all --model_exclude license_plate_detection_lpd_yunet_2023mar_int8.onnx:human_segmentation_pphumanseg_2023mar_int8.onnx - -# All configs with overwritten backend and target (run with --help to get available combinations) -python benchmark.py --all --cfg_overwrite_backend_target 1 -``` - -**Windows**: -- CMD - ```shell - set PYTHONPATH=%PYTHONPATH%;.. - python benchmark.py --cfg ./config/face_detection_yunet.yaml - ``` - -- PowerShell - ```shell - $env:PYTHONPATH=$env:PYTHONPATH+";.." - python benchmark.py --cfg ./config/face_detection_yunet.yaml - ``` - -## Detailed Results - -Benchmark is done with latest opencv-python & opencv-contrib-python (current 4.10.0) on the following platforms. Some models are excluded because of support issues. - -### Intel 12700K - -Specs: [details](https://www.intel.com/content/www/us/en/products/sku/134594/intel-core-i712700k-processor-25m-cache-up-to-5-00-ghz/specifications.html) -- CPU: 8 Performance-cores, 4 Efficient-cores, 20 threads - - Performance-core: 3.60 GHz base freq, turbo up to 4.90 GHz - - Efficient-core: 2.70 GHz base freq, turbo up to 3.80 GHz - -CPU: - -``` -$ python3 benchmark.py --all -Benchmarking ... 
-backend=cv.dnn.DNN_BACKEND_OPENCV -target=cv.dnn.DNN_TARGET_CPU -mean median min input size model -0.69 0.70 0.68 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -0.79 0.80 0.68 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -5.09 5.13 4.96 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -6.50 6.79 4.96 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -1.79 1.76 1.75 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -2.92 3.11 1.75 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -2.40 2.43 2.37 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -3.11 3.15 2.37 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -5.59 5.56 5.28 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -6.07 6.22 5.28 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -3.13 3.14 3.05 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -3.04 3.02 2.92 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -3.46 3.03 2.92 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -3.84 3.77 2.92 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr_int8.onnx'] -19.47 19.47 19.08 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -21.52 21.86 19.08 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -5.68 5.66 5.51 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -7.41 7.36 5.51 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -41.02 40.99 40.86 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -42.23 42.30 40.86 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx'] -78.77 79.76 77.16 [640, 640] YoloX with 
['object_detection_yolox_2022nov.onnx'] -75.69 75.58 72.57 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -4.01 3.84 3.79 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -5.35 5.41 5.22 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -6.73 6.85 5.22 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -7.65 7.65 7.55 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -15.56 15.57 15.10 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -16.67 16.57 15.10 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -6.33 6.63 6.14 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -1.19 1.30 1.07 [100, 100] WeChatQRCode with ['detect_2021nov.prototxt', 'detect_2021nov.caffemodel', 'sr_2021nov.prototxt', 'sr_2021nov.caffemodel'] -18.76 19.59 18.48 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -18.59 19.33 18.12 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -22.05 18.60 18.12 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -24.47 25.06 18.12 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may_int8.onnx'] -10.61 10.66 10.50 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -11.03 11.23 10.50 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -9.85 11.62 7.74 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -10.02 9.71 7.74 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2023feb_fp16.onnx'] -9.53 7.83 7.74 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2023feb_fp16.onnx'] -9.68 9.21 7.74 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2022oct_int8.onnx'] -9.85 10.63 7.74 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov_int8.onnx'] -9.63 9.28 7.74 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` - -### Raspberry Pi 4B - -Specs: 
[details](https://www.raspberrypi.com/products/raspberry-pi-4-model-b/specifications/) -- CPU: Broadcom BCM2711, Quad core Cortex-A72 (ARM v8) 64-bit SoC @ 1.5 GHz. - -CPU: - -``` -$ python3 benchmark.py --all -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_OPENCV -target=cv.dnn.DNN_TARGET_CPU -mean median min input size model -6.23 6.27 6.18 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -6.68 6.73 6.18 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -68.82 69.06 68.45 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -87.42 89.84 68.45 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -27.81 27.77 27.67 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -35.71 36.67 27.67 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -42.58 42.41 42.25 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -46.49 46.95 42.25 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -71.35 71.62 70.78 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -73.81 74.23 70.78 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -64.20 64.30 63.98 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -57.91 58.41 52.53 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -61.35 52.83 52.53 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -61.49 61.28 52.53 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr_int8.onnx'] -420.93 420.73 419.04 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -410.96 395.74 364.68 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -153.87 152.71 140.85 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -157.86 145.90 140.85 [320, 240] 
LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -214.59 211.95 210.98 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -215.09 238.39 208.18 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx'] -1614.13 1639.80 1476.58 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -1597.92 1599.12 1476.58 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -48.55 46.87 41.75 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -97.05 95.40 80.93 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -112.39 116.22 80.93 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -105.60 113.27 88.55 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -478.89 498.05 444.14 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -442.56 477.87 369.59 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -116.15 120.13 106.81 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -5.90 5.90 5.81 [100, 100] WeChatQRCode with ['detect_2021nov.prototxt', 'detect_2021nov.caffemodel', 'sr_2021nov.prototxt', 'sr_2021nov.caffemodel'] -325.02 325.88 303.55 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -323.54 332.45 303.55 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -372.32 328.56 303.55 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -407.90 411.97 303.55 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may_int8.onnx'] -235.70 236.07 234.87 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -240.95 241.14 234.87 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -226.09 247.02 200.44 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -229.25 224.63 200.44 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2023feb_fp16.onnx'] -224.10 201.29 200.44 [1280, 720] CRNN with 
['text_recognition_CRNN_EN_2023feb_fp16.onnx'] -223.58 219.82 200.44 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2022oct_int8.onnx'] -225.60 243.89 200.44 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov_int8.onnx'] -220.97 223.16 193.91 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` - -### Jetson Nano B01 - -Specs: [details](https://developer.nvidia.com/embedded/jetson-nano-developer-kit) -- CPU: Quad-core ARM A57 @ 1.43 GHz -- GPU: 128-core NVIDIA Maxwell - -CPU: - -``` -$ python3 benchmark.py --all -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_OPENCV -target=cv.dnn.DNN_TARGET_CPU -mean median min input size model -5.62 5.54 5.52 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -6.14 6.24 5.52 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -64.80 64.95 64.60 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -78.31 79.85 64.60 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -26.54 26.61 26.37 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -33.96 34.85 26.37 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -38.45 41.45 38.20 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -42.62 43.20 38.20 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -64.95 64.85 64.73 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -72.39 73.16 64.73 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -65.72 65.98 65.59 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -56.66 57.56 49.10 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -62.09 49.27 49.10 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -62.17 62.02 49.10 [224, 224] MobileNet with 
['image_classification_mobilenetv2_2022apr_int8.onnx'] -346.78 348.06 345.53 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -371.11 373.54 345.53 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -134.36 134.33 133.45 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -140.62 140.94 133.45 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -215.67 216.76 214.69 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -216.58 216.78 214.69 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx'] -1209.12 1213.05 1201.68 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -1240.02 1249.95 1201.68 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -48.39 47.38 45.00 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -75.30 75.25 74.96 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -83.83 84.99 74.96 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -87.65 87.59 87.37 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -356.78 357.77 355.69 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -346.84 351.10 335.96 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -75.20 79.36 73.71 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -5.56 5.56 5.48 [100, 100] WeChatQRCode with ['detect_2021nov.prototxt', 'detect_2021nov.caffemodel', 'sr_2021nov.prototxt', 'sr_2021nov.caffemodel'] -209.80 210.04 208.84 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -209.60 212.74 208.49 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -254.56 211.17 208.49 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -286.57 296.56 208.49 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may_int8.onnx'] -252.60 252.48 252.21 [1280, 720] CRNN with 
['text_recognition_CRNN_CH_2021sep.onnx'] -259.28 261.38 252.21 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -245.18 266.94 220.49 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -247.72 244.25 220.49 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2023feb_fp16.onnx'] -241.63 221.43 219.06 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2023feb_fp16.onnx'] -243.46 238.98 219.06 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2022oct_int8.onnx'] -246.87 256.05 219.06 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov_int8.onnx'] -243.37 238.90 219.06 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` - -GPU (CUDA-FP32): - -``` -$ python3 benchmark.py --all --fp32 --cfg_exclude wechat --cfg_overwrite_backend_target 1 -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_CUDA -target=cv.dnn.DNN_TARGET_CUDA -mean median min input size model -10.99 10.71 9.64 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -25.25 25.81 24.54 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -13.97 14.01 13.72 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -24.47 24.36 23.69 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -67.25 67.99 64.90 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -28.96 28.92 28.85 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -28.61 28.45 27.92 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -98.80 100.11 94.57 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -54.88 56.51 52.78 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -63.86 63.59 63.35 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -371.32 374.79 367.78 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -47.26 45.56 44.69 [1280, 720] VitTrack with 
['object_tracking_vittrack_2023sep.onnx'] -37.61 37.61 33.64 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -37.39 37.71 37.03 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -90.84 91.34 85.77 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -76.44 78.00 74.90 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -112.68 112.21 110.42 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -112.48 111.86 110.04 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -43.99 43.33 41.68 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -44.97 44.42 41.68 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -36.77 46.38 21.77 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -``` - -GPU (CUDA-FP16): - -``` -$ python3 benchmark.py --all --fp32 --cfg_exclude wechat --cfg_overwrite_backend_target 2 -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_CUDA -target=cv.dnn.DNN_TARGET_CUDA_FP16 -mean median min input size model -25.05 25.05 24.95 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -117.82 126.96 113.17 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -88.54 88.33 88.04 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -97.43 97.38 96.98 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -69.40 68.28 66.36 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -120.92 131.57 119.37 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -128.43 128.08 119.37 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -64.90 63.88 62.81 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -370.21 371.97 366.38 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -164.28 164.75 162.94 [416, 416] NanoDet with 
['object_detection_nanodet_2022nov.onnx'] -299.22 300.54 295.64 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -49.61 47.58 47.14 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -149.50 151.12 147.24 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -156.59 154.01 153.92 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -43.66 43.64 43.31 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -75.87 77.33 74.38 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -428.97 428.99 426.11 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -428.66 427.46 425.66 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -32.41 31.90 31.68 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -33.42 35.75 31.68 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -29.34 36.44 21.27 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -``` - -### Khadas VIM3 - -Specs: [details](https://www.khadas.com/vim3) -- (SoC) CPU: Amlogic A311D, 2.2 GHz Quad core ARM Cortex-A73 and 1.8 GHz dual core Cortex-A53 -- NPU: 5 TOPS Performance NPU INT8 inference up to 1536 MAC Supports all major deep learning frameworks including TensorFlow and Caffe - -CPU: - -``` -$ python3 benchmark.py --all --cfg_exclude wechat -Benchmarking ... 
-backend=cv.dnn.DNN_BACKEND_OPENCV -target=cv.dnn.DNN_TARGET_CPU -mean median min input size model -4.62 4.62 4.53 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -5.24 5.29 4.53 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -55.04 54.55 53.54 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -67.34 67.96 53.54 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -29.50 45.62 26.14 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -35.59 36.22 26.14 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -35.80 35.08 34.76 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -40.32 45.32 34.76 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -71.92 66.92 62.98 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -70.68 72.31 62.98 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -59.27 53.91 52.09 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -52.17 67.58 41.23 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -55.44 47.28 41.23 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -55.83 56.80 41.23 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr_int8.onnx'] -335.75 329.39 325.42 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -340.42 335.78 325.42 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -128.58 127.15 124.03 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -125.85 126.47 110.14 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -179.93 170.66 166.76 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -178.61 213.72 164.61 [416, 416] NanoDet with 
['object_detection_nanodet_2022nov_int8.onnx'] -1108.12 1100.93 1072.45 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -1100.58 1121.31 982.74 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -32.20 32.84 30.99 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -78.26 78.96 75.60 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -87.18 88.22 75.60 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -83.22 84.20 80.07 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -327.07 339.80 321.98 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -316.56 302.60 269.10 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -75.38 73.67 70.15 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -211.02 213.14 199.28 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -210.19 217.15 199.28 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -242.34 225.59 199.28 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -265.33 271.87 199.28 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may_int8.onnx'] -194.77 195.13 192.69 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -197.16 200.94 192.69 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -185.45 199.47 161.37 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -187.64 180.57 161.37 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2023feb_fp16.onnx'] -182.53 166.96 161.37 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2023feb_fp16.onnx'] -182.90 178.97 161.37 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2022oct_int8.onnx'] -184.26 194.43 161.37 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov_int8.onnx'] -180.65 180.59 155.36 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` - -NPU (TIMVX): - -``` -$ python3 benchmark.py --all --int8 
--cfg_overwrite_backend_target 3 -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_TIMVX -target=cv.dnn.DNN_TARGET_NPU -mean median min input size model -5.24 7.45 4.77 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -45.96 46.10 43.21 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -30.25 30.30 28.68 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -19.75 20.18 18.19 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -28.75 28.85 28.47 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -148.80 148.85 143.45 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -143.17 141.11 136.58 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr_int8.onnx'] -73.19 78.57 62.89 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -32.11 30.50 29.97 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -116.32 120.72 99.40 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx'] -408.18 418.89 374.12 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -37.34 38.57 32.03 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -41.82 39.84 37.63 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -160.70 160.90 153.15 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -160.47 160.48 151.88 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may_int8.onnx'] -239.38 237.47 231.95 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2022oct_int8.onnx'] -197.61 201.16 162.69 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov_int8.onnx'] -196.69 164.78 162.69 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` - -### Atlas 200 DK - -Specs: [details_en](https://e.huawei.com/uk/products/cloud-computing-dc/atlas/atlas-200), 
[details_cn](https://www.hiascend.com/zh/hardware/developer-kit) -- (SoC) CPU: 8-core Cortex-A55 @ 1.6 GHz (max) -- NPU: Ascend 310, dual DaVinci AI cores, 22/16/8 TOPS INT8. - -CPU: - -``` -$ python3 benchmark.py --all --cfg_exclude wechat -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_OPENCV -target=cv.dnn.DNN_TARGET_CPU -mean median min input size model -7.82 7.82 7.77 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -8.57 8.77 7.77 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -92.21 92.11 91.87 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -122.07 126.02 91.87 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -42.93 43.26 42.75 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -55.91 57.40 42.75 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -67.85 67.91 67.47 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -70.06 70.21 67.47 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -102.49 102.65 102.10 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -114.02 116.16 102.10 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -92.66 92.49 92.36 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -79.39 80.75 68.47 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -89.66 68.66 68.47 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -90.59 92.13 68.47 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr_int8.onnx'] -499.55 500.15 498.36 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -571.85 580.88 498.36 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -201.99 201.55 200.62 [320, 240] LPD_YuNet with 
['license_plate_detection_lpd_yunet_2023mar.onnx'] -216.72 217.34 200.62 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -313.66 313.85 312.13 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -322.98 323.45 312.13 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx'] -1875.33 1877.53 1871.26 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -1989.04 2005.25 1871.26 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -143.62 143.19 137.16 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -159.80 159.62 159.40 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -152.18 152.86 145.56 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -145.83 145.77 145.45 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -521.46 521.66 520.28 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -541.50 544.02 520.28 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -134.02 136.01 132.06 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -360.26 360.82 359.13 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -361.22 361.51 359.13 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -427.85 362.87 359.13 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -475.44 490.06 359.13 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may_int8.onnx'] -285.19 284.91 284.69 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -318.96 323.30 284.69 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -289.82 360.87 244.07 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -285.40 303.13 244.07 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2023feb_fp16.onnx'] -274.67 244.47 243.87 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2023feb_fp16.onnx'] -277.84 262.99 243.87 [1280, 720] CRNN with 
['text_recognition_CRNN_CH_2022oct_int8.onnx'] -283.02 280.77 243.87 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov_int8.onnx'] -279.21 262.55 243.87 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` - -NPU (CANN): - - - -``` -$ python3 benchmark.py --all --fp32 --cfg_exclude wechat:crnn:vittrack --model_exclude pose_estimation_mediapipe_2023mar.onnx --cfg_overwrite_backend_target 4 -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_CANN -target=cv.dnn.DNN_TARGET_NPU -mean median min input size model -2.24 2.21 2.19 [160, 120] YuNet with ['face_detection_yunet_2022mar.onnx'] -2.66 2.66 2.64 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -2.19 2.19 2.16 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -6.27 6.22 6.17 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -6.94 6.94 6.85 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -5.15 5.13 5.10 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -5.41 5.42 5.10 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -6.99 6.99 6.95 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -7.63 7.64 7.43 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -20.62 22.09 19.16 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -28.59 28.60 27.91 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -5.17 5.26 5.09 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -16.45 16.44 16.31 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -5.58 5.57 5.54 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -``` - -### Toybrick RV1126 - -Specs: [details](https://t.rock-chips.com/en/portal.php?mod=view&aid=26) -- CPU: Quad core ARM Cortex-A7, up to 1.5GHz -- NPU (Not supported by OpenCV): 2.0TOPS, support 8bit / 16bit - -CPU: - -``` 
-$ python3 benchmark.py --all --cfg_exclude wechat -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_OPENCV -target=cv.dnn.DNN_TARGET_CPU -mean median min input size model -56.78 56.74 56.46 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -51.16 51.41 45.18 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -1737.74 1733.23 1723.65 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -1298.48 1336.02 920.44 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -609.51 611.79 584.89 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -500.21 517.38 399.97 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -465.12 471.89 445.36 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -389.95 385.01 318.29 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -1623.94 1607.90 1595.09 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -1109.61 1186.03 671.15 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -1567.09 1578.61 1542.75 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -1188.83 1219.46 850.92 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -996.30 884.80 689.11 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -849.51 805.93 507.78 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr_int8.onnx'] -11855.64 11836.80 11750.10 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -7752.60 8149.00 4429.83 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -3260.22 3251.14 3204.85 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -2287.10 2400.53 1482.04 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -2335.89 
2335.93 2313.63 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -1899.16 1945.72 1529.46 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx'] -37600.81 37558.85 37414.98 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -24185.35 25519.27 13395.47 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -411.41 448.29 397.86 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -905.77 890.22 866.06 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -780.94 817.69 653.26 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -1315.48 1321.44 1299.68 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -11143.23 11155.05 11105.11 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -7056.60 7457.76 3753.42 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -736.02 732.90 701.14 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -4267.03 4288.42 4229.69 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -4265.58 4276.54 4222.22 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -3678.65 4265.95 2636.57 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -3383.73 3490.66 2636.57 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may_int8.onnx'] -2180.44 2197.45 2152.67 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -2217.08 2241.77 2152.67 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -2217.15 2251.65 2152.67 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -2206.73 2219.60 2152.63 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2023feb_fp16.onnx'] -2208.84 2219.14 2152.63 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2023feb_fp16.onnx'] -2035.98 2185.05 1268.94 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2022oct_int8.onnx'] -1927.93 2178.84 1268.94 [1280, 720] CRNN with 
['text_recognition_CRNN_CN_2021nov_int8.onnx'] -1822.23 2213.30 1183.93 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` - -### Khadas Edge2 (with RK3588) - -Board specs: [details](https://www.khadas.com/edge2) -SoC specs: [details](https://www.rock-chips.com/a/en/products/RK35_Series/2022/0926/1660.html) -- CPU: 2.25GHz Quad Core ARM Cortex-A76 + 1.8GHz Quad Core Cortex-A55 -- NPU (Not supported by OpenCV): Build-in 6 TOPS Performance NPU, triple core, support int4 / int8 / int16 / fp16 / bf16 / tf32 - -CPU: - -``` -$ python3 benchmark.py --all --cfg_exclude wechat -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_OPENCV -target=cv.dnn.DNN_TARGET_CPU -mean median min input size model -2.30 2.29 2.26 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -2.70 2.73 2.26 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -28.94 29.00 28.60 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -37.46 38.85 28.60 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -12.44 12.40 12.36 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -17.14 17.64 12.36 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -20.22 20.36 20.08 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -23.11 23.50 20.08 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -29.63 29.78 28.61 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -35.57 35.61 28.61 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -27.45 27.46 27.25 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -22.95 23.37 19.13 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -27.50 19.40 19.13 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -28.46 29.33 19.13 [224, 224] MobileNet with 
['image_classification_mobilenetv2_2022apr_int8.onnx'] -151.10 151.79 146.96 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -181.69 184.19 146.96 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -53.83 52.64 50.24 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -60.95 60.06 50.24 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -98.03 104.53 83.47 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -106.91 110.68 83.47 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx'] -554.30 550.32 538.99 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -591.95 599.62 538.99 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -14.02 13.89 13.56 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -45.03 44.65 43.28 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -50.87 52.24 43.28 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -42.90 42.68 42.40 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -148.01 146.42 139.56 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -159.16 155.98 139.56 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -37.06 37.43 36.39 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -103.42 104.24 101.26 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -103.41 104.41 100.08 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -126.21 103.90 100.08 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -142.53 147.66 100.08 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may_int8.onnx'] -69.49 69.52 69.17 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -70.63 70.69 69.17 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -67.15 72.03 61.13 [1280, 720] CRNN with 
['text_recognition_CRNN_EN_2021sep.onnx'] -67.74 66.72 61.13 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2023feb_fp16.onnx'] -66.26 61.46 61.13 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2023feb_fp16.onnx'] -67.36 65.65 61.13 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2022oct_int8.onnx'] -68.52 69.93 61.13 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov_int8.onnx'] -68.36 65.65 61.13 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` - -### Horizon Sunrise X3 PI - -Specs: [details_cn](https://developer.horizon.ai/sunrise) -- CPU: ARM Cortex-A53,4xCore, 1.2G -- BPU (aka NPU, not supported by OpenCV): (Bernoulli Arch) 2×Core,up to 1.0G, ~5Tops - -CPU: - -``` -$ python3 benchmark.py --all -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_OPENCV -target=cv.dnn.DNN_TARGET_CPU -mean median min input size model -10.56 10.69 10.46 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -12.45 12.60 10.46 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -124.80 127.36 124.45 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -168.67 174.03 124.45 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -55.12 55.38 54.91 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -76.31 79.00 54.91 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -77.44 77.53 77.07 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -89.22 90.40 77.07 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -132.95 133.21 132.35 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -147.40 149.99 132.35 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -119.71 120.69 119.32 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -102.57 104.40 88.49 [224, 224] MobileNet with 
['image_classification_mobilenetv2_2022apr.onnx'] -114.56 88.81 88.49 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -117.12 116.07 88.49 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr_int8.onnx'] -653.39 653.85 651.99 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -706.43 712.61 651.99 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -252.05 252.16 250.98 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -273.03 274.27 250.98 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -399.35 405.40 390.82 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -413.37 410.75 390.82 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx'] -2516.91 2516.82 2506.54 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -2544.65 2551.55 2506.54 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -84.15 85.18 77.31 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -168.54 169.05 168.15 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -196.46 199.81 168.15 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -172.55 172.83 171.85 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -678.74 678.04 677.44 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -653.71 655.74 631.68 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -162.87 165.82 160.04 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -9.93 9.97 9.82 [100, 100] WeChatQRCode with ['detect_2021nov.prototxt', 'detect_2021nov.caffemodel', 'sr_2021nov.prototxt', 'sr_2021nov.caffemodel'] -475.98 475.34 472.72 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -475.90 477.57 472.44 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -585.72 475.98 472.44 [640, 
480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -663.34 687.10 472.44 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may_int8.onnx'] -446.82 445.92 444.32 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -453.60 456.07 444.32 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -427.47 463.88 381.10 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -432.15 421.18 381.10 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2023feb_fp16.onnx'] -420.61 386.28 380.35 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2023feb_fp16.onnx'] -425.24 426.69 380.35 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2022oct_int8.onnx'] -431.14 447.85 380.35 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov_int8.onnx'] -424.77 417.01 380.35 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` - -### MAIX-III AX-PI - -Specs: [details_en](https://wiki.sipeed.com/hardware/en/maixIII/ax-pi/axpi.html#Hardware), [details_cn](https://wiki.sipeed.com/hardware/zh/maixIII/ax-pi/axpi.html#%E7%A1%AC%E4%BB%B6%E5%8F%82%E6%95%B0) -SoC specs: [details_cn](https://axera-tech.com/product/T7297367876123493768) -- CPU: Quad cores ARM Cortex-A7 -- NPU (Not supported by OpenCV): 14.4Tops@int4,3.6Tops@int8 - -CPU: - -``` -$ python3 benchmark.py --all --cfg_exclude wechat -Benchmarking ... 
-backend=cv.dnn.DNN_BACKEND_OPENCV -target=cv.dnn.DNN_TARGET_CPU -mean median min input size model -83.95 83.76 83.62 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -79.35 79.92 75.47 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -2326.96 2326.49 2326.08 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -1950.83 1988.86 1648.47 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -823.42 823.35 822.50 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -750.31 757.91 691.41 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -664.73 664.61 663.84 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -596.29 603.96 540.72 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -2175.34 2173.62 2172.91 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -1655.11 1705.43 1236.22 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -2123.08 2122.92 2122.18 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -1619.08 1672.32 1215.05 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -1470.74 1216.86 1215.05 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -1287.09 1242.01 873.92 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr_int8.onnx'] -15841.89 15841.20 15828.32 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -11652.03 12079.50 8299.15 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -4371.75 4396.81 4370.29 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -3428.89 3521.87 2670.46 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -3421.19 3412.22 3411.20 [416, 416] NanoDet with 
['object_detection_nanodet_2022nov.onnx'] -2990.22 3034.11 2645.09 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx'] -50633.38 50617.44 50614.78 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -36260.23 37731.28 24683.40 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -548.36 551.97 537.90 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -1285.54 1285.40 1284.43 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -1204.04 1211.89 1137.65 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -1849.87 1848.78 1847.80 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -14895.99 14894.27 14884.17 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -10496.44 10931.97 6976.60 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -1045.98 1052.05 1040.56 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -5899.23 5900.08 5896.73 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -5889.39 5890.58 5878.81 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -5436.61 5884.03 4665.77 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -5185.53 5273.76 4539.47 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may_int8.onnx'] -3230.95 3226.14 3225.53 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -3281.31 3295.46 3225.53 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -3247.56 3337.52 3196.25 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -3243.20 3276.35 3196.25 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2023feb_fp16.onnx'] -3230.49 3196.80 3195.02 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2023feb_fp16.onnx'] -3065.33 3217.99 2348.42 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2022oct_int8.onnx'] -2976.24 3244.75 2348.42 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov_int8.onnx'] 
-2864.72 3219.46 2208.44 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` - -### StarFive VisionFive 2 - -Specs: [details_cn](https://doc.rvspace.org/VisionFive2/PB/VisionFive_2/specification_pb.html), [details_en](https://doc-en.rvspace.org/VisionFive2/Product_Brief/VisionFive_2/specification_pb.html) -- CPU: StarFive JH7110 with RISC-V quad-core CPU with 2 MB L2 cache and a monitor core, supporting RV64GC ISA, working up to 1.5 GHz -- GPU: IMG BXE-4-32 MC1 with work frequency up to 600 MHz - -CPU: - -``` -$ python3 benchmark.py --all --cfg_exclude wechat -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_OPENCV -target=cv.dnn.DNN_TARGET_CPU -mean median min input size model -41.13 41.07 41.06 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -37.43 37.83 34.35 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -1169.96 1169.72 1168.74 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -887.13 987.00 659.71 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -423.91 423.98 423.62 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -350.89 358.26 292.27 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -319.69 319.26 318.76 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -278.74 282.75 245.22 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -1127.61 1127.36 1127.17 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -785.44 819.07 510.77 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -1079.69 1079.66 1079.31 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -820.15 845.54 611.26 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -698.13 612.64 516.41 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -600.12 
564.13 382.59 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr_int8.onnx'] -8116.21 8127.96 8113.70 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -5408.02 5677.71 3240.16 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -2267.96 2268.26 2266.59 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -1605.80 1671.91 1073.50 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -1731.61 1733.17 1730.54 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -1435.43 1477.52 1196.01 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx'] -26185.41 26190.85 26168.68 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -17019.14 17923.20 9673.68 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -288.95 290.28 260.40 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -628.64 628.47 628.27 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -562.90 569.91 509.93 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -910.38 910.94 909.64 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -7613.64 7626.26 7606.07 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -4895.28 5166.85 2716.65 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -524.52 526.33 522.71 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -2988.22 2996.51 2980.17 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -2981.84 2979.74 2975.80 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -2610.78 2979.14 1979.37 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -2425.29 2478.92 1979.37 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may_int8.onnx'] -1404.01 1415.46 1401.36 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -1425.42 1426.51 
1401.36 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -1432.21 1450.47 1401.36 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -1425.24 1448.27 1401.36 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2023feb_fp16.onnx'] -1428.84 1446.76 1401.36 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2023feb_fp16.onnx'] -1313.68 1427.46 808.70 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2022oct_int8.onnx'] -1242.07 1408.93 808.70 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov_int8.onnx'] -1174.32 1426.07 774.78 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` - -### Khadas VIM4 - -Board specs: https://www.khadas.com/vim4, https://dl.khadas.com/products/vim4/specs/vim4-specs.pdf - -SoC specs: -- CPU: Amlogic A311D2, 2.2GHz Quad core ARM Cortex-A73 and 2.0GHz Quad core Cortex-A53 CPU, with 32-bit STM32G031K6 microprocessor. -- GPU: Mali G52MP8(8EE) 800Mhz GPU. -- NPU: 3.2 TOPS Build-in NPU (Not supported by dnn yet) - -CPU: - -``` -$ python3 benchmark.py --all --cfg_exclude wechat -Benchmarking ... 
-backend=cv.dnn.DNN_BACKEND_OPENCV -target=cv.dnn.DNN_TARGET_CPU -mean median min input size model -4.27 4.33 4.17 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -4.58 4.58 4.17 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -39.94 39.98 39.42 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -49.33 50.59 39.42 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -17.28 17.63 16.93 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -22.78 23.27 16.93 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -25.83 25.46 25.30 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -28.23 28.87 25.30 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -47.68 47.72 45.65 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -49.25 49.45 45.65 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -38.73 38.18 37.89 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -33.68 33.99 29.16 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -36.22 29.50 29.16 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -36.12 35.69 29.16 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr_int8.onnx'] -219.81 220.21 215.97 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -224.03 222.27 215.97 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -81.46 84.07 77.95 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -81.46 83.07 77.95 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -136.14 136.12 128.61 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -136.57 136.30 128.61 [416, 416] NanoDet with 
['object_detection_nanodet_2022nov_int8.onnx'] -805.54 805.23 795.82 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -768.87 766.00 727.12 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -29.47 29.39 28.49 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -54.45 54.76 53.45 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -60.84 61.07 53.45 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -57.22 57.22 56.14 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -218.22 224.50 215.54 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -199.53 203.24 179.85 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -53.06 54.61 51.82 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -148.82 149.62 146.73 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -148.91 148.99 146.59 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -175.33 150.60 146.59 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -194.12 201.48 146.59 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may_int8.onnx'] -133.27 132.90 132.54 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -135.27 135.12 132.54 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -127.49 137.43 113.82 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -129.18 125.95 113.82 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2023feb_fp16.onnx'] -125.82 114.44 113.82 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2023feb_fp16.onnx'] -127.63 124.81 113.82 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2022oct_int8.onnx'] -129.24 134.50 113.82 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov_int8.onnx'] -126.64 125.09 110.45 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` - -### Jetson Nano Orin - -Specs: 
https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/jetson-orin/ -- CPU: 6-core Arm® Cortex®-A78AE v8.2 64-bit CPU, 1.5MB L2 + 4MB L3 -- GPU: 1024-core NVIDIA Ampere architecture GPU with 32 Tensor Cores, max freq 625MHz - -CPU: - -``` -$ python3 benchmark.py --all -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_OPENCV -target=cv.dnn.DNN_TARGET_CPU -mean median min input size model -2.59 2.62 2.50 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -2.98 2.97 2.50 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -20.05 24.76 19.75 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -31.84 32.72 19.75 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -9.15 9.22 9.04 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -14.33 15.35 9.04 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -15.00 15.17 14.80 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -18.37 18.63 14.80 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -24.86 25.09 24.12 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -30.17 34.51 24.12 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -18.47 18.55 18.23 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -17.08 17.30 15.80 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -21.26 15.89 15.80 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -23.19 24.15 15.80 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr_int8.onnx'] -102.30 101.90 101.44 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -142.33 146.24 101.44 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -39.91 39.01 38.46 [320, 240] LPD_YuNet with 
['license_plate_detection_lpd_yunet_2023mar.onnx'] -51.35 50.70 38.46 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -125.31 126.50 121.92 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -132.95 133.67 121.92 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx'] -400.91 430.48 384.87 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -476.63 509.48 384.87 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -19.16 19.91 18.04 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -27.73 26.93 26.72 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -35.16 41.14 26.72 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -33.05 33.18 32.67 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -93.58 94.02 92.36 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -119.80 153.20 92.36 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -31.51 32.19 30.69 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -3.53 3.53 3.51 [100, 100] WeChatQRCode with ['detect_2021nov.prototxt', 'detect_2021nov.caffemodel', 'sr_2021nov.prototxt', 'sr_2021nov.caffemodel'] -78.10 77.77 77.17 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -78.03 78.38 77.17 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -99.09 79.42 77.17 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -112.82 116.06 77.17 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may_int8.onnx'] -142.97 142.84 135.56 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -144.53 148.52 135.56 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -134.47 146.62 112.91 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -136.37 131.39 112.91 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2023feb_fp16.onnx'] -132.08 117.15 109.24 
[1280, 720] CRNN with ['text_recognition_CRNN_EN_2023feb_fp16.onnx'] -135.17 130.23 109.24 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2022oct_int8.onnx'] -138.38 143.25 109.24 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov_int8.onnx'] -137.08 134.22 109.24 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` - -GPU (CUDA-FP32): - -``` -$ python3 benchmark.py --all --fp32 --cfg_exclude wechat --cfg_overwrite_backend_target 1 -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_CUDA -target=cv.dnn.DNN_TARGET_CUDA -mean median min input size model -5.23 5.27 5.17 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -7.59 7.62 7.55 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -8.48 8.46 8.37 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -12.29 13.04 11.11 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -12.91 13.28 12.79 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -8.41 8.42 8.35 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -9.36 9.43 8.35 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -32.58 32.71 31.11 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -16.33 16.08 16.04 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -24.46 24.35 24.01 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -103.28 103.41 102.37 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -19.75 19.78 19.10 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -10.84 10.76 10.75 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -14.50 14.50 14.36 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -23.53 23.36 23.16 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -26.54 27.22 25.99 [256, 256] MPPose with 
['pose_estimation_mediapipe_2023mar.onnx'] -27.49 27.80 26.97 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -27.53 27.75 26.95 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -15.66 16.30 15.41 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -15.91 15.80 15.41 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -13.58 16.70 9.48 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -``` - -GPU (CUDA-FP16): - -``` -$ python3 benchmark.py --all --fp32 --cfg_exclude wechat --cfg_overwrite_backend_target 2 -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_CUDA -target=cv.dnn.DNN_TARGET_CUDA_FP16 -mean median min input size model -5.00 5.04 4.92 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -5.09 5.08 5.05 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -6.81 6.86 6.66 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -9.19 10.18 9.06 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -16.20 16.62 15.93 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -6.84 6.82 6.80 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -7.46 7.87 6.80 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -14.18 14.16 14.03 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -13.35 13.10 13.04 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -19.94 19.95 19.50 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -72.25 72.91 70.99 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -22.37 22.44 21.60 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -8.92 8.92 8.84 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -11.11 11.13 10.98 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -13.22 13.23 13.12 [128, 256] 
YoutuReID with ['person_reid_youtu_2021nov.onnx'] -26.79 27.04 26.24 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -19.71 19.75 19.47 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -19.76 19.93 19.47 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -16.30 15.88 15.80 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -16.36 16.51 15.80 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -13.64 16.27 8.90 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -``` - -### Atlas 200I DK A2 - -Specs: https://www.hiascend.com/hardware/developer-kit-a2 (cn) -- CPU: 4 core * 1.0 GHz -- NPU: Ascend 310B, 8 TOPS INT8, 4 TFLOPS FP16 (Benchmark results are coming later) - -CPU: - -``` -$ python3 benchmark.py --all --cfg_exclude wechat -Benchmarking ... -backend=cv.dnn.DNN_BACKEND_OPENCV -target=cv.dnn.DNN_TARGET_CPU -mean median min input size model -6.67 6.80 5.17 [160, 120] YuNet with ['face_detection_yunet_2023mar.onnx'] -8.70 9.22 5.17 [160, 120] YuNet with ['face_detection_yunet_2023mar_int8.onnx'] -78.90 81.48 74.18 [150, 150] SFace with ['face_recognition_sface_2021dec.onnx'] -113.79 115.49 74.18 [150, 150] SFace with ['face_recognition_sface_2021dec_int8.onnx'] -36.94 38.64 33.23 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july.onnx'] -55.14 60.34 33.23 [112, 112] FacialExpressionRecog with ['facial_expression_recognition_mobilefacenet_2022july_int8.onnx'] -56.00 55.56 51.99 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb.onnx'] -71.09 72.20 51.99 [224, 224] MPHandPose with ['handpose_estimation_mediapipe_2023feb_int8.onnx'] -78.01 80.36 73.97 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar.onnx'] -111.56 113.84 73.97 [192, 192] PPHumanSeg with ['human_segmentation_pphumanseg_2023mar_int8.onnx'] -70.20 68.69 65.12 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr.onnx'] -61.72 
63.39 48.28 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr.onnx'] -80.12 54.37 48.28 [224, 224] MobileNet with ['image_classification_mobilenetv1_2022apr_int8.onnx'] -87.42 96.71 48.28 [224, 224] MobileNet with ['image_classification_mobilenetv2_2022apr_int8.onnx'] -417.31 417.30 406.17 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan.onnx'] -597.15 619.24 406.17 [224, 224] PPResNet with ['image_classification_ppresnet50_2022jan_int8.onnx'] -155.73 153.40 145.10 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx'] -200.41 200.24 145.10 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar_int8.onnx'] -253.05 252.73 245.91 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx'] -274.44 269.76 245.91 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx'] -1407.75 1416.44 1357.23 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx'] -1716.25 1709.35 1357.23 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx'] -37.02 37.66 32.50 [1280, 720] VitTrack with ['object_tracking_vittrack_2023sep.onnx'] -92.56 97.78 87.87 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx'] -119.29 123.56 87.87 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx'] -90.13 90.75 87.78 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx'] -285.75 284.54 278.06 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx'] -389.02 405.12 278.06 [128, 256] YoutuReID with ['person_reid_youtu_2021nov_int8.onnx'] -83.16 85.91 77.83 [256, 256] MPPose with ['pose_estimation_mediapipe_2023mar.onnx'] -219.28 220.74 214.53 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may.onnx'] -217.18 227.44 207.15 [640, 480] PPOCRDet with ['text_detection_en_ppocrv3_2023may.onnx'] -319.73 210.22 207.15 [640, 480] PPOCRDet with ['text_detection_cn_ppocrv3_2023may_int8.onnx'] -396.47 399.45 207.15 [640, 480] PPOCRDet with 
['text_detection_en_ppocrv3_2023may_int8.onnx'] -165.34 172.10 156.36 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2021sep.onnx'] -169.22 174.21 156.36 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov.onnx'] -158.82 172.23 135.52 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2021sep.onnx'] -159.39 156.42 135.52 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2023feb_fp16.onnx'] -155.87 146.82 135.52 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2023feb_fp16.onnx'] -163.43 152.16 135.52 [1280, 720] CRNN with ['text_recognition_CRNN_CH_2022oct_int8.onnx'] -173.46 162.85 135.52 [1280, 720] CRNN with ['text_recognition_CRNN_CN_2021nov_int8.onnx'] -175.28 145.22 135.52 [1280, 720] CRNN with ['text_recognition_CRNN_EN_2022oct_int8.onnx'] -``` diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py index cbe67e55..d14ae8f3 100644 --- a/benchmark/benchmark.py +++ b/benchmark/benchmark.py @@ -111,6 +111,19 @@ def __init__(self, **kwargs): self._backend = available_backends[backend_id] self._target = available_targets[target_id] + # --- GSoC 'Quantization Paradox' Mitigation --- + import platform + arch = platform.machine().lower() + if arch in ['aarch64', 'armv8l', 'arm64']: + is_int8 = getattr(args, 'int8', False) or getattr(args, 'int8bq', False) + if is_int8 and target_id == 'cpu': + print("[WARN] ARM64 Edge AI Context: 'Quantization Paradox' detected.") + print("[WARN] INT8 quantized models on fallback CPU incur severe latency.") + print("[WARN] Auto-routing backend to TIM-VX / NPU for hardware intrinsic execution.") + self._backend = cv.dnn.DNN_BACKEND_TIMVX + self._target = cv.dnn.DNN_TARGET_NPU + # ---------------------------------------------- + self._benchmark_results = dict() self._benchmark_results_brief = dict() diff --git a/benchmark/color_table.svg b/benchmark/color_table.svg deleted file mode 100644 index 480584c0..00000000 --- a/benchmark/color_table.svg +++ /dev/null @@ -1,5161 +0,0 @@ - - - - - - - - image/svg+xml - - - - - - - - - 
- - - - - - - Faster - - - Slower - - - - - - - - - - - - Model - - - - - - - - - - Task - - - - - - - - - - Input Size - - - - - - - - - - Intel - - - 12700K - - - CPU - - - - - - - - - - Atlas 200I DK A2 - - - Ascend 310B - - - CPU - - - - - - - - - - Atlas 200 DK - - - Ascend 310 - - - CPU - - - - - - - - - - Khadas VIM3 - - - A311D - - - CPU - - - - - - - - - - Khadas VIM4 - - - A311D2 - - - CPU - - - - - - - - - - Khadas Edge2 - - - RK3588S - - - CPU - - - - - - - - - - Jetson Nano - - - B01 - - - CPU - - - - - - - - - - Jetson Nano - - - Orin - - - CPU - - - - - - - - - - Raspberry Pi 4B - - - BCM2711 - - - CPU - - - - - - - - - - Horizon Sunrise Pi - - - X3 - - - CPU - - - - - - - - - - MAIX-III AX-Pi - - - AX620A - - - CPU - - - - - - - - - - Toybrick - - - RV1126 - - - CPU - - - - - - - - - - StarFive VisionFive 2 - - - StarFive JH7110 - - - CPU - - - - - - - - - - Jetson Nano - - - B01 - - - GPU - - - - - - - - - - Jetson Nano - - - Orin - - - GPU - - - - - - - - - - Khadas VIM3 - - - A311D - - - NPU - - - - - - - - - - Atlas 200 DK - - - Ascend 310 - - - NPU - - - - - - - - - - YuNet - - - - - - - Face Detection - - - - - - 160x120 - - - - - - 0.69 - - - - - - 6.67 - - - - - - 7.82 - - - - - - 4.62 - - - - - - 4.27 - - - - - - 2.30 - - - - - - 5.62 - - - - - - 2.59 - - - - - - 6.23 - - - - - - 10.56 - - - - - - 83.95 - - - - - - 56.78 - - - - - - 41.13 - - - - - - 10.99 - - - - - - 5.23 - - - - - - 5.24 - - - - - - 2.24 - - - - - - - - - SFace - - - - - - - Face Recognition - - - - - - 112x112 - - - - - - 5.09 - - - - - - 78.90 - - - - - - 92.21 - - - - - - 55.04 - - - - - - 39.94 - - - - - - 28.94 - - - - - - 64.80 - - - - - - 20.05 - - - - - - 68.82 - - - - - - 124.80 - - - - - - 2326.96 - - - - - - 1737.74 - - - - - - 1169.96 - - - - - - 25.25 - - - - - - 7.59 - - - - - - 45.96 - - - - - - 2.66 - - - - - - - - - FER - - - - - - - Face Expression Recognition - - - - - - 112x112 - - - - - - 1.79 - - - - - - 36.94 - - - - - - 42.93 - - - - - - 29.50 - - 
- - - - 17.28 - - - - - - 12.44 - - - - - - 26.54 - - - - - - 9.15 - - - - - - 27.81 - - - - - - 55.12 - - - - - - 823.42 - - - - - - 609.51 - - - - - - 423.91 - - - - - - 13.97 - - - - - - 8.48 - - - - - - 30.25 - - - - - - 2.19 - - - - - - - - - LPD_YuNet - - - - - - - License Plate Detection - - - - - - 320x240 - - - - - - 5.68 - - - - - - 155.73 - - - - - - 201.99 - - - - - - 128.58 - - - - - - 81.46 - - - - - - 53.83 - - - - - - 134.36 - - - - - - 39.91 - - - - - - 153.87 - - - - - - 252.05 - - - - - - 4371.75 - - - - - - 3260.22 - - - - - - 2267.96 - - - - - - 54.88 - - - - - - 16.33 - - - - - - 32.11 - - - - - - 7.63 - - - - - - - - - YOLOX - - - - - - - Object Detection - - - - - - 640x640 - - - - - - 78.77 - - - - - - 1407.75 - - - - - - 1875.33 - - - - - - 1108.12 - - - - - - 805.54 - - - - - - 554.30 - - - - - - 1209.12 - - - - - - 400.91 - - - - - - 1614.13 - - - - - - 2516.91 - - - - - - 50633.38 - - - - - - 37600.81 - - - - - - 26185.41 - - - - - - 371.32 - - - - - - 103.28 - - - - - - 408.18 - - - - - - 28.59 - - - - - - - - - NanoDet - - - - - - - Object Detection - - - - - - 416x416 - - - - - - 41.02 - - - - - - 253.05 - - - - - - 313.66 - - - - - - 179.93 - - - - - - 136.14 - - - - - - 98.03 - - - - - - 215.67 - - - - - - 125.31 - - - - - - 214.59 - - - - - - 399.35 - - - - - - 3421.19 - - - - - - 2335.89 - - - - - - 1731.61 - - - - - - 63.86 - - - - - - 24.46 - - - - - - 116.32 - - - - - - 20.62 - - - - - - - - - PPOCRDet-CN - - - - - - - Text Detection - - - - - - 640x480 - - - - - - 18.76 - - - - - - 219.28 - - - - - - 360.26 - - - - - - 211.02 - - - - - - 148.82 - - - - - - 103.42 - - - - - - 209.80 - - - - - - 78.10 - - - - - - 325.02 - - - - - - 475.98 - - - - - - 5899.23 - - - - - - 4267.03 - - - - - - 2988.22 - - - - - - 112.68 - - - - - - 27.49 - - - - - - 160.70 - - - - - - --- - - - - - - - - - PPOCRDet-EN - - - - - - - Text Detection - - - - - - 640x480 - - - - - - 18.59 - - - - - - 217.18 - - - - - - 361.22 - - - - - - 210.19 - - - - 
- - 148.91 - - - - - - 103.41 - - - - - - 209.60 - - - - - - 78.03 - - - - - - 323.54 - - - - - - 475.90 - - - - - - 5889.39 - - - - - - 4265.58 - - - - - - 2981.84 - - - - - - 112.48 - - - - - - 27.53 - - - - - - 160.47 - - - - - - --- - - - - - - - - - CRNN-EN - - - - - - - Text Recognition - - - - - - 100x32 - - - - - - 9.85 - - - - - - 158.82 - - - - - - 289.82 - - - - - - 185.45 - - - - - - 127.49 - - - - - - 67.15 - - - - - - 245.18 - - - - - - 134.47 - - - - - - 226.09 - - - - - - 427.47 - - - - - - 3247.56 - - - - - - 2217.15 - - - - - - 1432.21 - - - - - - 36.77 - - - - - - 13.58 - - - - - - 196.69 - - - - - - --- - - - - - - - - - CRNN-CN - - - - - - - Text Recognition - - - - - - 100x32 - - - - - - 11.03 - - - - - - 169.22 - - - - - - 318.96 - - - - - - 197.16 - - - - - - 135.27 - - - - - - 70.63 - - - - - - 259.28 - - - - - - 144.53 - - - - - - 240.95 - - - - - - 453.60 - - - - - - 3281.31 - - - - - - 2217.08 - - - - - - 1425.42 - - - - - - 44.97 - - - - - - 15.91 - - - - - - 197.61 - - - - - - --- - - - - - - - - - PP-ResNet - - - - - - - Image Classification - - - - - - 224x224 - - - - - - 19.47 - - - - - - 417.31 - - - - - - 499.55 - - - - - - 335.75 - - - - - - 219.81 - - - - - - 151.10 - - - - - - 346.78 - - - - - - 102.30 - - - - - - 420.93 - - - - - - 653.39 - - - - - - 15841.89 - - - - - - 11855.64 - - - - - - 8116.21 - - - - - - 98.80 - - - - - - 32.58 - - - - - - 73.19 - - - - - - 6.99 - - - - - - - - - MobileNet-V1 - - - - - - - Image Classification - - - - - - 224x224 - - - - - - 3.13 - - - - - - 70.20 - - - - - - 92.66 - - - - - - 59.27 - - - - - - 38.73 - - - - - - 27.45 - - - - - - 65.72 - - - - - - 18.47 - - - - - - 64.20 - - - - - - 119.71 - - - - - - 2123.08 - - - - - - 1567.09 - - - - - - 1079.69 - - - - - - 28.96 - - - - - - 8.41 - - - - - - 148.80\* - - - - - - 5.15 - - - - - - - - - MobileNet-V2 - - - - - - - Image Classification - - - - - - 224x224 - - - - - - 3.04 - - - - - - 61.72 - - - - - - 79.39 - - - - - - 52.17 - - - - - - 
33.68 - - - - - - 22.95 - - - - - - 56.66 - - - - - - 17.08 - - - - - - 57.91 - - - - - - 102.57 - - - - - - 1619.08 - - - - - - 1188.83 - - - - - - 820.15 - - - - - - 28.61 - - - - - - 9.36 - - - - - - 143.17\* - - - - - - 5.41 - - - - - - - - - PP-HumanSeg - - - - - - - Human Segmentation - - - - - - 192x192 - - - - - - 5.59 - - - - - - 78.01 - - - - - - 102.49 - - - - - - 71.92 - - - - - - 47.68 - - - - - - 29.63 - - - - - - 64.95 - - - - - - 24.86 - - - - - - 71.35 - - - - - - 132.95 - - - - - - 2175.34 - - - - - - 1109.61 - - - - - - 1127.61 - - - - - - 67.25 - - - - - - 12.91 - - - - - - 28.75 - - - - - - 6.94 - - - - - - - - - WeChatQRCode - - - - - - - QR Code Detection and Parsing - - - - - - 100x100 - - - - - - 1.19 - - - - - - --- - - - - - - --- - - - - - - --- - - - - - - --- - - - - - - --- - - - - - - 5.56 - - - - - - 3.53 - - - - - - 5.90 - - - - - - 9.93 - - - - - - --- - - - - - - --- - - - - - - --- - - - - - - --- - - - - - - --- - - - - - - --- - - - - - - --- - - - - - - - - - YoutuReID - - - - - - - Person Re-Identification - - - - - - 128x256 - - - - - - 15.56 - - - - - - 285.75 - - - - - - 521.46 - - - - - - 327.07 - - - - - - 218.22 - - - - - - 148.01 - - - - - - 356.78 - - - - - - 93.58 - - - - - - 478.89 - - - - - - 678.74 - - - - - - 14895.99 - - - - - - 11143.23 - - - - - - 7613.64 - - - - - - 90.84 - - - - - - 23.53 - - - - - - 41.82 - - - - - - 5.58 - - - - - - - - - MP-PalmDet - - - - - - - Palm Detection - - - - - - 192x192 - - - - - - 5.35 - - - - - - 92.56 - - - - - - 159.80 - - - - - - 78.26 - - - - - - 54.45 - - - - - - 45.03 - - - - - - 75.30 - - - - - - 27.73 - - - - - - 97.05 - - - - - - 168.54 - - - - - - 1285.54 - - - - - - 905.77 - - - - - - 628.64 - - - - - - 37.61 - - - - - - 10.84 - - - - - - 37.34 - - - - - - 5.17 - - - - - - - - - MP-HandPose - - - - - - - Hand Pose Estimation - - - - - - 224x224 - - - - - - 2.40 - - - - - - 56.00 - - - - - - 67.85 - - - - - - 35.80 - - - - - - 25.83 - - - - - - 20.22 - - - - - - 
38.45 - - - - - - 15.00 - - - - - - 42.58 - - - - - - 77.44 - - - - - - 664.73 - - - - - - 465.12 - - - - - - 319.69 - - - - - - 24.47 - - - - - - 12.29 - - - - - - 19.75 - - - - - - 6.27 - - - - - - - - - MP-PersonDet - - - - - - - Person Detection - - - - - - 224x224 - - - - - - 7.65 - - - - - - 90.13 - - - - - - 145.83 - - - - - - 83.22 - - - - - - 57.22 - - - - - - 42.90 - - - - - - 87.65 - - - - - - 33.05 - - - - - - 105.60 - - - - - - 172.55 - - - - - - 1849.87 - - - - - - 1315.48 - - - - - - 910.38 - - - - - - 37.39 - - - - - - 14.50 - - - - - - --- - - - - - - 16.45 - - - - - - - - - MP-Pose - - - - - - - Pose Estimation - - - - - - 256x256 - - - - - - 6.33 - - - - - - 83.16 - - - - - - 134.02 - - - - - - 75.38 - - - - - - 53.06 - - - - - - 37.06 - - - - - - 75.20 - - - - - - 31.51 - - - - - - 116.15 - - - - - - 162.87 - - - - - - 1045.98 - - - - - - 736.02 - - - - - - 524.52 - - - - - - 76.44 - - - - - - 26.54 - - - - - - --- - - - - - - --- - - - - - - - - - VitTrack - - - - - - - Object Tracking - - - - - - 1280x720 - - - - - - 4.01 - - - - - - 37.02 - - - - - - 143.62 - - - - - - 32.20 - - - - - - 29.47 - - - - - - 14.02 - - - - - - 48.39 - - - - - - 19.16 - - - - - - 48.55 - - - - - - 84.15 - - - - - - 548.36 - - - - - - 411.41 - - - - - - 288.95 - - - - - - 47.26 - - - - - - 19.75 - - - - - - --- - - - - - - --- - - - - - - Units: All data in milliseconds (ms). - - - \*: Models are quantized in per-channel mode, which run slower than per-tensor quantized models on NPU. 
- - - - - - - - - diff --git a/benchmark/config/face_detection_yunet.yaml b/benchmark/config/face_detection_yunet.yaml deleted file mode 100644 index 3a903126..00000000 --- a/benchmark/config/face_detection_yunet.yaml +++ /dev/null @@ -1,19 +0,0 @@ -Benchmark: - name: "Face Detection Benchmark" - type: "Detection" - data: - path: "data/face_detection" - files: ["group.jpg", "concerts.jpg", "dance.jpg"] - sizes: # [[w1, h1], ...], Omit to run at original scale - - [160, 120] - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "YuNet" - confThreshold: 0.6 - nmsThreshold: 0.3 - topK: 5000 diff --git a/benchmark/config/face_recognition_sface.yaml b/benchmark/config/face_recognition_sface.yaml deleted file mode 100644 index ad66287b..00000000 --- a/benchmark/config/face_recognition_sface.yaml +++ /dev/null @@ -1,14 +0,0 @@ -Benchmark: - name: "Face Recognition Benchmark" - type: "Recognition" - data: - path: "data/face_recognition" - files: ["Aaron_Tippin_0001.jpg", "Alvaro_Uribe_0028.jpg", "Alvaro_Uribe_0029.jpg", "Jose_Luis_Rodriguez_Zapatero_0001.jpg"] - metric: # 'sizes' is omitted since this model requires input of fixed size - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "SFace" diff --git a/benchmark/config/facial_expression_recognition.yaml b/benchmark/config/facial_expression_recognition.yaml deleted file mode 100644 index 05f4af7f..00000000 --- a/benchmark/config/facial_expression_recognition.yaml +++ /dev/null @@ -1,14 +0,0 @@ -Benchmark: - name: "Facial Expression Recognition Benchmark" - type: "Recognition" - data: - path: "data/facial_expression_recognition/fer_evaluation" - files: ["RAF_test_0_61.jpg", "RAF_test_0_30.jpg", "RAF_test_6_25.jpg"] - metric: # 'sizes' is omitted since this model requires input of fixed size - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "FacialExpressionRecog" diff --git 
a/benchmark/config/handpose_estimation_mediapipe.yaml b/benchmark/config/handpose_estimation_mediapipe.yaml deleted file mode 100644 index 53170421..00000000 --- a/benchmark/config/handpose_estimation_mediapipe.yaml +++ /dev/null @@ -1,17 +0,0 @@ -Benchmark: - name: "Hand Pose Estimation Benchmark" - type: "Recognition" - data: - path: "data/palm_detection_20230125" - files: ["palm1.jpg", "palm2.jpg", "palm3.jpg"] - sizes: # [[w1, h1], ...], Omit to run at original scale - - [224, 224] - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "MPHandPose" - confThreshold: 0.9 diff --git a/benchmark/config/human_segmentation_pphumanseg.yaml b/benchmark/config/human_segmentation_pphumanseg.yaml deleted file mode 100644 index 1849391c..00000000 --- a/benchmark/config/human_segmentation_pphumanseg.yaml +++ /dev/null @@ -1,16 +0,0 @@ -Benchmark: - name: "Human Segmentation Benchmark" - type: "Base" - data: - path: "data/human_segmentation" - files: ["messi5.jpg", "100040721_1.jpg", "detect.jpg"] - sizes: [[192, 192]] - toRGB: True - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "PPHumanSeg" diff --git a/benchmark/config/image_classification_mobilenet.yaml b/benchmark/config/image_classification_mobilenet.yaml deleted file mode 100644 index 54d1dabb..00000000 --- a/benchmark/config/image_classification_mobilenet.yaml +++ /dev/null @@ -1,17 +0,0 @@ -Benchmark: - name: "Image Classification Benchmark" - type: "Classification" - data: - path: "data/image_classification" - files: ["coffee_mug.jpg", "umbrella.jpg", "wall_clock.jpg"] - sizes: [[256, 256]] - toRGB: True - centerCrop: 224 - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "MobileNet" diff --git a/benchmark/config/image_classification_ppresnet.yaml b/benchmark/config/image_classification_ppresnet.yaml deleted file mode 100644 index e5403804..00000000 --- 
a/benchmark/config/image_classification_ppresnet.yaml +++ /dev/null @@ -1,17 +0,0 @@ -Benchmark: - name: "Image Classification Benchmark" - type: "Classification" - data: - path: "data/image_classification" - files: ["coffee_mug.jpg", "umbrella.jpg", "wall_clock.jpg"] - sizes: [[256, 256]] - toRGB: True - centerCrop: 224 - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "PPResNet" diff --git a/benchmark/config/license_plate_detection_yunet.yaml b/benchmark/config/license_plate_detection_yunet.yaml deleted file mode 100644 index bd3872d6..00000000 --- a/benchmark/config/license_plate_detection_yunet.yaml +++ /dev/null @@ -1,20 +0,0 @@ -Benchmark: - name: "License Plate Detection Benchmark" - type: "Detection" - data: - path: "data/license_plate_detection" - files: ["1.jpg", "2.jpg", "3.jpg", "4.jpg"] - sizes: # [[w1, h1], ...], Omit to run at original scale - - [320, 240] - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "LPD_YuNet" - confThreshold: 0.8 - nmsThreshold: 0.3 - topK: 5000 - keepTopK: 750 diff --git a/benchmark/config/object_detection_nanodet.yaml b/benchmark/config/object_detection_nanodet.yaml deleted file mode 100644 index 77e16914..00000000 --- a/benchmark/config/object_detection_nanodet.yaml +++ /dev/null @@ -1,18 +0,0 @@ -Benchmark: - name: "Object Detection Benchmark" - type: "Detection" - data: - path: "data/object_detection" - files: ["1.png", "2.png", "3.png"] - sizes: - - [416, 416] - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "NanoDet" - prob_threshold: 0.35 - iou_threshold: 0.6 diff --git a/benchmark/config/object_detection_yolox.yaml b/benchmark/config/object_detection_yolox.yaml deleted file mode 100644 index bfda9200..00000000 --- a/benchmark/config/object_detection_yolox.yaml +++ /dev/null @@ -1,19 +0,0 @@ -Benchmark: - name: "Object Detection Benchmark" - type: "Detection" - data: - path: 
"data/object_detection" - files: ["1.png", "2.png", "3.png"] - sizes: - - [640, 640] - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "YoloX" - confThreshold: 0.35 - nmsThreshold: 0.5 - objThreshold: 0.5 diff --git a/benchmark/config/object_tracking_vittrack.yaml b/benchmark/config/object_tracking_vittrack.yaml deleted file mode 100644 index 6ed4cdf0..00000000 --- a/benchmark/config/object_tracking_vittrack.yaml +++ /dev/null @@ -1,14 +0,0 @@ -Benchmark: - name: "Object Tracking Benchmark" - type: "Tracking" - data: - type: "TrackingVideoLoader" - path: "data/object_tracking" - files: ["throw_cup.mp4"] - metric: - type: "Tracking" - backend: "default" - target: "cpu" - -Model: - name: "VitTrack" diff --git a/benchmark/config/palm_detection_mediapipe.yaml b/benchmark/config/palm_detection_mediapipe.yaml deleted file mode 100644 index e00bdb4a..00000000 --- a/benchmark/config/palm_detection_mediapipe.yaml +++ /dev/null @@ -1,19 +0,0 @@ -Benchmark: - name: "Palm Detection Benchmark" - type: "Detection" - data: - path: "data/palm_detection_20230125" - files: ["palm1.jpg", "palm2.jpg", "palm3.jpg"] - sizes: # [[w1, h1], ...], Omit to run at original scale - - [192, 192] - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "MPPalmDet" - scoreThreshold: 0.5 - nmsThreshold: 0.3 - topK: 1 diff --git a/benchmark/config/person_detection_mediapipe.yaml b/benchmark/config/person_detection_mediapipe.yaml deleted file mode 100644 index ba95446c..00000000 --- a/benchmark/config/person_detection_mediapipe.yaml +++ /dev/null @@ -1,19 +0,0 @@ -Benchmark: - name: "Person Detection Benchmark" - type: "Detection" - data: - path: "data/person_detection" - files: ["person1.jpg", "person2.jpg", "person3.jpg"] - sizes: # [[w1, h1], ...], Omit to run at original scale - - [224, 224] - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "MPPersonDet" - scoreThreshold: 0.5 - 
nmsThreshold: 0.3 - topK: 1 diff --git a/benchmark/config/person_reid_youtureid.yaml b/benchmark/config/person_reid_youtureid.yaml deleted file mode 100644 index 07ccf31a..00000000 --- a/benchmark/config/person_reid_youtureid.yaml +++ /dev/null @@ -1,15 +0,0 @@ -Benchmark: - name: "Person ReID Benchmark" - type: "Base" - data: - path: "data/person_reid" - files: ["0030_c1_f0056923.jpg", "0042_c5_f0068994.jpg", "0056_c8_f0017063.jpg"] - sizes: [[128, 256]] - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "YoutuReID" diff --git a/benchmark/config/pose_estimation_mediapipe.yaml b/benchmark/config/pose_estimation_mediapipe.yaml deleted file mode 100644 index 7cfb26ac..00000000 --- a/benchmark/config/pose_estimation_mediapipe.yaml +++ /dev/null @@ -1,17 +0,0 @@ -Benchmark: - name: "Pose Estimation Benchmark" - type: "Recognition" - data: - path: "data/person_detection" - files: ["person1.jpg", "person2.jpg", "person3.jpg"] - sizes: # [[w1, h1], ...], Omit to run at original scale - - [256, 256] - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "MPPose" - confThreshold: 0.9 diff --git a/benchmark/config/qrcode_wechatqrcode.yaml b/benchmark/config/qrcode_wechatqrcode.yaml deleted file mode 100644 index 267554c5..00000000 --- a/benchmark/config/qrcode_wechatqrcode.yaml +++ /dev/null @@ -1,16 +0,0 @@ -Benchmark: - name: "QRCode Detection and Decoding Benchmark" - type: "Detection" - data: - path: "data/qrcode" - files: ["opencv.png", "opencv_zoo.png"] - sizes: - - [100, 100] - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "WeChatQRCode" diff --git a/benchmark/config/text_detection_ppocr.yaml b/benchmark/config/text_detection_ppocr.yaml deleted file mode 100644 index 80158536..00000000 --- a/benchmark/config/text_detection_ppocr.yaml +++ /dev/null @@ -1,20 +0,0 @@ -Benchmark: - name: "Text Detection Benchmark" - type: "Detection" - data: - path: 
"data/text" - files: ["1.jpg", "2.jpg", "3.jpg"] - sizes: # [[w1, h1], ...], Omit to run at original scale - - [640, 480] - metric: - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "PPOCRDet" - binaryThreshold: 0.3 - polygonThreshold: 0.5 - maxCandidates: 200 - unclipRatio: 2.0 diff --git a/benchmark/config/text_recognition_crnn.yaml b/benchmark/config/text_recognition_crnn.yaml deleted file mode 100644 index e4cdc9d0..00000000 --- a/benchmark/config/text_recognition_crnn.yaml +++ /dev/null @@ -1,14 +0,0 @@ -Benchmark: - name: "Text Recognition Benchmark" - type: "Recognition" - data: - path: "data/text" - files: ["1.jpg", "2.jpg", "3.jpg"] - metric: # 'sizes' is omitted since this model requires input of fixed size - warmup: 30 - repeat: 10 - backend: "default" - target: "cpu" - -Model: - name: "CRNN" diff --git a/benchmark/data/.gitignore b/benchmark/data/.gitignore deleted file mode 100644 index c96a04f0..00000000 --- a/benchmark/data/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!.gitignore \ No newline at end of file diff --git a/benchmark/download_data.py b/benchmark/download_data.py deleted file mode 100644 index 68033302..00000000 --- a/benchmark/download_data.py +++ /dev/null @@ -1,237 +0,0 @@ -import hashlib -import os -import sys -import tarfile -import zipfile -import requests -import os.path as osp - -from urllib.request import urlopen -from urllib.parse import urlparse - - -class Downloader: - MB = 1024*1024 - BUFSIZE = 10*MB - - def __init__(self, **kwargs): - self._name = kwargs.pop('name') - self._url = kwargs.pop('url', None) - self._filename = kwargs.pop('filename') - self._sha = kwargs.pop('sha', None) - self._saveTo = kwargs.pop('saveTo', './data') - self._extractTo = kwargs.pop('extractTo', './data') - - def __str__(self): - return 'Downloader for <{}>'.format(self._name) - - def printRequest(self, r): - def getMB(r): - d = dict(r.info()) - for c in ['content-length', 'Content-Length']: - if c in d: - return 
int(d[c]) / self.MB - return '' - print(' {} {} [{} Mb]'.format(r.getcode(), r.msg, getMB(r))) - - def verifyHash(self): - if not self._sha: - return False - sha = hashlib.sha1() - try: - with open(osp.join(self._saveTo, self._filename), 'rb') as f: - while True: - buf = f.read(self.BUFSIZE) - if not buf: - break - sha.update(buf) - if self._sha != sha.hexdigest(): - print(' actual {}'.format(sha.hexdigest())) - print(' expect {}'.format(self._sha)) - return self._sha == sha.hexdigest() - except Exception as e: - print(' catch {}'.format(e)) - - def get(self): - print(' {}: {}'.format(self._name, self._filename)) - if self.verifyHash(): - print(' hash match - skipping download') - else: - basedir = os.path.dirname(self._saveTo) - if basedir and not os.path.exists(basedir): - print(' creating directory: ' + basedir) - os.makedirs(basedir, exist_ok=True) - - print(' hash check failed - downloading') - if 'drive.google.com' in self._url: - urlquery = urlparse(self._url).query.split('&') - for q in urlquery: - if 'id=' in q: - gid = q[3:] - sz = GDrive(gid)(osp.join(self._saveTo, self._filename)) - print(' size = %.2f Mb' % (sz / (1024.0 * 1024))) - else: - print(' get {}'.format(self._url)) - self.download() - - # Verify hash after download - print(' done') - print(' file {}'.format(self._filename)) - if self.verifyHash(): - print(' hash match - extracting') - else: - print(' hash check failed - exiting') - - # Extract - if '.zip' in self._filename: - print(' extracting - ', end='') - self.extract() - print('done') - - return True - - def download(self): - try: - r = urlopen(self._url, timeout=60) - self.printRequest(r) - self.save(r) - except Exception as e: - print(' catch {}'.format(e)) - - def extract(self): - fileLocation = os.path.join(self._saveTo, self._filename) - try: - if self._filename.endswith('.zip'): - with zipfile.ZipFile(fileLocation) as f: - for member in f.namelist(): - path = osp.join(self._extractTo, member) - if osp.exists(path) or 
osp.isfile(path): - continue - else: - f.extract(member, self._extractTo) - except Exception as e: - print((' catch {}'.format(e))) - - def save(self, r): - with open(self._filename, 'wb') as f: - print(' progress ', end='') - sys.stdout.flush() - while True: - buf = r.read(self.BUFSIZE) - if not buf: - break - f.write(buf) - print('>', end='') - sys.stdout.flush() - - -def GDrive(gid): - def download_gdrive(dst): - session = requests.Session() # re-use cookies - - URL = "https://docs.google.com/uc?export=download" - response = session.get(URL, params = { 'id' : gid }, stream = True) - - def get_confirm_token(response): # in case of large files - for key, value in response.cookies.items(): - if key.startswith('download_warning'): - return value - return None - token = get_confirm_token(response) - - if token: - params = { 'id' : gid, 'confirm' : token } - response = session.get(URL, params = params, stream = True) - - BUFSIZE = 1024 * 1024 - PROGRESS_SIZE = 10 * 1024 * 1024 - - sz = 0 - progress_sz = PROGRESS_SIZE - with open(dst, "wb") as f: - for chunk in response.iter_content(BUFSIZE): - if not chunk: - continue # keep-alive - - f.write(chunk) - sz += len(chunk) - if sz >= progress_sz: - progress_sz += PROGRESS_SIZE - print('>', end='') - sys.stdout.flush() - print('') - return sz - return download_gdrive - -# Data will be downloaded and extracted to ./data by default -data_downloaders = dict( - face_detection=Downloader(name='face_detection', - url='https://drive.google.com/u/0/uc?id=1lOAliAIeOv4olM65YDzE55kn6XjiX2l6&export=download', - sha='0ba67a9cfd60f7fdb65cdb7c55a1ce76c1193df1', - filename='face_detection.zip'), - face_recognition=Downloader(name='face_recognition', - url='https://drive.google.com/u/0/uc?id=1BRIozREIzqkm_aMQ581j93oWoS-6TLST&export=download', - sha='03892b9036c58d9400255ff73858caeec1f46609', - filename='face_recognition.zip'), - facial_expression_recognition=Downloader(name='facial_expression_recognition', - 
url='https://drive.google.com/u/0/uc?id=13ZE0Pz302z1AQmBmYGuowkTiEXVLyFFZ&export=download', - sha='8f757559820c8eaa1b1e0065f9c3bbbd4f49efe2', - filename='facial_expression_recognition.zip'), - text=Downloader(name='text', - url='https://drive.google.com/u/0/uc?id=1lTQdZUau7ujHBqp0P6M1kccnnJgO-dRj&export=download', - sha='a40cf095ceb77159ddd2a5902f3b4329696dd866', - filename='text.zip'), - image_classification=Downloader(name='image_classification', - url='https://drive.google.com/u/0/uc?id=1qcsrX3CIAGTooB-9fLKYwcvoCuMgjzGU&export=download', - sha='987546f567f9f11d150eea78951024b55b015401', - filename='image_classification.zip'), - human_segmentation=Downloader(name='human_segmentation', - url='https://drive.google.com/u/0/uc?id=1Kh0qXcAZCEaqwavbUZubhRwrn_8zY7IL&export=download', - sha='ac0eedfd8568570cad135acccd08a134257314d0', - filename='human_segmentation.zip'), - qrcode=Downloader(name='qrcode', - url='https://drive.google.com/u/0/uc?id=1_OXB7eiCIYO335ewkT6EdAeXyriFlq_H&export=download', - sha='ac01c098934a353ca1545b5266de8bb4f176d1b3', - filename='qrcode.zip'), - object_tracking=Downloader(name='object_tracking', - url='https://drive.google.com/u/0/uc?id=1_cw5pUmTF-XmQVcQAI8fIp-Ewi2oMYIn&export=download', - sha='0bdb042632a245270013713bc48ad35e9221f3bb', - filename='object_tracking.zip'), - person_reid=Downloader(name='person_reid', - url='https://drive.google.com/u/0/uc?id=1G8FkfVo5qcuyMkjSs4EA6J5e16SWDGI2&export=download', - sha='5b741fbf34c1fbcf59cad8f2a65327a5899e66f1', - filename='person_reid.zip'), - palm_detection=Downloader(name='palm_detection', - url='https://drive.google.com/u/0/uc?id=1Z4KvccTZPeZ0qFLZ6saBt_TvcKYyo9JE&export=download', - sha='4b5bb24a51daab8913957e60245a4eb766c8cf2e', - filename='palm_detection_20230125.zip'), - license_plate_detection=Downloader(name='license_plate_detection', - url='https://drive.google.com/u/0/uc?id=1cf9MEyUqMMy8lLeDGd1any6tM_SsSmny&export=download', - sha='997acb143ddc4531e6e41365fb7ad4722064564c', - 
filename='license_plate_detection.zip'), - object_detection=Downloader(name='object_detection', - url='https://drive.google.com/u/0/uc?id=1LUUrQIWYYtiGoNAL_twZvdw5NkC39Swe&export=download', - sha='4161a5cd3b0be1f51484abacf19dc9a2231e9894', - filename='object_detection.zip'), - person_detection=Downloader(name='person_detection', - url='https://drive.google.com/u/0/uc?id=1RbLyetgqFUTt0IHaVmu6c_b7KeXJgKbc&export=download', - sha='fbae2fb0a47fe65e316bbd0ec57ba21461967550', - filename='person_detection.zip'), -) - -if __name__ == '__main__': - selected_data_names = [] - for i in range(1, len(sys.argv)): - selected_data_names.append(sys.argv[i]) - if not selected_data_names: - selected_data_names = list(data_downloaders.keys()) - print('Data will be downloaded: {}'.format(str(selected_data_names))) - - download_failed = [] - for selected_data_name in selected_data_names: - downloader = data_downloaders[selected_data_name] - if not downloader.get(): - download_failed.append(downloader._name) - - if download_failed: - print('Data have not been downloaded: {}'.format(str(download_failed))) diff --git a/benchmark/generate_table.py b/benchmark/generate_table.py deleted file mode 100644 index daf258d6..00000000 --- a/benchmark/generate_table.py +++ /dev/null @@ -1,277 +0,0 @@ -import re -import matplotlib.pyplot as plt -import matplotlib as mpl -import numpy as np -import yaml - - -# parse a '.md' file and find a table. 
return table information -def parse_table(filepath, cfg): - # parse benchmark data - def _parse_benchmark_data(lines): - raw_data = [] - for l in lines: - l = l.strip() - # parse each line - m = re.match(r"(\d+\.?\d*)\s+(\d+\.?\d*)\s+(\d+\.?\d*)\s+\[([^]]*)]\s+(.*)", l) - if m: - raw_data.append(m.groups()) - return raw_data - - # find each cpu, gpu, npu block - def _find_all_platform_block(lines): - cur_start = None - cur_platform = None - platform_block = dict() - for i in range(len(lines)): - l = lines[i].strip() - # found start and end of a platform - if l.startswith("CPU") or l.startswith("GPU") or l.startswith("NPU"): - if cur_platform is not None: - platform_block[cur_platform] = (cur_start, i) - cur_platform = l[:-1] - cur_start = i + 1 - continue - if cur_platform is not None and i == len(lines) - 1: - platform_block[cur_platform] = (cur_start, i) - for key in platform_block: - r = platform_block[key] - platform_block[key] = _parse_benchmark_data(lines[r[0]:r[1]]) - - return platform_block - - # find device block - def _find_all_device_block(lines, level): - cur_start = None - cur_device_name = None - device_block = dict() - for i in range(len(lines)): - l = lines[i].strip() - m = re.match(r"^(#+)\s+(.*)", l) - # found start and end of a device - if m and len(m.group(1)) == level: - if cur_device_name is not None: - device_block[cur_device_name] = (cur_start, i) - cur_device_name = m.group(2) - cur_start = i + 1 - continue - if cur_device_name is not None and i == len(lines) - 1: - device_block[cur_device_name] = (cur_start, i) - - for key in device_block: - r = device_block[key] - device_block[key] = _find_all_platform_block(lines[r[0]:r[1]]) - - return device_block - - # find detail block - def _find_detail_block(lines, title, level): - start = None - end = len(lines) - for i in range(len(lines)): - l = lines[i].strip() - m = re.match(r"^(#+)\s+(.*)", l) - # found start of detailed results block - if m and len(m.group(1)) == level and m.group(2) == 
title: - start = i + 1 - continue - # found end of detailed results block - if start is not None and m and len(m.group(1)) <= level: - end = i - break - - return _find_all_device_block(lines[start:end], level + 1) - - with open(filepath, "r", encoding="utf-8") as f: - content = f.read() - lines = content.split("\n") - - devices = cfg["Devices"] - models = cfg["Models"] - # display information of all devices - devices_display = [x['display_info'] for x in cfg["Devices"]] - header = ["Model", "Task", "Input Size"] + devices_display - body = [[x["name"], x["task"], x["input_size"]] + ["---"] * len(devices) for x in models] - table_raw_data = _find_detail_block(lines, title="Detailed Results", level=2) - - device_name_header = [f"{x['name']}-{x['platform']}" for x in devices] - device_name_header = [""] * (len(header) - len(device_name_header)) + device_name_header - # device name map to model col idx - device_name_to_col_idx = {k: v for v, k in enumerate(device_name_header)} - # model name map to model row idx - model_name_to_row_idx = {k[0]: v for v, k in enumerate(body)} - # convert raw data to usage data - for device in devices: - raw_data = table_raw_data[device["name"]][device["platform"]] - col_idx = device_name_to_col_idx[f"{device['name']}-{device['platform']}"] - for model in models: - # find which row idx of this model - row_idx = model_name_to_row_idx[model["name"]] - model_idxs = [i for i in range(len(raw_data)) if model["keyword"] in raw_data[i][-1]] - if len(model_idxs) > 0: - # only choose the first one - model_idx = model_idxs[0] - # choose mean as value - body[row_idx][col_idx] = raw_data[model_idx][0] - # remove used data - for idx in sorted(model_idxs, reverse=True): - raw_data.pop(idx) - - # handle suffix - for suffix in cfg["Suffixes"]: - row_idx = model_name_to_row_idx[suffix["model"]] - col_idx = device_name_to_col_idx[f"{suffix['device']}-{suffix['platform']}"] - body[row_idx][col_idx] += suffix["str"] - - return header, body - - -# render 
table and save -def render_table(header, body, save_path, cfg, cmap_type): - # parse models information and return some data - def _parse_data(models_info, cmap, cfg): - min_list = [] - max_list = [] - colors = [] - # model name map to idx - model_name_to_idx = {k["name"]: v for v, k in enumerate(cfg["Models"])} - for model in models_info: - # remove \* - data = [x.replace("\\*", "") for x in model] - # get max data - max_idx = -1 - min_data = 9999999 - min_idx = -1 - - for i in range(len(data)): - try: - d = float(data[i]) - if d < min_data: - min_data = d - min_idx = i - except: - pass - # set all bigger than acceptable time to red color - idx = model_name_to_idx[model[0]] - acc_time = cfg["Models"][idx]["acceptable_time"] - - min_list.append(min_idx) - max_list.append(max_idx) - - # calculate colors - color = [] - for t in data: - try: - t = float(t) - if t > acc_time: - # all bigger time will be set to red - color.append(cmap(1.)) - else: - # sqrt to make the result non-linear - t = np.sqrt((t - min_data) / (acc_time - min_data)) - color.append(cmap(t)) - except: - color.append('white') - colors.append(color) - return colors, min_list, max_list - - cmap = mpl.colormaps.get_cmap(cmap_type) - table_colors, min_list, max_list = _parse_data(body, cmap, cfg) - table_texts = [header] + body - table_colors = [['white'] * len(header)] + table_colors - - # create a figure, base width set to 1000, height set to 80 - fig, axs = plt.subplots(nrows=3, figsize=(10, 0.8)) - # turn off labels and axis - for ax in axs: - ax.set_axis_off() - ax.set_xticks([]) - ax.set_yticks([]) - - # create and add a color map - gradient = np.linspace(0, 1, 256) - gradient = np.vstack((gradient, gradient)) - axs[0].imshow(gradient, aspect='auto', cmap=cmap) - axs[0].text(-0.01, 0.5, "Faster", va='center', ha='right', fontsize=11, transform=axs[0].transAxes) - axs[0].text(1.01, 0.5, "Slower", va='center', ha='left', fontsize=11, transform=axs[0].transAxes) - - # initialize a table - table = 
axs[1].table(cellText=table_texts, - cellColours=table_colors, - cellLoc="left", - loc="upper left") - # set style of header, each url of hardware - ori_height = table[0, 0].get_height() - url_base = 'https://github.com/opencv/opencv_zoo/tree/main/benchmark#' - hw_urls = [f"{url_base}{x['name'].lower().replace(' ', '-')}" for x in cfg["Devices"]] - hw_urls = [""] * 3 + hw_urls - for col in range(len(header)): - cell = table[0, col] - cell.set_text_props(ha='center', weight='bold', linespacing=1.5, url=hw_urls[col]) - cell.set_url(hw_urls[col]) - cell.set_height(ori_height * 2.2) - - url_base = 'https://github.com/opencv/opencv_zoo/tree/main/models/' - model_urls = [f"{url_base}{x['folder']}" for x in cfg["Models"]] - model_urls = [""] + model_urls - for row in range(len(body) + 1): - cell = table[row, 0] - cell.set_text_props(url=model_urls[row]) - cell.set_url(model_urls[row]) - - # adjust table position - table_pos = axs[1].get_position() - axs[1].set_position([ - table_pos.x0, - table_pos.y0 - table_pos.height, - table_pos.width, - table_pos.height - ]) - - table.set_fontsize(11) - table.auto_set_font_size(False) - table.scale(1, 2) - table.auto_set_column_width(list(range(len(table_texts[0])))) - table.AXESPAD = 0 # cancel padding - - # highlight the best number - for i in range(len(min_list)): - cell = table.get_celld()[(i + 1, min_list[i])] - cell.set_text_props(weight='bold', color='white') - - # draw table and trigger changing the column width value - fig.canvas.draw() - # calculate table height and width - table_height = 0 - table_width = 0 - for i in range(len(table_texts)): - cell = table.get_celld()[(i, 0)] - table_height += cell.get_height() - for i in range(len(table_texts[0])): - cell = table.get_celld()[(0, i)] - table_width += cell.get_width() - - # add notes for table - axs[2].text(0, -table_height - 1, "Units: All data in milliseconds (ms).", va='bottom', ha='left', fontsize=11, transform=axs[1].transAxes) - axs[2].text(0, -table_height - 2, 
"\\*: Models are quantized in per-channel mode, which run slower than per-tensor quantized models on NPU.", va='bottom', ha='left', fontsize=11, transform=axs[1].transAxes) - - # adjust color map position to center - cm_pos = axs[0].get_position() - axs[0].set_position([ - (table_width - 1) / 2, - cm_pos.y0, - cm_pos.width, - cm_pos.height - ]) - - plt.rcParams['svg.fonttype'] = 'none' - plt.rcParams['svg.hashsalt'] = '11' # fix hash salt for avoiding id change - plt.savefig(save_path, format='svg', bbox_inches="tight", pad_inches=0, metadata={'Date': None, 'Creator': None}) - - -if __name__ == '__main__': - with open("table_config.yaml", 'r') as f: - cfg = yaml.safe_load(f) - - hw_info, model_info = parse_table("README.md", cfg) - render_table(hw_info, model_info, "color_table.svg", cfg, "RdYlGn_r") diff --git a/benchmark/requirements.txt b/benchmark/requirements.txt deleted file mode 100644 index 917b1045..00000000 --- a/benchmark/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -numpy -opencv-python<5.0 -pyyaml -requests -matplotlib>=3.7.1 \ No newline at end of file diff --git a/benchmark/table_config.yaml b/benchmark/table_config.yaml deleted file mode 100644 index c84f62f9..00000000 --- a/benchmark/table_config.yaml +++ /dev/null @@ -1,246 +0,0 @@ -# model information -# - name: model name, used for display -# task: model task, used for display -# input_size: input size, used for display -# folder: which folder the model located in, used for jumping to model detail -# acceptable_time: maximum acceptable inference time, large ones will be marked red -# keyword: used to specify this model from all benchmark results -# -# device information -# - name: full device name used to identify the device block, and jump to device detail -# display_info: device information for display -# platform: used to identify benchmark result of specific platform -# -# suffix information -# - model: which model -# device: which device -# suffix: this suffix will be appended to end of 
this text - -Models: - - name: "YuNet" - task: "Face Detection" - input_size: "160x120" - folder: "face_detection_yunet" - acceptable_time: 50 - keyword: "face_detection_yunet" - - - name: "SFace" - task: "Face Recognition" - input_size: "112x112" - folder: "face_recognition_sface" - acceptable_time: 200 - keyword: "face_recognition_sface" - - - name: "FER" - task: "Face Expression Recognition" - input_size: "112x112" - folder: "facial_expression_recognition" - acceptable_time: 200 - keyword: "facial_expression_recognition_mobilefacenet" - - - name: "LPD_YuNet" - task: "License Plate Detection" - input_size: "320x240" - folder: "license_plate_detection_yunet" - acceptable_time: 700 - keyword: "license_plate_detection_lpd_yunet" - - - name: "YOLOX" - task: "Object Detection" - input_size: "640x640" - folder: "object_detection_yolox" - acceptable_time: 2800 - keyword: "object_detection_yolox" - - - name: "NanoDet" - task: "Object Detection" - input_size: "416x416" - folder: "object_detection_nanodet" - acceptable_time: 2000 - keyword: "object_detection_nanodet" - - - name: "PPOCRDet-CN" - task: "Text Detection" - input_size: "640x480" - folder: "text_detection_ppocr" - acceptable_time: 2000 - keyword: "text_detection_cn_ppocrv3_2023may" - - - name: "PPOCRDet-EN" - task: "Text Detection" - input_size: "640x480" - folder: "text_detection_ppocr" - acceptable_time: 2000 - keyword: "text_detection_en_ppocrv3_2023may" - - - name: "CRNN-EN" - task: "Text Recognition" - input_size: "100x32" - folder: "text_recognition_crnn" - acceptable_time: 2000 - keyword: "text_recognition_CRNN_EN" - - - name: "CRNN-CN" - task: "Text Recognition" - input_size: "100x32" - folder: "text_recognition_crnn" - acceptable_time: 2000 - keyword: "text_recognition_CRNN_CN" - - - name: "PP-ResNet" - task: "Image Classification" - input_size: "224x224" - folder: "image_classification_ppresnet" - acceptable_time: 1000 - keyword: "image_classification_ppresnet50" - - - name: "MobileNet-V1" - task: 
"Image Classification" - input_size: "224x224" - folder: "image_classification_mobilenet" - acceptable_time: 500 - keyword: "image_classification_mobilenetv1" - - - name: "MobileNet-V2" - task: "Image Classification" - input_size: "224x224" - folder: "image_classification_mobilenet" - acceptable_time: 500 - keyword: "image_classification_mobilenetv2" - - - name: "PP-HumanSeg" - task: "Human Segmentation" - input_size: "192x192" - folder: "human_segmentation_pphumanseg" - acceptable_time: 700 - keyword: "human_segmentation_pphumanseg" - - - name: "WeChatQRCode" - task: "QR Code Detection and Parsing" - input_size: "100x100" - folder: "qrcode_wechatqrcode" - acceptable_time: 100 - keyword: "WeChatQRCode" - - - name: "YoutuReID" - task: "Person Re-Identification" - input_size: "128x256" - folder: "person_reid_youtureid" - acceptable_time: 800 - keyword: "person_reid_youtu" - - - name: "MP-PalmDet" - task: "Palm Detection" - input_size: "192x192" - folder: "palm_detection_mediapipe" - acceptable_time: 500 - keyword: "palm_detection_mediapipe" - - - name: "MP-HandPose" - task: "Hand Pose Estimation" - input_size: "224x224" - folder: "handpose_estimation_mediapipe" - acceptable_time: 500 - keyword: "handpose_estimation_mediapipe" - - - name: "MP-PersonDet" - task: "Person Detection" - input_size: "224x224" - folder: "person_detection_mediapipe" - acceptable_time: 1300 - keyword: "person_detection_mediapipe" - - - name: "MP-Pose" - task: "Pose Estimation" - input_size: "256x256" - folder: "pose_estimation_mediapipe" - acceptable_time: 700 - keyword: "pose_estimation_mediapipe" - - - name: "VitTrack" - task: "Object Tracking" - input_size: "1280x720" - folder: "object_tracking_vittrack" - acceptable_time: 1000 - keyword: "object_tracking_vittrack" - - -Devices: - - name: "Intel 12700K" - display_info: "Intel\n12700K\nCPU" - platform: "CPU" - - - name: "Atlas 200I DK A2" - display_info: "Atlas 200I DK A2\nAscend 310B\nCPU" - platform: "CPU" - - - name: "Atlas 200 DK" - 
display_info: "Atlas 200 DK\nAscend 310\nCPU" - platform: "CPU" - - - name: "Khadas VIM3" - display_info: "Khadas VIM3\nA311D\nCPU" - platform: "CPU" - - - name: "Khadas VIM4" - display_info: "Khadas VIM4\nA311D2\nCPU" - platform: "CPU" - - - name: "Khadas Edge2 (with RK3588)" - display_info: "Khadas Edge2\nRK3588S\nCPU" - platform: "CPU" - - - name: "Jetson Nano B01" - display_info: "Jetson Nano\nB01\nCPU" - platform: "CPU" - - - name: "Jetson Nano Orin" - display_info: "Jetson Nano\nOrin\nCPU" - platform: "CPU" - - - name: "Raspberry Pi 4B" - display_info: "Raspberry Pi 4B\nBCM2711\nCPU" - platform: "CPU" - - - name: "Horizon Sunrise X3 PI" - display_info: "Horizon Sunrise Pi\nX3\nCPU" - platform: "CPU" - - - name: "MAIX-III AX-PI" - display_info: "MAIX-III AX-Pi\nAX620A\nCPU" - platform: "CPU" - - - name: "Toybrick RV1126" - display_info: "Toybrick\nRV1126\nCPU" - platform: "CPU" - - - name: "StarFive VisionFive 2" - display_info: "StarFive VisionFive 2\nStarFive JH7110\nCPU" - platform: "CPU" - - - name: "Jetson Nano B01" - display_info: "Jetson Nano\nB01\nGPU" - platform: "GPU (CUDA-FP32)" - - - name: "Jetson Nano Orin" - display_info: "Jetson Nano\nOrin\nGPU" - platform: "GPU (CUDA-FP32)" - - - name: "Khadas VIM3" - display_info: "Khadas VIM3\nA311D\nNPU" - platform: "NPU (TIMVX)" - - - name: "Atlas 200 DK" - display_info: "Atlas 200 DK\nAscend 310\nNPU" - platform: "NPU (CANN)" - -Suffixes: - - model: "MobileNet-V1" - device: "Khadas VIM3" - platform: "NPU (TIMVX)" - str: "\\*" - - - model: "MobileNet-V2" - device: "Khadas VIM3" - platform: "NPU (TIMVX)" - str: "\\*" diff --git a/benchmark/utils/__init__.py b/benchmark/utils/__init__.py deleted file mode 100644 index fb908474..00000000 --- a/benchmark/utils/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .factory import (METRICS, DATALOADERS) -from .metrics import * -from .dataloaders import * - -__all__ = ['METRICS', 'DATALOADERS'] \ No newline at end of file diff --git 
a/benchmark/utils/dataloaders/__init__.py b/benchmark/utils/dataloaders/__init__.py deleted file mode 100644 index 5d0e4aed..00000000 --- a/benchmark/utils/dataloaders/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .base import BaseImageLoader, BaseVideoLoader -from .classification import ClassificationImageLoader -from .recognition import RecognitionImageLoader -from .tracking import TrackingVideoLoader - -__all__ = ['BaseImageLoader', 'BaseVideoLoader', 'ClassificationImageLoader', 'RecognitionImageLoader', 'TrackingVideoLoader'] \ No newline at end of file diff --git a/benchmark/utils/dataloaders/base.py b/benchmark/utils/dataloaders/base.py deleted file mode 100644 index 229b1b21..00000000 --- a/benchmark/utils/dataloaders/base.py +++ /dev/null @@ -1,12 +0,0 @@ -from .base_dataloader import _BaseImageLoader, _BaseVideoLoader -from ..factory import DATALOADERS - -@DATALOADERS.register -class BaseImageLoader(_BaseImageLoader): - def __init__(self, **kwargs): - super().__init__(**kwargs) - -@DATALOADERS.register -class BaseVideoLoader(_BaseVideoLoader): - def __init__(self, **kwargs): - super().__init__(**kwargs) \ No newline at end of file diff --git a/benchmark/utils/dataloaders/base_dataloader.py b/benchmark/utils/dataloaders/base_dataloader.py deleted file mode 100644 index 89416f19..00000000 --- a/benchmark/utils/dataloaders/base_dataloader.py +++ /dev/null @@ -1,84 +0,0 @@ -import os - -import cv2 as cv - -class _BaseImageLoader: - def __init__(self, **kwargs): - self._path = kwargs.pop('path', None) - assert self._path, 'Benchmark[\'data\'][\'path\'] cannot be empty.' 
- - self._files = kwargs.pop('files', None) - assert self._files, 'Benchmark[\'data\'][\'files\'] cannot be empty' - self._len_files = len(self._files) - - self._sizes = kwargs.pop('sizes', [[0, 0]]) - self._len_sizes = len(self._sizes) - - @property - def name(self): - return self.__class__.__name__ - - def __len__(self): - return self._len_files * self._len_sizes - - def __iter__(self): - for filename in self._files: - image = cv.imread(os.path.join(self._path, filename)) - if [0, 0] in self._sizes: - yield filename, image - else: - for size in self._sizes: - image_r = cv.resize(image, size) - yield filename, image_r - -class _VideoStream: - def __init__(self, filepath): - self._filepath = filepath - self._video = cv.VideoCapture(self._filepath) - - def __iter__(self): - while True: - has_frame, frame = self._video.read() - if has_frame: - yield frame - else: - break - - def __next__(self): - while True: - has_frame, frame = self._video.read() - if has_frame: - return frame - else: - break - - def reload(self): - self._video = cv.VideoCapture(self._filepath) - - def getFrameSize(self): - w = int(self._video.get(cv.CAP_PROP_FRAME_WIDTH)) - h = int(self._video.get(cv.CAP_PROP_FRAME_HEIGHT)) - return [w, h] - - -class _BaseVideoLoader: - def __init__(self, **kwargs): - self._path = kwargs.pop('path', None) - assert self._path, 'Benchmark[\'data\'][\'path\'] cannot be empty.' - - self._files = kwargs.pop('files', None) - assert self._files,'Benchmark[\'data\'][\'files\'] cannot be empty.' 
- - self._streams = dict() - for filename in self._files: - self._streams[filename] = _VideoStream(os.path.join(self._path, filename)) - - @property - def name(self): - return self.__class__.__name__ - - def __len__(self): - return len(self._files) - - def __getitem__(self, idx): - return self._files[idx], self._streams[idx] \ No newline at end of file diff --git a/benchmark/utils/dataloaders/classification.py b/benchmark/utils/dataloaders/classification.py deleted file mode 100644 index 72c50b89..00000000 --- a/benchmark/utils/dataloaders/classification.py +++ /dev/null @@ -1,42 +0,0 @@ -import os - -import numpy as np -import cv2 as cv - -from .base_dataloader import _BaseImageLoader -from ..factory import DATALOADERS - -@DATALOADERS.register -class ClassificationImageLoader(_BaseImageLoader): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - self._to_rgb = kwargs.pop('toRGB', False) - self._center_crop = kwargs.pop('centerCrop', None) - - def _toRGB(self, image): - return cv.cvtColor(image, cv.COLOR_BGR2RGB) - - def _centerCrop(self, image): - h, w, _ = image.shape - w_crop = int((w - self._center_crop) / 2.) - assert w_crop >= 0 - h_crop = int((h - self._center_crop) / 2.) 
- assert h_crop >= 0 - return image[w_crop:w-w_crop, h_crop:h-h_crop, :] - - def __iter__(self): - for filename in self._files: - image = cv.imread(os.path.join(self._path, filename)) - - if self._to_rgb: - image = self._toRGB(image) - - if [0, 0] in self._sizes: - yield filename, image - else: - for size in self._sizes: - image = cv.resize(image, size) - if self._center_crop: - image = self._centerCrop(image) - yield filename, image \ No newline at end of file diff --git a/benchmark/utils/dataloaders/recognition.py b/benchmark/utils/dataloaders/recognition.py deleted file mode 100644 index 62c77f23..00000000 --- a/benchmark/utils/dataloaders/recognition.py +++ /dev/null @@ -1,33 +0,0 @@ -import os - -import numpy as np -import cv2 as cv - -from .base_dataloader import _BaseImageLoader -from ..factory import DATALOADERS - -@DATALOADERS.register -class RecognitionImageLoader(_BaseImageLoader): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - self._labels = self._load_label() - - def _load_label(self): - labels = dict.fromkeys(self._files, None) - for filename in self._files: - if os.path.exists(os.path.join(self._path, '{}.txt'.format(filename[:-4]))): - labels[filename] = np.loadtxt(os.path.join(self._path, '{}.txt'.format(filename[:-4])), ndmin=2) - else: - labels[filename] = None - return labels - - def __iter__(self): - for filename in self._files: - image = cv.imread(os.path.join(self._path, filename)) - if [0, 0] in self._sizes: - yield filename, image, self._labels[filename] - else: - for size in self._sizes: - image_r = cv.resize(image, size) - yield filename, image_r, self._labels[filename] \ No newline at end of file diff --git a/benchmark/utils/dataloaders/tracking.py b/benchmark/utils/dataloaders/tracking.py deleted file mode 100644 index 1797fd65..00000000 --- a/benchmark/utils/dataloaders/tracking.py +++ /dev/null @@ -1,27 +0,0 @@ -import os -import numpy as np - -from .base_dataloader import _BaseVideoLoader -from ..factory import 
DATALOADERS - -@DATALOADERS.register -class TrackingVideoLoader(_BaseVideoLoader): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - self._first_frames = dict() - for filename in self._files: - stream = self._streams[filename] - self._first_frames[filename] = next(stream) - - self._rois = self._load_roi() - - def _load_roi(self): - rois = dict.fromkeys(self._files, None) - for filename in self._files: - rois[filename] = np.loadtxt(os.path.join(self._path, '{}.txt'.format(filename[:-4])), dtype=np.int32, ndmin=2) - return rois - - def __getitem__(self, idx): - filename = self._files[idx] - return filename, self._streams[filename], self._first_frames[filename], self._rois[filename] \ No newline at end of file diff --git a/benchmark/utils/factory.py b/benchmark/utils/factory.py deleted file mode 100644 index 6325e7a0..00000000 --- a/benchmark/utils/factory.py +++ /dev/null @@ -1,20 +0,0 @@ -class Registery: - def __init__(self, name): - self._name = name - self._dict = dict() - - def get(self, key): - if key in self._dict: - return self._dict[key] - else: - return self._dict['Base'] - - def register(self, item): - self._dict[item.__name__] = item - # renaming *ImageLoader/*VideoLoader - if 'ImageLoader' in item.__name__: - name = item.__name__.replace('ImageLoader', '') - self._dict[name] = item - -METRICS = Registery('Metrics') -DATALOADERS = Registery('DataLoaders') \ No newline at end of file diff --git a/benchmark/utils/metrics/__init__.py b/benchmark/utils/metrics/__init__.py deleted file mode 100644 index 9f524870..00000000 --- a/benchmark/utils/metrics/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .base import Base -from .detection import Detection -from .recognition import Recognition -from .tracking import Tracking - -__all__ = ['Base', 'Detection', 'Recognition', 'Tracking'] \ No newline at end of file diff --git a/benchmark/utils/metrics/base.py b/benchmark/utils/metrics/base.py deleted file mode 100644 index 15eeafb7..00000000 --- 
a/benchmark/utils/metrics/base.py +++ /dev/null @@ -1,24 +0,0 @@ -import cv2 as cv - -from .base_metric import BaseMetric -from ..factory import METRICS - -@METRICS.register -class Base(BaseMetric): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def forward(self, model, *args, **kwargs): - img = args[0] - - # warmup - for _ in range(self._warmup): - model.infer(img) - # repeat - self._timer.reset() - for _ in range(self._repeat): - self._timer.start() - model.infer(img) - self._timer.stop() - - return self._timer.getRecords() diff --git a/benchmark/utils/metrics/base_metric.py b/benchmark/utils/metrics/base_metric.py deleted file mode 100644 index 8cfb6c87..00000000 --- a/benchmark/utils/metrics/base_metric.py +++ /dev/null @@ -1,41 +0,0 @@ -import cv2 as cv - -from ..timer import Timer - -class BaseMetric: - def __init__(self, **kwargs): - self._warmup = kwargs.pop('warmup', 3) - self._repeat = kwargs.pop('repeat', 10) - - self._timer = Timer() - - def _calcMedian(self, records): - ''' Return the median of records - ''' - l = len(records) - mid = int(l / 2) - if l % 2 == 0: - return (records[mid] + records[mid - 1]) / 2 - else: - return records[mid] - - def _calcMean(self, records, drop_largest=1): - ''' Return the mean of records after dropping drop_largest - ''' - l = len(records) - if l <= drop_largest: - print('len(records)({}) <= drop_largest({}), stop dropping.'.format(l, drop_largest)) - records_sorted = sorted(records, reverse=True) - return sum(records_sorted[drop_largest:]) / (l - drop_largest) - - def _calcMin(self, records): - return min(records) - - def getPerfStats(self, records): - mean = self._calcMean(records, int(len(records) / 10)) - median = self._calcMedian(records) - minimum = self._calcMin(records) - return [mean, median, minimum] - - def forward(self, model, *args, **kwargs): - raise NotImplementedError('Not implemented') diff --git a/benchmark/utils/metrics/detection.py b/benchmark/utils/metrics/detection.py deleted file 
mode 100644 index ce1a6532..00000000 --- a/benchmark/utils/metrics/detection.py +++ /dev/null @@ -1,29 +0,0 @@ -import cv2 as cv - -from .base_metric import BaseMetric -from ..factory import METRICS - -@METRICS.register -class Detection(BaseMetric): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def forward(self, model, *args, **kwargs): - img = args[0] - size = [img.shape[1], img.shape[0]] - try: - model.setInputSize(size) - except: - pass - - # warmup - for _ in range(self._warmup): - model.infer(img) - # repeat - self._timer.reset() - for _ in range(self._repeat): - self._timer.start() - model.infer(img) - self._timer.stop() - - return self._timer.getRecords() diff --git a/benchmark/utils/metrics/recognition.py b/benchmark/utils/metrics/recognition.py deleted file mode 100644 index b34b44bd..00000000 --- a/benchmark/utils/metrics/recognition.py +++ /dev/null @@ -1,31 +0,0 @@ -import cv2 as cv - -from .base_metric import BaseMetric -from ..factory import METRICS - -@METRICS.register -class Recognition(BaseMetric): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def forward(self, model, *args, **kwargs): - img, bboxes = args - - self._timer.reset() - if bboxes is not None: - for idx, bbox in enumerate(bboxes): - for _ in range(self._warmup): - model.infer(img, bbox) - for _ in range(self._repeat): - self._timer.start() - model.infer(img, bbox) - self._timer.stop() - else: - for _ in range(self._warmup): - model.infer(img, None) - for _ in range(self._repeat): - self._timer.start() - model.infer(img, None) - self._timer.stop() - - return self._timer.getRecords() diff --git a/benchmark/utils/metrics/tracking.py b/benchmark/utils/metrics/tracking.py deleted file mode 100644 index 29cede13..00000000 --- a/benchmark/utils/metrics/tracking.py +++ /dev/null @@ -1,26 +0,0 @@ -import cv2 as cv - -from .base_metric import BaseMetric -from ..factory import METRICS - -@METRICS.register -class Tracking(BaseMetric): - def __init__(self, 
**kwargs): - super().__init__(**kwargs) - - # if self._warmup or self._repeat: - # print('warmup and repeat in metric for tracking do not function.') - - def forward(self, model, *args, **kwargs): - stream, first_frame, rois = args - - for roi in rois: - stream.reload() - model.init(first_frame, tuple(roi)) - self._timer.reset() - for frame in stream: - self._timer.start() - model.infer(frame) - self._timer.stop() - - return self._timer.getRecords() diff --git a/benchmark/utils/timer.py b/benchmark/utils/timer.py deleted file mode 100644 index dcff0cbe..00000000 --- a/benchmark/utils/timer.py +++ /dev/null @@ -1,20 +0,0 @@ -import cv2 as cv - -class Timer: - def __init__(self): - self._tm = cv.TickMeter() - self._record = [] - - def start(self): - self._tm.start() - - def stop(self): - self._tm.stop() - self._record.append(self._tm.getTimeMilli()) - self._tm.reset() - - def reset(self): - self._record = [] - - def getRecords(self): - return self._record \ No newline at end of file diff --git a/models/__init__.py b/models/__init__.py deleted file mode 100644 index d51c45b7..00000000 --- a/models/__init__.py +++ /dev/null @@ -1,102 +0,0 @@ -from pathlib import Path -import glob -import os - -from .face_detection_yunet.yunet import YuNet -from .text_recognition_crnn.crnn import CRNN -from .face_recognition_sface.sface import SFace -from .image_classification_ppresnet.ppresnet import PPResNet -from .human_segmentation_pphumanseg.pphumanseg import PPHumanSeg -from .person_detection_mediapipe.mp_persondet import MPPersonDet -from .pose_estimation_mediapipe.mp_pose import MPPose -from .qrcode_wechatqrcode.wechatqrcode import WeChatQRCode -from .person_reid_youtureid.youtureid import YoutuReID -from .image_classification_mobilenet.mobilenet import MobileNet -from .palm_detection_mediapipe.mp_palmdet import MPPalmDet -from .handpose_estimation_mediapipe.mp_handpose import MPHandPose -from .license_plate_detection_yunet.lpd_yunet import LPD_YuNet -from 
.object_detection_nanodet.nanodet import NanoDet -from .object_detection_yolox.yolox import YoloX -from .facial_expression_recognition.facial_fer_model import FacialExpressionRecog -from .object_tracking_vittrack.vittrack import VitTrack -from .text_detection_ppocr.ppocr_det import PPOCRDet -from .image_segmentation_efficientsam.efficientSAM import EfficientSAM - -class ModuleRegistery: - def __init__(self, name): - self._name = name - self._dict = dict() - - self._base_path = Path(__file__).parent - - def get(self, key): - ''' - Returns a tuple with: - - a module handler, - - a list of model file paths - ''' - return self._dict[key] - - def register(self, item): - ''' - Registers given module handler along with paths of model files - ''' - # search for model files - model_dir = str(self._base_path / item.__module__.split(".")[1]) - fp32_model_paths = [] - fp16_model_paths = [] - int8_model_paths = [] - int8bq_model_paths = [] - # onnx - ret_onnx = sorted(glob.glob(os.path.join(model_dir, "*.onnx"))) - if "object_tracking" in item.__module__: - # object tracking models usually have multiple parts - fp32_model_paths = [ret_onnx] - else: - for r in ret_onnx: - if "int8" in r: - int8_model_paths.append([r]) - elif "fp16" in r: # exclude fp16 for now - fp16_model_paths.append([r]) - elif "blocked" in r: - int8bq_model_paths.append([r]) - else: - fp32_model_paths.append([r]) - # caffe - ret_caffemodel = sorted(glob.glob(os.path.join(model_dir, "*.caffemodel"))) - ret_prototxt = sorted(glob.glob(os.path.join(model_dir, "*.prototxt"))) - caffe_models = [] - for caffemodel, prototxt in zip(ret_caffemodel, ret_prototxt): - caffe_models += [prototxt, caffemodel] - if caffe_models: - fp32_model_paths.append(caffe_models) - - all_model_paths = dict( - fp32=fp32_model_paths, - fp16=fp16_model_paths, - int8=int8_model_paths, - int8bq=int8bq_model_paths - ) - - self._dict[item.__name__] = (item, all_model_paths) - -MODELS = ModuleRegistery('Models') -MODELS.register(YuNet) 
-MODELS.register(CRNN) -MODELS.register(SFace) -MODELS.register(PPResNet) -MODELS.register(PPHumanSeg) -MODELS.register(MPPersonDet) -MODELS.register(MPPose) -MODELS.register(WeChatQRCode) -MODELS.register(YoutuReID) -MODELS.register(MobileNet) -MODELS.register(MPPalmDet) -MODELS.register(MPHandPose) -MODELS.register(LPD_YuNet) -MODELS.register(NanoDet) -MODELS.register(YoloX) -MODELS.register(FacialExpressionRecog) -MODELS.register(VitTrack) -MODELS.register(PPOCRDet) -MODELS.register(EfficientSAM) \ No newline at end of file diff --git a/models/deblurring_nafnet/CMakeLists.txt b/models/deblurring_nafnet/CMakeLists.txt deleted file mode 100644 index 63d95a06..00000000 --- a/models/deblurring_nafnet/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ -cmake_minimum_required(VERSION 3.22.2) -project(opencv_zoo_deblurring_nafnet) - -set(OPENCV_VERSION "5.0.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") - -# Find OpenCV -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) - -add_executable(opencv_zoo_deblurring_nafnet demo.cpp) -target_link_libraries(opencv_zoo_deblurring_nafnet ${OpenCV_LIBS}) diff --git a/models/deblurring_nafnet/LICENSE b/models/deblurring_nafnet/LICENSE deleted file mode 100644 index 50927b3b..00000000 --- a/models/deblurring_nafnet/LICENSE +++ /dev/null @@ -1,228 +0,0 @@ -MIT License - -Copyright (c) 2022 megvii-model - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - - -BasicSR -Copyright 2018-2020 BasicSR Authors - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright 2018-2020 BasicSR Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/models/deblurring_nafnet/README.md b/models/deblurring_nafnet/README.md deleted file mode 100644 index 56562d62..00000000 --- a/models/deblurring_nafnet/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# NAFNet - -NAFNet is a lightweight image deblurring model that eliminates nonlinear activations to achieve state-of-the-art performance with minimal computational cost. - -Notes: - -- Model source: [.pth](https://drive.google.com/file/d/14D4V4raNYIOhETfcuuLI3bGLB-OYIv6X/view). -- ONNX Model link: [ONNX](https://drive.google.com/uc?export=dowload&id=1ZLRhkpCekNruJZggVpBgSoCx3k7bJ-5v) - -## Requirements -Install latest OpenCV >=5.0.0 and CMake >= 3.22.2 to get started with. - -## Demo - -### Python - -Run the following command to try the demo: - -```shell -# deblur the default input image -python demo.py -# deblur the user input image -python demo.py --input /path/to/image - -# get help regarding various parameters -python demo.py --help -``` - -### C++ - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . 
-cmake --build build - -# deblur the default input image -./build/demo -# deblur the user input image -./build/demo --input=/path/to/image -# get help messages -./build/demo -h -``` - -### Example outputs - -![licenseplate_motion](./example_outputs/licenseplate_motion_output.jpg) - -## License - -All files in this directory are licensed under [MIT License](./LICENSE). - -## Reference - -- https://github.com/megvii-research/NAFNet diff --git a/models/deblurring_nafnet/deblurring_nafnet_2025may.onnx b/models/deblurring_nafnet/deblurring_nafnet_2025may.onnx deleted file mode 100644 index 59f8bbfb..00000000 --- a/models/deblurring_nafnet/deblurring_nafnet_2025may.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:07263f416febecce10193dd648e950b22e397cf521eedab1a114ef77b2bc9587 -size 91736251 diff --git a/models/deblurring_nafnet/demo.cpp b/models/deblurring_nafnet/demo.cpp deleted file mode 100644 index 189920fd..00000000 --- a/models/deblurring_nafnet/demo.cpp +++ /dev/null @@ -1,89 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -using namespace cv; -using namespace cv::dnn; -using namespace std; - -class Nafnet { -public: - Nafnet(const string& modelPath) { - loadModel(modelPath); - } - - // Function to set up the input image and process it - void process(const Mat& image, Mat& result) { - Mat blob = blobFromImage(image, 0.00392, Size(image.cols, image.rows), Scalar(0, 0, 0), true, false, CV_32F); - net.setInput(blob); - Mat output = net.forward(); - postProcess(output, result); - } - -private: - Net net; - - // Load Model - void loadModel(const string modelPath) { - net = readNetFromONNX(modelPath); - net.setPreferableBackend(DNN_BACKEND_DEFAULT); - net.setPreferableTarget(DNN_TARGET_CPU); - } - - void postProcess(const Mat& output, Mat& result) { - Mat output_transposed(3, &output.size[1], CV_32F, const_cast(reinterpret_cast(output.ptr()))); - - vector channels; - for (int i = 0; i < 3; ++i) { 
- channels.push_back(Mat(output_transposed.size[1], output_transposed.size[2], CV_32F, - output_transposed.ptr(i))); - } - merge(channels, result); - result.convertTo(result, CV_8UC3, 255.0); - cvtColor(result, result, COLOR_RGB2BGR); - } -}; - -int main(int argc, char** argv) { - const string about = - "This sample demonstrates deblurring with nafnet deblurring model.\n\n"; - const string keys = - "{ help h | | Print help message. }" - "{ input i | example_outputs/licenseplate_motion.jpg | Path to input image.}" - "{ model | deblurring_nafnet_2025may.onnx | Path to the nafnet deblurring onnx model file }"; - - CommandLineParser parser(argc, argv, keys); - if (parser.has("help")) - { - cout << about << endl; - parser.printMessage(); - return -1; - } - - parser = CommandLineParser(argc, argv, keys); - string model = parser.get("model"); - parser.about(about); - - Mat image = imread(parser.get("input")); - if (image.empty()) { - cerr << "Error: Input image could not be loaded." << endl; - return -1; - } - - // Create an instance of Dexined - Nafnet nafnet(model); - - Mat result; - nafnet.process(image, result); - - imshow("Input", image); - imshow("Output", result); - waitKey(0); - - destroyAllWindows(); - return 0; -} diff --git a/models/deblurring_nafnet/demo.py b/models/deblurring_nafnet/demo.py deleted file mode 100644 index 61b010ab..00000000 --- a/models/deblurring_nafnet/demo.py +++ /dev/null @@ -1,41 +0,0 @@ -import cv2 as cv -import argparse - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, [p.split('-')[0] for p in str_version.split('.')])) -assert opencv_python_version(cv.__version__) >= opencv_python_version("5.0.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from nafnet import Nafnet - -def get_args_parser(func_args): - parser = argparse.ArgumentParser(add_help=False) - parser.add_argument('--input', help='Path to input image.', 
default='example_outputs/licenseplate_motion.jpg', required=False) - parser.add_argument('--model', help='Path to nafnet deblurring onnx model', default='deblurring_nafnet_2025may.onnx', required=False) - - args, _ = parser.parse_known_args() - parser = argparse.ArgumentParser(parents=[parser], - description='', formatter_class=argparse.RawTextHelpFormatter) - return parser.parse_args(func_args) - -def main(func_args=None): - args = get_args_parser(func_args) - - nafnet = Nafnet(modelPath=args.model) - - input_image = cv.imread(args.input) - - tm = cv.TickMeter() - tm.start() - result = nafnet.infer(input_image) - tm.stop() - label = 'Inference time: {:.2f} ms'.format(tm.getTimeMilli()) - cv.putText(result, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0)) - - cv.imshow("Input image", input_image) - cv.imshow("Output image", result) - cv.waitKey(0) - cv.destroyAllWindows() - -if __name__ == '__main__': - main() diff --git a/models/deblurring_nafnet/example_outputs/licenseplate_motion.jpg b/models/deblurring_nafnet/example_outputs/licenseplate_motion.jpg deleted file mode 100644 index e55efcc2..00000000 --- a/models/deblurring_nafnet/example_outputs/licenseplate_motion.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:571b74dde1f171fc3a502c4f412c1b88faaf393f5530673bde67c4e76ec27273 -size 57018 diff --git a/models/deblurring_nafnet/example_outputs/licenseplate_motion_output.jpg b/models/deblurring_nafnet/example_outputs/licenseplate_motion_output.jpg deleted file mode 100644 index 35ee6b84..00000000 --- a/models/deblurring_nafnet/example_outputs/licenseplate_motion_output.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a5d12b85e2394313d1bb16939927df5b904521ef866727b7a447c266e9377dcc -size 67677 diff --git a/models/deblurring_nafnet/nafnet.py b/models/deblurring_nafnet/nafnet.py deleted file mode 100644 index 245a9b97..00000000 --- a/models/deblurring_nafnet/nafnet.py +++ 
/dev/null @@ -1,36 +0,0 @@ -import cv2 as cv -import numpy as np - -class Nafnet: - def __init__(self, modelPath='deblurring_nafnet_2025may.onnx', backendId=0, targetId=0): - self._modelPath = modelPath - self._backendId = backendId - self._targetId = targetId - - # Load the model - self._model = cv.dnn.readNetFromONNX(self._modelPath) - self.setBackendAndTarget(self._backendId, self._targetId) - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - def infer(self, image): - image_blob = cv.dnn.blobFromImage(image, 0.00392, (image.shape[1], image.shape[0]), (0,0,0), True, False) - - self._model.setInput(image_blob) - output = self._model.forward() - - # Postprocessing - result = output[0] - result = np.transpose(result, (1, 2, 0)) - result = np.clip(result * 255.0, 0, 255).astype(np.uint8) - result = cv.cvtColor(result, cv.COLOR_RGB2BGR) - - return result diff --git a/models/edge_detection_dexined/CMakeLists.txt b/models/edge_detection_dexined/CMakeLists.txt deleted file mode 100644 index 64c483b4..00000000 --- a/models/edge_detection_dexined/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ -cmake_minimum_required(VERSION 3.22.2) -project(opencv_zoo_edge_detection_dexined) - -set(OPENCV_VERSION "5.0.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") - -# Find OpenCV -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) - -add_executable(edge_detection edge_detection.cpp) -target_link_libraries(edge_detection ${OpenCV_LIBS}) diff --git a/models/edge_detection_dexined/LICENSE b/models/edge_detection_dexined/LICENSE deleted file mode 100644 index 1caaa72f..00000000 --- a/models/edge_detection_dexined/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2019 Xavier 
Soria Poma - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/models/edge_detection_dexined/README.md b/models/edge_detection_dexined/README.md deleted file mode 100644 index fadf8914..00000000 --- a/models/edge_detection_dexined/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# DexiNed - -DexiNed is a Convolutional Neural Network (CNN) architecture for edge detection. - -Notes: - -- Model source: [ONNX](https://drive.google.com/file/d/1u_qXqXqaIP_SqdGaq4CbZyjzkZb02XTs/view). -- Model source: [.pth](https://drive.google.com/file/d/1V56vGTsu7GYiQouCIKvTWl5UKCZ6yCNu/view). -- This ONNX model has fixed input shape, but OpenCV DNN infers on the exact shape of input image. See https://github.com/opencv/opencv_zoo/issues/44 for more information. - -## Requirements -Install latest OpenCV >=5.0.0 and CMake >= 3.22.2 to get started with. 
- -## Demo - -### Python - -Run the following command to try the demo: - -```shell -# detect on camera input -python demo.py -# detect on an image -python demo.py --input /path/to/image - -# get help regarding various parameters -python demo.py --help -``` - -### C++ - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# detect on camera input -./build/demo -# detect on an image -./build/demo --input=/path/to/image -# get help messages -./build/demo -h -``` - -### Example outputs - -![chicky](./example_outputs/chicky_output.jpg) - -## License - -All files in this directory are licensed under [MIT License](./LICENSE). - -## Reference - -- https://github.com/xavysp/DexiNed \ No newline at end of file diff --git a/models/edge_detection_dexined/demo.cpp b/models/edge_detection_dexined/demo.cpp deleted file mode 100644 index 66dfc4f5..00000000 --- a/models/edge_detection_dexined/demo.cpp +++ /dev/null @@ -1,138 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -using namespace cv; -using namespace cv::dnn; -using namespace std; - -class Dexined { -public: - Dexined(const string& modelPath) { - loadModel(modelPath); - } - - // Function to set up the input image and process it - void processFrame(const Mat& image, Mat& result) { - Mat blob = blobFromImage(image, 1.0, Size(512, 512), Scalar(103.5, 116.2, 123.6), false, false, CV_32F); - net.setInput(blob); - applyDexined(image, result); - } - -private: - Net net; - - // Load Model - void loadModel(const string modelPath) { - net = readNetFromONNX(modelPath); - net.setPreferableBackend(DNN_BACKEND_DEFAULT); - net.setPreferableTarget(DNN_TARGET_CPU); - } - - // Function to apply sigmoid activation - static void sigmoid(Mat& input) { - exp(-input, input); // e^-input - input = 1.0 / (1.0 + input); // 1 / (1 + e^-input) - } - - // Function to process the neural network 
output to generate edge maps - static pair postProcess(const vector& output, int height, int width) { - vector preds; - preds.reserve(output.size()); - for (const Mat &p : output) { - Mat img; - Mat processed; - if (p.dims == 4 && p.size[0] == 1 && p.size[1] == 1) { - processed = p.reshape(0, {p.size[2], p.size[3]}); - } else { - processed = p.clone(); - } - sigmoid(processed); - normalize(processed, img, 0, 255, NORM_MINMAX, CV_8U); - resize(img, img, Size(width, height)); - preds.push_back(img); - } - Mat fuse = preds.back(); - Mat ave = Mat::zeros(height, width, CV_32F); - for (Mat &pred : preds) { - Mat temp; - pred.convertTo(temp, CV_32F); - ave += temp; - } - ave /= static_cast(preds.size()); - ave.convertTo(ave, CV_8U); - return {fuse, ave}; - } - - // Function to apply the Dexined model - void applyDexined(const Mat& image, Mat& result) { - int originalWidth = image.cols; - int originalHeight = image.rows; - vector outputs; - net.forward(outputs); - pair res = postProcess(outputs, originalHeight, originalWidth); - result = res.first; // or res.second for average edge map - } -}; - -int main(int argc, char** argv) { - const string about = - "This sample demonstrates edge detection with dexined edge detection techniques.\n\n"; - const string keys = - "{ help h | | Print help message. }" - "{ input i | | Path to input image or video file. 
Skip this argument to capture frames from a camera.}" - "{ model | edge_detection_dexined_2024sep.onnx | Path to the dexined.onnx model file }"; - - CommandLineParser parser(argc, argv, keys); - if (parser.has("help")) - { - cout << about << endl; - parser.printMessage(); - return -1; - } - - parser = CommandLineParser(argc, argv, keys); - string model = parser.get("model"); - parser.about(about); - - VideoCapture cap; - if (parser.has("input")) - cap.open(samples::findFile(parser.get("input"))); - else - cap.open(0); - - namedWindow("Input", WINDOW_AUTOSIZE); - namedWindow("Output", WINDOW_AUTOSIZE); - moveWindow("Output", 200, 0); - - // Create an instance of Dexined - Dexined dexined(model); - Mat image; - - for (;;){ - cap >> image; - if (image.empty()) - { - cout << "Press any key to exit" << endl; - waitKey(); - break; - } - - Mat result; - dexined.processFrame(image, result); - - imshow("Input", image); - imshow("Output", result); - int key = waitKey(1); - if (key == 27 || key == 'q') - { - break; - } - } - destroyAllWindows(); - return 0; -} diff --git a/models/edge_detection_dexined/demo.py b/models/edge_detection_dexined/demo.py deleted file mode 100644 index ffc6992f..00000000 --- a/models/edge_detection_dexined/demo.py +++ /dev/null @@ -1,51 +0,0 @@ -import cv2 as cv -import argparse -from dexined import Dexined - -def get_args_parser(func_args): - parser = argparse.ArgumentParser(add_help=False) - parser.add_argument('--input', help='Path to input image or video file. 
Skip this argument to capture frames from a camera.', default=0, required=False) - parser.add_argument('--model', help='Path to dexined.onnx', default='edge_detection_dexined_2024sep.onnx', required=False) - - args, _ = parser.parse_known_args() - parser = argparse.ArgumentParser(parents=[parser], - description='', formatter_class=argparse.RawTextHelpFormatter) - return parser.parse_args(func_args) - -def main(func_args=None): - args = get_args_parser(func_args) - - dexined = Dexined(modelPath=args.model) - - # Open video or capture from camera - cap = cv.VideoCapture(cv.samples.findFile(args.input) if args.input else 0) - if not cap.isOpened(): - print("Failed to open the input video") - exit(-1) - - cv.namedWindow('Input', cv.WINDOW_AUTOSIZE) - cv.namedWindow('Output', cv.WINDOW_AUTOSIZE) - cv.moveWindow('Output', 200, 50) - - # Process frames - tm = cv.TickMeter() - while cv.waitKey(1) < 0: - hasFrame, image = cap.read() - if not hasFrame: - print("Press any key to exit") - cv.waitKey(0) - break - - tm.start() - result = dexined.infer(image) - tm.stop() - label = 'Inference time: {:.2f} ms, FPS: {:.2f}'.format(tm.getTimeMilli(), tm.getFPS()) - - cv.imshow("Input", image) - cv.putText(result, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255)) - cv.imshow("Output", result) - - cv.destroyAllWindows() - -if __name__ == '__main__': - main() diff --git a/models/edge_detection_dexined/dexined.py b/models/edge_detection_dexined/dexined.py deleted file mode 100644 index 9e4e7668..00000000 --- a/models/edge_detection_dexined/dexined.py +++ /dev/null @@ -1,50 +0,0 @@ -import cv2 as cv -import numpy as np - -class Dexined: - def __init__(self, modelPath='edge_detection_dexined_2024sep.onnx', backendId=0, targetId=0): - self._modelPath = modelPath - self._backendId = backendId - self._targetId = targetId - - # Load the model - self._model = cv.dnn.readNetFromONNX(self._modelPath) - self.setBackendAndTarget(self._backendId, self._targetId) - - @property - def 
name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - @staticmethod - def sigmoid(x): - return 1.0 / (1.0 + np.exp(-x)) - - def postProcessing(self, output, shape): - h, w = shape - preds = [] - for p in output: - img = self.sigmoid(p) - img = np.squeeze(img) - img = cv.normalize(img, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U) - img = cv.resize(img, (w, h)) - preds.append(img) - fuse = preds[-1] - ave = np.array(preds, dtype=np.float32) - ave = np.uint8(np.mean(ave, axis=0)) - return fuse, ave - - def infer(self, image): - inp = cv.dnn.blobFromImage(image, 1.0, (512, 512), (103.5, 116.2, 123.6), swapRB=False, crop=False) - self._model.setInput(inp) - - # Forward pass through the model - out = self._model.forward() - result, _ = self.postProcessing(out, image.shape[:2]) - - return result diff --git a/models/edge_detection_dexined/edge_detection_dexined_2024sep.onnx b/models/edge_detection_dexined/edge_detection_dexined_2024sep.onnx deleted file mode 100644 index f573283e..00000000 --- a/models/edge_detection_dexined/edge_detection_dexined_2024sep.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a50d01dc8481549c7dedb9eb3e0123b810a016520df75e4669a504609982cdd0 -size 47235563 diff --git a/models/edge_detection_dexined/example_outputs/chicky.jpg b/models/edge_detection_dexined/example_outputs/chicky.jpg deleted file mode 100644 index 46170923..00000000 --- a/models/edge_detection_dexined/example_outputs/chicky.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:37ed3af84d13bd6cffe0ae282692a382021f21df15f8bd5cf5308c14e49bd754 -size 125551 diff --git a/models/edge_detection_dexined/example_outputs/chicky_output.jpg b/models/edge_detection_dexined/example_outputs/chicky_output.jpg deleted 
file mode 100644 index b0904f03..00000000 --- a/models/edge_detection_dexined/example_outputs/chicky_output.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b229c89f5b0517922795b9f34ef4d654dd8bbc5d5f4fdfb12874bf63f383bcda -size 77596 diff --git a/models/face_detection_yunet/CMakeLists.txt b/models/face_detection_yunet/CMakeLists.txt deleted file mode 100644 index 68ebadcd..00000000 --- a/models/face_detection_yunet/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ -cmake_minimum_required(VERSION 3.24.0) -project(opencv_zoo_face_detection_yunet) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") - -# Find OpenCV -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) - -add_executable(demo demo.cpp) -target_link_libraries(demo ${OpenCV_LIBS}) diff --git a/models/face_detection_yunet/LICENSE b/models/face_detection_yunet/LICENSE deleted file mode 100644 index 4cdf89a4..00000000 --- a/models/face_detection_yunet/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2020 Shiqi Yu - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/models/face_detection_yunet/README.md b/models/face_detection_yunet/README.md deleted file mode 100644 index fda37c8d..00000000 --- a/models/face_detection_yunet/README.md +++ /dev/null @@ -1,90 +0,0 @@ -# YuNet - -YuNet is a light-weight, fast and accurate face detection model, which achieves 0.834(AP_easy), 0.824(AP_medium), 0.708(AP_hard) on the WIDER Face validation set. - -Notes: - -- Model source: [here](https://github.com/ShiqiYu/libfacedetection.train/blob/a61a428929148171b488f024b5d6774f93cdbc13/tasks/task1/onnx/yunet.onnx). -- This model can detect **faces of pixels between around 10x10 to 300x300** due to the training scheme. -- For details on training this model, please visit https://github.com/ShiqiYu/libfacedetection.train. -- This ONNX model has fixed input shape, but OpenCV DNN infers on the exact shape of input image. See https://github.com/opencv/opencv_zoo/issues/44 for more information. -- `face_detection_yunet_2023mar_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. -- Paper source: [Yunet: A tiny millisecond-level face detector](https://link.springer.com/article/10.1007/s11633-023-1423-y). - -Results of accuracy evaluation with [tools/eval](../../tools/eval). - -| Models | Easy AP | Medium AP | Hard AP | -| ----------- | ------- | --------- | ------- | -| YuNet | 0.8844 | 0.8656 | 0.7503 | -| YuNet block | 0.8845 | 0.8652 | 0.7504 | -| YuNet quant | 0.8810 | 0.8629 | 0.7503 | - - -\*: 'quant' stands for 'quantized'. -\*\*: 'block' stands for 'blockwise quantized'. 
- - -## Demo - -### Python - -Run the following command to try the demo: - -```shell -# detect on camera input -python demo.py -# detect on an image -python demo.py --input /path/to/image -v - -# get help regarding various parameters -python demo.py --help -``` - -### C++ - -Install latest OpenCV and CMake >= 3.24.0 to get started with: - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# detect on camera input -./build/demo -# detect on an image -./build/demo -i=/path/to/image -v -# get help messages -./build/demo -h -``` - -### Example outputs - -![webcam demo](./example_outputs/yunet_demo.gif) - -![largest selfie](./example_outputs/largest_selfie.jpg) - -## License - -All files in this directory are licensed under [MIT License](./LICENSE). - -## Reference - -- https://github.com/ShiqiYu/libfacedetection -- https://github.com/ShiqiYu/libfacedetection.train - -## Citation - -If you use `YuNet` in your work, please use the following BibTeX entries: - -``` -@article{wu2023yunet, - title={Yunet: A tiny millisecond-level face detector}, - author={Wu, Wei and Peng, Hanyang and Yu, Shiqi}, - journal={Machine Intelligence Research}, - volume={20}, - number={5}, - pages={656--665}, - year={2023}, - publisher={Springer} -} -``` diff --git a/models/face_detection_yunet/demo.cpp b/models/face_detection_yunet/demo.cpp deleted file mode 100644 index 45ebb747..00000000 --- a/models/face_detection_yunet/demo.cpp +++ /dev/null @@ -1,213 +0,0 @@ -#include "opencv2/opencv.hpp" - -#include -#include -#include -#include - -const std::map str2backend{ - {"opencv", cv::dnn::DNN_BACKEND_OPENCV}, {"cuda", cv::dnn::DNN_BACKEND_CUDA}, - {"timvx", cv::dnn::DNN_BACKEND_TIMVX}, {"cann", cv::dnn::DNN_BACKEND_CANN} -}; -const std::map str2target{ - {"cpu", cv::dnn::DNN_TARGET_CPU}, {"cuda", cv::dnn::DNN_TARGET_CUDA}, - {"npu", cv::dnn::DNN_TARGET_NPU}, {"cuda_fp16", 
cv::dnn::DNN_TARGET_CUDA_FP16} -}; - -class YuNet -{ -public: - YuNet(const std::string& model_path, - const cv::Size& input_size = cv::Size(320, 320), - float conf_threshold = 0.6f, - float nms_threshold = 0.3f, - int top_k = 5000, - int backend_id = 0, - int target_id = 0) - : model_path_(model_path), input_size_(input_size), - conf_threshold_(conf_threshold), nms_threshold_(nms_threshold), - top_k_(top_k), backend_id_(backend_id), target_id_(target_id) - { - model = cv::FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_); - } - - /* Overwrite the input size when creating the model. Size format: [Width, Height]. - */ - void setInputSize(const cv::Size& input_size) - { - input_size_ = input_size; - model->setInputSize(input_size_); - } - - cv::Mat infer(const cv::Mat image) - { - cv::Mat res; - model->detect(image, res); - return res; - } - -private: - cv::Ptr model; - - std::string model_path_; - cv::Size input_size_; - float conf_threshold_; - float nms_threshold_; - int top_k_; - int backend_id_; - int target_id_; -}; - -cv::Mat visualize(const cv::Mat& image, const cv::Mat& faces, float fps = -1.f) -{ - static cv::Scalar box_color{0, 255, 0}; - static std::vector landmark_color{ - cv::Scalar(255, 0, 0), // right eye - cv::Scalar( 0, 0, 255), // left eye - cv::Scalar( 0, 255, 0), // nose tip - cv::Scalar(255, 0, 255), // right mouth corner - cv::Scalar( 0, 255, 255) // left mouth corner - }; - static cv::Scalar text_color{0, 255, 0}; - - auto output_image = image.clone(); - - if (fps >= 0) - { - cv::putText(output_image, cv::format("FPS: %.2f", fps), cv::Point(0, 15), cv::FONT_HERSHEY_SIMPLEX, 0.5, text_color, 2); - } - - for (int i = 0; i < faces.rows; ++i) - { - // Draw bounding boxes - int x1 = static_cast(faces.at(i, 0)); - int y1 = static_cast(faces.at(i, 1)); - int w = static_cast(faces.at(i, 2)); - int h = static_cast(faces.at(i, 3)); - cv::rectangle(output_image, cv::Rect(x1, y1, w, h), 
box_color, 2); - - // Confidence as text - float conf = faces.at(i, 14); - cv::putText(output_image, cv::format("%.4f", conf), cv::Point(x1, y1+12), cv::FONT_HERSHEY_DUPLEX, 0.5, text_color); - - // Draw landmarks - for (int j = 0; j < landmark_color.size(); ++j) - { - int x = static_cast(faces.at(i, 2*j+4)), y = static_cast(faces.at(i, 2*j+5)); - cv::circle(output_image, cv::Point(x, y), 2, landmark_color[j], 2); - } - } - return output_image; -} - -int main(int argc, char** argv) -{ - cv::CommandLineParser parser(argc, argv, - "{help h | | Print this message}" - "{input i | | Set input to a certain image, omit if using camera}" - "{model m | face_detection_yunet_2023mar.onnx | Set path to the model}" - "{backend b | opencv | Set DNN backend}" - "{target t | cpu | Set DNN target}" - "{save s | false | Whether to save result image or not}" - "{vis v | false | Whether to visualize result image or not}" - /* model params below*/ - "{conf_threshold | 0.9 | Set the minimum confidence for the model to identify a face. Filter out faces of conf < conf_threshold}" - "{nms_threshold | 0.3 | Set the threshold to suppress overlapped boxes. Suppress boxes if IoU(box1, box2) >= nms_threshold, the one of higher score is kept.}" - "{top_k | 5000 | Keep top_k bounding boxes before NMS. 
Set a lower value may help speed up postprocessing.}" - ); - if (parser.has("help")) - { - parser.printMessage(); - return 0; - } - - std::string input_path = parser.get("input"); - std::string model_path = parser.get("model"); - std::string backend = parser.get("backend"); - std::string target = parser.get("target"); - bool save_flag = parser.get("save"); - bool vis_flag = parser.get("vis"); - - // model params - float conf_threshold = parser.get("conf_threshold"); - float nms_threshold = parser.get("nms_threshold"); - int top_k = parser.get("top_k"); - const int backend_id = str2backend.at(backend); - const int target_id = str2target.at(target); - - // Instantiate YuNet - YuNet model(model_path, cv::Size(320, 320), conf_threshold, nms_threshold, top_k, backend_id, target_id); - - // If input is an image - if (!input_path.empty()) - { - auto image = cv::imread(input_path); - - // Inference - model.setInputSize(image.size()); - auto faces = model.infer(image); - - // Print faces - std::cout << cv::format("%d faces detected:\n", faces.rows); - for (int i = 0; i < faces.rows; ++i) - { - int x1 = static_cast(faces.at(i, 0)); - int y1 = static_cast(faces.at(i, 1)); - int w = static_cast(faces.at(i, 2)); - int h = static_cast(faces.at(i, 3)); - float conf = faces.at(i, 14); - std::cout << cv::format("%d: x1=%d, y1=%d, w=%d, h=%d, conf=%.4f\n", i, x1, y1, w, h, conf); - } - - // Draw reults on the input image - if (save_flag || vis_flag) - { - auto res_image = visualize(image, faces); - if (save_flag) - { - std::cout << "Results are saved to result.jpg\n"; - cv::imwrite("result.jpg", res_image); - } - if (vis_flag) - { - cv::namedWindow(input_path, cv::WINDOW_AUTOSIZE); - cv::imshow(input_path, res_image); - cv::waitKey(0); - } - } - } - else // Call default camera - { - int device_id = 0; - auto cap = cv::VideoCapture(device_id); - int w = static_cast(cap.get(cv::CAP_PROP_FRAME_WIDTH)); - int h = static_cast(cap.get(cv::CAP_PROP_FRAME_HEIGHT)); - 
model.setInputSize(cv::Size(w, h)); - - auto tick_meter = cv::TickMeter(); - cv::Mat frame; - while (cv::waitKey(1) < 0) - { - bool has_frame = cap.read(frame); - if (!has_frame) - { - std::cout << "No frames grabbed! Exiting ...\n"; - break; - } - - // Inference - tick_meter.start(); - cv::Mat faces = model.infer(frame); - tick_meter.stop(); - - // Draw results on the input image - auto res_image = visualize(frame, faces, (float)tick_meter.getFPS()); - // Visualize in a new window - cv::imshow("YuNet Demo", res_image); - - tick_meter.reset(); - } - } - - return 0; -} diff --git a/models/face_detection_yunet/demo.py b/models/face_detection_yunet/demo.py deleted file mode 100644 index d33a9db5..00000000 --- a/models/face_detection_yunet/demo.py +++ /dev/null @@ -1,146 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. 
- -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from yunet import YuNet - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).') -parser.add_argument('--input', '-i', type=str, - help='Usage: Set input to a certain image, omit if using camera.') -parser.add_argument('--model', '-m', type=str, default='face_detection_yunet_2023mar.onnx', - help="Usage: Set model type, defaults to 'face_detection_yunet_2023mar.onnx'.") -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--conf_threshold', type=float, default=0.9, - help='Usage: Set the minimum needed confidence for the model to identify a face, defauts to 0.9. Smaller values may result in faster detection, but will limit accuracy. Filter out faces of confidence < conf_threshold.') -parser.add_argument('--nms_threshold', type=float, default=0.3, - help='Usage: Suppress bounding boxes of iou >= nms_threshold. 
Default = 0.3.') -parser.add_argument('--top_k', type=int, default=5000, - help='Usage: Keep top_k bounding boxes before NMS.') -parser.add_argument('--save', '-s', action='store_true', - help='Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.') -parser.add_argument('--vis', '-v', action='store_true', - help='Usage: Specify to open a new window to show results. Invalid in case of camera input.') -args = parser.parse_args() - -def visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), fps=None): - output = image.copy() - landmark_color = [ - (255, 0, 0), # right eye - ( 0, 0, 255), # left eye - ( 0, 255, 0), # nose tip - (255, 0, 255), # right mouth corner - ( 0, 255, 255) # left mouth corner - ] - - if fps is not None: - cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color) - - for det in results: - bbox = det[0:4].astype(np.int32) - cv.rectangle(output, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), box_color, 2) - - conf = det[-1] - cv.putText(output, '{:.4f}'.format(conf), (bbox[0], bbox[1]+12), cv.FONT_HERSHEY_DUPLEX, 0.5, text_color) - - landmarks = det[4:14].astype(np.int32).reshape((5,2)) - for idx, landmark in enumerate(landmarks): - cv.circle(output, landmark, 2, landmark_color[idx], 2) - - return output - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - - # Instantiate YuNet - model = YuNet(modelPath=args.model, - inputSize=[320, 320], - confThreshold=args.conf_threshold, - nmsThreshold=args.nms_threshold, - topK=args.top_k, - backendId=backend_id, - targetId=target_id) - - # If input is an image - if args.input is not None: - image = cv.imread(args.input) - h, w, _ = image.shape - - # Inference - model.setInputSize([w, h]) - results = model.infer(image) - - # Print results - print('{} faces 
detected.'.format(results.shape[0])) - for idx, det in enumerate(results): - print('{}: {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f}'.format( - idx, *det[:-1]) - ) - - # Draw results on the input image - image = visualize(image, results) - - # Save results if save is true - if args.save: - print('Resutls saved to result.jpg\n') - cv.imwrite('result.jpg', image) - - # Visualize results in a new window - if args.vis: - cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) - cv.imshow(args.input, image) - cv.waitKey(0) - else: # Omit input to call default camera - deviceId = 0 - cap = cv.VideoCapture(deviceId) - w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH)) - h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) - model.setInputSize([w, h]) - - tm = cv.TickMeter() - while cv.waitKey(1) < 0: - hasFrame, frame = cap.read() - if not hasFrame: - print('No frames grabbed!') - break - - # Inference - tm.start() - results = model.infer(frame) # results is a tuple - tm.stop() - - # Draw results on the input image - frame = visualize(frame, results, fps=tm.getFPS()) - - # Visualize results in a new Window - cv.imshow('YuNet Demo', frame) - - tm.reset() diff --git a/models/face_detection_yunet/example_outputs/largest_selfie.jpg b/models/face_detection_yunet/example_outputs/largest_selfie.jpg deleted file mode 100644 index fe494914..00000000 --- a/models/face_detection_yunet/example_outputs/largest_selfie.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ab8413ad9bb4f53068f4fb63c6747e5989991dd02241c923d5595b614ecf2bf6 -size 1147146 diff --git a/models/face_detection_yunet/example_outputs/yunet_demo.gif b/models/face_detection_yunet/example_outputs/yunet_demo.gif deleted file mode 100644 index 099beab6..00000000 --- a/models/face_detection_yunet/example_outputs/yunet_demo.gif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:db90459c308b14dd423014eabf3253f5f6147fbe7906e81429a7a88c8dbe7b8c -size 661072 diff --git a/models/face_detection_yunet/face_detection_yunet_2023mar.onnx b/models/face_detection_yunet/face_detection_yunet_2023mar.onnx deleted file mode 100644 index 2d8804a5..00000000 --- a/models/face_detection_yunet/face_detection_yunet_2023mar.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8f2383e4dd3cfbb4553ea8718107fc0423210dc964f9f4280604804ed2552fa4 -size 232589 diff --git a/models/face_detection_yunet/face_detection_yunet_2023mar_int8.onnx b/models/face_detection_yunet/face_detection_yunet_2023mar_int8.onnx deleted file mode 100644 index c10540eb..00000000 --- a/models/face_detection_yunet/face_detection_yunet_2023mar_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:321aa5a6afabf7ecc46a3d06bfab2b579dc96eb5c3be7edd365fa04502ad9294 -size 100416 diff --git a/models/face_detection_yunet/face_detection_yunet_2023mar_int8bq.onnx b/models/face_detection_yunet/face_detection_yunet_2023mar_int8bq.onnx deleted file mode 100644 index 5778d83e..00000000 --- a/models/face_detection_yunet/face_detection_yunet_2023mar_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:49f000ec501fef24739071fc7e68267d32209045b6822c0c72dce1da25726f10 -size 122489 diff --git a/models/face_detection_yunet/yunet.py b/models/face_detection_yunet/yunet.py deleted file mode 100644 index 710d24b8..00000000 --- a/models/face_detection_yunet/yunet.py +++ /dev/null @@ -1,55 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. 
- -from itertools import product - -import numpy as np -import cv2 as cv - -class YuNet: - def __init__(self, modelPath, inputSize=[320, 320], confThreshold=0.6, nmsThreshold=0.3, topK=5000, backendId=0, targetId=0): - self._modelPath = modelPath - self._inputSize = tuple(inputSize) # [w, h] - self._confThreshold = confThreshold - self._nmsThreshold = nmsThreshold - self._topK = topK - self._backendId = backendId - self._targetId = targetId - - self._model = cv.FaceDetectorYN.create( - model=self._modelPath, - config="", - input_size=self._inputSize, - score_threshold=self._confThreshold, - nms_threshold=self._nmsThreshold, - top_k=self._topK, - backend_id=self._backendId, - target_id=self._targetId) - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self._model = cv.FaceDetectorYN.create( - model=self._modelPath, - config="", - input_size=self._inputSize, - score_threshold=self._confThreshold, - nms_threshold=self._nmsThreshold, - top_k=self._topK, - backend_id=self._backendId, - target_id=self._targetId) - - def setInputSize(self, input_size): - self._model.setInputSize(tuple(input_size)) - - def infer(self, image): - # Forward - faces = self._model.detect(image) - return np.empty(shape=(0, 5)) if faces[1] is None else faces[1] diff --git a/models/face_image_quality_assessment_ediffiqa/LICENSE b/models/face_image_quality_assessment_ediffiqa/LICENSE deleted file mode 100644 index 4ea99c21..00000000 --- a/models/face_image_quality_assessment_ediffiqa/LICENSE +++ /dev/null @@ -1,395 +0,0 @@ -Attribution 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. 
Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. 
Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution 4.0 International Public License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution 4.0 International Public License ("Public License"). To the -extent this Public License may be interpreted as a contract, You are -granted the Licensed Rights in consideration of Your acceptance of -these terms and conditions, and the Licensor grants You such rights in -consideration of benefits the Licensor receives from making the -Licensed Material available under these terms and conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. 
Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - d. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - e. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - f. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - g. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - h. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - i. 
Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - j. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - k. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. 
The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. 
To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - 4. 
If You Share Adapted Material You produce, the Adapter's - License You apply must not prevent recipients of the Adapted - Material from complying with this Public License. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material; and - - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. 
TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. 
The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public -licenses. Notwithstanding, Creative Commons may elect to apply one of -its public licenses to material it publishes and in those instances -will be considered the “Licensor.” The text of the Creative Commons -public licenses is dedicated to the public domain under the CC0 Public -Domain Dedication. 
Except for the limited purpose of indicating that -material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the -public licenses. - -Creative Commons may be contacted at creativecommons.org. diff --git a/models/face_image_quality_assessment_ediffiqa/README.md b/models/face_image_quality_assessment_ediffiqa/README.md deleted file mode 100644 index 83ea05ab..00000000 --- a/models/face_image_quality_assessment_ediffiqa/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# eDifFIQA(T) - -eDifFIQA(T) is a light-weight version of the models presented in the paper [eDifFIQA: Towards Efficient Face Image Quality Assessment based on Denoising Diffusion Probabilistic Models](https://ieeexplore.ieee.org/document/10468647), it achieves state-of-the-art results in the field of face image quality assessment. - -Notes: - -- The original implementation can be found [here](https://github.com/LSIbabnikz/eDifFIQA). -- The included model combines a pretrained MobileFaceNet backbone, with a quality regression head trained using the proceedure presented in the original paper. -- The model predicts quality scores of aligned face samples, where a higher predicted score corresponds to a higher quality of the input sample. - -- In the figure below we show the quality distribution on two distinct datasets: LFW[[1]](#1) and XQLFW[[2]](#2). The LFW dataset contains images of relatively high quality, whereas the XQLFW dataset contains images of variable quality. 
There is a clear difference between the two distributions, with high quality images from the LFW dataset receiving quality scores higher than 0.5, while the mixed images from XQLFW receive much lower quality scores on average. - - -![qualityDist](./quality_distribution.png) - - -[1] -B. Huang, M. Ramesh, T. Berg, and E. Learned-Miller -“Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments” -University of Massachusetts, Amherst, Tech. Rep. 07-49, -October 2007. - -[2] -M. Knoche, S. Hormann, and G. Rigoll -“Cross-Quality LFW: A Database for Analyzing Cross-Resolution Image Face Recognition in Unconstrained Environments,” in Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition (FG), 2021, pp. 1–5. - - - -## Demo - -***NOTE***: The provided demo uses [../face_detection_yunet](../face_detection_yunet) for face detection, in order to properly align the face samples, while the original implementation uses a RetinaFace(ResNet50) model, which might cause some differences between the results of the two implementations. - -To try the demo run the following commands: - - -```shell -# Assess the quality of 'image1' -python demo.py -i /path/to/image1 - -# Output all the arguments of the demo -python demo.py --help -``` - - -### Example outputs - -![ediffiqaDemo](./example_outputs/demo.jpg) - -The demo outputs the quality of the sample via terminal (print) and via image in __results.jpg__. - -## License - -All files in this directory are licensed under [CC-BY-4.0](./LICENSE). - diff --git a/models/face_image_quality_assessment_ediffiqa/demo.py b/models/face_image_quality_assessment_ediffiqa/demo.py deleted file mode 100644 index f2de4da1..00000000 --- a/models/face_image_quality_assessment_ediffiqa/demo.py +++ /dev/null @@ -1,155 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. 
- - -import sys -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -sys.path.append('../face_detection_yunet') -from yunet import YuNet - -from ediffiqa import eDifFIQA - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -REFERENCE_FACIAL_POINTS = [ - [38.2946 , 51.6963 ], - [73.5318 , 51.5014 ], - [56.0252 , 71.7366 ], - [41.5493 , 92.3655 ], - [70.729904, 92.2041 ] -] - -parser = argparse.ArgumentParser(description='eDifFIQA: Towards Efficient Face Image Quality Assessment based on Denoising Diffusion Probabilistic Models (https://github.com/LSIbabnikz/eDifFIQA).') -parser.add_argument('--input', '-i', type=str, default='./sample_image.jpg', - help='Usage: Set input to a certain image, defaults to "./sample_image.jpg".') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) - -ediffiqa_parser = parser.add_argument_group("eDifFIQA", " Parameters of eDifFIQA - For face image quality assessment ") -ediffiqa_parser.add_argument('--model_q', '-mq', type=str, default='ediffiqa_tiny_jun2024.onnx', - help="Usage: Set model type, defaults to 'ediffiqa_tiny_jun2024.onnx'.") - -yunet_parser = 
parser.add_argument_group("YuNet", " Parameters of YuNet - For face detection ") -yunet_parser.add_argument('--model_d', '-md', type=str, default='../face_detection_yunet/face_detection_yunet_2023mar.onnx', - help="Usage: Set model type, defaults to '../face_detection_yunet/face_detection_yunet_2023mar.onnx'.") -yunet_parser.add_argument('--conf_threshold', type=float, default=0.9, - help='Usage: Set the minimum needed confidence for the model to identify a face, defauts to 0.9. Smaller values may result in faster detection, but will limit accuracy. Filter out faces of confidence < conf_threshold.') -yunet_parser.add_argument('--nms_threshold', type=float, default=0.3, - help='Usage: Suppress bounding boxes of iou >= nms_threshold. Default = 0.3.') -yunet_parser.add_argument('--top_k', type=int, default=5000, - help='Usage: Keep top_k bounding boxes before NMS.') -args = parser.parse_args() - - -def visualize(image, results): - output = image.copy() - cv.putText(output, f"{results:.3f}", (0, 20), cv.FONT_HERSHEY_DUPLEX, .8, (0, 0, 255)) - - return output - - -def align_image(image, detection_data): - """ Performs face alignment on given image using the provided face landmarks (keypoints) - - Args: - image (np.array): Unaligned face image - detection_data (np.array): Detection data provided by YuNet - - Returns: - np.array: Aligned image - """ - - reference_pts = REFERENCE_FACIAL_POINTS - - ref_pts = np.float32(reference_pts) - ref_pts_shp = ref_pts.shape - - if ref_pts_shp[0] == 2: - ref_pts = ref_pts.T - - # Get source keypoints from YuNet detection data - src_pts = np.float32(detection_data[0][4:-1]).reshape(5,2) - src_pts_shp = src_pts.shape - - if src_pts_shp[0] == 2: - src_pts = src_pts.T - - tfm, _ = cv.estimateAffinePartial2D(src_pts, ref_pts, method=cv.LMEDS) - - face_img = cv.warpAffine(image, tfm, (112, 112)) - - return face_img - - -if __name__ == '__main__': - - backend_id = backend_target_pairs[args.backend_target][0] - target_id = 
backend_target_pairs[args.backend_target][1] - - # Instantiate eDifFIQA(T) (quality assesment) - model_quality = eDifFIQA( - modelPath=args.model_q, - inputSize=[112, 112], - ) - model_quality.setBackendAndTarget( - backendId=backend_id, - targetId=target_id - ) - - # Instantiate YuNet (face detection) - model_detect = YuNet( - modelPath=args.model_d, - inputSize=[320, 320], - confThreshold=args.conf_threshold, - nmsThreshold=args.nms_threshold, - topK=args.top_k, - backendId=backend_id, - targetId=target_id - ) - - # If input is an image - image = cv.imread(args.input) - h, w, _ = image.shape - - # Face Detection - model_detect.setInputSize([w, h]) - results_detect = model_detect.infer(image) - - assert results_detect.size != 0, f" Face could not be detected in: {args.input}. " - - # Face Alignment - aligned_image = align_image(image, results_detect) - - # Quality Assesment - quality = model_quality.infer(aligned_image) - quality = np.squeeze(quality).item() - - viz_image = visualize(aligned_image, quality) - - print(f" Quality score of {args.input}: {quality:.3f} ") - - print(f" Saving visualization to results.jpg. ") - cv.imwrite('results.jpg', viz_image) - diff --git a/models/face_image_quality_assessment_ediffiqa/ediffiqa.py b/models/face_image_quality_assessment_ediffiqa/ediffiqa.py deleted file mode 100644 index 6f91d207..00000000 --- a/models/face_image_quality_assessment_ediffiqa/ediffiqa.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. 
- -import numpy as np -import cv2 as cv - - -class eDifFIQA: - - def __init__(self, modelPath, inputSize=[112, 112]): - self.modelPath = modelPath - self.inputSize = tuple(inputSize) # [w, h] - - self.model = cv.dnn.readNetFromONNX(self.modelPath) - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self.model.setPreferableBackend(self._backendId) - self.model.setPreferableTarget(self._targetId) - - def infer(self, image): - # Preprocess image - image = self._preprocess(image) - # Forward - self.model.setInput(image) - quality_score = self.model.forward() - - return quality_score - - def _preprocess(self, image: cv.Mat): - # Change image from BGR to RGB - image = cv.cvtColor(image, cv.COLOR_BGR2RGB) - # Resize to (112, 112) - image = cv.resize(image, self.inputSize) - # Scale to [0, 1] and normalize by mean=0.5, std=0.5 - image = ((image / 255) - 0.5) / 0.5 - # Move channel axis - image = np.moveaxis(image[None, ...], -1, 1) - - return image diff --git a/models/face_image_quality_assessment_ediffiqa/ediffiqa_tiny_jun2024.onnx b/models/face_image_quality_assessment_ediffiqa/ediffiqa_tiny_jun2024.onnx deleted file mode 100644 index 41e32607..00000000 --- a/models/face_image_quality_assessment_ediffiqa/ediffiqa_tiny_jun2024.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9426c899cc0f01665240cb7d9e7f98e18e24e456c178326c771a43da289bfc6a -size 7272678 diff --git a/models/face_image_quality_assessment_ediffiqa/example_outputs/demo.jpg b/models/face_image_quality_assessment_ediffiqa/example_outputs/demo.jpg deleted file mode 100644 index 8cf069b6..00000000 --- a/models/face_image_quality_assessment_ediffiqa/example_outputs/demo.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:73d32e2822fcdfd8ede6184d85092f0f59db4a1ed40ad31e4ba9741b1ac5b0d3 -size 7879 diff --git 
a/models/face_image_quality_assessment_ediffiqa/quality_distribution.png b/models/face_image_quality_assessment_ediffiqa/quality_distribution.png deleted file mode 100644 index cc95457f..00000000 --- a/models/face_image_quality_assessment_ediffiqa/quality_distribution.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1e1774951ee1d6008f669a57674033893fd3d3809a6aaffe8628c3cf5d3c98c1 -size 19900 diff --git a/models/face_recognition_sface/CMakeLists.txt b/models/face_recognition_sface/CMakeLists.txt deleted file mode 100644 index cb1bac44..00000000 --- a/models/face_recognition_sface/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ -cmake_minimum_required(VERSION 3.24.0) -project(opencv_zoo_face_recognition_sface) - -set(OPENCV_VERSION "4.9.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") - -# Find OpenCV -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) - -add_executable(demo demo.cpp) -target_link_libraries(demo ${OpenCV_LIBS}) diff --git a/models/face_recognition_sface/LICENSE b/models/face_recognition_sface/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/models/face_recognition_sface/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/models/face_recognition_sface/README.md b/models/face_recognition_sface/README.md deleted file mode 100644 index fed1076e..00000000 --- a/models/face_recognition_sface/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# SFace - -SFace: Sigmoid-Constrained Hypersphere Loss for Robust Face Recognition - -Note: - -- SFace is contributed by [Yaoyao Zhong](https://github.com/zhongyy). -- Model files encode MobileFaceNet instances trained on the SFace loss function, see the [SFace paper](https://arxiv.org/abs/2205.12010) for reference. -- ONNX file conversions from [original code base](https://github.com/zhongyy/SFace) thanks to [Chengrui Wang](https://github.com/crywang). -- (As of Sep 2021) Supporting 5-landmark warping for now, see below for details. -- `face_recognition_sface_2021dec_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. - -Results of accuracy evaluation with [tools/eval](../../tools/eval). - -| Models | Accuracy | -| ----------- | -------- | -| SFace | 0.9940 | -| SFace block | 0.9942 | -| SFace quant | 0.9932 | - -\*: 'quant' stands for 'quantized'. -\*\*: 'block' stands for 'blockwise quantized'. 
- -## Demo - -***NOTE***: This demo uses [../face_detection_yunet](../face_detection_yunet) as face detector, which supports 5-landmark detection for now (2021sep). - -Run the following command to try the demo: - -### Python -```shell -# recognize on images -python demo.py --target /path/to/image1 --query /path/to/image2 - -# get help regarding various parameters -python demo.py --help -``` - -### C++ -Install latest OpenCV and CMake >= 3.24.0 to get started with: - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# detect on camera input -./build/demo -t=/path/to/target_face -# detect on an image -./build/demo -t=/path/to/target_face -q=/path/to/query_face -v -# get help messages -./build/demo -h -``` - -### Example outputs - -![sface demo](./example_outputs/demo.jpg) - -Note: Left part of the image is the target identity, the right part is the query. Green boxes are the same identity, red boxes are different identities compared to the left. - -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). 
- -## Reference - -- https://ieeexplore.ieee.org/document/9318547 -- https://github.com/zhongyy/SFace diff --git a/models/face_recognition_sface/demo.cpp b/models/face_recognition_sface/demo.cpp deleted file mode 100644 index 2bccbc3c..00000000 --- a/models/face_recognition_sface/demo.cpp +++ /dev/null @@ -1,322 +0,0 @@ -#include "opencv2/opencv.hpp" -#include "opencv2/core/types.hpp" - -#include -#include - -const std::vector> backend_target_pairs = { - {cv::dnn::DNN_BACKEND_OPENCV, cv::dnn::DNN_TARGET_CPU}, - {cv::dnn::DNN_BACKEND_CUDA, cv::dnn::DNN_TARGET_CUDA}, - {cv::dnn::DNN_BACKEND_CUDA, cv::dnn::DNN_TARGET_CUDA_FP16}, - {cv::dnn::DNN_BACKEND_TIMVX, cv::dnn::DNN_TARGET_NPU}, - {cv::dnn::DNN_BACKEND_CANN, cv::dnn::DNN_TARGET_NPU} -}; - -class YuNet -{ - public: - YuNet(const std::string& model_path, - const cv::Size& input_size, - const float conf_threshold, - const float nms_threshold, - const int top_k, - const int backend_id, - const int target_id) - { - _detector = cv::FaceDetectorYN::create( - model_path, "", input_size, conf_threshold, nms_threshold, top_k, backend_id, target_id); - } - - void setInputSize(const cv::Size& input_size) - { - _detector->setInputSize(input_size); - } - - void setTopK(const int top_k) - { - _detector->setTopK(top_k); - } - - cv::Mat infer(const cv::Mat& image) - { - cv::Mat result; - _detector->detect(image, result); - return result; - } - - private: - cv::Ptr _detector; -}; - -class SFace -{ - public: - SFace(const std::string& model_path, - const int backend_id, - const int target_id, - const int distance_type) - : _distance_type(static_cast(distance_type)) - { - _recognizer = cv::FaceRecognizerSF::create(model_path, "", backend_id, target_id); - } - - cv::Mat extractFeatures(const cv::Mat& orig_image, const cv::Mat& face_image) - { - // Align and crop detected face from original image - cv::Mat target_aligned; - _recognizer->alignCrop(orig_image, face_image, target_aligned); - // Extract features from cropped detected 
face - cv::Mat target_features; - _recognizer->feature(target_aligned, target_features); - return target_features.clone(); - } - - std::pair matchFeatures(const cv::Mat& target_features, const cv::Mat& query_features) - { - const double score = _recognizer->match(target_features, query_features, _distance_type); - if (_distance_type == cv::FaceRecognizerSF::DisType::FR_COSINE) - { - return {score, score >= _threshold_cosine}; - } - return {score, score <= _threshold_norml2}; - } - - private: - cv::Ptr _recognizer; - cv::FaceRecognizerSF::DisType _distance_type; - double _threshold_cosine = 0.363; - double _threshold_norml2 = 1.128; -}; - -cv::Mat visualize(const cv::Mat& image, - const cv::Mat& faces, - const std::vector>& matches, - const float fps = -0.1F, - const cv::Size& target_size = cv::Size(512, 512)) -{ - static const cv::Scalar matched_box_color{0, 255, 0}; - static const cv::Scalar mismatched_box_color{0, 0, 255}; - - if (fps >= 0) - { - cv::Mat output_image = image.clone(); - - const int x1 = static_cast(faces.at(0, 0)); - const int y1 = static_cast(faces.at(0, 1)); - const int w = static_cast(faces.at(0, 2)); - const int h = static_cast(faces.at(0, 3)); - const auto match = matches.at(0); - - cv::Scalar box_color = match.second ? 
matched_box_color : mismatched_box_color; - // Draw bounding box - cv::rectangle(output_image, cv::Rect(x1, y1, w, h), box_color, 2); - // Draw match score - cv::putText(output_image, cv::format("%.4f", match.first), cv::Point(x1, y1+12), cv::FONT_HERSHEY_DUPLEX, 0.30, box_color); - // Draw FPS - cv::putText(output_image, cv::format("FPS: %.2f", fps), cv::Point(0, 15), cv::FONT_HERSHEY_SIMPLEX, 0.5, box_color, 2); - - return output_image; - } - - cv::Mat output_image = cv::Mat::zeros(target_size, CV_8UC3); - - // Determine new height and width of image with aspect ratio of original image - const double ratio = std::min(static_cast(target_size.height) / image.rows, - static_cast(target_size.width) / image.cols); - const int new_height = static_cast(image.rows * ratio); - const int new_width = static_cast(image.cols * ratio); - - // Resize the original image, maintaining aspect ratio - cv::Mat resize_out; - cv::resize(image, resize_out, cv::Size(new_width, new_height), cv::INTER_LINEAR); - - // Determine top left corner in resized dimensions - const int top = std::max(0, target_size.height - new_height) / 2; - const int left = std::max(0, target_size.width - new_width) / 2; - - // Copy resized image into target output image - const cv::Rect roi = cv::Rect(cv::Point(left, top), cv::Size(new_width, new_height)); - cv::Mat out_sub_image = output_image(roi); - resize_out.copyTo(out_sub_image); - - for (int i = 0; i < faces.rows; ++i) - { - const int x1 = static_cast(faces.at(i, 0) * ratio) + left; - const int y1 = static_cast(faces.at(i, 1) * ratio) + top; - const int w = static_cast(faces.at(i, 2) * ratio); - const int h = static_cast(faces.at(i, 3) * ratio); - const auto match = matches.at(i); - - cv::Scalar box_color = match.second ? 
matched_box_color : mismatched_box_color; - // Draw bounding box - cv::rectangle(output_image, cv::Rect(x1, y1, w, h), box_color, 2); - // Draw match score - cv::putText(output_image, cv::format("%.4f", match.first), cv::Point(x1, y1+12), cv::FONT_HERSHEY_DUPLEX, 0.30, box_color); - } - return output_image; -} - -int main(int argc, char** argv) -{ - cv::CommandLineParser parser(argc, argv, - // General options - "{help h | | Print this message}" - "{backend_target b | 0 | Set DNN backend target pair:\n" - "0: (default) OpenCV implementation + CPU,\n" - "1: CUDA + GPU (CUDA),\n" - "2: CUDA + GPU (CUDA FP16),\n" - "3: TIM-VX + NPU,\n" - "4: CANN + NPU}" - "{save s | false | Whether to save result image or not}" - "{vis v | false | Whether to visualize result image or not}" - // SFace options - "{target_face t | | Set path to input image 1 (target face)}" - "{query_face q | | Set path to input image 2 (query face), omit if using camera}" - "{model m | face_recognition_sface_2021dec.onnx | Set path to the model}" - "{distance_type d | 0 | 0 = cosine, 1 = norm_l1}" - // YuNet options - "{yunet_model | ../face_detection_yunet/face_detection_yunet_2023mar.onnx | Set path to the YuNet model}" - "{detect_threshold | 0.9 | Set the minimum confidence for the model\n" - "to identify a face. 
Filter out faces of\n" - "conf < conf_threshold}" - "{nms_threshold | 0.3 | Set the threshold to suppress overlapped boxes.\n" - "Suppress boxes if IoU(box1, box2) >= nms_threshold\n" - ", the one of higher score is kept.}" - "{top_k | 5000 | Keep top_k bounding boxes before NMS}" - ); - - if (parser.has("help")) - { - parser.printMessage(); - return 0; - } - // General CLI options - const int backend = parser.get("backend_target"); - const bool save_flag = parser.get("save"); - const bool vis_flag = parser.get("vis"); - const int backend_id = backend_target_pairs.at(backend).first; - const int target_id = backend_target_pairs.at(backend).second; - - // YuNet CLI options - const std::string detector_model_path = parser.get("yunet_model"); - const float detect_threshold = parser.get("detect_threshold"); - const float nms_threshold = parser.get("nms_threshold"); - const int top_k = parser.get("top_k"); - - // Use YuNet as the detector backend - auto face_detector = YuNet( - detector_model_path, cv::Size(320, 320), detect_threshold, nms_threshold, top_k, backend_id, target_id); - - // SFace CLI options - const std::string target_path = parser.get("target_face"); - const std::string query_path = parser.get("query_face"); - const std::string model_path = parser.get("model"); - const int distance_type = parser.get("distance_type"); - - auto face_recognizer = SFace(model_path, backend_id, target_id, distance_type); - - if (target_path.empty()) - { - CV_Error(cv::Error::StsError, "Path to target image " + target_path + " not found"); - } - - cv::Mat target_image = cv::imread(target_path); - // Detect single face in target image - face_detector.setInputSize(target_image.size()); - face_detector.setTopK(1); - cv::Mat target_face = face_detector.infer(target_image); - // Extract features from target face - cv::Mat target_features = face_recognizer.extractFeatures(target_image, target_face.row(0)); - - if (!query_path.empty()) // use image - { - // Detect any faces in query 
image - cv::Mat query_image = cv::imread(query_path); - face_detector.setInputSize(query_image.size()); - face_detector.setTopK(5000); - cv::Mat query_faces = face_detector.infer(query_image); - - // Store match scores for visualization - std::vector> matches; - - for (int i = 0; i < query_faces.rows; ++i) - { - // Extract features from query face - cv::Mat query_features = face_recognizer.extractFeatures(query_image, query_faces.row(i)); - // Measure similarity of target face to query face - const auto match = face_recognizer.matchFeatures(target_features, query_features); - matches.push_back(match); - - const int x1 = static_cast(query_faces.at(i, 0)); - const int y1 = static_cast(query_faces.at(i, 1)); - const int w = static_cast(query_faces.at(i, 2)); - const int h = static_cast(query_faces.at(i, 3)); - const float conf = query_faces.at(i, 14); - - std::cout << cv::format("%d: x1=%d, y1=%d, w=%d, h=%d, conf=%.4f, match=%.4f\n", i, x1, y1, w, h, conf, match.first); - } - - if (save_flag || vis_flag) - { - auto vis_target = visualize(target_image, target_face, {{1.0, true}}); - auto vis_query = visualize(query_image, query_faces, matches); - cv::Mat output_image; - cv::hconcat(vis_target, vis_query, output_image); - - if (save_flag) - { - std::cout << "Results are saved to result.jpg\n"; - cv::imwrite("result.jpg", output_image); - } - if (vis_flag) - { - cv::namedWindow(query_path, cv::WINDOW_AUTOSIZE); - cv::imshow(query_path, output_image); - cv::waitKey(0); - } - } - } - else // use video capture - { - const int device_id = 0; - auto cap = cv::VideoCapture(device_id); - const int w = static_cast(cap.get(cv::CAP_PROP_FRAME_WIDTH)); - const int h = static_cast(cap.get(cv::CAP_PROP_FRAME_HEIGHT)); - face_detector.setInputSize(cv::Size(w, h)); - - auto tick_meter = cv::TickMeter(); - cv::Mat query_frame; - - while (cv::waitKey(1) < 0) - { - bool has_frame = cap.read(query_frame); - if (!has_frame) - { - std::cout << "No frames grabbed! 
Exiting ...\n"; - break; - } - tick_meter.start(); - // Detect faces from webcam image - cv::Mat query_faces = face_detector.infer(query_frame); - tick_meter.stop(); - - // Extract features from query face - cv::Mat query_features = face_recognizer.extractFeatures(query_frame, query_faces.row(0)); - // Measure similarity of target face to query face - const auto match = face_recognizer.matchFeatures(target_features, query_features); - - const auto fps = static_cast(tick_meter.getFPS()); - - auto vis_target = visualize(target_image, target_face, {{1.0, true}}, -0.1F, cv::Size(w, h)); - auto vis_query = visualize(query_frame, query_faces, {match}, fps); - cv::Mat output_image; - cv::hconcat(vis_target, vis_query, output_image); - - // Visualize in a new window - cv::imshow("SFace Demo", output_image); - - tick_meter.reset(); - } - } - return 0; -} diff --git a/models/face_recognition_sface/demo.py b/models/face_recognition_sface/demo.py deleted file mode 100644 index c3054b14..00000000 --- a/models/face_recognition_sface/demo.py +++ /dev/null @@ -1,156 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. 
- -import sys -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from sface import SFace - -sys.path.append('../face_detection_yunet') -from yunet import YuNet - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser( - description="SFace: Sigmoid-Constrained Hypersphere Loss for Robust Face Recognition (https://ieeexplore.ieee.org/document/9318547)") -parser.add_argument('--target', '-t', type=str, - help='Usage: Set path to the input image 1 (target face).') -parser.add_argument('--query', '-q', type=str, - help='Usage: Set path to the input image 2 (query).') -parser.add_argument('--model', '-m', type=str, default='face_recognition_sface_2021dec.onnx', - help='Usage: Set model path, defaults to face_recognition_sface_2021dec.onnx.') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--dis_type', type=int, choices=[0, 1], default=0, - help='Usage: Distance type. \'0\': cosine, \'1\': norm_l1. Defaults to \'0\'') -parser.add_argument('--save', '-s', action='store_true', - help='Usage: Specify to save file with results (i.e. 
bounding box, confidence level). Invalid in case of camera input.') -parser.add_argument('--vis', '-v', action='store_true', - help='Usage: Specify to open a new window to show results. Invalid in case of camera input.') -args = parser.parse_args() - -def visualize(img1, faces1, img2, faces2, matches, scores, target_size=[512, 512]): # target_size: (h, w) - out1 = img1.copy() - out2 = img2.copy() - matched_box_color = (0, 255, 0) # BGR - mismatched_box_color = (0, 0, 255) # BGR - - # Resize to 256x256 with the same aspect ratio - padded_out1 = np.zeros((target_size[0], target_size[1], 3)).astype(np.uint8) - h1, w1, _ = out1.shape - ratio1 = min(target_size[0] / out1.shape[0], target_size[1] / out1.shape[1]) - new_h1 = int(h1 * ratio1) - new_w1 = int(w1 * ratio1) - resized_out1 = cv.resize(out1, (new_w1, new_h1), interpolation=cv.INTER_LINEAR).astype(np.float32) - top = max(0, target_size[0] - new_h1) // 2 - bottom = top + new_h1 - left = max(0, target_size[1] - new_w1) // 2 - right = left + new_w1 - padded_out1[top : bottom, left : right] = resized_out1 - - # Draw bbox - bbox1 = faces1[0][:4] * ratio1 - x, y, w, h = bbox1.astype(np.int32) - cv.rectangle(padded_out1, (x + left, y + top), (x + left + w, y + top + h), matched_box_color, 2) - - # Resize to 256x256 with the same aspect ratio - padded_out2 = np.zeros((target_size[0], target_size[1], 3)).astype(np.uint8) - h2, w2, _ = out2.shape - ratio2 = min(target_size[0] / out2.shape[0], target_size[1] / out2.shape[1]) - new_h2 = int(h2 * ratio2) - new_w2 = int(w2 * ratio2) - resized_out2 = cv.resize(out2, (new_w2, new_h2), interpolation=cv.INTER_LINEAR).astype(np.float32) - top = max(0, target_size[0] - new_h2) // 2 - bottom = top + new_h2 - left = max(0, target_size[1] - new_w2) // 2 - right = left + new_w2 - padded_out2[top : bottom, left : right] = resized_out2 - - # Draw bbox - assert faces2.shape[0] == len(matches), "number of faces2 needs to match matches" - assert len(matches) == len(scores), "number of 
matches needs to match number of scores" - for index, match in enumerate(matches): - bbox2 = faces2[index][:4] * ratio2 - x, y, w, h = bbox2.astype(np.int32) - box_color = matched_box_color if match else mismatched_box_color - cv.rectangle(padded_out2, (x + left, y + top), (x + left + w, y + top + h), box_color, 2) - - score = scores[index] - text_color = matched_box_color if match else mismatched_box_color - cv.putText(padded_out2, "{:.2f}".format(score), (x + left, y + top - 5), cv.FONT_HERSHEY_DUPLEX, 0.4, text_color) - - return np.concatenate([padded_out1, padded_out2], axis=1) - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - # Instantiate SFace for face recognition - recognizer = SFace(modelPath=args.model, - disType=args.dis_type, - backendId=backend_id, - targetId=target_id) - # Instantiate YuNet for face detection - detector = YuNet(modelPath='../face_detection_yunet/face_detection_yunet_2023mar.onnx', - inputSize=[320, 320], - confThreshold=0.9, - nmsThreshold=0.3, - topK=5000, - backendId=backend_id, - targetId=target_id) - - img1 = cv.imread(args.target) - img2 = cv.imread(args.query) - - # Detect faces - detector.setInputSize([img1.shape[1], img1.shape[0]]) - faces1 = detector.infer(img1) - assert faces1.shape[0] > 0, 'Cannot find a face in {}'.format(args.target) - detector.setInputSize([img2.shape[1], img2.shape[0]]) - faces2 = detector.infer(img2) - assert faces2.shape[0] > 0, 'Cannot find a face in {}'.format(args.query) - - # Match - scores = [] - matches = [] - for face in faces2: - result = recognizer.match(img1, faces1[0][:-1], img2, face[:-1]) - scores.append(result[0]) - matches.append(result[1]) - - # Draw results - image = visualize(img1, faces1, img2, faces2, matches, scores) - - # Save results if save is true - if args.save: - print('Resutls saved to result.jpg\n') - cv.imwrite('result.jpg', image) - - # Visualize results in a new window - 
if args.vis: - cv.namedWindow("SFace Demo", cv.WINDOW_AUTOSIZE) - cv.imshow("SFace Demo", image) - cv.waitKey(0) diff --git a/models/face_recognition_sface/example_outputs/demo.jpg b/models/face_recognition_sface/example_outputs/demo.jpg deleted file mode 100644 index 2d49bbc6..00000000 --- a/models/face_recognition_sface/example_outputs/demo.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0f879881a598fea6fec74e047e6a1d00e36d81de63bf0ed392b628e6ab6c2fc4 -size 156282 diff --git a/models/face_recognition_sface/face_recognition_sface_2021dec.onnx b/models/face_recognition_sface/face_recognition_sface_2021dec.onnx deleted file mode 100644 index 5817e559..00000000 --- a/models/face_recognition_sface/face_recognition_sface_2021dec.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0ba9fbfa01b5270c96627c4ef784da859931e02f04419c829e83484087c34e79 -size 38696353 diff --git a/models/face_recognition_sface/face_recognition_sface_2021dec_int8.onnx b/models/face_recognition_sface/face_recognition_sface_2021dec_int8.onnx deleted file mode 100644 index 23086ad9..00000000 --- a/models/face_recognition_sface/face_recognition_sface_2021dec_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2b0e941e6f16cc048c20aee0c8e31f569118f65d702914540f7bfdc14048d78a -size 9896933 diff --git a/models/face_recognition_sface/face_recognition_sface_2021dec_int8bq.onnx b/models/face_recognition_sface/face_recognition_sface_2021dec_int8bq.onnx deleted file mode 100644 index c9acf218..00000000 --- a/models/face_recognition_sface/face_recognition_sface_2021dec_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fb143eea07838aa532d1c95df5f69899974ea0140e1fba05e94204be13ed74ee -size 10667852 diff --git a/models/face_recognition_sface/sface.py b/models/face_recognition_sface/sface.py deleted file mode 100644 index cb467071..00000000 --- 
a/models/face_recognition_sface/sface.py +++ /dev/null @@ -1,63 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. - -import numpy as np -import cv2 as cv - -class SFace: - def __init__(self, modelPath, disType=0, backendId=0, targetId=0): - self._modelPath = modelPath - self._backendId = backendId - self._targetId = targetId - self._model = cv.FaceRecognizerSF.create( - model=self._modelPath, - config="", - backend_id=self._backendId, - target_id=self._targetId) - - self._disType = disType # 0: cosine similarity, 1: Norm-L2 distance - assert self._disType in [0, 1], "0: Cosine similarity, 1: norm-L2 distance, others: invalid" - - self._threshold_cosine = 0.363 - self._threshold_norml2 = 1.128 - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self._model = cv.FaceRecognizerSF.create( - model=self._modelPath, - config="", - backend_id=self._backendId, - target_id=self._targetId) - - def _preprocess(self, image, bbox): - if bbox is None: - return image - else: - return self._model.alignCrop(image, bbox) - - def infer(self, image, bbox=None): - # Preprocess - inputBlob = self._preprocess(image, bbox) - - # Forward - features = self._model.feature(inputBlob) - return features - - def match(self, image1, face1, image2, face2): - feature1 = self.infer(image1, face1) - feature2 = self.infer(image2, face2) - - if self._disType == 0: # COSINE - cosine_score = self._model.match(feature1, feature2, self._disType) - return cosine_score, 1 if cosine_score >= self._threshold_cosine else 0 - else: # NORM_L2 - norml2_distance = self._model.match(feature1, feature2, self._disType) - 
return norml2_distance, 1 if norml2_distance <= self._threshold_norml2 else 0 diff --git a/models/facial_expression_recognition/CMakeLists.txt b/models/facial_expression_recognition/CMakeLists.txt deleted file mode 100644 index 5004f437..00000000 --- a/models/facial_expression_recognition/CMakeLists.txt +++ /dev/null @@ -1,30 +0,0 @@ -cmake_minimum_required(VERSION 3.24) -set(CMAKE_CXX_STANDARD 11) -set(project_name "opencv_zoo_face_expression_recognition") - -PROJECT (${project_name}) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) -# Find OpenCV, you may need to set OpenCV_DIR variable -# to the absolute path to the directory containing OpenCVConfig.cmake file -# via the command line or GUI - -file(GLOB SourceFile - "demo.cpp") -# If the package has been found, several variables will -# be set, you can find the full list with descriptions -# in the OpenCVConfig.cmake file. 
-# Print some message showing some of them -message(STATUS "OpenCV library status:") -message(STATUS " config: ${OpenCV_DIR}") -message(STATUS " version: ${OpenCV_VERSION}") -message(STATUS " libraries: ${OpenCV_LIBS}") -message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}") - -# Declare the executable target built from your sources -add_executable(${project_name} ${SourceFile}) - -# Link your application with OpenCV libraries -target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS}) diff --git a/models/facial_expression_recognition/README.md b/models/facial_expression_recognition/README.md deleted file mode 100644 index 7c1c9445..00000000 --- a/models/facial_expression_recognition/README.md +++ /dev/null @@ -1,59 +0,0 @@ - -# Progressive Teacher - -Progressive Teacher: [Boosting Facial Expression Recognition by A Semi-Supervised Progressive Teacher](https://scholar.google.com/citations?view_op=view_citation&hl=zh-CN&user=OCwcfAwAAAAJ&citation_for_view=OCwcfAwAAAAJ:u5HHmVD_uO8C) - -Note: -- Progressive Teacher is contributed by [Jing Jiang](https://scholar.google.com/citations?user=OCwcfAwAAAAJ&hl=zh-CN). -- [MobileFaceNet](https://link.springer.com/chapter/10.1007/978-3-319-97909-0_46) is used as the backbone and the model is able to classify seven basic facial expressions (angry, disgust, fearful, happy, neutral, sad, surprised). -- [facial_expression_recognition_mobilefacenet_2022july.onnx](https://github.com/opencv/opencv_zoo/raw/master/models/facial_expression_recognition/facial_expression_recognition_mobilefacenet_2022july.onnx) is implemented thanks to [Chengrui Wang](https://github.com/crywang). -- `facial_expression_recognition_mobilefacenet_2022july_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. - -Results of accuracy evaluation on [RAF-DB](http://whdeng.cn/RAF/model1.html). 
- -| Models | Accuracy | -|-------------|----------| -| Progressive Teacher | 88.27% | - - -## Demo - -***NOTE***: This demo uses [../face_detection_yunet](../face_detection_yunet) as face detector, which supports 5-landmark detection for now (2021sep). - -### Python -Run the following command to try the demo: -```shell -# recognize the facial expression on images -python demo.py --input /path/to/image -v -``` - -### C++ - -Install latest OpenCV and CMake >= 3.24.0 to get started with: - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# detect on camera input -./build/opencv_zoo_face_expression_recognition -# detect on an image -./build/opencv_zoo_face_expression_recognition -i=/path/to/image -# get help messages -./build/opencv_zoo_face_expression_recognition -h -``` - -### Example outputs - -Note: Zoom in to to see the recognized facial expression in the top-left corner of each face boxes. - -![fer demo](./example_outputs/selfie.jpg) - -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). 
- -## Reference - -- https://ieeexplore.ieee.org/abstract/document/9629313 diff --git a/models/facial_expression_recognition/demo.cpp b/models/facial_expression_recognition/demo.cpp deleted file mode 100644 index bba5cb3f..00000000 --- a/models/facial_expression_recognition/demo.cpp +++ /dev/null @@ -1,304 +0,0 @@ -#include "opencv2/opencv.hpp" - -#include -#include -#include -#include - -using namespace std; -using namespace cv; -using namespace dnn; - -std::vector> backend_target_pairs = { - {DNN_BACKEND_OPENCV, DNN_TARGET_CPU}, - {DNN_BACKEND_CUDA, DNN_TARGET_CUDA}, - {DNN_BACKEND_CUDA, DNN_TARGET_CUDA_FP16}, - {DNN_BACKEND_TIMVX, DNN_TARGET_NPU}, - {DNN_BACKEND_CANN, DNN_TARGET_NPU} -}; - -class FER -{ -private: - Net model; - string modelPath; - float std[5][2] = { - {38.2946, 51.6963}, - {73.5318, 51.5014}, - {56.0252, 71.7366}, - {41.5493, 92.3655}, - {70.7299, 92.2041} - }; - vector expressionEnum = { - "angry", "disgust", "fearful", - "happy", "neutral", "sad", "surprised" - }; - Mat stdPoints = Mat(5, 2, CV_32F, this->std); - Size patchSize = Size(112,112); - Scalar imageMean = Scalar(0.5,0.5,0.5); - Scalar imageStd = Scalar(0.5,0.5,0.5); - - const String inputNames = "data"; - const String outputNames = "label"; - - int backend_id; - int target_id; - -public: - FER(const string& modelPath, - int backend_id = 0, - int target_id = 0) - : modelPath(modelPath), backend_id(backend_id), target_id(target_id) - { - this->model = readNet(modelPath); - this->model.setPreferableBackend(backend_id); - this->model.setPreferableTarget(target_id); - } - - Mat preprocess(const Mat image, const Mat points) - { - // image alignment - Mat transformation = estimateAffine2D(points, this->stdPoints); - Mat aligned = Mat::zeros(this->patchSize.height, this->patchSize.width, image.type()); - warpAffine(image, aligned, transformation, this->patchSize); - - // image normalization - aligned.convertTo(aligned, CV_32F, 1.0 / 255.0); - aligned -= imageMean; - aligned /= imageStd; - - 
return blobFromImage(aligned);; - } - - String infer(const Mat image, const Mat facePoints) - { - Mat points = facePoints(Rect(4, 0, facePoints.cols-5, facePoints.rows)).reshape(2, 5); - Mat inputBlob = preprocess(image, points); - - this->model.setInput(inputBlob, this->inputNames); - Mat outputBlob = this->model.forward(this->outputNames); - - Point maxLoc; - minMaxLoc(outputBlob, nullptr, nullptr, nullptr, &maxLoc); - - return getDesc(maxLoc.x); - } - - String getDesc(int ind) - { - - if (ind >= 0 && ind < this->expressionEnum.size()) - { - return this->expressionEnum[ind]; - } - else - { - cerr << "Error: Index out of bounds." << endl; - return ""; - } - } - -}; - -class YuNet -{ -public: - YuNet(const string& model_path, - const Size& input_size = Size(320, 320), - float conf_threshold = 0.6f, - float nms_threshold = 0.3f, - int top_k = 5000, - int backend_id = 0, - int target_id = 0) - : model_path_(model_path), input_size_(input_size), - conf_threshold_(conf_threshold), nms_threshold_(nms_threshold), - top_k_(top_k), backend_id_(backend_id), target_id_(target_id) - { - model = FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_); - } - - void setBackendAndTarget(int backend_id, int target_id) - { - backend_id_ = backend_id; - target_id_ = target_id; - model = FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_); - } - - /* Overwrite the input size when creating the model. Size format: [Width, Height]. 
- */ - void setInputSize(const Size& input_size) - { - input_size_ = input_size; - model->setInputSize(input_size_); - } - - Mat infer(const Mat image) - { - Mat res; - model->detect(image, res); - return res; - } - -private: - Ptr model; - - string model_path_; - Size input_size_; - float conf_threshold_; - float nms_threshold_; - int top_k_; - int backend_id_; - int target_id_; -}; - -cv::Mat visualize(const cv::Mat& image, const cv::Mat& faces, const vector expressions, float fps = -1.f) -{ - static cv::Scalar box_color{0, 255, 0}; - static std::vector landmark_color{ - cv::Scalar(255, 0, 0), // right eye - cv::Scalar( 0, 0, 255), // left eye - cv::Scalar( 0, 255, 0), // nose tip - cv::Scalar(255, 0, 255), // right mouth corner - cv::Scalar( 0, 255, 255) // left mouth corner - }; - static cv::Scalar text_color{0, 255, 0}; - - auto output_image = image.clone(); - - if (fps >= 0) - { - cv::putText(output_image, cv::format("FPS: %.2f", fps), cv::Point(0, 15), cv::FONT_HERSHEY_SIMPLEX, 0.5, text_color, 2); - } - - for (int i = 0; i < faces.rows; ++i) - { - // Draw bounding boxes - int x1 = static_cast(faces.at(i, 0)); - int y1 = static_cast(faces.at(i, 1)); - int w = static_cast(faces.at(i, 2)); - int h = static_cast(faces.at(i, 3)); - cv::rectangle(output_image, cv::Rect(x1, y1, w, h), box_color, 2); - - // Expression as text - String exp = expressions[i]; - cv::putText(output_image, exp, cv::Point(x1, y1+12), cv::FONT_HERSHEY_DUPLEX, 0.5, text_color); - - // Draw landmarks - for (int j = 0; j < landmark_color.size(); ++j) - { - int x = static_cast(faces.at(i, 2*j+4)), y = static_cast(faces.at(i, 2*j+5)); - cv::circle(output_image, cv::Point(x, y), 2, landmark_color[j], 2); - } - } - return output_image; -} - -string keys = -"{ help h | | Print help message. 
}" -"{ model m | facial_expression_recognition_mobilefacenet_2022july.onnx | Usage: Path to the model, defaults to facial_expression_recognition_mobilefacenet_2022july.onnx }" -"{ yunet_model ym | ../face_detection_yunet/face_detection_yunet_2023mar.onnx | Usage: Path to the face detection yunet model, defaults to face_detection_yunet_2023mar.onnx }" -"{ input i | | Path to input image or video file. Skip this argument to capture frames from a camera.}" -"{ backend_target t | 0 | Choose one of the backend-target pair to run this demo:\n" - "0: (default) OpenCV implementation + CPU,\n" - "1: CUDA + GPU (CUDA),\n" - "2: CUDA + GPU (CUDA FP16),\n" - "3: TIM-VX + NPU,\n" - "4: CANN + NPU}" -"{ save s | false | Specify to save results.}" -"{ vis v | true | Specify to open a window for result visualization.}" -; - - -int main(int argc, char** argv) -{ - CommandLineParser parser(argc, argv, keys); - - parser.about("Facial Expression Recognition"); - if (parser.has("help")) - { - parser.printMessage(); - return 0; - } - - string modelPath = parser.get("model"); - string yunetModelPath = parser.get("yunet_model"); - string inputPath = parser.get("input"); - uint8_t backendTarget = parser.get("backend_target"); - bool saveFlag = parser.get("save"); - bool visFlag = parser.get("vis"); - - if (modelPath.empty()) - CV_Error(Error::StsError, "Model file " + modelPath + " not found"); - - if (yunetModelPath.empty()) - CV_Error(Error::StsError, "Face Detection Model file " + yunetModelPath + " not found"); - - YuNet faceDetectionModel(yunetModelPath); - FER expressionRecognitionModel(modelPath, backend_target_pairs[backendTarget].first, backend_target_pairs[backendTarget].second); - - VideoCapture cap; - if (!inputPath.empty()) - cap.open(samples::findFile(inputPath)); - else - cap.open(0); - - if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot opend video or file"); - - Mat frame; - static const std::string kWinName = "Facial Expression Demo"; - - - while (waitKey(1) < 0) - 
{ - cap >> frame; - - if (frame.empty()) - { - if(inputPath.empty()) - cout << "Frame is empty" << endl; - break; - } - - faceDetectionModel.setInputSize(frame.size()); - - Mat faces = faceDetectionModel.infer(frame); - vector expressions; - - for (int i = 0; i < faces.rows; ++i) - { - Mat face = faces.row(i); - String exp = expressionRecognitionModel.infer(frame, face); - expressions.push_back(exp); - - int x1 = static_cast(faces.at(i, 0)); - int y1 = static_cast(faces.at(i, 1)); - int w = static_cast(faces.at(i, 2)); - int h = static_cast(faces.at(i, 3)); - float conf = faces.at(i, 14); - - std::cout << cv::format("%d: x1=%d, y1=%d, w=%d, h=%d, conf=%.4f expression=%s\n", i, x1, y1, w, h, conf, exp.c_str()); - - } - - Mat res_frame = visualize(frame, faces, expressions); - - if(visFlag || inputPath.empty()) - { - imshow(kWinName, res_frame); - if(!inputPath.empty()) - waitKey(0); - } - if(saveFlag) - { - cout << "Results are saved to result.jpg" << endl; - - cv::imwrite("result.jpg", res_frame); - } - } - - - return 0; - -} - diff --git a/models/facial_expression_recognition/demo.py b/models/facial_expression_recognition/demo.py deleted file mode 100644 index 3b273928..00000000 --- a/models/facial_expression_recognition/demo.py +++ /dev/null @@ -1,135 +0,0 @@ -import sys -import argparse -import copy -import datetime - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from facial_fer_model import FacialExpressionRecog - -sys.path.append('../face_detection_yunet') -from yunet import YuNet - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - 
[cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser(description='Facial Expression Recognition') -parser.add_argument('--input', '-i', type=str, - help='Path to the input image. Omit for using default camera.') -parser.add_argument('--model', '-m', type=str, default='./facial_expression_recognition_mobilefacenet_2022july.onnx', - help='Path to the facial expression recognition model.') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--save', '-s', action='store_true', - help='Specify to save results. This flag is invalid when using camera.') -parser.add_argument('--vis', '-v', action='store_true', - help='Specify to open a window for result visualization. This flag is invalid when using camera.') -args = parser.parse_args() - -def visualize(image, det_res, fer_res, box_color=(0, 255, 0), text_color=(0, 0, 255)): - - print('%s %3d faces detected.' % (datetime.datetime.now(), len(det_res))) - - output = image.copy() - landmark_color = [ - (255, 0, 0), # right eye - (0, 0, 255), # left eye - (0, 255, 0), # nose tip - (255, 0, 255), # right mouth corner - (0, 255, 255) # left mouth corner - ] - - for ind, (det, fer_type) in enumerate(zip(det_res, fer_res)): - bbox = det[0:4].astype(np.int32) - fer_type = FacialExpressionRecog.getDesc(fer_type) - print("Face %2d: %d %d %d %d %s." 
% (ind, bbox[0], bbox[1], bbox[0]+bbox[2], bbox[1]+bbox[3], fer_type)) - cv.rectangle(output, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), box_color, 2) - cv.putText(output, fer_type, (bbox[0], bbox[1]+12), cv.FONT_HERSHEY_DUPLEX, 0.5, text_color) - landmarks = det[4:14].astype(np.int32).reshape((5, 2)) - for idx, landmark in enumerate(landmarks): - cv.circle(output, landmark, 2, landmark_color[idx], 2) - return output - - -def process(detect_model, fer_model, frame): - h, w, _ = frame.shape - detect_model.setInputSize([w, h]) - dets = detect_model.infer(frame) - - if dets is None: - return False, None, None - - fer_res = np.zeros(0, dtype=np.int8) - for face_points in dets: - fer_res = np.concatenate((fer_res, fer_model.infer(frame, face_points[:-1])), axis=0) - return True, dets, fer_res - - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - - detect_model = YuNet(modelPath='../face_detection_yunet/face_detection_yunet_2023mar.onnx') - - fer_model = FacialExpressionRecog(modelPath=args.model, - backendId=backend_id, - targetId=target_id) - - # If input is an image - if args.input is not None: - image = cv.imread(args.input) - - # Get detection and fer results - status, dets, fer_res = process(detect_model, fer_model, image) - - if status: - # Draw results on the input image - image = visualize(image, dets, fer_res) - - # Save results - if args.save: - cv.imwrite('result.jpg', image) - print('Results saved to result.jpg\n') - - # Visualize results in a new window - if args.vis: - cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) - cv.imshow(args.input, image) - cv.waitKey(0) - else: # Omit input to call default camera - deviceId = 0 - cap = cv.VideoCapture(deviceId) - - while cv.waitKey(1) < 0: - hasFrame, frame = cap.read() - if not hasFrame: - print('No frames grabbed!') - break - - # Get detection and fer results - status, dets, fer_res = 
process(detect_model, fer_model, frame) - - if status: - # Draw results on the input image - frame = visualize(frame, dets, fer_res) - - # Visualize results in a new window - cv.imshow('FER Demo', frame) diff --git a/models/facial_expression_recognition/example_outputs/selfie.jpg b/models/facial_expression_recognition/example_outputs/selfie.jpg deleted file mode 100644 index 5a74c3d3..00000000 --- a/models/facial_expression_recognition/example_outputs/selfie.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e3f8148169fe993afd0164200335a24301f1221a45535d7a938a0d133f2149ac -size 1233078 diff --git a/models/facial_expression_recognition/facial_expression_recognition_mobilefacenet_2022july.onnx b/models/facial_expression_recognition/facial_expression_recognition_mobilefacenet_2022july.onnx deleted file mode 100644 index 67dd024c..00000000 --- a/models/facial_expression_recognition/facial_expression_recognition_mobilefacenet_2022july.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4f61307602fc089ce20488a31d4e4614e3c9753a7d6c41578c854858b183e1a9 -size 4791892 diff --git a/models/facial_expression_recognition/facial_expression_recognition_mobilefacenet_2022july_int8.onnx b/models/facial_expression_recognition/facial_expression_recognition_mobilefacenet_2022july_int8.onnx deleted file mode 100644 index 06473970..00000000 --- a/models/facial_expression_recognition/facial_expression_recognition_mobilefacenet_2022july_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f0d7093aff10e2638c734c5f18a6a7eabd2b9239b20bdb9b8090865a6f69a1ed -size 1364007 diff --git a/models/facial_expression_recognition/facial_expression_recognition_mobilefacenet_2022july_int8bq.onnx b/models/facial_expression_recognition/facial_expression_recognition_mobilefacenet_2022july_int8bq.onnx deleted file mode 100644 index b396210d..00000000 --- 
a/models/facial_expression_recognition/facial_expression_recognition_mobilefacenet_2022july_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0c3eaf9d0a7d442c0aa3beb3234243e1cdff9ad8871fb3cec346e90874caf57d -size 1376702 diff --git a/models/facial_expression_recognition/facial_fer_model.py b/models/facial_expression_recognition/facial_fer_model.py deleted file mode 100644 index 307af559..00000000 --- a/models/facial_expression_recognition/facial_fer_model.py +++ /dev/null @@ -1,176 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2022, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. - -import numpy as np -import cv2 as cv - -class FacialExpressionRecog: - def __init__(self, modelPath, backendId=0, targetId=0): - self._modelPath = modelPath - self._backendId = backendId - self._targetId = targetId - - self._model = cv.dnn.readNet(self._modelPath) - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - self._align_model = FaceAlignment() - - self._inputNames = 'data' - self._outputNames = ['label'] - self._inputSize = [112, 112] - self._mean = np.array([0.5, 0.5, 0.5])[np.newaxis, np.newaxis, :] - self._std = np.array([0.5, 0.5, 0.5])[np.newaxis, np.newaxis, :] - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - def _preprocess(self, image, bbox): - if bbox is not None: - image = self._align_model.get_align_image(image, bbox[4:].reshape(-1, 2)) - image = cv.cvtColor(image, cv.COLOR_BGR2RGB) - image = image.astype(np.float32, 
copy=False) / 255.0 - image -= self._mean - image /= self._std - return cv.dnn.blobFromImage(image) - - def infer(self, image, bbox=None): - # Preprocess - inputBlob = self._preprocess(image, bbox) - - # Forward - self._model.setInput(inputBlob, self._inputNames) - outputBlob = self._model.forward(self._outputNames) - - # Postprocess - results = self._postprocess(outputBlob) - - return results - - def _postprocess(self, outputBlob): - result = np.argmax(outputBlob[0], axis=1).astype(np.uint8) - return result - - @staticmethod - def getDesc(ind): - _expression_enum = ["angry", "disgust", "fearful", "happy", "neutral", "sad", "surprised"] - return _expression_enum[ind] - - -class FaceAlignment(): - def __init__(self, reflective=False): - self._std_points = np.array([[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], [41.5493, 92.3655], [70.7299, 92.2041]]) - self.reflective = reflective - - def __tformfwd(self, trans, uv): - uv = np.hstack((uv, np.ones((uv.shape[0], 1)))) - xy = np.dot(uv, trans) - xy = xy[:, 0:-1] - return xy - - def __tforminv(self, trans, uv): - Tinv = np.linalg.inv(trans) - xy = self.__tformfwd(Tinv, uv) - return xy - - def __findNonreflectiveSimilarity(self, uv, xy, options=None): - options = {"K": 2} - - K = options["K"] - M = xy.shape[0] - x = xy[:, 0].reshape((-1, 1)) # use reshape to keep a column vector - y = xy[:, 1].reshape((-1, 1)) # use reshape to keep a column vector - # print '--->x, y:\n', x, y - - tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1)))) - tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1)))) - X = np.vstack((tmp1, tmp2)) - # print '--->X.shape: ', X.shape - # print 'X:\n', X - - u = uv[:, 0].reshape((-1, 1)) # use reshape to keep a column vector - v = uv[:, 1].reshape((-1, 1)) # use reshape to keep a column vector - U = np.vstack((u, v)) - # print '--->U.shape: ', U.shape - # print 'U:\n', U - - # We know that X * r = U - if np.linalg.matrix_rank(X) >= 2 * K: - r, _, _, _ = np.linalg.lstsq(X, U, 
rcond=-1) - # print(r, X, U, sep="\n") - r = np.squeeze(r) - else: - raise Exception("cp2tform:twoUniquePointsReq") - - sc = r[0] - ss = r[1] - tx = r[2] - ty = r[3] - - Tinv = np.array([[sc, -ss, 0], [ss, sc, 0], [tx, ty, 1]]) - T = np.linalg.inv(Tinv) - T[:, 2] = np.array([0, 0, 1]) - - return T, Tinv - - def __findSimilarity(self, uv, xy, options=None): - options = {"K": 2} - - # uv = np.array(uv) - # xy = np.array(xy) - - # Solve for trans1 - trans1, trans1_inv = self.__findNonreflectiveSimilarity(uv, xy, options) - - # manually reflect the xy data across the Y-axis - xyR = xy - xyR[:, 0] = -1 * xyR[:, 0] - # Solve for trans2 - trans2r, trans2r_inv = self.__findNonreflectiveSimilarity(uv, xyR, options) - - # manually reflect the tform to undo the reflection done on xyR - TreflectY = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]) - trans2 = np.dot(trans2r, TreflectY) - - # Figure out if trans1 or trans2 is better - xy1 = self.__tformfwd(trans1, uv) - norm1 = np.linalg.norm(xy1 - xy) - xy2 = self.__tformfwd(trans2, uv) - norm2 = np.linalg.norm(xy2 - xy) - - if norm1 <= norm2: - return trans1, trans1_inv - else: - trans2_inv = np.linalg.inv(trans2) - return trans2, trans2_inv - - def __get_similarity_transform(self, src_pts, dst_pts): - if self.reflective: - trans, trans_inv = self.__findSimilarity(src_pts, dst_pts) - else: - trans, trans_inv = self.__findNonreflectiveSimilarity(src_pts, dst_pts) - return trans, trans_inv - - def __cvt_tform_mat_for_cv2(self, trans): - cv2_trans = trans[:, 0:2].T - return cv2_trans - - def get_similarity_transform_for_cv2(self, src_pts, dst_pts): - trans, trans_inv = self.__get_similarity_transform(src_pts, dst_pts) - cv2_trans = self.__cvt_tform_mat_for_cv2(trans) - return cv2_trans, trans - - def get_align_image(self, image, lm5_points): - assert lm5_points is not None - tfm, trans = self.get_similarity_transform_for_cv2(lm5_points, self._std_points) - return cv.warpAffine(image, tfm, (112, 112)) diff --git 
a/models/handpose_estimation_mediapipe/LICENSE b/models/handpose_estimation_mediapipe/LICENSE deleted file mode 100644 index 7a4a3ea2..00000000 --- a/models/handpose_estimation_mediapipe/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/models/handpose_estimation_mediapipe/README.md b/models/handpose_estimation_mediapipe/README.md deleted file mode 100644 index bab4ffd8..00000000 --- a/models/handpose_estimation_mediapipe/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Hand pose estimation from MediaPipe Handpose - -This model estimates 21 hand keypoints per detected hand from [palm detector](../palm_detection_mediapipe). 
(The image below is referenced from [MediaPipe Hands Keypoints](https://github.com/tensorflow/tfjs-models/tree/master/hand-pose-detection#mediapipe-hands-keypoints-used-in-mediapipe-hands)) - -![MediaPipe Hands Keypoints](./example_outputs/hand_keypoints.png) - -Hand gesture classification demo (0-9) -![hand gestures](./example_outputs/gesture_classification.png) - -This model is converted from TFlite to ONNX using following tools: -- TFLite model to ONNX: https://github.com/onnx/tensorflow-onnx -- simplified by [onnx-simplifier](https://github.com/daquexian/onnx-simplifier) - -**Note**: -- The int8-quantized model may produce invalid results due to a significant drop of accuracy. -- Visit https://github.com/google/mediapipe/blob/master/docs/solutions/models.md#hands for models of larger scale. -- `handpose_estimation_mediapipe_2023feb_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. - -## Demo - -Run the following commands to try the demo: -```bash -# detect on camera input -python demo.py -# detect on an image -python demo.py -i /path/to/image -v -``` - -### Example outputs - -![webcam demo](./example_outputs/mphandpose_demo.webp) - -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). 
- -## Reference - -- MediaPipe Handpose: https://developers.google.com/mediapipe/solutions/vision/hand_landmarker -- MediaPipe hands model and model card: https://github.com/google/mediapipe/blob/master/docs/solutions/models.md#hands -- Handpose TFJS:https://github.com/tensorflow/tfjs-models/tree/master/handpose -- Int8 model quantized with rgb evaluation set of FreiHAND: https://lmb.informatik.uni-freiburg.de/resources/datasets/FreihandDataset.en.html diff --git a/models/handpose_estimation_mediapipe/demo.py b/models/handpose_estimation_mediapipe/demo.py deleted file mode 100644 index 37c3cf03..00000000 --- a/models/handpose_estimation_mediapipe/demo.py +++ /dev/null @@ -1,356 +0,0 @@ -import sys -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from mp_handpose import MPHandPose - -sys.path.append('../palm_detection_mediapipe') -from mp_palmdet import MPPalmDet - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser(description='Hand Pose Estimation from MediaPipe') -parser.add_argument('--input', '-i', type=str, - help='Path to the input image. 
Omit for using default camera.') -parser.add_argument('--model', '-m', type=str, default='./handpose_estimation_mediapipe_2023feb.onnx', - help='Path to the model.') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--conf_threshold', type=float, default=0.9, - help='Filter out hands of confidence < conf_threshold.') -parser.add_argument('--save', '-s', action='store_true', - help='Specify to save results. This flag is invalid when using camera.') -parser.add_argument('--vis', '-v', action='store_true', - help='Specify to open a window for result visualization. This flag is invalid when using camera.') -args = parser.parse_args() - - -def visualize(image, hands, print_result=False): - display_screen = image.copy() - display_3d = np.zeros((400, 400, 3), np.uint8) - cv.line(display_3d, (200, 0), (200, 400), (255, 255, 255), 2) - cv.line(display_3d, (0, 200), (400, 200), (255, 255, 255), 2) - cv.putText(display_3d, 'Main View', (0, 12), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255)) - cv.putText(display_3d, 'Top View', (200, 12), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255)) - cv.putText(display_3d, 'Left View', (0, 212), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255)) - cv.putText(display_3d, 'Right View', (200, 212), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255)) - is_draw = False # ensure only one hand is drawn - - def draw_lines(image, landmarks, is_draw_point=True, thickness=2): - cv.line(image, landmarks[0], landmarks[1], (255, 255, 255), thickness) - cv.line(image, landmarks[1], landmarks[2], (255, 255, 255), thickness) - cv.line(image, landmarks[2], landmarks[3], (255, 255, 255), thickness) - cv.line(image, landmarks[3], landmarks[4], (255, 255, 255), thickness) - - 
cv.line(image, landmarks[0], landmarks[5], (255, 255, 255), thickness) - cv.line(image, landmarks[5], landmarks[6], (255, 255, 255), thickness) - cv.line(image, landmarks[6], landmarks[7], (255, 255, 255), thickness) - cv.line(image, landmarks[7], landmarks[8], (255, 255, 255), thickness) - - cv.line(image, landmarks[0], landmarks[9], (255, 255, 255), thickness) - cv.line(image, landmarks[9], landmarks[10], (255, 255, 255), thickness) - cv.line(image, landmarks[10], landmarks[11], (255, 255, 255), thickness) - cv.line(image, landmarks[11], landmarks[12], (255, 255, 255), thickness) - - cv.line(image, landmarks[0], landmarks[13], (255, 255, 255), thickness) - cv.line(image, landmarks[13], landmarks[14], (255, 255, 255), thickness) - cv.line(image, landmarks[14], landmarks[15], (255, 255, 255), thickness) - cv.line(image, landmarks[15], landmarks[16], (255, 255, 255), thickness) - - cv.line(image, landmarks[0], landmarks[17], (255, 255, 255), thickness) - cv.line(image, landmarks[17], landmarks[18], (255, 255, 255), thickness) - cv.line(image, landmarks[18], landmarks[19], (255, 255, 255), thickness) - cv.line(image, landmarks[19], landmarks[20], (255, 255, 255), thickness) - - if is_draw_point: - for p in landmarks: - cv.circle(image, p, thickness, (0, 0, 255), -1) - - # used for gesture classification - gc = GestureClassification() - - for idx, handpose in enumerate(hands): - conf = handpose[-1] - bbox = handpose[0:4].astype(np.int32) - handedness = handpose[-2] - if handedness <= 0.5: - handedness_text = 'Left' - else: - handedness_text = 'Right' - landmarks_screen = handpose[4:67].reshape(21, 3).astype(np.int32) - landmarks_word = handpose[67:130].reshape(21, 3) - - gesture = gc.classify(landmarks_screen) - - # Print results - if print_result: - print('-----------hand {}-----------'.format(idx + 1)) - print('conf: {:.2f}'.format(conf)) - print('handedness: {}'.format(handedness_text)) - print('gesture: {}'.format(gesture)) - print('hand box: {}'.format(bbox)) - 
print('hand landmarks: ') - for l in landmarks_screen: - print('\t{}'.format(l)) - print('hand world landmarks: ') - for l in landmarks_word: - print('\t{}'.format(l)) - - # draw box - cv.rectangle(display_screen, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2) - # draw handedness - cv.putText(display_screen, '{}'.format(handedness_text), (bbox[0], bbox[1] + 12), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255)) - # draw gesture - cv.putText(display_screen, '{}'.format(gesture), (bbox[0], bbox[1] + 30), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255)) - # Draw line between each key points - landmarks_xy = landmarks_screen[:, 0:2] - draw_lines(display_screen, landmarks_xy, is_draw_point=False) - - # z value is relative to WRIST - for p in landmarks_screen: - r = max(5 - p[2] // 5, 0) - r = min(r, 14) - cv.circle(display_screen, np.array([p[0], p[1]]), r, (0, 0, 255), -1) - - if is_draw is False: - is_draw = True - # Main view - landmarks_xy = landmarks_word[:, [0, 1]] - landmarks_xy = (landmarks_xy * 1000 + 100).astype(np.int32) - draw_lines(display_3d, landmarks_xy, thickness=5) - - # Top view - landmarks_xz = landmarks_word[:, [0, 2]] - landmarks_xz[:, 1] = -landmarks_xz[:, 1] - landmarks_xz = (landmarks_xz * 1000 + np.array([300, 100])).astype(np.int32) - draw_lines(display_3d, landmarks_xz, thickness=5) - - # Left view - landmarks_yz = landmarks_word[:, [2, 1]] - landmarks_yz[:, 0] = -landmarks_yz[:, 0] - landmarks_yz = (landmarks_yz * 1000 + np.array([100, 300])).astype(np.int32) - draw_lines(display_3d, landmarks_yz, thickness=5) - - # Right view - landmarks_zy = landmarks_word[:, [2, 1]] - landmarks_zy = (landmarks_zy * 1000 + np.array([300, 300])).astype(np.int32) - draw_lines(display_3d, landmarks_zy, thickness=5) - - return display_screen, display_3d - -class GestureClassification: - def _vector_2_angle(self, v1, v2): - uv1 = v1 / np.linalg.norm(v1) - uv2 = v2 / np.linalg.norm(v2) - angle = np.degrees(np.arccos(np.dot(uv1, uv2))) - return angle - - def 
_hand_angle(self, hand): - angle_list = [] - # thumb - angle_ = self._vector_2_angle( - np.array([hand[0][0] - hand[2][0], hand[0][1] - hand[2][1]]), - np.array([hand[3][0] - hand[4][0], hand[3][1] - hand[4][1]]) - ) - angle_list.append(angle_) - # index - angle_ = self._vector_2_angle( - np.array([hand[0][0] - hand[6][0], hand[0][1] - hand[6][1]]), - np.array([hand[7][0] - hand[8][0], hand[7][1] - hand[8][1]]) - ) - angle_list.append(angle_) - # middle - angle_ = self._vector_2_angle( - np.array([hand[0][0] - hand[10][0], hand[0][1] - hand[10][1]]), - np.array([hand[11][0] - hand[12][0], hand[11][1] - hand[12][1]]) - ) - angle_list.append(angle_) - # ring - angle_ = self._vector_2_angle( - np.array([hand[0][0] - hand[14][0], hand[0][1] - hand[14][1]]), - np.array([hand[15][0] - hand[16][0], hand[15][1] - hand[16][1]]) - ) - angle_list.append(angle_) - # pink - angle_ = self._vector_2_angle( - np.array([hand[0][0] - hand[18][0], hand[0][1] - hand[18][1]]), - np.array([hand[19][0] - hand[20][0], hand[19][1] - hand[20][1]]) - ) - angle_list.append(angle_) - return angle_list - - def _finger_status(self, lmList): - fingerList = [] - originx, originy = lmList[0] - keypoint_list = [[5, 4], [6, 8], [10, 12], [14, 16], [18, 20]] - for point in keypoint_list: - x1, y1 = lmList[point[0]] - x2, y2 = lmList[point[1]] - if np.hypot(x2 - originx, y2 - originy) > np.hypot(x1 - originx, y1 - originy): - fingerList.append(True) - else: - fingerList.append(False) - - return fingerList - - def _classify(self, hand): - thr_angle = 65. - thr_angle_thumb = 30. - thr_angle_s = 49. 
- gesture_str = "Undefined" - - angle_list = self._hand_angle(hand) - - thumbOpen, firstOpen, secondOpen, thirdOpen, fourthOpen = self._finger_status(hand) - # Number - if (angle_list[0] > thr_angle_thumb) and (angle_list[1] > thr_angle) and (angle_list[2] > thr_angle) and ( - angle_list[3] > thr_angle) and (angle_list[4] > thr_angle) and \ - not firstOpen and not secondOpen and not thirdOpen and not fourthOpen: - gesture_str = "Zero" - elif (angle_list[0] > thr_angle_thumb) and (angle_list[1] < thr_angle_s) and (angle_list[2] > thr_angle) and ( - angle_list[3] > thr_angle) and (angle_list[4] > thr_angle) and \ - firstOpen and not secondOpen and not thirdOpen and not fourthOpen: - gesture_str = "One" - elif (angle_list[0] > thr_angle_thumb) and (angle_list[1] < thr_angle_s) and (angle_list[2] < thr_angle_s) and ( - angle_list[3] > thr_angle) and (angle_list[4] > thr_angle) and \ - not thumbOpen and firstOpen and secondOpen and not thirdOpen and not fourthOpen: - gesture_str = "Two" - elif (angle_list[0] > thr_angle_thumb) and (angle_list[1] < thr_angle_s) and (angle_list[2] < thr_angle_s) and ( - angle_list[3] < thr_angle_s) and (angle_list[4] > thr_angle) and \ - not thumbOpen and firstOpen and secondOpen and thirdOpen and not fourthOpen: - gesture_str = "Three" - elif (angle_list[0] > thr_angle_thumb) and (angle_list[1] < thr_angle_s) and (angle_list[2] < thr_angle_s) and ( - angle_list[3] < thr_angle_s) and (angle_list[4] < thr_angle) and \ - firstOpen and secondOpen and thirdOpen and fourthOpen: - gesture_str = "Four" - elif (angle_list[0] < thr_angle_s) and (angle_list[1] < thr_angle_s) and (angle_list[2] < thr_angle_s) and ( - angle_list[3] < thr_angle_s) and (angle_list[4] < thr_angle_s) and \ - thumbOpen and firstOpen and secondOpen and thirdOpen and fourthOpen: - gesture_str = "Five" - elif (angle_list[0] < thr_angle_s) and (angle_list[1] > thr_angle) and (angle_list[2] > thr_angle) and ( - angle_list[3] > thr_angle) and (angle_list[4] < thr_angle_s) and \ 
- thumbOpen and not firstOpen and not secondOpen and not thirdOpen and fourthOpen: - gesture_str = "Six" - elif (angle_list[0] < thr_angle_s) and (angle_list[1] < thr_angle) and (angle_list[2] > thr_angle) and ( - angle_list[3] > thr_angle) and (angle_list[4] > thr_angle_s) and \ - thumbOpen and firstOpen and not secondOpen and not thirdOpen and not fourthOpen: - gesture_str = "Seven" - elif (angle_list[0] < thr_angle_s) and (angle_list[1] < thr_angle) and (angle_list[2] < thr_angle) and ( - angle_list[3] > thr_angle) and (angle_list[4] > thr_angle_s) and \ - thumbOpen and firstOpen and secondOpen and not thirdOpen and not fourthOpen: - gesture_str = "Eight" - elif (angle_list[0] < thr_angle_s) and (angle_list[1] < thr_angle) and (angle_list[2] < thr_angle) and ( - angle_list[3] < thr_angle) and (angle_list[4] > thr_angle_s) and \ - thumbOpen and firstOpen and secondOpen and thirdOpen and not fourthOpen: - gesture_str = "Nine" - - return gesture_str - - def classify(self, landmarks): - hand = landmarks[:21, :2] - gesture = self._classify(hand) - return gesture - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - # palm detector - palm_detector = MPPalmDet(modelPath='../palm_detection_mediapipe/palm_detection_mediapipe_2023feb.onnx', - nmsThreshold=0.3, - scoreThreshold=0.6, - backendId=backend_id, - targetId=target_id) - # handpose detector - handpose_detector = MPHandPose(modelPath=args.model, - confThreshold=args.conf_threshold, - backendId=backend_id, - targetId=target_id) - - # If input is an image - if args.input is not None: - image = cv.imread(args.input) - - # Palm detector inference - palms = palm_detector.infer(image) - hands = np.empty(shape=(0, 132)) - - # Estimate the pose of each hand - for palm in palms: - # Handpose detector inference - handpose = handpose_detector.infer(image, palm) - if handpose is not None: - hands = np.vstack((hands, handpose)) - # 
Draw results on the input image - image, view_3d = visualize(image, hands, True) - - if len(palms) == 0: - print('No palm detected!') - else: - print('Palm detected!') - - # Save results - if args.save: - cv.imwrite('result.jpg', image) - print('Results saved to result.jpg\n') - - # Visualize results in a new window - if args.vis: - cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) - cv.imshow(args.input, image) - cv.imshow('3D HandPose Demo', view_3d) - cv.waitKey(0) - else: # Omit input to call default camera - deviceId = 0 - cap = cv.VideoCapture(deviceId) - - tm = cv.TickMeter() - while cv.waitKey(1) < 0: - hasFrame, frame = cap.read() - if not hasFrame: - print('No frames grabbed!') - break - - # Palm detector inference - palms = palm_detector.infer(frame) - hands = np.empty(shape=(0, 132)) - - tm.start() - # Estimate the pose of each hand - for palm in palms: - # Handpose detector inference - handpose = handpose_detector.infer(frame, palm) - if handpose is not None: - hands = np.vstack((hands, handpose)) - tm.stop() - # Draw results on the input image - frame, view_3d = visualize(frame, hands) - - if len(palms) == 0: - print('No palm detected!') - else: - print('Palm detected!') - cv.putText(frame, 'FPS: {:.2f}'.format(tm.getFPS()), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) - - cv.imshow('MediaPipe Handpose Detection Demo', frame) - cv.imshow('3D HandPose Demo', view_3d) - tm.reset() diff --git a/models/handpose_estimation_mediapipe/example_outputs/gesture_classification.png b/models/handpose_estimation_mediapipe/example_outputs/gesture_classification.png deleted file mode 100644 index 29c13c06..00000000 --- a/models/handpose_estimation_mediapipe/example_outputs/gesture_classification.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:759c971087aef3068527f46b401af9fdead5a8dda3f68257d445bfb279fa99a9 -size 513541 diff --git a/models/handpose_estimation_mediapipe/example_outputs/hand_keypoints.png 
b/models/handpose_estimation_mediapipe/example_outputs/hand_keypoints.png deleted file mode 100644 index b3541222..00000000 --- a/models/handpose_estimation_mediapipe/example_outputs/hand_keypoints.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:24ce896420149a7b9dc0c05e547681e3027b4a65ce3a0a6c73685ba1cc535496 -size 159592 diff --git a/models/handpose_estimation_mediapipe/example_outputs/mphandpose_demo.webp b/models/handpose_estimation_mediapipe/example_outputs/mphandpose_demo.webp deleted file mode 100644 index 1bb1441a..00000000 --- a/models/handpose_estimation_mediapipe/example_outputs/mphandpose_demo.webp +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c7873ed63f8c19a5961b53d866d71f0fe767f137e4ea6dbb6756a03b93b2b6a0 -size 1656194 diff --git a/models/handpose_estimation_mediapipe/handpose_estimation_mediapipe_2023feb.onnx b/models/handpose_estimation_mediapipe/handpose_estimation_mediapipe_2023feb.onnx deleted file mode 100644 index 3d660a34..00000000 --- a/models/handpose_estimation_mediapipe/handpose_estimation_mediapipe_2023feb.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:db0898ae717b76b075d9bf563af315b29562e11f8df5027a1ef07b02bef6d81c -size 4099621 diff --git a/models/handpose_estimation_mediapipe/handpose_estimation_mediapipe_2023feb_int8.onnx b/models/handpose_estimation_mediapipe/handpose_estimation_mediapipe_2023feb_int8.onnx deleted file mode 100644 index d6301154..00000000 --- a/models/handpose_estimation_mediapipe/handpose_estimation_mediapipe_2023feb_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e97bc1fb83b641954d33424c82b6ade719d0f73250bdb91710ecfd5f7b47e321 -size 1167628 diff --git a/models/handpose_estimation_mediapipe/handpose_estimation_mediapipe_2023feb_int8bq.onnx b/models/handpose_estimation_mediapipe/handpose_estimation_mediapipe_2023feb_int8bq.onnx deleted file mode 
100644 index 31b2bf2c..00000000 --- a/models/handpose_estimation_mediapipe/handpose_estimation_mediapipe_2023feb_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d7e63ed33e39c8b532a04a5466ac68c8680981637df288bcf93286bec08befbd -size 1222348 diff --git a/models/handpose_estimation_mediapipe/mp_handpose.py b/models/handpose_estimation_mediapipe/mp_handpose.py deleted file mode 100644 index a694c952..00000000 --- a/models/handpose_estimation_mediapipe/mp_handpose.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import cv2 as cv - -class MPHandPose: - def __init__(self, modelPath, confThreshold=0.8, backendId=0, targetId=0): - self.model_path = modelPath - self.conf_threshold = confThreshold - self.backend_id = backendId - self.target_id = targetId - - self.input_size = np.array([224, 224]) # wh - self.PALM_LANDMARK_IDS = [0, 5, 9, 13, 17, 1, 2] - self.PALM_LANDMARKS_INDEX_OF_PALM_BASE = 0 - self.PALM_LANDMARKS_INDEX_OF_MIDDLE_FINGER_BASE = 2 - self.PALM_BOX_PRE_SHIFT_VECTOR = [0, 0] - self.PALM_BOX_PRE_ENLARGE_FACTOR = 4 - self.PALM_BOX_SHIFT_VECTOR = [0, -0.4] - self.PALM_BOX_ENLARGE_FACTOR = 3 - self.HAND_BOX_SHIFT_VECTOR = [0, -0.1] - self.HAND_BOX_ENLARGE_FACTOR = 1.65 - - self.model = cv.dnn.readNet(self.model_path) - self.model.setPreferableBackend(self.backend_id) - self.model.setPreferableTarget(self.target_id) - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self.backend_id = backendId - self.target_id = targetId - self.model.setPreferableBackend(self.backend_id) - self.model.setPreferableTarget(self.target_id) - - def _cropAndPadFromPalm(self, image, palm_bbox, for_rotation = False): - # shift bounding box - wh_palm_bbox = palm_bbox[1] - palm_bbox[0] - if for_rotation: - shift_vector = self.PALM_BOX_PRE_SHIFT_VECTOR - else: - shift_vector = self.PALM_BOX_SHIFT_VECTOR - shift_vector = shift_vector * wh_palm_bbox - palm_bbox = 
palm_bbox + shift_vector - # enlarge bounding box - center_palm_bbox = np.sum(palm_bbox, axis=0) / 2 - wh_palm_bbox = palm_bbox[1] - palm_bbox[0] - if for_rotation: - enlarge_scale = self.PALM_BOX_PRE_ENLARGE_FACTOR - else: - enlarge_scale = self.PALM_BOX_ENLARGE_FACTOR - new_half_size = wh_palm_bbox * enlarge_scale / 2 - palm_bbox = np.array([ - center_palm_bbox - new_half_size, - center_palm_bbox + new_half_size]) - palm_bbox = palm_bbox.astype(np.int32) - palm_bbox[:, 0] = np.clip(palm_bbox[:, 0], 0, image.shape[1]) - palm_bbox[:, 1] = np.clip(palm_bbox[:, 1], 0, image.shape[0]) - # crop to the size of interest - image = image[palm_bbox[0][1]:palm_bbox[1][1], palm_bbox[0][0]:palm_bbox[1][0], :] - # pad to ensure conner pixels won't be cropped - if for_rotation: - side_len = np.linalg.norm(image.shape[:2]) - else: - side_len = max(image.shape[:2]) - - side_len = int(side_len) - pad_h = side_len - image.shape[0] - pad_w = side_len - image.shape[1] - left = pad_w // 2 - top = pad_h // 2 - right = pad_w - left - bottom = pad_h - top - image = cv.copyMakeBorder(image, top, bottom, left, right, cv.BORDER_CONSTANT, None, (0, 0, 0)) - bias = palm_bbox[0] - [left, top] - return image, palm_bbox, bias - - def _preprocess(self, image, palm): - ''' - Rotate input for inference. 
- Parameters: - image - input image of BGR channel order - palm_bbox - palm bounding box found in image of format [[x1, y1], [x2, y2]] (top-left and bottom-right points) - palm_landmarks - 7 landmarks (5 finger base points, 2 palm base points) of shape [7, 2] - Returns: - rotated_hand - rotated hand image for inference - rotate_palm_bbox - palm box of interest range - angle - rotate angle for hand - rotation_matrix - matrix for rotation and de-rotation - pad_bias - pad pixels of interest range - ''' - # crop and pad image to interest range - pad_bias = np.array([0, 0], dtype=np.int32) # left, top - palm_bbox = palm[0:4].reshape(2, 2) - image, palm_bbox, bias = self._cropAndPadFromPalm(image, palm_bbox, True) - image = cv.cvtColor(image, cv.COLOR_BGR2RGB) - pad_bias += bias - - # Rotate input to have vertically oriented hand image - # compute rotation - palm_bbox -= pad_bias - palm_landmarks = palm[4:18].reshape(7, 2) - pad_bias - p1 = palm_landmarks[self.PALM_LANDMARKS_INDEX_OF_PALM_BASE] - p2 = palm_landmarks[self.PALM_LANDMARKS_INDEX_OF_MIDDLE_FINGER_BASE] - radians = np.pi / 2 - np.arctan2(-(p2[1] - p1[1]), p2[0] - p1[0]) - radians = radians - 2 * np.pi * np.floor((radians + np.pi) / (2 * np.pi)) - angle = np.rad2deg(radians) - # get bbox center - center_palm_bbox = np.sum(palm_bbox, axis=0) / 2 - # get rotation matrix - rotation_matrix = cv.getRotationMatrix2D(center_palm_bbox, angle, 1.0) - # get rotated image - rotated_image = cv.warpAffine(image, rotation_matrix, (image.shape[1], image.shape[0])) - # get bounding boxes from rotated palm landmarks - homogeneous_coord = np.c_[palm_landmarks, np.ones(palm_landmarks.shape[0])] - rotated_palm_landmarks = np.array([ - np.dot(homogeneous_coord, rotation_matrix[0]), - np.dot(homogeneous_coord, rotation_matrix[1])]) - # get landmark bounding box - rotated_palm_bbox = np.array([ - np.amin(rotated_palm_landmarks, axis=1), - np.amax(rotated_palm_landmarks, axis=1)]) # [top-left, bottom-right] - - crop, 
rotated_palm_bbox, _ = self._cropAndPadFromPalm(rotated_image, rotated_palm_bbox) - blob = cv.resize(crop, dsize=self.input_size, interpolation=cv.INTER_AREA).astype(np.float32) - blob = blob / 255. - - return blob[np.newaxis, :, :, :], rotated_palm_bbox, angle, rotation_matrix, pad_bias - - def infer(self, image, palm): - # Preprocess - input_blob, rotated_palm_bbox, angle, rotation_matrix, pad_bias = self._preprocess(image, palm) - - # Forward - self.model.setInput(input_blob) - output_blob = self.model.forward(self.model.getUnconnectedOutLayersNames()) - - # Postprocess - results = self._postprocess(output_blob, rotated_palm_bbox, angle, rotation_matrix, pad_bias) - return results # [bbox_coords, landmarks_coords, conf] - - def _postprocess(self, blob, rotated_palm_bbox, angle, rotation_matrix, pad_bias): - landmarks, conf, handedness, landmarks_word = blob - - conf = conf[0][0] - if conf < self.conf_threshold: - return None - - landmarks = landmarks[0].reshape(-1, 3) # shape: (1, 63) -> (21, 3) - landmarks_word = landmarks_word[0].reshape(-1, 3) # shape: (1, 63) -> (21, 3) - - # transform coords back to the input coords - wh_rotated_palm_bbox = rotated_palm_bbox[1] - rotated_palm_bbox[0] - scale_factor = wh_rotated_palm_bbox / self.input_size - landmarks[:, :2] = (landmarks[:, :2] - self.input_size / 2) * max(scale_factor) - landmarks[:, 2] = landmarks[:, 2] * max(scale_factor) # depth scaling - coords_rotation_matrix = cv.getRotationMatrix2D((0, 0), angle, 1.0) - rotated_landmarks = np.dot(landmarks[:, :2], coords_rotation_matrix[:, :2]) - rotated_landmarks = np.c_[rotated_landmarks, landmarks[:, 2]] - rotated_landmarks_world = np.dot(landmarks_word[:, :2], coords_rotation_matrix[:, :2]) - rotated_landmarks_world = np.c_[rotated_landmarks_world, landmarks_word[:, 2]] - # invert rotation - rotation_component = np.array([ - [rotation_matrix[0][0], rotation_matrix[1][0]], - [rotation_matrix[0][1], rotation_matrix[1][1]]]) - translation_component = np.array([ - 
rotation_matrix[0][2], rotation_matrix[1][2]]) - inverted_translation = np.array([ - -np.dot(rotation_component[0], translation_component), - -np.dot(rotation_component[1], translation_component)]) - inverse_rotation_matrix = np.c_[rotation_component, inverted_translation] - # get box center - center = np.append(np.sum(rotated_palm_bbox, axis=0) / 2, 1) - original_center = np.array([ - np.dot(center, inverse_rotation_matrix[0]), - np.dot(center, inverse_rotation_matrix[1])]) - landmarks[:, :2] = rotated_landmarks[:, :2] + original_center + pad_bias - - # get bounding box from rotated_landmarks - bbox = np.array([ - np.amin(landmarks[:, :2], axis=0), - np.amax(landmarks[:, :2], axis=0)]) # [top-left, bottom-right] - # shift bounding box - wh_bbox = bbox[1] - bbox[0] - shift_vector = self.HAND_BOX_SHIFT_VECTOR * wh_bbox - bbox = bbox + shift_vector - # enlarge bounding box - center_bbox = np.sum(bbox, axis=0) / 2 - wh_bbox = bbox[1] - bbox[0] - new_half_size = wh_bbox * self.HAND_BOX_ENLARGE_FACTOR / 2 - bbox = np.array([ - center_bbox - new_half_size, - center_bbox + new_half_size]) - - # [0: 4]: hand bounding box found in image of format [x1, y1, x2, y2] (top-left and bottom-right points) - # [4: 67]: screen landmarks with format [x1, y1, z1, x2, y2 ... x21, y21, z21], z value is relative to WRIST - # [67: 130]: world landmarks with format [x1, y1, z1, x2, y2 ... 
x21, y21, z21], 3D metric x, y, z coordinate - # [130]: handedness, (left)[0, 1](right) hand - # [131]: confidence - return np.r_[bbox.reshape(-1), landmarks.reshape(-1), rotated_landmarks_world.reshape(-1), handedness[0][0], conf] diff --git a/models/human_segmentation_pphumanseg/CMakeLists.txt b/models/human_segmentation_pphumanseg/CMakeLists.txt deleted file mode 100644 index 95aec537..00000000 --- a/models/human_segmentation_pphumanseg/CMakeLists.txt +++ /dev/null @@ -1,31 +0,0 @@ -cmake_minimum_required(VERSION 3.24) -set(CMAKE_CXX_STANDARD 11) -set(project_name "opencv_zoo_human_segmentation") - -PROJECT (${project_name}) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) -# Find OpenCV, you may need to set OpenCV_DIR variable -# to the absolute path to the directory containing OpenCVConfig.cmake file -# via the command line or GUI - -file(GLOB SourceFile - "demo.cpp") -# If the package has been found, several variables will -# be set, you can find the full list with descriptions -# in the OpenCVConfig.cmake file. -# Print some message showing some of them -message(STATUS "OpenCV library status:") -message(STATUS " config: ${OpenCV_DIR}") -message(STATUS " version: ${OpenCV_VERSION}") -message(STATUS " libraries: ${OpenCV_LIBS}") -message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}") - -# Declare the executable target built from your sources -add_executable(${project_name} ${SourceFile}) - -# Link your application with OpenCV libraries -target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS}) - diff --git a/models/human_segmentation_pphumanseg/LICENSE b/models/human_segmentation_pphumanseg/LICENSE deleted file mode 100644 index 94255ff4..00000000 --- a/models/human_segmentation_pphumanseg/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ -Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/models/human_segmentation_pphumanseg/README.md b/models/human_segmentation_pphumanseg/README.md deleted file mode 100644 index fd644309..00000000 --- a/models/human_segmentation_pphumanseg/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# PPHumanSeg - -This model is ported from [PaddleHub](https://github.com/PaddlePaddle/PaddleHub) using [this script from OpenCV](https://github.com/opencv/opencv/blob/master/samples/dnn/dnn_model_runner/dnn_conversion/paddlepaddle/paddle_humanseg.py). - -**Note**: -- `human_segmentation_pphumanseg_2023mar_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. 
- -## Demo - -### Python - -Run the following command to try the demo: - -```shell -# detect on camera input -python demo.py -# detect on an image -python demo.py --input /path/to/image -v - -# get help regarding various parameters -python demo.py --help -``` - -### C++ - -Install latest OpenCV and CMake >= 3.24.0 to get started with: - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# detect on camera input -./build/opencv_zoo_human_segmentation -# detect on an image -./build/opencv_zoo_human_segmentation -i=/path/to/image -# get help messages -./build/opencv_zoo_human_segmentation -h -``` - -### Example outputs - -![webcam demo](./example_outputs/pphumanseg_demo.gif) - -![messi](./example_outputs/messi.jpg) - ---- -Results of accuracy evaluation with [tools/eval](../../tools/eval). - -| Models | Accuracy | mIoU | -| ------------------ | -------------- | ------------- | -| PPHumanSeg | 0.9656 | 0.9164 | -| PPHumanSeg block | 0.9655 | 0.9162 | -| PPHumanSeg quant | 0.7285 | 0.3642 | - - -\*: 'quant' stands for 'quantized'. -\*\*: 'block' stands for 'blockwise quantized'. - ---- -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). 
- -## Reference - -- https://arxiv.org/abs/1512.03385 -- https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/paddlepaddle -- https://github.com/PaddlePaddle/PaddleHub diff --git a/models/human_segmentation_pphumanseg/demo.cpp b/models/human_segmentation_pphumanseg/demo.cpp deleted file mode 100644 index 6408768d..00000000 --- a/models/human_segmentation_pphumanseg/demo.cpp +++ /dev/null @@ -1,226 +0,0 @@ -#include "opencv2/opencv.hpp" - -#include -#include -#include -#include - -using namespace std; -using namespace cv; -using namespace dnn; - -std::vector> backend_target_pairs = { - {DNN_BACKEND_OPENCV, DNN_TARGET_CPU}, - {DNN_BACKEND_CUDA, DNN_TARGET_CUDA}, - {DNN_BACKEND_CUDA, DNN_TARGET_CUDA_FP16}, - {DNN_BACKEND_TIMVX, DNN_TARGET_NPU}, - {DNN_BACKEND_CANN, DNN_TARGET_NPU} -}; - -class PPHS -{ -private: - Net model; - string modelPath; - - Scalar imageMean = Scalar(0.5,0.5,0.5); - Scalar imageStd = Scalar(0.5,0.5,0.5); - Size modelInputSize = Size(192, 192); - Size currentSize; - - const String inputNames = "x"; - const String outputNames = "save_infer_model/scale_0.tmp_1"; - - int backend_id; - int target_id; - -public: - PPHS(const string& modelPath, - int backend_id = 0, - int target_id = 0) - : modelPath(modelPath), backend_id(backend_id), target_id(target_id) - { - this->model = readNet(modelPath); - this->model.setPreferableBackend(backend_id); - this->model.setPreferableTarget(target_id); - } - - Mat preprocess(const Mat image) - { - this->currentSize = image.size(); - Mat preprocessed = Mat::zeros(this->modelInputSize, image.type()); - resize(image, preprocessed, this->modelInputSize); - - // image normalization - preprocessed.convertTo(preprocessed, CV_32F, 1.0 / 255.0); - preprocessed -= imageMean; - preprocessed /= imageStd; - - return blobFromImage(preprocessed);; - } - - Mat infer(const Mat image) - { - Mat inputBlob = preprocess(image); - - this->model.setInput(inputBlob, this->inputNames); - Mat outputBlob = 
this->model.forward(this->outputNames); - - return postprocess(outputBlob); - } - - Mat postprocess(Mat image) - { - reduceArgMax(image,image,1); - image = image.reshape(1,image.size[2]); - image.convertTo(image, CV_32F); - resize(image, image, this->currentSize, 0, 0, INTER_LINEAR); - image.convertTo(image, CV_8U); - - return image; - } - -}; - - -vector getColorMapList(int num_classes) { - num_classes += 1; - - vector cm(num_classes*3, 0); - - int lab, j; - - for (int i = 0; i < num_classes; ++i) { - lab = i; - j = 0; - - while(lab){ - cm[i] |= (((lab >> 0) & 1) << (7 - j)); - cm[i+num_classes] |= (((lab >> 1) & 1) << (7 - j)); - cm[i+2*num_classes] |= (((lab >> 2) & 1) << (7 - j)); - ++j; - lab >>= 3; - } - - } - - cm.erase(cm.begin(), cm.begin()+3); - - return cm; -}; - -Mat visualize(const Mat& image, const Mat& result, float fps = -1.f, float weight = 0.4) -{ - const Scalar& text_color = Scalar(0, 255, 0); - Mat output_image = image.clone(); - - vector color_map = getColorMapList(256); - - Mat cmm(color_map); - - cmm = cmm.reshape(1,{3,256}); - - if (fps >= 0) - { - putText(output_image, format("FPS: %.2f", fps), Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, text_color, 2); - } - - Mat c1, c2, c3; - - LUT(result, cmm.row(0), c1); - LUT(result, cmm.row(1), c2); - LUT(result, cmm.row(2), c3); - - Mat pseudo_img; - merge(std::vector{c1,c2,c3}, pseudo_img); - - addWeighted(output_image, weight, pseudo_img, 1 - weight, 0, output_image); - - return output_image; -}; - -string keys = -"{ help h | | Print help message. }" -"{ model m | human_segmentation_pphumanseg_2023mar.onnx | Usage: Path to the model, defaults to human_segmentation_pphumanseg_2023mar.onnx }" -"{ input i | | Path to input image or video file. 
Skip this argument to capture frames from a camera.}" -"{ backend_target t | 0 | Choose one of the backend-target pair to run this demo:\n" - "0: (default) OpenCV implementation + CPU,\n" - "1: CUDA + GPU (CUDA),\n" - "2: CUDA + GPU (CUDA FP16),\n" - "3: TIM-VX + NPU,\n" - "4: CANN + NPU}" -"{ save s | false | Specify to save results.}" -"{ vis v | true | Specify to open a window for result visualization.}" -; - - -int main(int argc, char** argv) -{ - CommandLineParser parser(argc, argv, keys); - - parser.about("Human Segmentation"); - if (parser.has("help")) - { - parser.printMessage(); - return 0; - } - - string modelPath = parser.get("model"); - string inputPath = parser.get("input"); - uint8_t backendTarget = parser.get("backend_target"); - bool saveFlag = parser.get("save"); - bool visFlag = parser.get("vis"); - - if (modelPath.empty()) - CV_Error(Error::StsError, "Model file " + modelPath + " not found"); - - PPHS humanSegmentationModel(modelPath, backend_target_pairs[backendTarget].first, backend_target_pairs[backendTarget].second); - - VideoCapture cap; - if (!inputPath.empty()) - cap.open(samples::findFile(inputPath)); - else - cap.open(0); - - if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot opend video or file"); - - Mat frame; - Mat result; - static const std::string kWinName = "Human Segmentation Demo"; - TickMeter tm; - - while (waitKey(1) < 0) - { - cap >> frame; - - if (frame.empty()) - { - if(inputPath.empty()) - cout << "Frame is empty" << endl; - break; - } - - tm.start(); - result = humanSegmentationModel.infer(frame); - tm.stop(); - - Mat res_frame = visualize(frame, result, tm.getFPS()); - - if(visFlag || inputPath.empty()) - { - imshow(kWinName, res_frame); - if(!inputPath.empty()) - waitKey(0); - } - if(saveFlag) - { - cout << "Results are saved to result.jpg" << endl; - - imwrite("result.jpg", res_frame); - } - } - - return 0; -} - diff --git a/models/human_segmentation_pphumanseg/demo.py 
b/models/human_segmentation_pphumanseg/demo.py deleted file mode 100644 index df28f570..00000000 --- a/models/human_segmentation_pphumanseg/demo.py +++ /dev/null @@ -1,162 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. - -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from pphumanseg import PPHumanSeg - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser(description='PPHumanSeg (https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.2/contrib/PP-HumanSeg)') -parser.add_argument('--input', '-i', type=str, - help='Usage: Set input path to a certain image, omit if using camera.') -parser.add_argument('--model', '-m', type=str, default='human_segmentation_pphumanseg_2023mar.onnx', - help='Usage: Set model path, defaults to human_segmentation_pphumanseg_2023mar.onnx.') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x 
in range(len(backend_target_pairs))])) -parser.add_argument('--save', '-s', action='store_true', - help='Usage: Specify to save a file with results. Invalid in case of camera input.') -parser.add_argument('--vis', '-v', action='store_true', - help='Usage: Specify to open a new window to show results. Invalid in case of camera input.') -args = parser.parse_args() - -def get_color_map_list(num_classes): - """ - Returns the color map for visualizing the segmentation mask, - which can support arbitrary number of classes. - - Args: - num_classes (int): Number of classes. - - Returns: - (list). The color map. - """ - - num_classes += 1 - color_map = num_classes * [0, 0, 0] - for i in range(0, num_classes): - j = 0 - lab = i - while lab: - color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j)) - color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j)) - color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j)) - j += 1 - lab >>= 3 - color_map = color_map[3:] - return color_map - -def visualize(image, result, weight=0.6, fps=None): - """ - Convert predict result to color image, and save added image. - - Args: - image (str): The input image. - result (np.ndarray): The predict result of image. - weight (float): The image weight of visual image, and the result weight is (1 - weight). Default: 0.6 - fps (str): The FPS to be drawn on the input image. - - Returns: - vis_result (np.ndarray): The visualized result. 
- """ - color_map = get_color_map_list(256) - color_map = np.array(color_map).reshape(256, 3).astype(np.uint8) - - # Use OpenCV LUT for color mapping - c1 = cv.LUT(result, color_map[:, 0]) - c2 = cv.LUT(result, color_map[:, 1]) - c3 = cv.LUT(result, color_map[:, 2]) - pseudo_img = np.dstack((c1, c2, c3)) - - vis_result = cv.addWeighted(image, weight, pseudo_img, 1 - weight, 0) - - if fps is not None: - cv.putText(vis_result, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) - - return vis_result - - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - # Instantiate PPHumanSeg - model = PPHumanSeg(modelPath=args.model, backendId=backend_id, targetId=target_id) - - if args.input is not None: - # Read image and resize to 192x192 - image = cv.imread(args.input) - h, w, _ = image.shape - image = cv.cvtColor(image, cv.COLOR_BGR2RGB) - _image = cv.resize(image, dsize=(192, 192)) - - # Inference - result = model.infer(_image) - result = cv.resize(result[0, :, :], dsize=(w, h), interpolation=cv.INTER_NEAREST) - - # Draw results on the input image - image = visualize(image, result) - - # Save results if save is true - if args.save: - print('Results saved to result.jpg\n') - cv.imwrite('result.jpg', image) - - # Visualize results in a new window - if args.vis: - cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) - cv.imshow(args.input, image) - cv.waitKey(0) - else: # Omit input to call default camera - deviceId = 0 - cap = cv.VideoCapture(deviceId) - w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH)) - h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) - - tm = cv.TickMeter() - while cv.waitKey(1) < 0: - hasFrame, frame = cap.read() - if not hasFrame: - print('No frames grabbed!') - break - - _frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB) - _frame = cv.resize(_frame, dsize=(192, 192)) - - # Inference - tm.start() - result = model.infer(_frame) - tm.stop() - result = 
cv.resize(result[0, :, :], dsize=(w, h), interpolation=cv.INTER_NEAREST) - - # Draw results on the input image - frame = visualize(frame, result, fps=tm.getFPS()) - - # Visualize results in a new window - cv.imshow('PPHumanSeg Demo', frame) - - tm.reset() - diff --git a/models/human_segmentation_pphumanseg/example_outputs/messi.jpg b/models/human_segmentation_pphumanseg/example_outputs/messi.jpg deleted file mode 100644 index 6a152708..00000000 --- a/models/human_segmentation_pphumanseg/example_outputs/messi.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:92fff88f42578a1bb46755ac11530fdcbb7b5f9a06ab478a45b1995feb1cd4e6 -size 62004 diff --git a/models/human_segmentation_pphumanseg/example_outputs/pphumanseg_demo.gif b/models/human_segmentation_pphumanseg/example_outputs/pphumanseg_demo.gif deleted file mode 100644 index 122ab0d4..00000000 --- a/models/human_segmentation_pphumanseg/example_outputs/pphumanseg_demo.gif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a386278ce825418231a371a0a2990f63ab0dc976bf03164517d9491150d34400 -size 548204 diff --git a/models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2023mar.onnx b/models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2023mar.onnx deleted file mode 100644 index d2921c1d..00000000 --- a/models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2023mar.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:552d8a984054e59b5d773d24b9b12022b22046ceb2bbc4c9aaeaceb36a9ddf24 -size 6163938 diff --git a/models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2023mar_int8.onnx b/models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2023mar_int8.onnx deleted file mode 100644 index d1eea02a..00000000 --- a/models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2023mar_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version 
https://git-lfs.github.com/spec/v1 -oid sha256:510775a9e23c1a53c34013a2fa3ac1906bfd7b789d55c07e6b49f30bb669007d -size 1607872 diff --git a/models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2023mar_int8bq.onnx b/models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2023mar_int8bq.onnx deleted file mode 100644 index d925e472..00000000 --- a/models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2023mar_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1da023e95e3adbeef16ac2862767b53b86a743ff26a34692e0833d7e088f0231 -size 1734724 diff --git a/models/human_segmentation_pphumanseg/pphumanseg.py b/models/human_segmentation_pphumanseg/pphumanseg.py deleted file mode 100644 index be41351a..00000000 --- a/models/human_segmentation_pphumanseg/pphumanseg.py +++ /dev/null @@ -1,69 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. 
- -import numpy as np -import cv2 as cv - -class PPHumanSeg: - def __init__(self, modelPath, backendId=0, targetId=0): - self._modelPath = modelPath - self._backendId = backendId - self._targetId = targetId - - self._model = cv.dnn.readNet(self._modelPath) - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - self._inputNames = '' - self._outputNames = ['save_infer_model/scale_0.tmp_1'] - self._currentInputSize = None - self._inputSize = [192, 192] - self._mean = np.array([0.5, 0.5, 0.5])[np.newaxis, np.newaxis, :] - self._std = np.array([0.5, 0.5, 0.5])[np.newaxis, np.newaxis, :] - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - def _preprocess(self, image): - - image = cv.cvtColor(image, cv.COLOR_BGR2RGB) - - self._currentInputSize = image.shape - image = cv.resize(image, (192, 192)) - - image = image.astype(np.float32, copy=False) / 255.0 - image -= self._mean - image /= self._std - return cv.dnn.blobFromImage(image) - - def infer(self, image): - - # Preprocess - inputBlob = self._preprocess(image) - - # Forward - self._model.setInput(inputBlob, self._inputNames) - outputBlob = self._model.forward() - - # Postprocess - results = self._postprocess(outputBlob) - - return results - - def _postprocess(self, outputBlob): - - outputBlob = outputBlob[0] - outputBlob = cv.resize(outputBlob.transpose(1,2,0), (self._currentInputSize[1], self._currentInputSize[0]), interpolation=cv.INTER_LINEAR).transpose(2,0,1)[np.newaxis, ...] 
- - result = np.argmax(outputBlob, axis=1).astype(np.uint8) - return result diff --git a/models/image_classification_mobilenet/CMakeLists.txt b/models/image_classification_mobilenet/CMakeLists.txt deleted file mode 100644 index b4005c04..00000000 --- a/models/image_classification_mobilenet/CMakeLists.txt +++ /dev/null @@ -1,29 +0,0 @@ -cmake_minimum_required(VERSION 3.2) -set(project_name "opencv_zoo_image_classification_mobilenet") - -PROJECT (${project_name}) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) -# Find OpenCV, you may need to set OpenCV_DIR variable -# to the absolute path to the directory containing OpenCVConfig.cmake file -# via the command line or GUI - -file(GLOB SourceFile - "demo.cpp") -# If the package has been found, several variables will -# be set, you can find the full list with descriptions -# in the OpenCVConfig.cmake file. -# Print some message showing some of them -message(STATUS "OpenCV library status:") -message(STATUS " config: ${OpenCV_DIR}") -message(STATUS " version: ${OpenCV_VERSION}") -message(STATUS " libraries: ${OpenCV_LIBS}") -message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}") - -# Declare the executable target built from your sources -add_executable(${project_name} ${SourceFile}) - -# Link your application with OpenCV libraries -target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS}) diff --git a/models/image_classification_mobilenet/LICENSE b/models/image_classification_mobilenet/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/models/image_classification_mobilenet/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/models/image_classification_mobilenet/README.md b/models/image_classification_mobilenet/README.md deleted file mode 100644 index a30e0094..00000000 --- a/models/image_classification_mobilenet/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# MobileNets - -MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications - -MobileNetV2: Inverted Residuals and Linear Bottlenecks - -**Note**: -- `image_classification_mobilenetvX_2022apr_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. - -Results of accuracy evaluation with [tools/eval](../../tools/eval). 
- -| Models | Top-1 Accuracy | Top-5 Accuracy | -| ------------------ | -------------- | -------------- | -| MobileNet V1 | 67.64 | 87.97 | -| MobileNet V1 block | 67.21 | 87.62 | -| MobileNet V1 quant | 55.53 | 78.74 | -| MobileNet V2 | 69.44 | 89.23 | -| MobileNet V2 block | 68.66 | 88.90 | -| MobileNet V2 quant | 68.37 | 88.56 | - -\*: 'quant' stands for 'quantized'. -\*\*: 'block' stands for 'blockwise quantized'. - -## Demo - -### Python - -Run the following command to try the demo: - -```shell -# MobileNet V1 -python demo.py --input /path/to/image -# MobileNet V2 -python demo.py --input /path/to/image --model v2 - -# get help regarding various parameters -python demo.py --help -``` - -### C++ - -Install latest OpenCV and CMake >= 3.24.0 to get started with: - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# detect on camera input -./build/opencv_zoo_image_classification_mobilenet -# detect on an image -./build/opencv_zoo_image_classification_mobilenet -m=/path/to/model -i=/path/to/image -v -# get help messages -./build/opencv_zoo_image_classification_mobilenet -h -``` - - -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). 
- -## Reference - -- MobileNet V1: https://arxiv.org/abs/1704.04861 -- MobileNet V2: https://arxiv.org/abs/1801.04381 -- MobileNet V1 weight and scripts for training: https://github.com/wjc852456/pytorch-mobilenet-v1 -- MobileNet V2 weight: https://github.com/onnx/models/tree/main/vision/classification/mobilenet diff --git a/models/image_classification_mobilenet/demo.cpp b/models/image_classification_mobilenet/demo.cpp deleted file mode 100644 index 22612877..00000000 --- a/models/image_classification_mobilenet/demo.cpp +++ /dev/null @@ -1,133 +0,0 @@ -#include -#include -#include - -#include -#include "labelsimagenet1k.h" - -using namespace std; -using namespace cv; -using namespace dnn; - -vector< pair > backendTargetPairs = { - std::make_pair(dnn::DNN_BACKEND_OPENCV, dnn::DNN_TARGET_CPU), - std::make_pair(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA), - std::make_pair(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA_FP16), - std::make_pair(dnn::DNN_BACKEND_TIMVX, dnn::DNN_TARGET_NPU), - std::make_pair(dnn::DNN_BACKEND_CANN, dnn::DNN_TARGET_NPU) }; - - -std::string keys = -"{ help h | | Print help message. }" -"{ model m | image_classification_mobilenetv1_2022apr.onnx | Usage: Set model type, defaults to image_classification_mobilenetv1_2022apr.onnx (v1) }" -"{ input i | | Path to input image or video file. 
Skip this argument to capture frames from a camera.}" -"{ initial_width | 0 | Preprocess input image by initial resizing to a specific width.}" -"{ initial_height | 0 | Preprocess input image by initial resizing to a specific height.}" -"{ rgb | true | swap R and B plane.}" -"{ crop | false | Preprocess input image by center cropping.}" -"{ vis v | true | Usage: Specify to open a new window to show results.}" -"{ backend bt | 0 | Choose one of computation backends: " -"0: (default) OpenCV implementation + CPU, " -"1: CUDA + GPU (CUDA), " -"2: CUDA + GPU (CUDA FP16), " -"3: TIM-VX + NPU, " -"4: CANN + NPU}"; - - -int main(int argc, char** argv) -{ - CommandLineParser parser(argc, argv, keys); - - parser.about("Use this script to run classification deep learning networks in opencv Zoo using OpenCV."); - if (parser.has("help")) - { - parser.printMessage(); - return 0; - } - - int rszWidth = parser.get("initial_width"); - int rszHeight = parser.get("initial_height"); - bool swapRB = parser.get("rgb"); - bool crop = parser.get("crop"); - bool vis = parser.get("vis"); - String model = parser.get("model"); - int backendTargetid = parser.get("backend"); - - if (model.empty()) - { - CV_Error(Error::StsError, "Model file " + model + " not found"); - } - vector labels = getLabelsImagenet1k(); - - Net net = readNet(samples::findFile(model)); - net.setPreferableBackend(backendTargetPairs[backendTargetid].first); - net.setPreferableTarget(backendTargetPairs[backendTargetid].second); - //! 
[Open a video file or an image file or a camera stream] - VideoCapture cap; - if (parser.has("input")) - cap.open(samples::findFile(parser.get("input"))); - else - cap.open(0); - if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot open video or file"); - Mat frame, blob; - static const std::string kWinName = model; - int nbInference = 0; - while (waitKey(1) < 0) - { - cap >> frame; - if (frame.empty()) - { - cout << "Frame is empty" << endl; - waitKey(); - break; - } - - if (rszWidth != 0 && rszHeight != 0) - { - resize(frame, frame, Size(rszWidth, rszHeight)); - } - Image2BlobParams paramMobilenet; - paramMobilenet.datalayout = DNN_LAYOUT_NCHW; - paramMobilenet.ddepth = CV_32F; - paramMobilenet.mean = Scalar(123.675, 116.28, 103.53); - paramMobilenet.scalefactor = Scalar(1 / (255. * 0.229), 1 / (255. * 0.224), 1 / (255. * 0.225)); - paramMobilenet.size = Size(224, 224); - paramMobilenet.swapRB = swapRB; - if (crop) - paramMobilenet.paddingmode = DNN_PMODE_CROP_CENTER; - else - paramMobilenet.paddingmode = DNN_PMODE_NULL; - //! [Create a 4D blob from a frame] - blobFromImageWithParams(frame, blob, paramMobilenet); - - //! [Set input blob] - net.setInput(blob); - Mat prob = net.forward(); - - //! [Get a class with a highest score] - Point classIdPoint; - double confidence; - minMaxLoc(prob.reshape(1, 1), 0, &confidence, 0, &classIdPoint); - int classId = classIdPoint.x; - std::string label = format("%s: %.4f", (labels.empty() ? format("Class #%d", classId).c_str() : - labels[classId].c_str()), - confidence); - if (vis) - { - putText(frame, label, Point(0, 55), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0)); - imshow(kWinName, frame); - } - else - { - cout << label << endl; - nbInference++; - if (nbInference > 100) - { - cout << nbInference << " inference made. 
Demo existing" << endl; - break; - } - } - } - return 0; -} diff --git a/models/image_classification_mobilenet/demo.py b/models/image_classification_mobilenet/demo.py deleted file mode 100644 index 4aa990af..00000000 --- a/models/image_classification_mobilenet/demo.py +++ /dev/null @@ -1,56 +0,0 @@ -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from mobilenet import MobileNet - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser(description='Demo for MobileNet V1 & V2.') -parser.add_argument('--input', '-i', type=str, - help='Usage: Set input path to a certain image, omit if using camera.') -parser.add_argument('--model', '-m', type=str, default='image_classification_mobilenetv1_2022apr.onnx', - help='Usage: Set model type, defaults to image_classification_mobilenetv1_2022apr.onnx (v1).') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--top_k', type=int, default=1, - help='Usage: Get top k predictions.') -args = parser.parse_args() - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - 
target_id = backend_target_pairs[args.backend_target][1] - top_k = args.top_k - # Instantiate MobileNet - model = MobileNet(modelPath=args.model, topK=top_k, backendId=backend_id, targetId=target_id) - - # Read image and get a 224x224 crop from a 256x256 resized - image = cv.imread(args.input) - image = cv.cvtColor(image, cv.COLOR_BGR2RGB) - image = cv.resize(image, dsize=(256, 256)) - image = image[16:240, 16:240, :] - - # Inference - result = model.infer(image) - - # Print result - print('label: {}'.format(result)) diff --git a/models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr.onnx b/models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr.onnx deleted file mode 100644 index a7dd869f..00000000 --- a/models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cf4c563b54e7144d7c2803ee22a6471e68ae87377554724d501624f602395bea -size 16890136 diff --git a/models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr_int8.onnx b/models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr_int8.onnx deleted file mode 100644 index 240b151a..00000000 --- a/models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ef32077ef2f8f37ddafeeb1d29a0662e7a794d61190552730769a96b7d58e6df -size 4321622 diff --git a/models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr_int8bq.onnx b/models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr_int8bq.onnx deleted file mode 100644 index 1949a473..00000000 --- a/models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:4437385b4908011f6e3019ae253031272c19522ae1cbdff8374bceffe262a5ee -size 4599388 diff --git a/models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr.onnx b/models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr.onnx deleted file mode 100644 index 20731372..00000000 --- a/models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c0c3f76d93fa3fd6580652a45618618a220fced18babf65774ed169de0432ad5 -size 13964571 diff --git a/models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr_int8.onnx b/models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr_int8.onnx deleted file mode 100644 index 63db23c8..00000000 --- a/models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cc028fe6cae7bc11a4ff53cfc9b79c920e8be65ce33a904ec3e2a8f66d77f95f -size 3655033 diff --git a/models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr_int8bq.onnx b/models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr_int8bq.onnx deleted file mode 100644 index c744fb6a..00000000 --- a/models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3fe457eda49d71b664918ae87349bdb3e3815a56bb82cdb7f477bc83597f4313 -size 3872948 diff --git a/models/image_classification_mobilenet/labelsimagenet1k.h b/models/image_classification_mobilenet/labelsimagenet1k.h deleted file mode 100644 index 4ca30280..00000000 --- a/models/image_classification_mobilenet/labelsimagenet1k.h +++ /dev/null @@ -1,1010 +0,0 @@ -#include -#include - -std::vector getLabelsImagenet1k() -{ - std::vector labels = { - "tench, Tinca tinca", - "goldfish, Carassius 
auratus", - "great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias", - "tiger shark, Galeocerdo cuvieri", - "hammerhead, hammerhead shark", - "electric ray, crampfish, numbfish, torpedo", - "stingray", - "cock", - "hen", - "ostrich, Struthio camelus", - "brambling, Fringilla montifringilla", - "goldfinch, Carduelis carduelis", - "house finch, linnet, Carpodacus mexicanus", - "junco, snowbird", - "indigo bunting, indigo finch, indigo bird, Passerina cyanea", - "robin, American robin, Turdus migratorius", - "bulbul", - "jay", - "magpie", - "chickadee", - "water ouzel, dipper", - "kite", - "bald eagle, American eagle, Haliaeetus leucocephalus", - "vulture", - "great grey owl, great gray owl, Strix nebulosa", - "European fire salamander, Salamandra salamandra", - "common newt, Triturus vulgaris", - "eft", - "spotted salamander, Ambystoma maculatum", - "axolotl, mud puppy, Ambystoma mexicanum", - "bullfrog, Rana catesbeiana", - "tree frog, tree-frog", - "tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui", - "loggerhead, loggerhead turtle, Caretta caretta", - "leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea", - "mud turtle", - "terrapin", - "box turtle, box tortoise", - "banded gecko", - "common iguana, iguana, Iguana iguana", - "American chameleon, anole, Anolis carolinensis", - "whiptail, whiptail lizard", - "agama", - "frilled lizard, Chlamydosaurus kingi", - "alligator lizard", - "Gila monster, Heloderma suspectum", - "green lizard, Lacerta viridis", - "African chameleon, Chamaeleo chamaeleon", - "Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis", - "African crocodile, Nile crocodile, Crocodylus niloticus", - "American alligator, Alligator mississipiensis", - "triceratops", - "thunder snake, worm snake, Carphophis amoenus", - "ringneck snake, ring-necked snake, ring snake", - "hognose snake, puff adder, sand viper", - "green snake, grass snake", - "king snake, kingsnake", 
- "garter snake, grass snake", - "water snake", - "vine snake", - "night snake, Hypsiglena torquata", - "boa constrictor, Constrictor constrictor", - "rock python, rock snake, Python sebae", - "Indian cobra, Naja naja", - "green mamba", - "sea snake", - "horned viper, cerastes, sand viper, horned asp, Cerastes cornutus", - "diamondback, diamondback rattlesnake, Crotalus adamanteus", - "sidewinder, horned rattlesnake, Crotalus cerastes", - "trilobite", - "harvestman, daddy longlegs, Phalangium opilio", - "scorpion", - "black and gold garden spider, Argiope aurantia", - "barn spider, Araneus cavaticus", - "garden spider, Aranea diademata", - "black widow, Latrodectus mactans", - "tarantula", - "wolf spider, hunting spider", - "tick", - "centipede", - "black grouse", - "ptarmigan", - "ruffed grouse, partridge, Bonasa umbellus", - "prairie chicken, prairie grouse, prairie fowl", - "peacock", - "quail", - "partridge", - "African grey, African gray, Psittacus erithacus", - "macaw", - "sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita", - "lorikeet", - "coucal", - "bee eater", - "hornbill", - "hummingbird", - "jacamar", - "toucan", - "drake", - "red-breasted merganser, Mergus serrator", - "goose", - "black swan, Cygnus atratus", - "tusker", - "echidna, spiny anteater, anteater", - "platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus", - "wallaby, brush kangaroo", - "koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus", - "wombat", - "jellyfish", - "sea anemone, anemone", - "brain coral", - "flatworm, platyhelminth", - "nematode, nematode worm, roundworm", - "conch", - "snail", - "slug", - "sea slug, nudibranch", - "chiton, coat-of-mail shell, sea cradle, polyplacophore", - "chambered nautilus, pearly nautilus, nautilus", - "Dungeness crab, Cancer magister", - "rock crab, Cancer irroratus", - "fiddler crab", - "king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica", - 
"American lobster, Northern lobster, Maine lobster, Homarus americanus", - "spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", - "crayfish, crawfish, crawdad, crawdaddy", - "hermit crab", - "isopod", - "white stork, Ciconia ciconia", - "black stork, Ciconia nigra", - "spoonbill", - "flamingo", - "little blue heron, Egretta caerulea", - "American egret, great white heron, Egretta albus", - "bittern", - "crane", - "limpkin, Aramus pictus", - "European gallinule, Porphyrio porphyrio", - "American coot, marsh hen, mud hen, water hen, Fulica americana", - "bustard", - "ruddy turnstone, Arenaria interpres", - "red-backed sandpiper, dunlin, Erolia alpina", - "redshank, Tringa totanus", - "dowitcher", - "oystercatcher, oyster catcher", - "pelican", - "king penguin, Aptenodytes patagonica", - "albatross, mollymawk", - "grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus", - "killer whale, killer, orca, grampus, sea wolf, Orcinus orca", - "dugong, Dugong dugon", - "sea lion", - "Chihuahua", - "Japanese spaniel", - "Maltese dog, Maltese terrier, Maltese", - "Pekinese, Pekingese, Peke", - "Shih-Tzu", - "Blenheim spaniel", - "papillon", - "toy terrier", - "Rhodesian ridgeback", - "Afghan hound, Afghan", - "basset, basset hound", - "beagle", - "bloodhound, sleuthhound", - "bluetick", - "black-and-tan coonhound", - "Walker hound, Walker foxhound", - "English foxhound", - "redbone", - "borzoi, Russian wolfhound", - "Irish wolfhound", - "Italian greyhound", - "whippet", - "Ibizan hound, Ibizan Podenco", - "Norwegian elkhound, elkhound", - "otterhound, otter hound", - "Saluki, gazelle hound", - "Scottish deerhound, deerhound", - "Weimaraner", - "Staffordshire bullterrier, Staffordshire bull terrier", - "American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier", - "Bedlington terrier", - "Border terrier", - "Kerry blue terrier", - "Irish terrier", - "Norfolk terrier", - "Norwich terrier", - 
"Yorkshire terrier", - "wire-haired fox terrier", - "Lakeland terrier", - "Sealyham terrier, Sealyham", - "Airedale, Airedale terrier", - "cairn, cairn terrier", - "Australian terrier", - "Dandie Dinmont, Dandie Dinmont terrier", - "Boston bull, Boston terrier", - "miniature schnauzer", - "giant schnauzer", - "standard schnauzer", - "Scotch terrier, Scottish terrier, Scottie", - "Tibetan terrier, chrysanthemum dog", - "silky terrier, Sydney silky", - "soft-coated wheaten terrier", - "West Highland white terrier", - "Lhasa, Lhasa apso", - "flat-coated retriever", - "curly-coated retriever", - "golden retriever", - "Labrador retriever", - "Chesapeake Bay retriever", - "German short-haired pointer", - "vizsla, Hungarian pointer", - "English setter", - "Irish setter, red setter", - "Gordon setter", - "Brittany spaniel", - "clumber, clumber spaniel", - "English springer, English springer spaniel", - "Welsh springer spaniel", - "cocker spaniel, English cocker spaniel, cocker", - "Sussex spaniel", - "Irish water spaniel", - "kuvasz", - "schipperke", - "groenendael", - "malinois", - "briard", - "kelpie", - "komondor", - "Old English sheepdog, bobtail", - "Shetland sheepdog, Shetland sheep dog, Shetland", - "collie", - "Border collie", - "Bouvier des Flandres, Bouviers des Flandres", - "Rottweiler", - "German shepherd, German shepherd dog, German police dog, alsatian", - "Doberman, Doberman pinscher", - "miniature pinscher", - "Greater Swiss Mountain dog", - "Bernese mountain dog", - "Appenzeller", - "EntleBucher", - "boxer", - "bull mastiff", - "Tibetan mastiff", - "French bulldog", - "Great Dane", - "Saint Bernard, St Bernard", - "Eskimo dog, husky", - "malamute, malemute, Alaskan malamute", - "Siberian husky", - "dalmatian, coach dog, carriage dog", - "affenpinscher, monkey pinscher, monkey dog", - "basenji", - "pug, pug-dog", - "Leonberg", - "Newfoundland, Newfoundland dog", - "Great Pyrenees", - "Samoyed, Samoyede", - "Pomeranian", - "chow, chow chow", - "keeshond", - 
"Brabancon griffon", - "Pembroke, Pembroke Welsh corgi", - "Cardigan, Cardigan Welsh corgi", - "toy poodle", - "miniature poodle", - "standard poodle", - "Mexican hairless", - "timber wolf, grey wolf, gray wolf, Canis lupus", - "white wolf, Arctic wolf, Canis lupus tundrarum", - "red wolf, maned wolf, Canis rufus, Canis niger", - "coyote, prairie wolf, brush wolf, Canis latrans", - "dingo, warrigal, warragal, Canis dingo", - "dhole, Cuon alpinus", - "African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus", - "hyena, hyaena", - "red fox, Vulpes vulpes", - "kit fox, Vulpes macrotis", - "Arctic fox, white fox, Alopex lagopus", - "grey fox, gray fox, Urocyon cinereoargenteus", - "tabby, tabby cat", - "tiger cat", - "Persian cat", - "Siamese cat, Siamese", - "Egyptian cat", - "cougar, puma, catamount, mountain lion, painter, panther, Felis concolor", - "lynx, catamount", - "leopard, Panthera pardus", - "snow leopard, ounce, Panthera uncia", - "jaguar, panther, Panthera onca, Felis onca", - "lion, king of beasts, Panthera leo", - "tiger, Panthera tigris", - "cheetah, chetah, Acinonyx jubatus", - "brown bear, bruin, Ursus arctos", - "American black bear, black bear, Ursus americanus, Euarctos americanus", - "ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus", - "sloth bear, Melursus ursinus, Ursus ursinus", - "mongoose", - "meerkat, mierkat", - "tiger beetle", - "ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle", - "ground beetle, carabid beetle", - "long-horned beetle, longicorn, longicorn beetle", - "leaf beetle, chrysomelid", - "dung beetle", - "rhinoceros beetle", - "weevil", - "fly", - "bee", - "ant, emmet, pismire", - "grasshopper, hopper", - "cricket", - "walking stick, walkingstick, stick insect", - "cockroach, roach", - "mantis, mantid", - "cicada, cicala", - "leafhopper", - "lacewing, lacewing fly", - "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", - 
"damselfly", - "admiral", - "ringlet, ringlet butterfly", - "monarch, monarch butterfly, milkweed butterfly, Danaus plexippus", - "cabbage butterfly", - "sulphur butterfly, sulfur butterfly", - "lycaenid, lycaenid butterfly", - "starfish, sea star", - "sea urchin", - "sea cucumber, holothurian", - "wood rabbit, cottontail, cottontail rabbit", - "hare", - "Angora, Angora rabbit", - "hamster", - "porcupine, hedgehog", - "fox squirrel, eastern fox squirrel, Sciurus niger", - "marmot", - "beaver", - "guinea pig, Cavia cobaya", - "sorrel", - "zebra", - "hog, pig, grunter, squealer, Sus scrofa", - "wild boar, boar, Sus scrofa", - "warthog", - "hippopotamus, hippo, river horse, Hippopotamus amphibius", - "ox", - "water buffalo, water ox, Asiatic buffalo, Bubalus bubalis", - "bison", - "ram, tup", - "bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis", - "ibex, Capra ibex", - "hartebeest", - "impala, Aepyceros melampus", - "gazelle", - "Arabian camel, dromedary, Camelus dromedarius", - "llama", - "weasel", - "mink", - "polecat, fitch, foulmart, foumart, Mustela putorius", - "black-footed ferret, ferret, Mustela nigripes", - "otter", - "skunk, polecat, wood pussy", - "badger", - "armadillo", - "three-toed sloth, ai, Bradypus tridactylus", - "orangutan, orang, orangutang, Pongo pygmaeus", - "gorilla, Gorilla gorilla", - "chimpanzee, chimp, Pan troglodytes", - "gibbon, Hylobates lar", - "siamang, Hylobates syndactylus, Symphalangus syndactylus", - "guenon, guenon monkey", - "patas, hussar monkey, Erythrocebus patas", - "baboon", - "macaque", - "langur", - "colobus, colobus monkey", - "proboscis monkey, Nasalis larvatus", - "marmoset", - "capuchin, ringtail, Cebus capucinus", - "howler monkey, howler", - "titi, titi monkey", - "spider monkey, Ateles geoffroyi", - "squirrel monkey, Saimiri sciureus", - "Madagascar cat, ring-tailed lemur, Lemur catta", - "indri, indris, Indri indri, Indri brevicaudatus", - "Indian elephant, Elephas 
maximus", - "African elephant, Loxodonta africana", - "lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens", - "giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca", - "barracouta, snoek", - "eel", - "coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch", - "rock beauty, Holocanthus tricolor", - "anemone fish", - "sturgeon", - "gar, garfish, garpike, billfish, Lepisosteus osseus", - "lionfish", - "puffer, pufferfish, blowfish, globefish", - "abacus", - "abaya", - "academic gown, academic robe, judge's robe", - "accordion, piano accordion, squeeze box", - "acoustic guitar", - "aircraft carrier, carrier, flattop, attack aircraft carrier", - "airliner", - "airship, dirigible", - "altar", - "ambulance", - "amphibian, amphibious vehicle", - "analog clock", - "apiary, bee house", - "apron", - "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", - "assault rifle, assault gun", - "backpack, back pack, knapsack, packsack, rucksack, haversack", - "bakery, bakeshop, bakehouse", - "balance beam, beam", - "balloon", - "ballpoint, ballpoint pen, ballpen, Biro", - "Band Aid", - "banjo", - "bannister, banister, balustrade, balusters, handrail", - "barbell", - "barber chair", - "barbershop", - "barn", - "barometer", - "barrel, cask", - "barrow, garden cart, lawn cart, wheelbarrow", - "baseball", - "basketball", - "bassinet", - "bassoon", - "bathing cap, swimming cap", - "bath towel", - "bathtub, bathing tub, bath, tub", - "beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", - "beacon, lighthouse, beacon light, pharos", - "beaker", - "bearskin, busby, shako", - "beer bottle", - "beer glass", - "bell cote, bell cot", - "bib", - "bicycle-built-for-two, tandem bicycle, tandem", - "bikini, two-piece", - "binder, ring-binder", - "binoculars, field glasses, opera glasses", - "birdhouse", - "boathouse", - "bobsled, bobsleigh, bob", - "bolo tie, bolo, bola 
tie, bola", - "bonnet, poke bonnet", - "bookcase", - "bookshop, bookstore, bookstall", - "bottlecap", - "bow", - "bow tie, bow-tie, bowtie", - "brass, memorial tablet, plaque", - "brassiere, bra, bandeau", - "breakwater, groin, groyne, mole, bulwark, seawall, jetty", - "breastplate, aegis, egis", - "broom", - "bucket, pail", - "buckle", - "bulletproof vest", - "bullet train, bullet", - "butcher shop, meat market", - "cab, hack, taxi, taxicab", - "caldron, cauldron", - "candle, taper, wax light", - "cannon", - "canoe", - "can opener, tin opener", - "cardigan", - "car mirror", - "carousel, carrousel, merry-go-round, roundabout, whirligig", - "carpenter's kit, tool kit", - "carton", - "car wheel", - "cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM", - "cassette", - "cassette player", - "castle", - "catamaran", - "CD player", - "cello, violoncello", - "cellular telephone, cellular phone, cellphone, cell, mobile phone", - "chain", - "chainlink fence", - "chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", - "chain saw, chainsaw", - "chest", - "chiffonier, commode", - "chime, bell, gong", - "china cabinet, china closet", - "Christmas stocking", - "church, church building", - "cinema, movie theater, movie theatre, movie house, picture palace", - "cleaver, meat cleaver, chopper", - "cliff dwelling", - "cloak", - "clog, geta, patten, sabot", - "cocktail shaker", - "coffee mug", - "coffeepot", - "coil, spiral, volute, whorl, helix", - "combination lock", - "computer keyboard, keypad", - "confectionery, confectionary, candy store", - "container ship, containership, container vessel", - "convertible", - "corkscrew, bottle screw", - "cornet, horn, trumpet, trump", - "cowboy boot", - "cowboy hat, ten-gallon hat", - "cradle", - "crane", - "crash helmet", - "crate", - "crib, cot", - "Crock Pot", - "croquet ball", - "crutch", - "cuirass", - "dam, dike, dyke", - "desk", - "desktop 
computer", - "dial telephone, dial phone", - "diaper, nappy, napkin", - "digital clock", - "digital watch", - "dining table, board", - "dishrag, dishcloth", - "dishwasher, dish washer, dishwashing machine", - "disk brake, disc brake", - "dock, dockage, docking facility", - "dogsled, dog sled, dog sleigh", - "dome", - "doormat, welcome mat", - "drilling platform, offshore rig", - "drum, membranophone, tympan", - "drumstick", - "dumbbell", - "Dutch oven", - "electric fan, blower", - "electric guitar", - "electric locomotive", - "entertainment center", - "envelope", - "espresso maker", - "face powder", - "feather boa, boa", - "file, file cabinet, filing cabinet", - "fireboat", - "fire engine, fire truck", - "fire screen, fireguard", - "flagpole, flagstaff", - "flute, transverse flute", - "folding chair", - "football helmet", - "forklift", - "fountain", - "fountain pen", - "four-poster", - "freight car", - "French horn, horn", - "frying pan, frypan, skillet", - "fur coat", - "garbage truck, dustcart", - "gasmask, respirator, gas helmet", - "gas pump, gasoline pump, petrol pump, island dispenser", - "goblet", - "go-kart", - "golf ball", - "golfcart, golf cart", - "gondola", - "gong, tam-tam", - "gown", - "grand piano, grand", - "greenhouse, nursery, glasshouse", - "grille, radiator grille", - "grocery store, grocery, food market, market", - "guillotine", - "hair slide", - "hair spray", - "half track", - "hammer", - "hamper", - "hand blower, blow dryer, blow drier, hair dryer, hair drier", - "hand-held computer, hand-held microcomputer", - "handkerchief, hankie, hanky, hankey", - "hard disc, hard disk, fixed disk", - "harmonica, mouth organ, harp, mouth harp", - "harp", - "harvester, reaper", - "hatchet", - "holster", - "home theater, home theatre", - "honeycomb", - "hook, claw", - "hoopskirt, crinoline", - "horizontal bar, high bar", - "horse cart, horse-cart", - "hourglass", - "iPod", - "iron, smoothing iron", - "jack-o'-lantern", - "jean, blue jean, denim", - "jeep, 
landrover", - "jersey, T-shirt, tee shirt", - "jigsaw puzzle", - "jinrikisha, ricksha, rickshaw", - "joystick", - "kimono", - "knee pad", - "knot", - "lab coat, laboratory coat", - "ladle", - "lampshade, lamp shade", - "laptop, laptop computer", - "lawn mower, mower", - "lens cap, lens cover", - "letter opener, paper knife, paperknife", - "library", - "lifeboat", - "lighter, light, igniter, ignitor", - "limousine, limo", - "liner, ocean liner", - "lipstick, lip rouge", - "Loafer", - "lotion", - "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", - "loupe, jeweler's loupe", - "lumbermill, sawmill", - "magnetic compass", - "mailbag, postbag", - "mailbox, letter box", - "maillot", - "maillot, tank suit", - "manhole cover", - "maraca", - "marimba, xylophone", - "mask", - "matchstick", - "maypole", - "maze, labyrinth", - "measuring cup", - "medicine chest, medicine cabinet", - "megalith, megalithic structure", - "microphone, mike", - "microwave, microwave oven", - "military uniform", - "milk can", - "minibus", - "miniskirt, mini", - "minivan", - "missile", - "mitten", - "mixing bowl", - "mobile home, manufactured home", - "Model T", - "modem", - "monastery", - "monitor", - "moped", - "mortar", - "mortarboard", - "mosque", - "mosquito net", - "motor scooter, scooter", - "mountain bike, all-terrain bike, off-roader", - "mountain tent", - "mouse, computer mouse", - "mousetrap", - "moving van", - "muzzle", - "nail", - "neck brace", - "necklace", - "nipple", - "notebook, notebook computer", - "obelisk", - "oboe, hautboy, hautbois", - "ocarina, sweet potato", - "odometer, hodometer, mileometer, milometer", - "oil filter", - "organ, pipe organ", - "oscilloscope, scope, cathode-ray oscilloscope, CRO", - "overskirt", - "oxcart", - "oxygen mask", - "packet", - "paddle, boat paddle", - "paddlewheel, paddle wheel", - "padlock", - "paintbrush", - "pajama, pyjama, pj's, jammies", - "palace", - "panpipe, pandean pipe, syrinx", - "paper towel", - "parachute, 
chute", - "parallel bars, bars", - "park bench", - "parking meter", - "passenger car, coach, carriage", - "patio, terrace", - "pay-phone, pay-station", - "pedestal, plinth, footstall", - "pencil box, pencil case", - "pencil sharpener", - "perfume, essence", - "Petri dish", - "photocopier", - "pick, plectrum, plectron", - "pickelhaube", - "picket fence, paling", - "pickup, pickup truck", - "pier", - "piggy bank, penny bank", - "pill bottle", - "pillow", - "ping-pong ball", - "pinwheel", - "pirate, pirate ship", - "pitcher, ewer", - "plane, carpenter's plane, woodworking plane", - "planetarium", - "plastic bag", - "plate rack", - "plow, plough", - "plunger, plumber's helper", - "Polaroid camera, Polaroid Land camera", - "pole", - "police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria", - "poncho", - "pool table, billiard table, snooker table", - "pop bottle, soda bottle", - "pot, flowerpot", - "potter's wheel", - "power drill", - "prayer rug, prayer mat", - "printer", - "prison, prison house", - "projectile, missile", - "projector", - "puck, hockey puck", - "punching bag, punch bag, punching ball, punchball", - "purse", - "quill, quill pen", - "quilt, comforter, comfort, puff", - "racer, race car, racing car", - "racket, racquet", - "radiator", - "radio, wireless", - "radio telescope, radio reflector", - "rain barrel", - "recreational vehicle, RV, R.V.", - "reel", - "reflex camera", - "refrigerator, icebox", - "remote control, remote", - "restaurant, eating house, eating place, eatery", - "revolver, six-gun, six-shooter", - "rifle", - "rocking chair, rocker", - "rotisserie", - "rubber eraser, rubber, pencil eraser", - "rugby ball", - "rule, ruler", - "running shoe", - "safe", - "safety pin", - "saltshaker, salt shaker", - "sandal", - "sarong", - "sax, saxophone", - "scabbard", - "scale, weighing machine", - "school bus", - "schooner", - "scoreboard", - "screen, CRT screen", - "screw", - "screwdriver", - "seat belt, seatbelt", - "sewing machine", - 
"shield, buckler", - "shoe shop, shoe-shop, shoe store", - "shoji", - "shopping basket", - "shopping cart", - "shovel", - "shower cap", - "shower curtain", - "ski", - "ski mask", - "sleeping bag", - "slide rule, slipstick", - "sliding door", - "slot, one-armed bandit", - "snorkel", - "snowmobile", - "snowplow, snowplough", - "soap dispenser", - "soccer ball", - "sock", - "solar dish, solar collector, solar furnace", - "sombrero", - "soup bowl", - "space bar", - "space heater", - "space shuttle", - "spatula", - "speedboat", - "spider web, spider's web", - "spindle", - "sports car, sport car", - "spotlight, spot", - "stage", - "steam locomotive", - "steel arch bridge", - "steel drum", - "stethoscope", - "stole", - "stone wall", - "stopwatch, stop watch", - "stove", - "strainer", - "streetcar, tram, tramcar, trolley, trolley car", - "stretcher", - "studio couch, day bed", - "stupa, tope", - "submarine, pigboat, sub, U-boat", - "suit, suit of clothes", - "sundial", - "sunglass", - "sunglasses, dark glasses, shades", - "sunscreen, sunblock, sun blocker", - "suspension bridge", - "swab, swob, mop", - "sweatshirt", - "swimming trunks, bathing trunks", - "swing", - "switch, electric switch, electrical switch", - "syringe", - "table lamp", - "tank, army tank, armored combat vehicle, armoured combat vehicle", - "tape player", - "teapot", - "teddy, teddy bear", - "television, television system", - "tennis ball", - "thatch, thatched roof", - "theater curtain, theatre curtain", - "thimble", - "thresher, thrasher, threshing machine", - "throne", - "tile roof", - "toaster", - "tobacco shop, tobacconist shop, tobacconist", - "toilet seat", - "torch", - "totem pole", - "tow truck, tow car, wrecker", - "toyshop", - "tractor", - "trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", - "tray", - "trench coat", - "tricycle, trike, velocipede", - "trimaran", - "tripod", - "triumphal arch", - "trolleybus, trolley coach, trackless trolley", - "trombone", - "tub, 
vat", - "turnstile", - "typewriter keyboard", - "umbrella", - "unicycle, monocycle", - "upright, upright piano", - "vacuum, vacuum cleaner", - "vase", - "vault", - "velvet", - "vending machine", - "vestment", - "viaduct", - "violin, fiddle", - "volleyball", - "waffle iron", - "wall clock", - "wallet, billfold, notecase, pocketbook", - "wardrobe, closet, press", - "warplane, military plane", - "washbasin, handbasin, washbowl, lavabo, wash-hand basin", - "washer, automatic washer, washing machine", - "water bottle", - "water jug", - "water tower", - "whiskey jug", - "whistle", - "wig", - "window screen", - "window shade", - "Windsor tie", - "wine bottle", - "wing", - "wok", - "wooden spoon", - "wool, woolen, woollen", - "worm fence, snake fence, snake-rail fence, Virginia fence", - "wreck", - "yawl", - "yurt", - "web site, website, internet site, site", - "comic book", - "crossword puzzle, crossword", - "street sign", - "traffic light, traffic signal, stoplight", - "book jacket, dust cover, dust jacket, dust wrapper", - "menu", - "plate", - "guacamole", - "consomme", - "hot pot, hotpot", - "trifle", - "ice cream, icecream", - "ice lolly, lolly, lollipop, popsicle", - "French loaf", - "bagel, beigel", - "pretzel", - "cheeseburger", - "hotdog, hot dog, red hot", - "mashed potato", - "head cabbage", - "broccoli", - "cauliflower", - "zucchini, courgette", - "spaghetti squash", - "acorn squash", - "butternut squash", - "cucumber, cuke", - "artichoke, globe artichoke", - "bell pepper", - "cardoon", - "mushroom", - "Granny Smith", - "strawberry", - "orange", - "lemon", - "fig", - "pineapple, ananas", - "banana", - "jackfruit, jak, jack", - "custard apple", - "pomegranate", - "hay", - "carbonara", - "chocolate sauce, chocolate syrup", - "dough", - "meat loaf, meatloaf", - "pizza, pizza pie", - "potpie", - "burrito", - "red wine", - "espresso", - "cup", - "eggnog", - "alp", - "bubble", - "cliff, drop, drop-off", - "coral reef", - "geyser", - "lakeside, lakeshore", - 
"promontory, headland, head, foreland", - "sandbar, sand bar", - "seashore, coast, seacoast, sea-coast", - "valley, vale", - "volcano", - "ballplayer, baseball player", - "groom, bridegroom", - "scuba diver", - "rapeseed", - "daisy", - "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", - "corn", - "acorn", - "hip, rose hip, rosehip", - "buckeye, horse chestnut, conker", - "coral fungus", - "agaric", - "gyromitra", - "stinkhorn, carrion fungus", - "earthstar", - "hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa", - "bolete", - "ear, spike, capitulum", - "toilet tissue, toilet paper, bathroom tissue", - }; - - return labels; -} diff --git a/models/image_classification_mobilenet/mobilenet.py b/models/image_classification_mobilenet/mobilenet.py deleted file mode 100644 index f2cf111d..00000000 --- a/models/image_classification_mobilenet/mobilenet.py +++ /dev/null @@ -1,1078 +0,0 @@ -import numpy as np -import cv2 as cv - -class MobileNet: - ''' - Works with MobileNet V1 & V2. 
- ''' - - def __init__(self, modelPath, topK=1, loadLabel=True, backendId=0, targetId=0): - self.model_path = modelPath - assert topK >= 1 - self.top_k = topK - self.load_label = loadLabel - self.backend_id = backendId - self.target_id = targetId - - self.model = cv.dnn.readNet(self.model_path) - self.model.setPreferableBackend(self.backend_id) - self.model.setPreferableTarget(self.target_id) - - self.input_names = '' - self.output_names = '' - self.input_size = [224, 224] - self.mean=[0.485, 0.456, 0.406] - self.std=[0.229, 0.224, 0.225] - - # load labels - self._labels = self._load_labels() - - def _load_labels(self): - return self.LABELS_IMAGENET_1K.splitlines() - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self.backend_id = backendId - self.target_id = targetId - self.model.setPreferableBackend(self.backend_id) - self.model.setPreferableTarget(self.target_id) - - def _preprocess(self, image): - input_blob = (image / 255.0 - self.mean) / self.std - input_blob = input_blob.transpose(2, 0, 1) - input_blob = input_blob[np.newaxis, :, :, :] - input_blob = input_blob.astype(np.float32) - return input_blob - - def infer(self, image): - # Preprocess - input_blob = self._preprocess(image) - - # Forward - self.model.setInput(input_blob, self.input_names) - output_blob = self.model.forward(self.output_names) - - # Postprocess - results = self._postprocess(output_blob) - - return results - - def _postprocess(self, output_blob): - batched_class_id_list = [] - for o in output_blob: - class_id_list = o.argsort()[::-1][:self.top_k] - batched_class_id_list.append(class_id_list) - if len(self._labels) > 0 and self.load_label: - batched_predicted_labels = [] - for class_id_list in batched_class_id_list: - predicted_labels = [] - for class_id in class_id_list: - predicted_labels.append(self._labels[class_id]) - batched_predicted_labels.append(predicted_labels) - return batched_predicted_labels - else: - 
return batched_class_id_list - - LABELS_IMAGENET_1K = '''tench -goldfish -great white shark -tiger shark -hammerhead -electric ray -stingray -cock -hen -ostrich -brambling -goldfinch -house finch -junco -indigo bunting -robin -bulbul -jay -magpie -chickadee -water ouzel -kite -bald eagle -vulture -great grey owl -European fire salamander -common newt -eft -spotted salamander -axolotl -bullfrog -tree frog -tailed frog -loggerhead -leatherback turtle -mud turtle -terrapin -box turtle -banded gecko -common iguana -American chameleon -whiptail -agama -frilled lizard -alligator lizard -Gila monster -green lizard -African chameleon -Komodo dragon -African crocodile -American alligator -triceratops -thunder snake -ringneck snake -hognose snake -green snake -king snake -garter snake -water snake -vine snake -night snake -boa constrictor -rock python -Indian cobra -green mamba -sea snake -horned viper -diamondback -sidewinder -trilobite -harvestman -scorpion -black and gold garden spider -barn spider -garden spider -black widow -tarantula -wolf spider -tick -centipede -black grouse -ptarmigan -ruffed grouse -prairie chicken -peacock -quail -partridge -African grey -macaw -sulphur-crested cockatoo -lorikeet -coucal -bee eater -hornbill -hummingbird -jacamar -toucan -drake -red-breasted merganser -goose -black swan -tusker -echidna -platypus -wallaby -koala -wombat -jellyfish -sea anemone -brain coral -flatworm -nematode -conch -snail -slug -sea slug -chiton -chambered nautilus -Dungeness crab -rock crab -fiddler crab -king crab -American lobster -spiny lobster -crayfish -hermit crab -isopod -white stork -black stork -spoonbill -flamingo -little blue heron -American egret -bittern -crane -limpkin -European gallinule -American coot -bustard -ruddy turnstone -red-backed sandpiper -redshank -dowitcher -oystercatcher -pelican -king penguin -albatross -grey whale -killer whale -dugong -sea lion -Chihuahua -Japanese spaniel -Maltese dog -Pekinese -Shih-Tzu -Blenheim spaniel 
-papillon -toy terrier -Rhodesian ridgeback -Afghan hound -basset -beagle -bloodhound -bluetick -black-and-tan coonhound -Walker hound -English foxhound -redbone -borzoi -Irish wolfhound -Italian greyhound -whippet -Ibizan hound -Norwegian elkhound -otterhound -Saluki -Scottish deerhound -Weimaraner -Staffordshire bullterrier -American Staffordshire terrier -Bedlington terrier -Border terrier -Kerry blue terrier -Irish terrier -Norfolk terrier -Norwich terrier -Yorkshire terrier -wire-haired fox terrier -Lakeland terrier -Sealyham terrier -Airedale -cairn -Australian terrier -Dandie Dinmont -Boston bull -miniature schnauzer -giant schnauzer -standard schnauzer -Scotch terrier -Tibetan terrier -silky terrier -soft-coated wheaten terrier -West Highland white terrier -Lhasa -flat-coated retriever -curly-coated retriever -golden retriever -Labrador retriever -Chesapeake Bay retriever -German short-haired pointer -vizsla -English setter -Irish setter -Gordon setter -Brittany spaniel -clumber -English springer -Welsh springer spaniel -cocker spaniel -Sussex spaniel -Irish water spaniel -kuvasz -schipperke -groenendael -malinois -briard -kelpie -komondor -Old English sheepdog -Shetland sheepdog -collie -Border collie -Bouvier des Flandres -Rottweiler -German shepherd -Doberman -miniature pinscher -Greater Swiss Mountain dog -Bernese mountain dog -Appenzeller -EntleBucher -boxer -bull mastiff -Tibetan mastiff -French bulldog -Great Dane -Saint Bernard -Eskimo dog -malamute -Siberian husky -dalmatian -affenpinscher -basenji -pug -Leonberg -Newfoundland -Great Pyrenees -Samoyed -Pomeranian -chow -keeshond -Brabancon griffon -Pembroke -Cardigan -toy poodle -miniature poodle -standard poodle -Mexican hairless -timber wolf -white wolf -red wolf -coyote -dingo -dhole -African hunting dog -hyena -red fox -kit fox -Arctic fox -grey fox -tabby -tiger cat -Persian cat -Siamese cat -Egyptian cat -cougar -lynx -leopard -snow leopard -jaguar -lion -tiger -cheetah -brown bear -American 
black bear -ice bear -sloth bear -mongoose -meerkat -tiger beetle -ladybug -ground beetle -long-horned beetle -leaf beetle -dung beetle -rhinoceros beetle -weevil -fly -bee -ant -grasshopper -cricket -walking stick -cockroach -mantis -cicada -leafhopper -lacewing -dragonfly -damselfly -admiral -ringlet -monarch -cabbage butterfly -sulphur butterfly -lycaenid -starfish -sea urchin -sea cucumber -wood rabbit -hare -Angora -hamster -porcupine -fox squirrel -marmot -beaver -guinea pig -sorrel -zebra -hog -wild boar -warthog -hippopotamus -ox -water buffalo -bison -ram -bighorn -ibex -hartebeest -impala -gazelle -Arabian camel -llama -weasel -mink -polecat -black-footed ferret -otter -skunk -badger -armadillo -three-toed sloth -orangutan -gorilla -chimpanzee -gibbon -siamang -guenon -patas -baboon -macaque -langur -colobus -proboscis monkey -marmoset -capuchin -howler monkey -titi -spider monkey -squirrel monkey -Madagascar cat -indri -Indian elephant -African elephant -lesser panda -giant panda -barracouta -eel -coho -rock beauty -anemone fish -sturgeon -gar -lionfish -puffer -abacus -abaya -academic gown -accordion -acoustic guitar -aircraft carrier -airliner -airship -altar -ambulance -amphibian -analog clock -apiary -apron -ashcan -assault rifle -backpack -bakery -balance beam -balloon -ballpoint -Band Aid -banjo -bannister -barbell -barber chair -barbershop -barn -barometer -barrel -barrow -baseball -basketball -bassinet -bassoon -bathing cap -bath towel -bathtub -beach wagon -beacon -beaker -bearskin -beer bottle -beer glass -bell cote -bib -bicycle-built-for-two -bikini -binder -binoculars -birdhouse -boathouse -bobsled -bolo tie -bonnet -bookcase -bookshop -bottlecap -bow -bow tie -brass -brassiere -breakwater -breastplate -broom -bucket -buckle -bulletproof vest -bullet train -butcher shop -cab -caldron -candle -cannon -canoe -can opener -cardigan -car mirror -carousel -carpenters kit -carton -car wheel -cash machine -cassette -cassette player -castle 
-catamaran -CD player -cello -cellular telephone -chain -chainlink fence -chain mail -chain saw -chest -chiffonier -chime -china cabinet -Christmas stocking -church -cinema -cleaver -cliff dwelling -cloak -clog -cocktail shaker -coffee mug -coffeepot -coil -combination lock -computer keyboard -confectionery -container ship -convertible -corkscrew -cornet -cowboy boot -cowboy hat -cradle -crane -crash helmet -crate -crib -Crock Pot -croquet ball -crutch -cuirass -dam -desk -desktop computer -dial telephone -diaper -digital clock -digital watch -dining table -dishrag -dishwasher -disk brake -dock -dogsled -dome -doormat -drilling platform -drum -drumstick -dumbbell -Dutch oven -electric fan -electric guitar -electric locomotive -entertainment center -envelope -espresso maker -face powder -feather boa -file -fireboat -fire engine -fire screen -flagpole -flute -folding chair -football helmet -forklift -fountain -fountain pen -four-poster -freight car -French horn -frying pan -fur coat -garbage truck -gasmask -gas pump -goblet -go-kart -golf ball -golfcart -gondola -gong -gown -grand piano -greenhouse -grille -grocery store -guillotine -hair slide -hair spray -half track -hammer -hamper -hand blower -hand-held computer -handkerchief -hard disc -harmonica -harp -harvester -hatchet -holster -home theater -honeycomb -hook -hoopskirt -horizontal bar -horse cart -hourglass -iPod -iron -jack-o-lantern -jean -jeep -jersey -jigsaw puzzle -jinrikisha -joystick -kimono -knee pad -knot -lab coat -ladle -lampshade -laptop -lawn mower -lens cap -letter opener -library -lifeboat -lighter -limousine -liner -lipstick -Loafer -lotion -loudspeaker -loupe -lumbermill -magnetic compass -mailbag -mailbox -maillot -maillot -manhole cover -maraca -marimba -mask -matchstick -maypole -maze -measuring cup -medicine chest -megalith -microphone -microwave -military uniform -milk can -minibus -miniskirt -minivan -missile -mitten -mixing bowl -mobile home -Model T -modem -monastery -monitor -moped 
-mortar -mortarboard -mosque -mosquito net -motor scooter -mountain bike -mountain tent -mouse -mousetrap -moving van -muzzle -nail -neck brace -necklace -nipple -notebook -obelisk -oboe -ocarina -odometer -oil filter -organ -oscilloscope -overskirt -oxcart -oxygen mask -packet -paddle -paddlewheel -padlock -paintbrush -pajama -palace -panpipe -paper towel -parachute -parallel bars -park bench -parking meter -passenger car -patio -pay-phone -pedestal -pencil box -pencil sharpener -perfume -Petri dish -photocopier -pick -pickelhaube -picket fence -pickup -pier -piggy bank -pill bottle -pillow -ping-pong ball -pinwheel -pirate -pitcher -plane -planetarium -plastic bag -plate rack -plow -plunger -Polaroid camera -pole -police van -poncho -pool table -pop bottle -pot -potters wheel -power drill -prayer rug -printer -prison -projectile -projector -puck -punching bag -purse -quill -quilt -racer -racket -radiator -radio -radio telescope -rain barrel -recreational vehicle -reel -reflex camera -refrigerator -remote control -restaurant -revolver -rifle -rocking chair -rotisserie -rubber eraser -rugby ball -rule -running shoe -safe -safety pin -saltshaker -sandal -sarong -sax -scabbard -scale -school bus -schooner -scoreboard -screen -screw -screwdriver -seat belt -sewing machine -shield -shoe shop -shoji -shopping basket -shopping cart -shovel -shower cap -shower curtain -ski -ski mask -sleeping bag -slide rule -sliding door -slot -snorkel -snowmobile -snowplow -soap dispenser -soccer ball -sock -solar dish -sombrero -soup bowl -space bar -space heater -space shuttle -spatula -speedboat -spider web -spindle -sports car -spotlight -stage -steam locomotive -steel arch bridge -steel drum -stethoscope -stole -stone wall -stopwatch -stove -strainer -streetcar -stretcher -studio couch -stupa -submarine -suit -sundial -sunglass -sunglasses -sunscreen -suspension bridge -swab -sweatshirt -swimming trunks -swing -switch -syringe -table lamp -tank -tape player -teapot -teddy 
-television -tennis ball -thatch -theater curtain -thimble -thresher -throne -tile roof -toaster -tobacco shop -toilet seat -torch -totem pole -tow truck -toyshop -tractor -trailer truck -tray -trench coat -tricycle -trimaran -tripod -triumphal arch -trolleybus -trombone -tub -turnstile -typewriter keyboard -umbrella -unicycle -upright -vacuum -vase -vault -velvet -vending machine -vestment -viaduct -violin -volleyball -waffle iron -wall clock -wallet -wardrobe -warplane -washbasin -washer -water bottle -water jug -water tower -whiskey jug -whistle -wig -window screen -window shade -Windsor tie -wine bottle -wing -wok -wooden spoon -wool -worm fence -wreck -yawl -yurt -web site -comic book -crossword puzzle -street sign -traffic light -book jacket -menu -plate -guacamole -consomme -hot pot -trifle -ice cream -ice lolly -French loaf -bagel -pretzel -cheeseburger -hotdog -mashed potato -head cabbage -broccoli -cauliflower -zucchini -spaghetti squash -acorn squash -butternut squash -cucumber -artichoke -bell pepper -cardoon -mushroom -Granny Smith -strawberry -orange -lemon -fig -pineapple -banana -jackfruit -custard apple -pomegranate -hay -carbonara -chocolate sauce -dough -meat loaf -pizza -potpie -burrito -red wine -espresso -cup -eggnog -alp -bubble -cliff -coral reef -geyser -lakeside -promontory -sandbar -seashore -valley -volcano -ballplayer -groom -scuba diver -rapeseed -daisy -yellow ladys slipper -corn -acorn -hip -buckeye -coral fungus -agaric -gyromitra -stinkhorn -earthstar -hen-of-the-woods -bolete -ear -toilet tissue''' diff --git a/models/image_classification_ppresnet/CMakeLists.txt b/models/image_classification_ppresnet/CMakeLists.txt deleted file mode 100644 index 9707d1e6..00000000 --- a/models/image_classification_ppresnet/CMakeLists.txt +++ /dev/null @@ -1,32 +0,0 @@ -cmake_minimum_required(VERSION 3.24) -set(project_name "opencv_zoo_image_classification_ppresnet") - -PROJECT (${project_name}) - -set(OPENCV_VERSION "4.10.0") 
-set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) -# Find OpenCV, you may need to set OpenCV_DIR variable -# to the absolute path to the directory containing OpenCVConfig.cmake file -# via the command line or GUI - -file(GLOB SourceFile - "demo.cpp") -# If the package has been found, several variables will -# be set, you can find the full list with descriptions -# in the OpenCVConfig.cmake file. -# Print some message showing some of them -message(STATUS "OpenCV library status:") -message(STATUS " config: ${OpenCV_DIR}") -message(STATUS " version: ${OpenCV_VERSION}") -message(STATUS " libraries: ${OpenCV_LIBS}") -message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}") - -# Declare the executable target built from your sources -add_executable(${project_name} ${SourceFile}) - -# Set C++ compilation standard to C++11 -set(CMAKE_CXX_STANDARD 11) - -# Link your application with OpenCV libraries -target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS}) diff --git a/models/image_classification_ppresnet/LICENSE b/models/image_classification_ppresnet/LICENSE deleted file mode 100644 index 94255ff4..00000000 --- a/models/image_classification_ppresnet/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ -Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/models/image_classification_ppresnet/README.md b/models/image_classification_ppresnet/README.md deleted file mode 100644 index 88cdf6d0..00000000 --- a/models/image_classification_ppresnet/README.md +++ /dev/null @@ -1,60 +0,0 @@ -# ResNet - -Deep Residual Learning for Image Recognition - -This model is ported from [PaddleHub](https://github.com/PaddlePaddle/PaddleHub) using [this script from OpenCV](https://github.com/opencv/opencv/blob/master/samples/dnn/dnn_model_runner/dnn_conversion/paddlepaddle/paddle_resnet50.py). - -**Note**: -- `image_classification_ppresnet50_2022jan_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. - -Results of accuracy evaluation with [tools/eval](../../tools/eval). - -| Models | Top-1 Accuracy | Top-5 Accuracy | -| --------------- | -------------- | -------------- | -| PP-ResNet | 82.28 | 96.15 | -| PP-ResNet block | 82.27 | 96.15 | -| PP-ResNet quant | 0.22 | 0.96 | - -\*: 'quant' stands for 'quantized'. -\*\*: 'block' stands for 'blockwise quantized'. 
- -## Demo - -Run the following commands to try the demo: - -### Python - -```shell -python demo.py --input /path/to/image - -# get help regarding various parameters -python demo.py --help -``` -### C++ - -Install latest OpenCV and CMake >= 3.24.0 to get started with: - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# detect on an image -./build/opencv_zoo_image_classification_ppresnet -i=/path/to/image - -# detect on an image and display top N classes -./build/opencv_zoo_image_classification_ppresnet -i=/path/to/image -k=N - -# get help messages -./build/opencv_zoo_image_classification_ppresnet -h -``` - -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). - -## Reference - -- https://arxiv.org/abs/1512.03385 -- https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/paddlepaddle -- https://github.com/PaddlePaddle/PaddleHub diff --git a/models/image_classification_ppresnet/demo.cpp b/models/image_classification_ppresnet/demo.cpp deleted file mode 100644 index cb54d3b8..00000000 --- a/models/image_classification_ppresnet/demo.cpp +++ /dev/null @@ -1,1123 +0,0 @@ -#include -#include -#include -#include - -using namespace std; -using namespace cv; -using namespace dnn; - -extern vector LABELS_IMAGENET_1K; - -class PPResNet { -public: - PPResNet(const string& modelPath, int topK, int backendId, int targetId) - : _topK(topK) { - _model = readNet(modelPath); - _model.setPreferableBackend(backendId); - _model.setPreferableTarget(targetId); - } - - Mat preprocess(const Mat& image) - { - Mat floatImage; - image.convertTo(floatImage, CV_32F, 1.0 / 255.0); - subtract(floatImage, _mean, floatImage); - divide(floatImage, _std, floatImage); - return blobFromImage(floatImage); - } - - vector infer(const Mat& image) - { - assert(image.rows == _inputSize.height && image.cols == 
_inputSize.width); - Mat inputBlob = preprocess(image); - _model.setInput(inputBlob, _inputName); - Mat outputBlob = _model.forward(_outputName); - vector results = postprocess(outputBlob); - return results; - } - - vector postprocess(const Mat& outputBlob) - { - vector class_id_list; - sortIdx(outputBlob, class_id_list, SORT_EVERY_ROW | SORT_DESCENDING); - class_id_list.resize(min(_topK, static_cast(outputBlob.cols))); - vector predicted_labels; - for (int class_id : class_id_list) - { - predicted_labels.push_back(LABELS_IMAGENET_1K[class_id]); - } - return predicted_labels; - } - -private: - Net _model; - int _topK; - const Size _inputSize = Size(224, 224); - const Scalar _mean = Scalar(0.485, 0.456, 0.406); - const Scalar _std = Scalar(0.229, 0.224, 0.225); - string _inputName = ""; - string _outputName = "save_infer_model/scale_0.tmp_0"; -}; - -const vector> backend_target_pairs = -{ - {DNN_BACKEND_OPENCV, DNN_TARGET_CPU}, - {DNN_BACKEND_CUDA, DNN_TARGET_CUDA}, - {DNN_BACKEND_CUDA, DNN_TARGET_CUDA_FP16}, - {DNN_BACKEND_TIMVX, DNN_TARGET_NPU}, - {DNN_BACKEND_CANN, DNN_TARGET_NPU} -}; - -int main(int argc, char** argv) -{ - CommandLineParser parser(argc, argv, - "{ input i | | Set input path to a certain image, omit if using camera.}" - "{ model m | image_classification_ppresnet50_2022jan.onnx | Set model path.}" - "{ top_k k | 1 | Get top k predictions.}" - "{ backend_target bt | 0 | Choose one of computation backends: " - "0: (default) OpenCV implementation + CPU, " - "1: CUDA + GPU (CUDA), " - "2: CUDA + GPU (CUDA FP16), " - "3: TIM-VX + NPU, " - "4: CANN + NPU}"); - - string inputPath = parser.get("input"); - string modelPath = parser.get("model"); - int backendTarget = parser.get("backend_target"); - int topK = parser.get("top_k"); - - int backendId = backend_target_pairs[backendTarget][0]; - int targetId = backend_target_pairs[backendTarget][1]; - - PPResNet model(modelPath, topK, backendId, targetId); - - // Read image and get a 224x224 crop from a 256x256 
resized - Mat image = imread(inputPath); - cvtColor(image, image, COLOR_BGR2RGB); - resize(image, image, Size(256, 256)); - image = image(Rect(16, 16, 224, 224)); - - // Inference - auto predictions = model.infer(image); - - // Print result - if (topK == 1) - { - cout << "Predicted Label: " << predictions[0] << endl; - } - else - { - cout << "Predicted Top-K Labels (in decreasing confidence): " << endl; - for (size_t i = 0; i < predictions.size(); ++i) - { - cout << "(" << i+1 << ") " << predictions[i] << endl; - } - } - - return 0; -} - -vector LABELS_IMAGENET_1K = -{ - "tench", - "goldfish", - "great white shark", - "tiger shark", - "hammerhead", - "electric ray", - "stingray", - "cock", - "hen", - "ostrich", - "brambling", - "goldfinch", - "house finch", - "junco", - "indigo bunting", - "robin", - "bulbul", - "jay", - "magpie", - "chickadee", - "water ouzel", - "kite", - "bald eagle", - "vulture", - "great grey owl", - "European fire salamander", - "common newt", - "eft", - "spotted salamander", - "axolotl", - "bullfrog", - "tree frog", - "tailed frog", - "loggerhead", - "leatherback turtle", - "mud turtle", - "terrapin", - "box turtle", - "banded gecko", - "common iguana", - "American chameleon", - "whiptail", - "agama", - "frilled lizard", - "alligator lizard", - "Gila monster", - "green lizard", - "African chameleon", - "Komodo dragon", - "African crocodile", - "American alligator", - "triceratops", - "thunder snake", - "ringneck snake", - "hognose snake", - "green snake", - "king snake", - "garter snake", - "water snake", - "vine snake", - "night snake", - "boa constrictor", - "rock python", - "Indian cobra", - "green mamba", - "sea snake", - "horned viper", - "diamondback", - "sidewinder", - "trilobite", - "harvestman", - "scorpion", - "black and gold garden spider", - "barn spider", - "garden spider", - "black widow", - "tarantula", - "wolf spider", - "tick", - "centipede", - "black grouse", - "ptarmigan", - "ruffed grouse", - "prairie chicken", - 
"peacock", - "quail", - "partridge", - "African grey", - "macaw", - "sulphur-crested cockatoo", - "lorikeet", - "coucal", - "bee eater", - "hornbill", - "hummingbird", - "jacamar", - "toucan", - "drake", - "red-breasted merganser", - "goose", - "black swan", - "tusker", - "echidna", - "platypus", - "wallaby", - "koala", - "wombat", - "jellyfish", - "sea anemone", - "brain coral", - "flatworm", - "nematode", - "conch", - "snail", - "slug", - "sea slug", - "chiton", - "chambered nautilus", - "Dungeness crab", - "rock crab", - "fiddler crab", - "king crab", - "American lobster", - "spiny lobster", - "crayfish", - "hermit crab", - "isopod", - "white stork", - "black stork", - "spoonbill", - "flamingo", - "little blue heron", - "American egret", - "bittern", - "crane", - "limpkin", - "European gallinule", - "American coot", - "bustard", - "ruddy turnstone", - "red-backed sandpiper", - "redshank", - "dowitcher", - "oystercatcher", - "pelican", - "king penguin", - "albatross", - "grey whale", - "killer whale", - "dugong", - "sea lion", - "Chihuahua", - "Japanese spaniel", - "Maltese dog", - "Pekinese", - "Shih-Tzu", - "Blenheim spaniel", - "papillon", - "toy terrier", - "Rhodesian ridgeback", - "Afghan hound", - "basset", - "beagle", - "bloodhound", - "bluetick", - "black-and-tan coonhound", - "Walker hound", - "English foxhound", - "redbone", - "borzoi", - "Irish wolfhound", - "Italian greyhound", - "whippet", - "Ibizan hound", - "Norwegian elkhound", - "otterhound", - "Saluki", - "Scottish deerhound", - "Weimaraner", - "Staffordshire bullterrier", - "American Staffordshire terrier", - "Bedlington terrier", - "Border terrier", - "Kerry blue terrier", - "Irish terrier", - "Norfolk terrier", - "Norwich terrier", - "Yorkshire terrier", - "wire-haired fox terrier", - "Lakeland terrier", - "Sealyham terrier", - "Airedale", - "cairn", - "Australian terrier", - "Dandie Dinmont", - "Boston bull", - "miniature schnauzer", - "giant schnauzer", - "standard schnauzer", - "Scotch 
terrier", - "Tibetan terrier", - "silky terrier", - "soft-coated wheaten terrier", - "West Highland white terrier", - "Lhasa", - "flat-coated retriever", - "curly-coated retriever", - "golden retriever", - "Labrador retriever", - "Chesapeake Bay retriever", - "German short-haired pointer", - "vizsla", - "English setter", - "Irish setter", - "Gordon setter", - "Brittany spaniel", - "clumber", - "English springer", - "Welsh springer spaniel", - "cocker spaniel", - "Sussex spaniel", - "Irish water spaniel", - "kuvasz", - "schipperke", - "groenendael", - "malinois", - "briard", - "kelpie", - "komondor", - "Old English sheepdog", - "Shetland sheepdog", - "collie", - "Border collie", - "Bouvier des Flandres", - "Rottweiler", - "German shepherd", - "Doberman", - "miniature pinscher", - "Greater Swiss Mountain dog", - "Bernese mountain dog", - "Appenzeller", - "EntleBucher", - "boxer", - "bull mastiff", - "Tibetan mastiff", - "French bulldog", - "Great Dane", - "Saint Bernard", - "Eskimo dog", - "malamute", - "Siberian husky", - "dalmatian", - "affenpinscher", - "basenji", - "pug", - "Leonberg", - "Newfoundland", - "Great Pyrenees", - "Samoyed", - "Pomeranian", - "chow", - "keeshond", - "Brabancon griffon", - "Pembroke", - "Cardigan", - "toy poodle", - "miniature poodle", - "standard poodle", - "Mexican hairless", - "timber wolf", - "white wolf", - "red wolf", - "coyote", - "dingo", - "dhole", - "African hunting dog", - "hyena", - "red fox", - "kit fox", - "Arctic fox", - "grey fox", - "tabby", - "tiger cat", - "Persian cat", - "Siamese cat", - "Egyptian cat", - "cougar", - "lynx", - "leopard", - "snow leopard", - "jaguar", - "lion", - "tiger", - "cheetah", - "brown bear", - "American black bear", - "ice bear", - "sloth bear", - "mongoose", - "meerkat", - "tiger beetle", - "ladybug", - "ground beetle", - "long-horned beetle", - "leaf beetle", - "dung beetle", - "rhinoceros beetle", - "weevil", - "fly", - "bee", - "ant", - "grasshopper", - "cricket", - "walking stick", - 
"cockroach", - "mantis", - "cicada", - "leafhopper", - "lacewing", - "dragonfly", - "damselfly", - "admiral", - "ringlet", - "monarch", - "cabbage butterfly", - "sulphur butterfly", - "lycaenid", - "starfish", - "sea urchin", - "sea cucumber", - "wood rabbit", - "hare", - "Angora", - "hamster", - "porcupine", - "fox squirrel", - "marmot", - "beaver", - "guinea pig", - "sorrel", - "zebra", - "hog", - "wild boar", - "warthog", - "hippopotamus", - "ox", - "water buffalo", - "bison", - "ram", - "bighorn", - "ibex", - "hartebeest", - "impala", - "gazelle", - "Arabian camel", - "llama", - "weasel", - "mink", - "polecat", - "black-footed ferret", - "otter", - "skunk", - "badger", - "armadillo", - "three-toed sloth", - "orangutan", - "gorilla", - "chimpanzee", - "gibbon", - "siamang", - "guenon", - "patas", - "baboon", - "macaque", - "langur", - "colobus", - "proboscis monkey", - "marmoset", - "capuchin", - "howler monkey", - "titi", - "spider monkey", - "squirrel monkey", - "Madagascar cat", - "indri", - "Indian elephant", - "African elephant", - "lesser panda", - "giant panda", - "barracouta", - "eel", - "coho", - "rock beauty", - "anemone fish", - "sturgeon", - "gar", - "lionfish", - "puffer", - "abacus", - "abaya", - "academic gown", - "accordion", - "acoustic guitar", - "aircraft carrier", - "airliner", - "airship", - "altar", - "ambulance", - "amphibian", - "analog clock", - "apiary", - "apron", - "ashcan", - "assault rifle", - "backpack", - "bakery", - "balance beam", - "balloon", - "ballpoint", - "Band Aid", - "banjo", - "bannister", - "barbell", - "barber chair", - "barbershop", - "barn", - "barometer", - "barrel", - "barrow", - "baseball", - "basketball", - "bassinet", - "bassoon", - "bathing cap", - "bath towel", - "bathtub", - "beach wagon", - "beacon", - "beaker", - "bearskin", - "beer bottle", - "beer glass", - "bell cote", - "bib", - "bicycle-built-for-two", - "bikini", - "binder", - "binoculars", - "birdhouse", - "boathouse", - "bobsled", - "bolo tie", - 
"bonnet", - "bookcase", - "bookshop", - "bottlecap", - "bow", - "bow tie", - "brass", - "brassiere", - "breakwater", - "breastplate", - "broom", - "bucket", - "buckle", - "bulletproof vest", - "bullet train", - "butcher shop", - "cab", - "caldron", - "candle", - "cannon", - "canoe", - "can opener", - "cardigan", - "car mirror", - "carousel", - "carpenter's kit", - "carton", - "car wheel", - "cash machine", - "cassette", - "cassette player", - "castle", - "catamaran", - "CD player", - "cello", - "cellular telephone", - "chain", - "chainlink fence", - "chain mail", - "chain saw", - "chest", - "chiffonier", - "chime", - "china cabinet", - "Christmas stocking", - "church", - "cinema", - "cleaver", - "cliff dwelling", - "cloak", - "clog", - "cocktail shaker", - "coffee mug", - "coffeepot", - "coil", - "combination lock", - "computer keyboard", - "confectionery", - "container ship", - "convertible", - "corkscrew", - "cornet", - "cowboy boot", - "cowboy hat", - "cradle", - "crane", - "crash helmet", - "crate", - "crib", - "Crock Pot", - "croquet ball", - "crutch", - "cuirass", - "dam", - "desk", - "desktop computer", - "dial telephone", - "diaper", - "digital clock", - "digital watch", - "dining table", - "dishrag", - "dishwasher", - "disk brake", - "dock", - "dogsled", - "dome", - "doormat", - "drilling platform", - "drum", - "drumstick", - "dumbbell", - "Dutch oven", - "electric fan", - "electric guitar", - "electric locomotive", - "entertainment center", - "envelope", - "espresso maker", - "face powder", - "feather boa", - "filing cabinet", - "fireboat", - "fire engine", - "fire screen", - "flagpole", - "flute", - "folding chair", - "football helmet", - "forklift", - "fountain", - "fountain pen", - "four-poster", - "freight car", - "French horn", - "frying pan", - "fur coat", - "garbage truck", - "gasmask", - "gas pump", - "goblet", - "go-kart", - "golf ball", - "golfcart", - "gondola", - "gong", - "gown", - "grand piano", - "greenhouse", - "grille", - "grocery store", 
- "guillotine", - "hair slide", - "hair spray", - "half track", - "hammer", - "hamper", - "hand blower", - "hand-held computer", - "handkerchief", - "hard disc", - "harmonica", - "harp", - "harvester", - "hatchet", - "holster", - "home theater", - "honeycomb", - "hook", - "hoopskirt", - "horizontal bar", - "horse cart", - "hourglass", - "iPod", - "iron", - "jack-o'-lantern", - "jean", - "jeep", - "jersey", - "jigsaw puzzle", - "jinrikisha", - "joystick", - "kimono", - "knee pad", - "knot", - "lab coat", - "ladle", - "lampshade", - "laptop", - "lawn mower", - "lens cap", - "letter opener", - "library", - "lifeboat", - "lighter", - "limousine", - "liner", - "lipstick", - "Loafer", - "lotion", - "loudspeaker", - "loupe", - "lumbermill", - "magnetic compass", - "mailbag", - "mailbox", - "maillot", - "maillot", - "manhole cover", - "maraca", - "marimba", - "mask", - "matchstick", - "maypole", - "maze", - "measuring cup", - "medicine chest", - "megalith", - "microphone", - "microwave", - "military uniform", - "milk can", - "minibus", - "miniskirt", - "minivan", - "missile", - "mitten", - "mixing bowl", - "mobile home", - "Model T", - "modem", - "monastery", - "monitor", - "moped", - "mortar", - "mortarboard", - "mosque", - "mosquito net", - "motor scooter", - "mountain bike", - "mountain tent", - "mouse", - "mousetrap", - "moving van", - "muzzle", - "nail", - "neck brace", - "necklace", - "nipple", - "notebook", - "obelisk", - "oboe", - "ocarina", - "odometer", - "oil filter", - "organ", - "oscilloscope", - "overskirt", - "oxcart", - "oxygen mask", - "packet", - "paddle", - "paddlewheel", - "padlock", - "paintbrush", - "pajama", - "palace", - "panpipe", - "paper towel", - "parachute", - "parallel bars", - "park bench", - "parking meter", - "passenger car", - "patio", - "pay-phone", - "pedestal", - "pencil box", - "pencil sharpener", - "perfume", - "Petri dish", - "photocopier", - "pick", - "pickelhaube", - "picket fence", - "pickup", - "pier", - "piggy bank", - "pill 
bottle", - "pillow", - "ping-pong ball", - "pinwheel", - "pirate", - "pitcher", - "plane", - "planetarium", - "plastic bag", - "plate rack", - "plow", - "plunger", - "Polaroid camera", - "pole", - "police van", - "poncho", - "pool table", - "pop bottle", - "pot", - "potter's wheel", - "power drill", - "prayer rug", - "printer", - "prison", - "projectile", - "projector", - "puck", - "punching bag", - "purse", - "quill", - "quilt", - "racer", - "racket", - "radiator", - "radio", - "radio telescope", - "rain barrel", - "recreational vehicle", - "reel", - "reflex camera", - "refrigerator", - "remote control", - "restaurant", - "revolver", - "rifle", - "rocking chair", - "rotisserie", - "rubber eraser", - "rugby ball", - "rule", - "running shoe", - "safe", - "safety pin", - "saltshaker", - "sandal", - "sarong", - "sax", - "scabbard", - "scale", - "school bus", - "schooner", - "scoreboard", - "screen", - "screw", - "screwdriver", - "seat belt", - "sewing machine", - "shield", - "shoe shop", - "shoji", - "shopping basket", - "shopping cart", - "shovel", - "shower cap", - "shower curtain", - "ski", - "ski mask", - "sleeping bag", - "slide rule", - "sliding door", - "slot", - "snorkel", - "snowmobile", - "snowplow", - "soap dispenser", - "soccer ball", - "sock", - "solar dish", - "sombrero", - "soup bowl", - "space bar", - "space heater", - "space shuttle", - "spatula", - "speedboat", - "spider web", - "spindle", - "sports car", - "spotlight", - "stage", - "steam locomotive", - "steel arch bridge", - "steel drum", - "stethoscope", - "stole", - "stone wall", - "stopwatch", - "stove", - "strainer", - "streetcar", - "stretcher", - "studio couch", - "stupa", - "submarine", - "suit", - "sundial", - "sunglass", - "sunglasses", - "sunscreen", - "suspension bridge", - "swab", - "sweatshirt", - "swimming trunks", - "swing", - "switch", - "syringe", - "table lamp", - "tank", - "tape player", - "teapot", - "teddy", - "television", - "tennis ball", - "thatch", - "theater curtain", - 
"thimble", - "thresher", - "throne", - "tile roof", - "toaster", - "tobacco shop", - "toilet seat", - "torch", - "totem pole", - "tow truck", - "toyshop", - "tractor", - "trailer truck", - "tray", - "trench coat", - "tricycle", - "trimaran", - "tripod", - "triumphal arch", - "trolleybus", - "trombone", - "tub", - "turnstile", - "typewriter keyboard", - "umbrella", - "unicycle", - "upright", - "vacuum", - "vase", - "vault", - "velvet", - "vending machine", - "vestment", - "viaduct", - "violin", - "volleyball", - "waffle iron", - "wall clock", - "wallet", - "wardrobe", - "warplane", - "washbasin", - "washer", - "water bottle", - "water jug", - "water tower", - "whiskey jug", - "whistle", - "wig", - "window screen", - "window shade", - "Windsor tie", - "wine bottle", - "wing", - "wok", - "wooden spoon", - "wool", - "worm fence", - "wreck", - "yawl", - "yurt", - "web site", - "comic book", - "crossword puzzle", - "street sign", - "traffic light", - "book jacket", - "menu", - "plate", - "guacamole", - "consomme", - "hot pot", - "trifle", - "ice cream", - "ice lolly", - "French loaf", - "bagel", - "pretzel", - "cheeseburger", - "hotdog", - "mashed potato", - "head cabbage", - "broccoli", - "cauliflower", - "zucchini", - "spaghetti squash", - "acorn squash", - "butternut squash", - "cucumber", - "artichoke", - "bell pepper", - "cardoon", - "mushroom", - "Granny Smith", - "strawberry", - "orange", - "lemon", - "fig", - "pineapple", - "banana", - "jackfruit", - "custard apple", - "pomegranate", - "hay", - "carbonara", - "chocolate sauce", - "dough", - "meatloaf", - "pizza", - "potpie", - "burrito", - "red wine", - "espresso", - "cup", - "eggnog", - "alp", - "bubble", - "cliff", - "coral reef", - "geyser", - "lakeside", - "promontory", - "sandbar", - "seashore", - "valley", - "volcano", - "ballplayer", - "groom", - "scuba diver", - "rapeseed", - "daisy", - "yellow lady's slipper", - "corn", - "acorn", - "hip", - "buckeye", - "coral fungus", - "agaric", - "gyromitra", - 
"stinkhorn", - "earthstar", - "hen-of-the-woods", - "bolete", - "ear", - "toilet tissue" -}; diff --git a/models/image_classification_ppresnet/demo.py b/models/image_classification_ppresnet/demo.py deleted file mode 100644 index 8157f26f..00000000 --- a/models/image_classification_ppresnet/demo.py +++ /dev/null @@ -1,67 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. - -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from ppresnet import PPResNet - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser(description='Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385, https://github.com/PaddlePaddle/PaddleHub)') -parser.add_argument('--input', '-i', type=str, - help='Usage: Set input path to a certain image, omit if using camera.') -parser.add_argument('--model', '-m', type=str, default='image_classification_ppresnet50_2022jan.onnx', - help='Usage: Set model path, defaults to image_classification_ppresnet50_2022jan.onnx.') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run 
this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--top_k', type=int, default=1, - help='Usage: Get top k predictions.') -args = parser.parse_args() - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - top_k = args.top_k - # Instantiate ResNet - model = PPResNet(modelPath=args.model, topK=top_k, backendId=backend_id, targetId=target_id) - - # Read image and get a 224x224 crop from a 256x256 resized - image = cv.imread(args.input) - image = cv.cvtColor(image, cv.COLOR_BGR2RGB) - image = cv.resize(image, dsize=(256, 256)) - image = image[16:240, 16:240, :] - - # Inference - result = model.infer(image)[0] - - # Print result - if top_k == 1: - print(f"Predicted Label: {result[0]}") - else: - print("Predicted Top-K Labels (in decreasing confidence):") - for i, prediction in enumerate(result): - print(f"({i+1}) {prediction}") diff --git a/models/image_classification_ppresnet/image_classification_ppresnet50_2022jan.onnx b/models/image_classification_ppresnet/image_classification_ppresnet50_2022jan.onnx deleted file mode 100644 index d1e03061..00000000 --- a/models/image_classification_ppresnet/image_classification_ppresnet50_2022jan.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ad5486b0de6c2171ea4d28c734c2fb7c5f64fcdbd97180a0ef515cf4b766a405 -size 102567035 diff --git a/models/image_classification_ppresnet/image_classification_ppresnet50_2022jan_int8.onnx b/models/image_classification_ppresnet/image_classification_ppresnet50_2022jan_int8.onnx deleted file mode 100644 index 50a7c2cc..00000000 --- a/models/image_classification_ppresnet/image_classification_ppresnet50_2022jan_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version 
https://git-lfs.github.com/spec/v1 -oid sha256:574bc954869eef09b40a3968bb19157c8faf4999419dca13cfaa3ee56ab5ecd4 -size 25692063 diff --git a/models/image_classification_ppresnet/image_classification_ppresnet50_2022jan_int8bq.onnx b/models/image_classification_ppresnet/image_classification_ppresnet50_2022jan_int8bq.onnx deleted file mode 100644 index 5dc4f5c5..00000000 --- a/models/image_classification_ppresnet/image_classification_ppresnet50_2022jan_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:92c4ba8e363cc5114279ca61f62838600f3121481b74b73c744086b64c694003 -size 28093644 diff --git a/models/image_classification_ppresnet/ppresnet.py b/models/image_classification_ppresnet/ppresnet.py deleted file mode 100644 index 8c844ebd..00000000 --- a/models/image_classification_ppresnet/ppresnet.py +++ /dev/null @@ -1,1083 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. 
- - -import numpy as np -import cv2 as cv - -class PPResNet: - def __init__(self, modelPath, topK=1, loadLabel=True, backendId=0, targetId=0): - self._modelPath = modelPath - assert topK >= 1 - self._topK = topK - self._load_label = loadLabel - self._backendId = backendId - self._targetId = targetId - - self._model = cv.dnn.readNet(self._modelPath) - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - self._inputNames = '' - self._outputNames = ['save_infer_model/scale_0.tmp_0'] - self._inputSize = [224, 224] - self._mean = np.array([0.485, 0.456, 0.406])[np.newaxis, np.newaxis, :] - self._std = np.array([0.229, 0.224, 0.225])[np.newaxis, np.newaxis, :] - - # load labels - self._labels = self._load_labels() - - def _load_labels(self): - return self.LABELS_IMAGENET_1K.splitlines() - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - def _preprocess(self, image): - image = image.astype(np.float32, copy=False) / 255.0 - image -= self._mean - image /= self._std - return cv.dnn.blobFromImage(image) - - def infer(self, image): - assert image.shape[0] == self._inputSize[1], '{} (height of input image) != {} (preset height)'.format(image.shape[0], self._inputSize[1]) - assert image.shape[1] == self._inputSize[0], '{} (width of input image) != {} (preset width)'.format(image.shape[1], self._inputSize[0]) - - # Preprocess - inputBlob = self._preprocess(image) - - # Forward - self._model.setInput(inputBlob, self._inputNames) - outputBlob = self._model.forward(self._outputNames) - - # Postprocess - results = self._postprocess(outputBlob[0]) - - return results - - def _postprocess(self, outputBlob): - batched_class_id_list = [] - for ob in outputBlob: - class_id_list = 
ob.argsort()[::-1][:self._topK] - batched_class_id_list.append(class_id_list) - if len(self._labels) > 0 and self._load_label: - batched_predicted_labels = [] - for class_id_list in batched_class_id_list: - predicted_labels = [] - for class_id in class_id_list: - predicted_labels.append(self._labels[class_id]) - batched_predicted_labels.append(predicted_labels) - return batched_predicted_labels - else: - return batched_class_id_list - - LABELS_IMAGENET_1K = '''tench -goldfish -great white shark -tiger shark -hammerhead -electric ray -stingray -cock -hen -ostrich -brambling -goldfinch -house finch -junco -indigo bunting -robin -bulbul -jay -magpie -chickadee -water ouzel -kite -bald eagle -vulture -great grey owl -European fire salamander -common newt -eft -spotted salamander -axolotl -bullfrog -tree frog -tailed frog -loggerhead -leatherback turtle -mud turtle -terrapin -box turtle -banded gecko -common iguana -American chameleon -whiptail -agama -frilled lizard -alligator lizard -Gila monster -green lizard -African chameleon -Komodo dragon -African crocodile -American alligator -triceratops -thunder snake -ringneck snake -hognose snake -green snake -king snake -garter snake -water snake -vine snake -night snake -boa constrictor -rock python -Indian cobra -green mamba -sea snake -horned viper -diamondback -sidewinder -trilobite -harvestman -scorpion -black and gold garden spider -barn spider -garden spider -black widow -tarantula -wolf spider -tick -centipede -black grouse -ptarmigan -ruffed grouse -prairie chicken -peacock -quail -partridge -African grey -macaw -sulphur-crested cockatoo -lorikeet -coucal -bee eater -hornbill -hummingbird -jacamar -toucan -drake -red-breasted merganser -goose -black swan -tusker -echidna -platypus -wallaby -koala -wombat -jellyfish -sea anemone -brain coral -flatworm -nematode -conch -snail -slug -sea slug -chiton -chambered nautilus -Dungeness crab -rock crab -fiddler crab -king crab -American lobster -spiny lobster -crayfish 
-hermit crab -isopod -white stork -black stork -spoonbill -flamingo -little blue heron -American egret -bittern -crane -limpkin -European gallinule -American coot -bustard -ruddy turnstone -red-backed sandpiper -redshank -dowitcher -oystercatcher -pelican -king penguin -albatross -grey whale -killer whale -dugong -sea lion -Chihuahua -Japanese spaniel -Maltese dog -Pekinese -Shih-Tzu -Blenheim spaniel -papillon -toy terrier -Rhodesian ridgeback -Afghan hound -basset -beagle -bloodhound -bluetick -black-and-tan coonhound -Walker hound -English foxhound -redbone -borzoi -Irish wolfhound -Italian greyhound -whippet -Ibizan hound -Norwegian elkhound -otterhound -Saluki -Scottish deerhound -Weimaraner -Staffordshire bullterrier -American Staffordshire terrier -Bedlington terrier -Border terrier -Kerry blue terrier -Irish terrier -Norfolk terrier -Norwich terrier -Yorkshire terrier -wire-haired fox terrier -Lakeland terrier -Sealyham terrier -Airedale -cairn -Australian terrier -Dandie Dinmont -Boston bull -miniature schnauzer -giant schnauzer -standard schnauzer -Scotch terrier -Tibetan terrier -silky terrier -soft-coated wheaten terrier -West Highland white terrier -Lhasa -flat-coated retriever -curly-coated retriever -golden retriever -Labrador retriever -Chesapeake Bay retriever -German short-haired pointer -vizsla -English setter -Irish setter -Gordon setter -Brittany spaniel -clumber -English springer -Welsh springer spaniel -cocker spaniel -Sussex spaniel -Irish water spaniel -kuvasz -schipperke -groenendael -malinois -briard -kelpie -komondor -Old English sheepdog -Shetland sheepdog -collie -Border collie -Bouvier des Flandres -Rottweiler -German shepherd -Doberman -miniature pinscher -Greater Swiss Mountain dog -Bernese mountain dog -Appenzeller -EntleBucher -boxer -bull mastiff -Tibetan mastiff -French bulldog -Great Dane -Saint Bernard -Eskimo dog -malamute -Siberian husky -dalmatian -affenpinscher -basenji -pug -Leonberg -Newfoundland -Great Pyrenees -Samoyed 
-Pomeranian -chow -keeshond -Brabancon griffon -Pembroke -Cardigan -toy poodle -miniature poodle -standard poodle -Mexican hairless -timber wolf -white wolf -red wolf -coyote -dingo -dhole -African hunting dog -hyena -red fox -kit fox -Arctic fox -grey fox -tabby -tiger cat -Persian cat -Siamese cat -Egyptian cat -cougar -lynx -leopard -snow leopard -jaguar -lion -tiger -cheetah -brown bear -American black bear -ice bear -sloth bear -mongoose -meerkat -tiger beetle -ladybug -ground beetle -long-horned beetle -leaf beetle -dung beetle -rhinoceros beetle -weevil -fly -bee -ant -grasshopper -cricket -walking stick -cockroach -mantis -cicada -leafhopper -lacewing -dragonfly -damselfly -admiral -ringlet -monarch -cabbage butterfly -sulphur butterfly -lycaenid -starfish -sea urchin -sea cucumber -wood rabbit -hare -Angora -hamster -porcupine -fox squirrel -marmot -beaver -guinea pig -sorrel -zebra -hog -wild boar -warthog -hippopotamus -ox -water buffalo -bison -ram -bighorn -ibex -hartebeest -impala -gazelle -Arabian camel -llama -weasel -mink -polecat -black-footed ferret -otter -skunk -badger -armadillo -three-toed sloth -orangutan -gorilla -chimpanzee -gibbon -siamang -guenon -patas -baboon -macaque -langur -colobus -proboscis monkey -marmoset -capuchin -howler monkey -titi -spider monkey -squirrel monkey -Madagascar cat -indri -Indian elephant -African elephant -lesser panda -giant panda -barracouta -eel -coho -rock beauty -anemone fish -sturgeon -gar -lionfish -puffer -abacus -abaya -academic gown -accordion -acoustic guitar -aircraft carrier -airliner -airship -altar -ambulance -amphibian -analog clock -apiary -apron -ashcan -assault rifle -backpack -bakery -balance beam -balloon -ballpoint -Band Aid -banjo -bannister -barbell -barber chair -barbershop -barn -barometer -barrel -barrow -baseball -basketball -bassinet -bassoon -bathing cap -bath towel -bathtub -beach wagon -beacon -beaker -bearskin -beer bottle -beer glass -bell cote -bib -bicycle-built-for-two 
-bikini -binder -binoculars -birdhouse -boathouse -bobsled -bolo tie -bonnet -bookcase -bookshop -bottlecap -bow -bow tie -brass -brassiere -breakwater -breastplate -broom -bucket -buckle -bulletproof vest -bullet train -butcher shop -cab -caldron -candle -cannon -canoe -can opener -cardigan -car mirror -carousel -carpenters kit -carton -car wheel -cash machine -cassette -cassette player -castle -catamaran -CD player -cello -cellular telephone -chain -chainlink fence -chain mail -chain saw -chest -chiffonier -chime -china cabinet -Christmas stocking -church -cinema -cleaver -cliff dwelling -cloak -clog -cocktail shaker -coffee mug -coffeepot -coil -combination lock -computer keyboard -confectionery -container ship -convertible -corkscrew -cornet -cowboy boot -cowboy hat -cradle -crane -crash helmet -crate -crib -Crock Pot -croquet ball -crutch -cuirass -dam -desk -desktop computer -dial telephone -diaper -digital clock -digital watch -dining table -dishrag -dishwasher -disk brake -dock -dogsled -dome -doormat -drilling platform -drum -drumstick -dumbbell -Dutch oven -electric fan -electric guitar -electric locomotive -entertainment center -envelope -espresso maker -face powder -feather boa -file -fireboat -fire engine -fire screen -flagpole -flute -folding chair -football helmet -forklift -fountain -fountain pen -four-poster -freight car -French horn -frying pan -fur coat -garbage truck -gasmask -gas pump -goblet -go-kart -golf ball -golfcart -gondola -gong -gown -grand piano -greenhouse -grille -grocery store -guillotine -hair slide -hair spray -half track -hammer -hamper -hand blower -hand-held computer -handkerchief -hard disc -harmonica -harp -harvester -hatchet -holster -home theater -honeycomb -hook -hoopskirt -horizontal bar -horse cart -hourglass -iPod -iron -jack-o-lantern -jean -jeep -jersey -jigsaw puzzle -jinrikisha -joystick -kimono -knee pad -knot -lab coat -ladle -lampshade -laptop -lawn mower -lens cap -letter opener -library -lifeboat -lighter 
-limousine -liner -lipstick -Loafer -lotion -loudspeaker -loupe -lumbermill -magnetic compass -mailbag -mailbox -maillot -maillot -manhole cover -maraca -marimba -mask -matchstick -maypole -maze -measuring cup -medicine chest -megalith -microphone -microwave -military uniform -milk can -minibus -miniskirt -minivan -missile -mitten -mixing bowl -mobile home -Model T -modem -monastery -monitor -moped -mortar -mortarboard -mosque -mosquito net -motor scooter -mountain bike -mountain tent -mouse -mousetrap -moving van -muzzle -nail -neck brace -necklace -nipple -notebook -obelisk -oboe -ocarina -odometer -oil filter -organ -oscilloscope -overskirt -oxcart -oxygen mask -packet -paddle -paddlewheel -padlock -paintbrush -pajama -palace -panpipe -paper towel -parachute -parallel bars -park bench -parking meter -passenger car -patio -pay-phone -pedestal -pencil box -pencil sharpener -perfume -Petri dish -photocopier -pick -pickelhaube -picket fence -pickup -pier -piggy bank -pill bottle -pillow -ping-pong ball -pinwheel -pirate -pitcher -plane -planetarium -plastic bag -plate rack -plow -plunger -Polaroid camera -pole -police van -poncho -pool table -pop bottle -pot -potters wheel -power drill -prayer rug -printer -prison -projectile -projector -puck -punching bag -purse -quill -quilt -racer -racket -radiator -radio -radio telescope -rain barrel -recreational vehicle -reel -reflex camera -refrigerator -remote control -restaurant -revolver -rifle -rocking chair -rotisserie -rubber eraser -rugby ball -rule -running shoe -safe -safety pin -saltshaker -sandal -sarong -sax -scabbard -scale -school bus -schooner -scoreboard -screen -screw -screwdriver -seat belt -sewing machine -shield -shoe shop -shoji -shopping basket -shopping cart -shovel -shower cap -shower curtain -ski -ski mask -sleeping bag -slide rule -sliding door -slot -snorkel -snowmobile -snowplow -soap dispenser -soccer ball -sock -solar dish -sombrero -soup bowl -space bar -space heater -space shuttle -spatula 
-speedboat -spider web -spindle -sports car -spotlight -stage -steam locomotive -steel arch bridge -steel drum -stethoscope -stole -stone wall -stopwatch -stove -strainer -streetcar -stretcher -studio couch -stupa -submarine -suit -sundial -sunglass -sunglasses -sunscreen -suspension bridge -swab -sweatshirt -swimming trunks -swing -switch -syringe -table lamp -tank -tape player -teapot -teddy -television -tennis ball -thatch -theater curtain -thimble -thresher -throne -tile roof -toaster -tobacco shop -toilet seat -torch -totem pole -tow truck -toyshop -tractor -trailer truck -tray -trench coat -tricycle -trimaran -tripod -triumphal arch -trolleybus -trombone -tub -turnstile -typewriter keyboard -umbrella -unicycle -upright -vacuum -vase -vault -velvet -vending machine -vestment -viaduct -violin -volleyball -waffle iron -wall clock -wallet -wardrobe -warplane -washbasin -washer -water bottle -water jug -water tower -whiskey jug -whistle -wig -window screen -window shade -Windsor tie -wine bottle -wing -wok -wooden spoon -wool -worm fence -wreck -yawl -yurt -web site -comic book -crossword puzzle -street sign -traffic light -book jacket -menu -plate -guacamole -consomme -hot pot -trifle -ice cream -ice lolly -French loaf -bagel -pretzel -cheeseburger -hotdog -mashed potato -head cabbage -broccoli -cauliflower -zucchini -spaghetti squash -acorn squash -butternut squash -cucumber -artichoke -bell pepper -cardoon -mushroom -Granny Smith -strawberry -orange -lemon -fig -pineapple -banana -jackfruit -custard apple -pomegranate -hay -carbonara -chocolate sauce -dough -meat loaf -pizza -potpie -burrito -red wine -espresso -cup -eggnog -alp -bubble -cliff -coral reef -geyser -lakeside -promontory -sandbar -seashore -valley -volcano -ballplayer -groom -scuba diver -rapeseed -daisy -yellow ladys slipper -corn -acorn -hip -buckeye -coral fungus -agaric -gyromitra -stinkhorn -earthstar -hen-of-the-woods -bolete -ear -toilet tissue''' diff --git 
a/models/image_segmentation_efficientsam/LICENSE b/models/image_segmentation_efficientsam/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/models/image_segmentation_efficientsam/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/models/image_segmentation_efficientsam/README.md b/models/image_segmentation_efficientsam/README.md deleted file mode 100644 index 1a4b25ca..00000000 --- a/models/image_segmentation_efficientsam/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# image_segmentation_efficientsam - -EfficientSAM: Leveraged Masked Image Pretraining for Efficient Segment Anything - -Notes: -- The current implementation of the EfficientSAM demo uses the EfficientSAM-Ti model, which is specifically tailored for scenarios requiring higher speed and lightweight. 
-- image_segmentation_efficientsam_ti_2024may.onnx(supports only single point infering) - - MD5 value: 117d6a6cac60039a20b399cc133c2a60 - - SHA-256 value: e3957d2cd1422855f350aa7b044f47f5b3eafada64b5904ed330b696229e2943 -- image_segmentation_efficientsam_ti_2025april.onnx - - MD5 value: f23cecbb344547c960c933ff454536a3 - - SHA-256 value: 4eb496e0a7259d435b49b66faf1754aa45a5c382a34558ddda9a8c6fe5915d77 -- image_segmentation_efficientsam_ti_2025april_int8.onnx - - MD5 value: a1164f44b0495b82e9807c7256e95a50 - - SHA-256 value: 5ecc8d59a2802c32246e68553e1cf8ce74cf74ba707b84f206eb9181ff774b4e - - -## Demo - -### Python -Run the following command to try the demo: - -```shell -python demo.py --input /path/to/image -``` - -**Click** to select foreground points, **drag** to use box to select and **long press** to select background points on the object you wish to segment in the displayed image. After clicking the **Enter**, the segmentation result will be shown in a new window. Clicking the **Backspace** to clear all the prompts. - -## Result - -Here are some of the sample results that were observed using the model: - -![test1_res.jpg](./example_outputs/example1.png) -![test2_res.jpg](./example_outputs/example2.png) - -Video inference result: - -![sam_present.gif](./example_outputs/sam_present.gif) - -## Model metrics: - -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). 
- -#### Contributor Details - -## Reference - -- https://arxiv.org/abs/2312.00863 -- https://github.com/yformer/EfficientSAM -- https://github.com/facebookresearch/segment-anything \ No newline at end of file diff --git a/models/image_segmentation_efficientsam/demo.py b/models/image_segmentation_efficientsam/demo.py deleted file mode 100644 index 306945d3..00000000 --- a/models/image_segmentation_efficientsam/demo.py +++ /dev/null @@ -1,247 +0,0 @@ -import argparse -import numpy as np -import cv2 as cv -from efficientSAM import EfficientSAM - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser(description='EfficientSAM Demo') -parser.add_argument('--input', '-i', type=str, - help='Set input path to a certain image.') -parser.add_argument('--model', '-m', type=str, default='image_segmentation_efficientsam_ti_2025april.onnx', - help='Set model path, defaults to image_segmentation_efficientsam_ti_2025april.onnx.') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--save', '-s', action='store_true', - help='Specify to save a file with results. 
Invalid in case of camera input.') -args = parser.parse_args() - -# Global configuration -WINDOW_SIZE = (800, 600) # Fixed window size (width, height) -MAX_POINTS = 6 # Maximum allowed points -points = [] # Store clicked coordinates (original image scale) -labels = [] # Point labels (-1: useless, 0: background, 1: foreground, 2: top-left, 3: bottom right) -backend_point = [] -rectangle = False -current_img = None - -def visualize(image, result): - """ - Visualize the inference result on the input image. - - Args: - image (np.ndarray): The input image. - result (np.ndarray): The inference result. - - Returns: - vis_result (np.ndarray): The visualized result. - """ - # get image and mask - vis_result = np.copy(image) - mask = np.copy(result) - # change mask to binary image - t, binary = cv.threshold(mask, 127, 255, cv.THRESH_BINARY) - assert set(np.unique(binary)) <= {0, 255}, "The mask must be a binary image." - # enhance red channel to make the segmentation more obviously - enhancement_factor = 1.8 - red_channel = vis_result[:, :, 2] - # update the channel - red_channel = np.where(binary == 255, np.minimum(red_channel * enhancement_factor, 255), red_channel) - vis_result[:, :, 2] = red_channel - - # draw borders - contours, hierarchy = cv.findContours(binary, cv.RETR_LIST, cv.CHAIN_APPROX_TC89_L1) - cv.drawContours(vis_result, contours, contourIdx = -1, color = (255,255,255), thickness=2) - return vis_result - -def select(event, x, y, flags, param): - """Handle mouse events with coordinate conversion""" - global points, labels, backend_point, rectangle, current_img - orig_img = param['original_img'] - image_window = param['image_window'] - - if event == cv.EVENT_LBUTTONDOWN: - param['mouse_down_time'] = cv.getTickCount() - backend_point = [x, y] - - elif event == cv.EVENT_MOUSEMOVE: - if rectangle == True: - rectangle_change_img = current_img.copy() - cv.rectangle(rectangle_change_img, (backend_point[0], backend_point[1]), (x, y), (255,0,0) , 2) - 
cv.imshow(image_window, rectangle_change_img) - elif len(backend_point) != 0 and len(points) < MAX_POINTS: - rectangle = True - - - elif event == cv.EVENT_LBUTTONUP: - if len(points) >= MAX_POINTS: - print(f"Maximum points reached {MAX_POINTS}.") - return - - if rectangle == False: - duration = (cv.getTickCount() - param['mouse_down_time'])/cv.getTickFrequency() - label = -1 if duration > 0.5 else 1 # Long press = background - - points.append([backend_point[0], backend_point[1]]) - labels.append(label) - print(f"Added {['background','foreground','background'][label]} point {backend_point}.") - else: - if len(points) + 1 >= MAX_POINTS: - rectangle = False - backend_point.clear() - cv.imshow(image_window, current_img) - print(f"Points reached {MAX_POINTS}, could not add box.") - return - point_leftup = [] - point_rightdown = [] - if x > backend_point[0] or y > backend_point[1]: - point_leftup.extend(backend_point) - point_rightdown.extend([x,y]) - else: - point_leftup.extend([x,y]) - point_rightdown.extend(backend_point) - points.append(point_leftup) - points.append(point_rightdown) - print(f"Added box from {point_leftup} to {point_rightdown}.") - labels.append(2) - labels.append(3) - rectangle = False - backend_point.clear() - - marked_img = orig_img.copy() - top_left = None - for (px, py), lbl in zip(points, labels): - if lbl == -1: - cv.circle(marked_img, (px, py), 5, (0, 0, 255), -1) - elif lbl == 1: - cv.circle(marked_img, (px, py), 5, (0, 255, 0), -1) - elif lbl == 2: - top_left = (px, py) - elif lbl == 3: - bottom_right = (px, py) - cv.rectangle(marked_img, top_left, bottom_right, (255,0,0) , 2) - cv.imshow(image_window, marked_img) - current_img = marked_img.copy() - - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - # Load the EfficientSAM model - model = EfficientSAM(modelPath=args.model) - - if args.input is not None: - # Read image - image = 
cv.imread(args.input) - if image is None: - print('Could not open or find the image:', args.input) - exit(0) - # create window - image_window = "Origin image" - cv.namedWindow(image_window, cv.WINDOW_NORMAL) - # change window size - rate = 1 - rate1 = 1 - rate2 = 1 - if(image.shape[1]>WINDOW_SIZE[0]): - rate1 = WINDOW_SIZE[0]/image.shape[1] - if(image.shape[0]>WINDOW_SIZE[1]): - rate2 = WINDOW_SIZE[1]/image.shape[0] - rate = min(rate1, rate2) - # width, height - WINDOW_SIZE = (int(image.shape[1] * rate), int(image.shape[0] * rate)) - cv.resizeWindow(image_window, WINDOW_SIZE[0], WINDOW_SIZE[1]) - # put the window on the left of the screen - cv.moveWindow(image_window, 50, 100) - # set listener to record user's click point - param = { - 'original_img': image, - 'mouse_down_time': 0, - 'image_window' : image_window - } - cv.setMouseCallback(image_window, select, param) - # tips in the terminal - print("Click — Select foreground point\n" - "Long press — Select background point\n" - "Drag — Create selection box\n" - "Enter — Infer\n" - "Backspace — Clear the prompts\n" - "Q - Quit") - # show image - cv.imshow(image_window, image) - current_img = image.copy() - # create window to show visualized result - vis_image = image.copy() - segmentation_window = "Segment result" - cv.namedWindow(segmentation_window, cv.WINDOW_NORMAL) - cv.resizeWindow(segmentation_window, WINDOW_SIZE[0], WINDOW_SIZE[1]) - cv.moveWindow(segmentation_window, WINDOW_SIZE[0]+51, 100) - cv.imshow(segmentation_window, vis_image) - # waiting for click - while True: - # Check window status - # if click × to close the image window then ending - if (cv.getWindowProperty(image_window, cv.WND_PROP_VISIBLE) < 1 or - cv.getWindowProperty(segmentation_window, cv.WND_PROP_VISIBLE) < 1): - break - - # Handle keyboard input - key = cv.waitKey(1) - - # receive enter - if key == 13: - - vis_image = image.copy() - cv.putText(vis_image, "infering...", - (50, vis_image.shape[0]//2), - cv.FONT_HERSHEY_SIMPLEX, 10, 
(255,255,255), 5) - cv.imshow(segmentation_window, vis_image) - - result = model.infer(image=image, points=points, labels=labels) - if len(result) == 0: - print("clear and select points again!") - else: - vis_result = visualize(image, result) - - cv.imshow(segmentation_window, vis_result) - elif key == 8 or key == 127: # ASCII for Backspace or Delete - points.clear() - labels.clear() - backend_point = [] - rectangle = False - current_img = image - print("Points are cleared.") - cv.imshow(image_window, image) - elif key == ord('q') or key == ord('Q'): - break - - cv.destroyAllWindows() - - # Save results if save is true - if args.save: - cv.imwrite('./example_outputs/vis_result.jpg', vis_result) - cv.imwrite("./example_outputs/mask.jpg", result) - print('vis_result.jpg and mask.jpg are saved to ./example_outputs/') - - else: - print('Set input path to a certain image.') - pass - diff --git a/models/image_segmentation_efficientsam/efficientSAM.py b/models/image_segmentation_efficientsam/efficientSAM.py deleted file mode 100644 index 334d8834..00000000 --- a/models/image_segmentation_efficientsam/efficientSAM.py +++ /dev/null @@ -1,136 +0,0 @@ -import numpy as np -import cv2 as cv - -class EfficientSAM: - def __init__(self, modelPath, backendId=0, targetId=0): - self._modelPath = modelPath - self._backendId = backendId - self._targetId = targetId - - self._model = cv.dnn.readNet(self._modelPath) - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - # 3 inputs - self._inputNames = ["batched_images", "batched_point_coords", "batched_point_labels"] - - self._outputNames = ['output_masks', 'iou_predictions'] # actual output layer name - self._currentInputSize = None - self._inputSize = [1024, 1024] # input size for the model - self._maxPointNums = 6 - self._frontGroundPoints = [] - self._backGroundPoints = [] - self._labels = [] - - @property - def name(self): - return self.__class__.__name__ - - def 
setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - def _preprocess(self, image, points, labels): - - image = cv.cvtColor(image, cv.COLOR_BGR2RGB) - # record the input image size, (width, height) - self._currentInputSize = (image.shape[1], image.shape[0]) - - image = cv.resize(image, self._inputSize) - - image = image.astype(np.float32, copy=False) / 255.0 - - image_blob = cv.dnn.blobFromImage(image) - - points = np.array(points, dtype=np.float32) - labels = np.array(labels, dtype=np.float32) - assert points.shape[0] <= self._maxPointNums, f"Max input points number: {self._maxPointNums}" - assert points.shape[0] == labels.shape[0] - - frontGroundPoints = [] - backGroundPoints = [] - inputLabels = [] - for i in range(len(points)): - if labels[i] == -1: - backGroundPoints.append(points[i]) - else: - frontGroundPoints.append(points[i]) - inputLabels.append(labels[i]) - self._backGroundPoints = np.uint32(backGroundPoints) - # print("input:") - # print(" back: ", self._backGroundPoints) - # print(" front: ", frontGroundPoints) - # print(" label: ", inputLabels) - - # convert points to (1024*1024) size space - for p in frontGroundPoints: - p[0] = np.float32(p[0] * self._inputSize[0]/self._currentInputSize[0]) - p[1] = np.float32(p[1] * self._inputSize[1]/self._currentInputSize[1]) - - if len(frontGroundPoints) > self._maxPointNums: - return "no" - - pad_num = self._maxPointNums - len(frontGroundPoints) - self._frontGroundPoints = np.vstack([frontGroundPoints, np.zeros((pad_num, 2), dtype=np.float32)]) - inputLabels_arr = np.array(inputLabels, dtype=np.float32).reshape(-1, 1) - self._labels = np.vstack([inputLabels_arr, np.full((pad_num, 1), -1, dtype=np.float32)]) - - points_blob = np.array([[self._frontGroundPoints]]) - - labels_blob = np.array([[self._labels]]) - - return image_blob, points_blob, labels_blob - - 
def infer(self, image, points, labels): - # Preprocess - imageBlob, pointsBlob, labelsBlob = self._preprocess(image, points, labels) - # Forward - self._model.setInput(imageBlob, self._inputNames[0]) - self._model.setInput(pointsBlob, self._inputNames[1]) - self._model.setInput(labelsBlob, self._inputNames[2]) - # print("infering...") - outputs = self._model.forward(self._outputNames) - outputBlob, outputIou = outputs[0], outputs[1] - # Postprocess - results = self._postprocess(outputBlob, outputIou) - # print("done") - return results - - def _postprocess(self, outputBlob, outputIou): - # The masks are already sorted by their predicted IOUs. - # The first dimension is the batch size (we have a single image. so it is 1). - # The second dimension is the number of masks we want to generate - # The third dimension is the number of candidate masks output by the model. - masks = outputBlob[0, 0, :, :, :] >= 0 - ious = outputIou[0, 0, :] - - # sorted by ious - sorted_indices = np.argsort(ious)[::-1] - sorted_masks = masks[sorted_indices] - - # sorted by area - # mask_areas = np.sum(masks, axis=(1, 2)) - # sorted_indices = np.argsort(mask_areas) - # sorted_masks = masks[sorted_indices] - - masks_uint8 = (sorted_masks * 255).astype(np.uint8) - - # change to real image size - resized_masks = [ - cv.resize(mask, dsize=self._currentInputSize, - interpolation=cv.INTER_NEAREST) - for mask in masks_uint8 - ] - - # background mask don't need - for mask in resized_masks: - contains_bg = any( - mask[y, x] if (0 <= x < mask.shape[1] and 0 <= y < mask.shape[0]) - else False - for (x, y) in self._backGroundPoints - ) - if not contains_bg: - return mask - - return resized_masks[0] diff --git a/models/image_segmentation_efficientsam/example_outputs/example1.png b/models/image_segmentation_efficientsam/example_outputs/example1.png deleted file mode 100644 index c20d7834..00000000 --- a/models/image_segmentation_efficientsam/example_outputs/example1.png +++ /dev/null @@ -1,3 +0,0 @@ 
-version https://git-lfs.github.com/spec/v1 -oid sha256:70065831fb12915dc5a3b4641019bc152a89d6d5be1887bdf7ada432a04e63c5 -size 1993654 diff --git a/models/image_segmentation_efficientsam/example_outputs/example2.png b/models/image_segmentation_efficientsam/example_outputs/example2.png deleted file mode 100644 index 3b0cb955..00000000 --- a/models/image_segmentation_efficientsam/example_outputs/example2.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:dfe6860d701b8b707a96d69b6bfc33fd05167168fbb46594f6377ad4e9c1733e -size 1917383 diff --git a/models/image_segmentation_efficientsam/example_outputs/sam_present.gif b/models/image_segmentation_efficientsam/example_outputs/sam_present.gif deleted file mode 100644 index 403a2817..00000000 --- a/models/image_segmentation_efficientsam/example_outputs/sam_present.gif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ab75c654d4368d1f4762fc71af35c02b6f0a3e21dca4530d22f92fff4134890c -size 103918 diff --git a/models/image_segmentation_efficientsam/image_segmentation_efficientsam_ti_2024may.onnx b/models/image_segmentation_efficientsam/image_segmentation_efficientsam_ti_2024may.onnx deleted file mode 100644 index e6eb2a47..00000000 --- a/models/image_segmentation_efficientsam/image_segmentation_efficientsam_ti_2024may.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e3957d2cd1422855f350aa7b044f47f5b3eafada64b5904ed330b696229e2943 -size 47777193 diff --git a/models/image_segmentation_efficientsam/image_segmentation_efficientsam_ti_2025april.onnx b/models/image_segmentation_efficientsam/image_segmentation_efficientsam_ti_2025april.onnx deleted file mode 100644 index 2bf444b7..00000000 --- a/models/image_segmentation_efficientsam/image_segmentation_efficientsam_ti_2025april.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:4eb496e0a7259d435b49b66faf1754aa45a5c382a34558ddda9a8c6fe5915d77 -size 48312857 diff --git a/models/image_segmentation_efficientsam/image_segmentation_efficientsam_ti_2025april_int8.onnx b/models/image_segmentation_efficientsam/image_segmentation_efficientsam_ti_2025april_int8.onnx deleted file mode 100644 index 8f7b6907..00000000 --- a/models/image_segmentation_efficientsam/image_segmentation_efficientsam_ti_2025april_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5ecc8d59a2802c32246e68553e1cf8ce74cf74ba707b84f206eb9181ff774b4e -size 20479928 diff --git a/models/inpainting_lama/CMakeLists.txt b/models/inpainting_lama/CMakeLists.txt deleted file mode 100644 index 5eee867c..00000000 --- a/models/inpainting_lama/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ -cmake_minimum_required(VERSION 3.22.1) -project(opencv_zoo_inpainting_lama) - -set(OPENCV_VERSION "5.0.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") - -# Find OpenCV -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) - -add_executable(demo demo.cpp) -target_link_libraries(demo ${OpenCV_LIBS}) diff --git a/models/inpainting_lama/LICENSE b/models/inpainting_lama/LICENSE deleted file mode 100644 index f542a480..00000000 --- a/models/inpainting_lama/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [2021] Samsung Research - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/models/inpainting_lama/README.md b/models/inpainting_lama/README.md deleted file mode 100644 index 4a5c0856..00000000 --- a/models/inpainting_lama/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Lama - -LaMa is a very lightweight yet powerful image inpainting model. - -Notes: - -- Model source: [ONNX](https://huggingface.co/Carve/LaMa-ONNX/blob/main/lama_fp32.onnx). - -## Requirements -Install latest OpenCV >=5.0.0 and CMake >= 3.22.1 to get started with. - -## Demo - -### Python - -Run the following command to try the demo: - -```shell -# usage -python demo.py --input /path/to/image - -# get help regarding various parameters -python demo.py --help -``` - -### C++ - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# usage -./build/demo --input=/path/to/image -# get help messages -./build/demo -h -``` - -### Example outputs - -![chicky](./example_outputs/squirrel_output.jpg) - -## License - -All files in this directory are licensed under [Apache License](./LICENSE). 
- -## Reference - -- https://github.com/advimman/lama \ No newline at end of file diff --git a/models/inpainting_lama/demo.cpp b/models/inpainting_lama/demo.cpp deleted file mode 100644 index 303ee251..00000000 --- a/models/inpainting_lama/demo.cpp +++ /dev/null @@ -1,174 +0,0 @@ -/* -This sample inpaints the masked area in the given image. - -Copyright (C) 2025, Bigvision LLC. -*/ - -#include -#include - -#include -#include -#include - -using namespace cv; -using namespace dnn; -using namespace std; - -class Lama { -public: - Lama(const string& modelPath) { - loadModel(modelPath); - } - - // Function to set up the input image and process it - void process(const Mat& image, const Mat& mask, Mat& result) { - double aspectRatio = static_cast(image.rows) / static_cast(image.cols); - - Mat image_blob = blobFromImage(image, 1.0/255.0, Size(512, 512), Scalar(0, 0, 0), false, false, CV_32F); - Mat mask_blob = blobFromImage(mask, 1.0, Size(512, 512), Scalar(0), false, false); - - mask_blob = (mask_blob > 0); - mask_blob.convertTo(mask_blob, CV_32F); - mask_blob = mask_blob/255.0; - - net.setInput(image_blob, "image"); - net.setInput(mask_blob, "mask"); - - Mat output = net.forward(); - - postProcess(output, result, aspectRatio); - } -private: - Net net; - - // Load Model - void loadModel(const string modelPath) { - net = readNetFromONNX(modelPath); - net.setPreferableBackend(DNN_BACKEND_DEFAULT); - net.setPreferableTarget(DNN_TARGET_CPU); - } - - void postProcess(const Mat& output, Mat& result, double aspectRatio) { - Mat output_transposed(3, &output.size[1], CV_32F, const_cast(reinterpret_cast(output.ptr()))); - - vector channels; - for (int i = 0; i < 3; ++i) { - channels.push_back(Mat(output_transposed.size[1], output_transposed.size[2], CV_32F, - output_transposed.ptr(i))); - } - merge(channels, result); - result.convertTo(result, CV_8U); - - int h = static_cast(512 * aspectRatio); - resize(result, result, Size(512, h)); - } -}; - - -const string about = "This sample 
demonstrates image inpainting with lama inpainting technique.\n\n"; - -const string keys = - "{help h | | show help message}" - "{input i | | Path to input image}" - "{ model | inpainting_lama_2024jan.onnx | Path to the lama onnx model file }"; - -bool drawing = false; -Mat maskGray; -int brush_size = 25; - -static void drawMask(int event, int x, int y, int, void*) { - if (event == EVENT_LBUTTONDOWN) { - drawing = true; - } else if (event == EVENT_MOUSEMOVE) { - if (drawing) { - circle(maskGray, Point(x, y), brush_size, Scalar(255), -1); - } - } else if (event == EVENT_LBUTTONUP) { - drawing = false; - } -} - -int main(int argc, char **argv) -{ - CommandLineParser parser(argc, argv, keys); - - if (parser.has("help")) - { - cout<("model"); - - int height = 512; - int width = 512; - int stdSize = 20; - int stdWeight = 400; - int stdImgSize = 512; - int imgWidth = -1; // Initialization - int fontSize = 50; - int fontWeight = 500; - - FontFace fontFace("sans"); - Lama lama(model); - - Mat image = imread(parser.get("input")); - if (image.empty()) { - cerr << "Error: Input image could not be loaded." << endl; - return -1; - } - - imgWidth = min(image.rows, image.cols); - fontSize = min(fontSize, (stdSize*imgWidth)/stdImgSize); - fontWeight = min(fontWeight, (stdWeight*imgWidth)/stdImgSize); - - maskGray = Mat::zeros(image.size(), CV_8U); - - namedWindow("Draw Mask"); - setMouseCallback("Draw Mask", drawMask); - - const string label = "Draw the mask on the image. 
Press space bar when done "; - - for(;;) { - Mat displayImage = image.clone(); - Mat overlay = image.clone(); - - double alpha = 0.5; - Rect r = getTextSize(Size(), label, Point(), fontFace, fontSize, fontWeight); - r.height += 2 * fontSize; // padding - r.width += 10; // padding - rectangle(overlay, r, Scalar::all(255), FILLED); - addWeighted(overlay, alpha, displayImage, 1 - alpha, 0, displayImage); - putText(displayImage, label, Point(10, fontSize), Scalar(0,0,0), fontFace, fontSize, fontWeight); - putText(displayImage, "Press 'i' to increase and 'd' to decrease brush size", Point(10, 2*fontSize), Scalar(0,0,0), fontFace, fontSize, fontWeight); - - displayImage.setTo(Scalar(255, 255, 255), maskGray > 0); // Highlight mask area - imshow("Draw Mask", displayImage); - - char key = waitKey(1); - if (key == 'i') { - brush_size += 1; - cout << "Brush size increased to " << brush_size << endl; - } else if (key == 'd') { - brush_size = max(1, brush_size - 1); - cout << "Brush size decreased to " << brush_size << endl; - } else if (key == ' ') { - break; - } else if (key == 27){ - return -1; - } - } - destroyAllWindows(); - - Mat result; - lama.process(image, maskGray, result); - - imshow("Inpainted Output", result); - waitKey(0); - - return 0; -} diff --git a/models/inpainting_lama/demo.py b/models/inpainting_lama/demo.py deleted file mode 100644 index 82576abd..00000000 --- a/models/inpainting_lama/demo.py +++ /dev/null @@ -1,87 +0,0 @@ -import cv2 as cv -import numpy as np -import argparse -from lama import Lama - -def get_args_parser(func_args): - parser = argparse.ArgumentParser(add_help=False) - parser.add_argument('--input', help='Path to input image', default=0, required=False) - parser.add_argument('--model', help='Path to lama onnx', default='inpainting_lama_2025jan.onnx', required=False) - - parser = argparse.ArgumentParser(parents=[parser], - description='', formatter_class=argparse.RawTextHelpFormatter) - return parser.parse_args(func_args) - -drawing = 
False -mask_gray = None -brush_size = 15 - -def draw_mask(event, x, y, flags, param): - global drawing, mask_gray, brush_size - if event == cv.EVENT_LBUTTONDOWN: - drawing = True - elif event == cv.EVENT_MOUSEMOVE: - if drawing: - cv.circle(mask_gray, (x, y), brush_size, (255), thickness=-1) - elif event == cv.EVENT_LBUTTONUP: - drawing = False - -def main(func_args=None): - global mask_gray, brush_size - args = get_args_parser(func_args) - - lama = Lama(modelPath=args.model) - input_image = cv.imread(args.input) - mask_gray = np.zeros((input_image.shape[0], input_image.shape[1]), dtype=np.uint8) - - stdSize = 0.6 - stdWeight = 2 - stdImgSize = 512 - imgWidth = min(input_image.shape[:2]) - fontSize = min(1.5, (stdSize*imgWidth)/stdImgSize) - fontThickness = max(1,(stdWeight*imgWidth)//stdImgSize) - - cv.namedWindow("Draw Mask") - cv.setMouseCallback("Draw Mask", draw_mask) - - label = "Draw the mask on the image. Press space bar when done." - labelSize, _ = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, fontSize, fontThickness) - while True: - display_image = input_image.copy() - overlay = input_image.copy() - - alpha = 0.5 - cv.rectangle(overlay, (0, 0), (labelSize[0]+10, labelSize[1]+int(30*fontSize)), (255, 255, 255), cv.FILLED) - cv.addWeighted(overlay, alpha, display_image, 1 - alpha, 0, display_image) - - cv.putText(display_image, label, (10, int(25*fontSize)), cv.FONT_HERSHEY_SIMPLEX, fontSize, (0, 0, 0), fontThickness) - cv.putText(display_image, "Press 'i' to increase and 'd' to decrease brush size.", (10, int(50*fontSize)), cv.FONT_HERSHEY_SIMPLEX, fontSize, (0, 0, 0), fontThickness) - display_image[mask_gray > 0] = [255, 255, 255] - cv.imshow("Draw Mask", display_image) - - key = cv.waitKey(1) & 0xFF - if key == ord('i'): # Increase brush size - brush_size += 1 - print(f"Brush size increased to {brush_size}") - elif key == ord('d'): # Decrease brush size - brush_size = max(1, brush_size - 1) - print(f"Brush size decreased to {brush_size}") - elif key == 
ord(' '): # Press space bar to finish drawing - break - elif key == 27: - exit() - cv.destroyAllWindows() - - tm = cv.TickMeter() - tm.start() - result = lama.infer(input_image, mask_gray) - tm.stop() - label = 'Inference time: {:.2f} ms'.format(tm.getTimeMilli()) - cv.putText(result, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0)) - - cv.imshow("Inpainted Output", result) - cv.waitKey(0) - cv.destroyAllWindows() - -if __name__ == '__main__': - main() diff --git a/models/inpainting_lama/example_outputs/squirrel.jpg b/models/inpainting_lama/example_outputs/squirrel.jpg deleted file mode 100644 index 0a3909e3..00000000 --- a/models/inpainting_lama/example_outputs/squirrel.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:20bb6e8ae96918a36c9886b6d48e54eedeb3948591e1485c206bc1dc60c8dc8b -size 62311 diff --git a/models/inpainting_lama/example_outputs/squirrel_output.jpg b/models/inpainting_lama/example_outputs/squirrel_output.jpg deleted file mode 100644 index 982f019e..00000000 --- a/models/inpainting_lama/example_outputs/squirrel_output.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:aaa765b3ef286f8de34efc7302e49b078d720c3eb6adf79ee8a2df73f3889f52 -size 63086 diff --git a/models/inpainting_lama/inpainting_lama_2025jan.onnx b/models/inpainting_lama/inpainting_lama_2025jan.onnx deleted file mode 100644 index 425f3a0e..00000000 --- a/models/inpainting_lama/inpainting_lama_2025jan.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7df918ac3921d3daf0aae1d219776cf0dc4e4935f035af81841b40adcf74fdf2 -size 92591623 diff --git a/models/inpainting_lama/lama.py b/models/inpainting_lama/lama.py deleted file mode 100644 index c242ef0c..00000000 --- a/models/inpainting_lama/lama.py +++ /dev/null @@ -1,43 +0,0 @@ -import cv2 as cv -import numpy as np - -class Lama: - def __init__(self, modelPath='inpainting_lama_2025jan.onnx', backendId=0, targetId=0): - 
self._modelPath = modelPath - self._backendId = backendId - self._targetId = targetId - - # Load the model - self._model = cv.dnn.readNetFromONNX(self._modelPath) - self.setBackendAndTarget(self._backendId, self._targetId) - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - def infer(self, image, mask): - image_blob = cv.dnn.blobFromImage(image, 0.00392, (512, 512), (0,0,0), False, False) - mask_blob = cv.dnn.blobFromImage(mask, scalefactor=1.0, size=(512, 512), mean=(0,), swapRB=False, crop=False) - mask_blob = (mask_blob > 0).astype(np.float32) - - self._model.setInput(image_blob, "image") - self._model.setInput(mask_blob, "mask") - - output = self._model.forward() - - # Postprocessing - aspect_ratio = image.shape[0]/image.shape[1] - result = output[0] - result = np.transpose(result, (1, 2, 0)) - result = (result).astype(np.uint8) - width = result.shape[1] - height = int(width*aspect_ratio) - result = cv.resize(result, (width, height)) - - return result diff --git a/models/license_plate_detection_yunet/LICENSE b/models/license_plate_detection_yunet/LICENSE deleted file mode 100644 index 5e53223d..00000000 --- a/models/license_plate_detection_yunet/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright (c) 2022 WATRIX - Author: Dong Xu - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/models/license_plate_detection_yunet/README.md b/models/license_plate_detection_yunet/README.md deleted file mode 100644 index df1cbceb..00000000 --- a/models/license_plate_detection_yunet/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# License Plate Detection with YuNet - -This model is contributed by Dong Xu (徐栋) from [watrix.ai](watrix.ai) (银河水滴). - -Please note that the model is trained with Chinese license plates, so the detection results of other license plates with this model may be limited. - -**Note**: -- `license_plate_detection_lpd_yunet_2023mar_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. 
- -## Demo - -Run the following command to try the demo: - -```shell -# detect on camera input -python demo.py -# detect on an image -python demo.py --input /path/to/image -v -# get help regarding various parameters -python demo.py --help -``` - -### Example outputs - -![lpd](./example_outputs/lpd_yunet_demo.gif) - -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE) - -## Reference - -- https://github.com/ShiqiYu/libfacedetection.train diff --git a/models/license_plate_detection_yunet/demo.py b/models/license_plate_detection_yunet/demo.py deleted file mode 100644 index 066b63a4..00000000 --- a/models/license_plate_detection_yunet/demo.py +++ /dev/null @@ -1,130 +0,0 @@ -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from lpd_yunet import LPD_YuNet - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser(description='LPD-YuNet for License Plate Detection') -parser.add_argument('--input', '-i', type=str, - help='Usage: Set path to the input image. 
Omit for using default camera.') -parser.add_argument('--model', '-m', type=str, default='license_plate_detection_lpd_yunet_2023mar.onnx', - help='Usage: Set model path, defaults to license_plate_detection_lpd_yunet_2023mar.onnx.') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--conf_threshold', type=float, default=0.9, - help='Usage: Set the minimum needed confidence for the model to identify a license plate, defaults to 0.9. Smaller values may result in faster detection, but will limit accuracy. Filter out faces of confidence < conf_threshold.') -parser.add_argument('--nms_threshold', type=float, default=0.3, - help='Usage: Suppress bounding boxes of iou >= nms_threshold. Default = 0.3. Suppress bounding boxes of iou >= nms_threshold.') -parser.add_argument('--top_k', type=int, default=5000, - help='Usage: Keep top_k bounding boxes before NMS.') -parser.add_argument('--keep_top_k', type=int, default=750, - help='Usage: Keep keep_top_k bounding boxes after NMS.') -parser.add_argument('--save', '-s', action='store_true', - help='Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.') -parser.add_argument('--vis', '-v', action='store_true', - help='Usage: Specify to open a new window to show results. 
Invalid in case of camera input.') -args = parser.parse_args() - -def visualize(image, dets, line_color=(0, 255, 0), text_color=(0, 0, 255), fps=None): - output = image.copy() - - if fps is not None: - cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color) - - for det in dets: - bbox = det[:-1].astype(np.int32) - x1, y1, x2, y2, x3, y3, x4, y4 = bbox - - # Draw the border of license plate - cv.line(output, (x1, y1), (x2, y2), line_color, 2) - cv.line(output, (x2, y2), (x3, y3), line_color, 2) - cv.line(output, (x3, y3), (x4, y4), line_color, 2) - cv.line(output, (x4, y4), (x1, y1), line_color, 2) - - return output - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - - # Instantiate LPD-YuNet - model = LPD_YuNet(modelPath=args.model, - confThreshold=args.conf_threshold, - nmsThreshold=args.nms_threshold, - topK=args.top_k, - keepTopK=args.keep_top_k, - backendId=backend_id, - targetId=target_id) - - # If input is an image - if args.input is not None: - image = cv.imread(args.input) - h, w, _ = image.shape - - # Inference - model.setInputSize([w, h]) - results = model.infer(image) - - # Print results - print('{} license plates detected.'.format(results.shape[0])) - - # Draw results on the input image - image = visualize(image, results) - - # Save results if save is true - if args.save: - print('Resutls saved to result.jpg') - cv.imwrite('result.jpg', image) - - # Visualize results in a new window - if args.vis: - cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) - cv.imshow(args.input, image) - cv.waitKey(0) - else: # Omit input to call default camera - deviceId = 0 - cap = cv.VideoCapture(deviceId) - w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH)) - h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) - model.setInputSize([w, h]) - - tm = cv.TickMeter() - while cv.waitKey(1) < 0: - hasFrame, frame = cap.read() - if not hasFrame: - print('No frames 
grabbed!') - break - - # Inference - tm.start() - results = model.infer(frame) # results is a tuple - tm.stop() - - # Draw results on the input image - frame = visualize(frame, results, fps=tm.getFPS()) - - # Visualize results in a new Window - cv.imshow('LPD-YuNet Demo', frame) - - tm.reset() diff --git a/models/license_plate_detection_yunet/example_outputs/lpd_yunet_demo.gif b/models/license_plate_detection_yunet/example_outputs/lpd_yunet_demo.gif deleted file mode 100644 index f62dc8f1..00000000 --- a/models/license_plate_detection_yunet/example_outputs/lpd_yunet_demo.gif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e878ac62d49fca94f7eccaa5ac0b60e97508ef8225744a6a898f1bc833cee314 -size 300669 diff --git a/models/license_plate_detection_yunet/example_outputs/result-1.jpg b/models/license_plate_detection_yunet/example_outputs/result-1.jpg deleted file mode 100644 index 6f371d3b..00000000 --- a/models/license_plate_detection_yunet/example_outputs/result-1.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ff4f66031aa7ac82f1e218791c89e0655f3bdaf226a2b7272f0d48b5a62cb083 -size 58506 diff --git a/models/license_plate_detection_yunet/example_outputs/result-2.jpg b/models/license_plate_detection_yunet/example_outputs/result-2.jpg deleted file mode 100644 index 0b876b35..00000000 --- a/models/license_plate_detection_yunet/example_outputs/result-2.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b6a18a5593bdd3794d7c484a9eb1a97f418d0daa5a35938d092e805a10c2df44 -size 55650 diff --git a/models/license_plate_detection_yunet/example_outputs/result-3.jpg b/models/license_plate_detection_yunet/example_outputs/result-3.jpg deleted file mode 100644 index 47f0ba0b..00000000 --- a/models/license_plate_detection_yunet/example_outputs/result-3.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:4e9c8cc2ff8272075b73c1352fc93fb5b802737d2a89eefee47859f9737e5640 -size 63523 diff --git a/models/license_plate_detection_yunet/example_outputs/result-4.jpg b/models/license_plate_detection_yunet/example_outputs/result-4.jpg deleted file mode 100644 index f9afaf44..00000000 --- a/models/license_plate_detection_yunet/example_outputs/result-4.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6257486bd6e08c6c1fd80874ad7cc8be2d1ed06e288d16670a04b9b8acb18530 -size 52606 diff --git a/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar.onnx b/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar.onnx deleted file mode 100644 index 8e7b5cc8..00000000 --- a/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6d4978a7b6d25514d5e24811b82bfb511d166bdd8ca3b03aa63c1623d4d039c7 -size 4146213 diff --git a/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar_int8.onnx b/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar_int8.onnx deleted file mode 100644 index 94c15dc1..00000000 --- a/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d67982a014fe93ad04612f565ed23ca010dcb0fd925d880ef0edf9cd7bdf931a -size 1087142 diff --git a/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar_int8bq.onnx b/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar_int8bq.onnx deleted file mode 100644 index 7fee23c0..00000000 --- a/models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:8a346e9db6a085a79848903a95cb902b2ab01d47972057f1cf71ede095410b49 -size 1185866 diff --git a/models/license_plate_detection_yunet/lpd_yunet.py b/models/license_plate_detection_yunet/lpd_yunet.py deleted file mode 100644 index 917e58a3..00000000 --- a/models/license_plate_detection_yunet/lpd_yunet.py +++ /dev/null @@ -1,136 +0,0 @@ -from itertools import product - -import numpy as np -import cv2 as cv - -class LPD_YuNet: - def __init__(self, modelPath, inputSize=[320, 240], confThreshold=0.8, nmsThreshold=0.3, topK=5000, keepTopK=750, backendId=0, targetId=0): - self.model_path = modelPath - self.input_size = np.array(inputSize) - self.confidence_threshold=confThreshold - self.nms_threshold = nmsThreshold - self.top_k = topK - self.keep_top_k = keepTopK - self.backend_id = backendId - self.target_id = targetId - - self.output_names = ['loc', 'conf', 'iou'] - self.min_sizes = [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]] - self.steps = [8, 16, 32, 64] - self.variance = [0.1, 0.2] - - # load model - self.model = cv.dnn.readNet(self.model_path) - # set backend and target - self.model.setPreferableBackend(self.backend_id) - self.model.setPreferableTarget(self.target_id) - # generate anchors/priorboxes - self._priorGen() - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self.backend_id = backendId - self.target_id = targetId - self.model.setPreferableBackend(self.backend_id) - self.model.setPreferableTarget(self.target_id) - - def setInputSize(self, inputSize): - self.input_size = inputSize - # re-generate anchors/priorboxes - self._priorGen() - - def _preprocess(self, image): - return cv.dnn.blobFromImage(image) - - def infer(self, image): - assert image.shape[0] == self.input_size[1], '{} (height of input image) != {} (preset height)'.format(image.shape[0], self.input_size[1]) - assert image.shape[1] == self.input_size[0], '{} (width of input image) != {} (preset 
width)'.format(image.shape[1], self.input_size[0]) - - # Preprocess - inputBlob = self._preprocess(image) - - # Forward - self.model.setInput(inputBlob) - outputBlob = self.model.forward(self.output_names) - - # Postprocess - results = self._postprocess(outputBlob) - - return results - - def _postprocess(self, blob): - # Decode - dets = self._decode(blob) - - # NMS - keepIdx = cv.dnn.NMSBoxes( - bboxes=dets[:, 0:4].tolist(), - scores=dets[:, -1].tolist(), - score_threshold=self.confidence_threshold, - nms_threshold=self.nms_threshold, - top_k=self.top_k - ) # box_num x class_num - if len(keepIdx) > 0: - dets = dets[keepIdx] - return dets[:self.keep_top_k] - else: - return np.empty(shape=(0, 9)) - - def _priorGen(self): - w, h = self.input_size - feature_map_2th = [int(int((h + 1) / 2) / 2), - int(int((w + 1) / 2) / 2)] - feature_map_3th = [int(feature_map_2th[0] / 2), - int(feature_map_2th[1] / 2)] - feature_map_4th = [int(feature_map_3th[0] / 2), - int(feature_map_3th[1] / 2)] - feature_map_5th = [int(feature_map_4th[0] / 2), - int(feature_map_4th[1] / 2)] - feature_map_6th = [int(feature_map_5th[0] / 2), - int(feature_map_5th[1] / 2)] - - feature_maps = [feature_map_3th, feature_map_4th, - feature_map_5th, feature_map_6th] - - priors = [] - for k, f in enumerate(feature_maps): - min_sizes = self.min_sizes[k] - for i, j in product(range(f[0]), range(f[1])): # i->h, j->w - for min_size in min_sizes: - s_kx = min_size / w - s_ky = min_size / h - - cx = (j + 0.5) * self.steps[k] / w - cy = (i + 0.5) * self.steps[k] / h - - priors.append([cx, cy, s_kx, s_ky]) - self.priors = np.array(priors, dtype=np.float32) - - def _decode(self, blob): - loc, conf, iou = blob - # get score - cls_scores = conf[:, 1] - iou_scores = iou[:, 0] - # clamp - _idx = np.where(iou_scores < 0.) - iou_scores[_idx] = 0. - _idx = np.where(iou_scores > 1.) - iou_scores[_idx] = 1. 
- scores = np.sqrt(cls_scores * iou_scores) - scores = scores[:, np.newaxis] - - scale = self.input_size - - # get four corner points for bounding box - bboxes = np.hstack(( - (self.priors[:, 0:2] + loc[:, 4: 6] * self.variance[0] * self.priors[:, 2:4]) * scale, - (self.priors[:, 0:2] + loc[:, 6: 8] * self.variance[0] * self.priors[:, 2:4]) * scale, - (self.priors[:, 0:2] + loc[:, 10:12] * self.variance[0] * self.priors[:, 2:4]) * scale, - (self.priors[:, 0:2] + loc[:, 12:14] * self.variance[0] * self.priors[:, 2:4]) * scale - )) - - dets = np.hstack((bboxes, scores)) - return dets diff --git a/models/object_detection_nanodet/CMakeLists.txt b/models/object_detection_nanodet/CMakeLists.txt deleted file mode 100644 index 332f487d..00000000 --- a/models/object_detection_nanodet/CMakeLists.txt +++ /dev/null @@ -1,32 +0,0 @@ -cmake_minimum_required(VERSION 3.24) -set(project_name "opencv_zoo_object_detection_nanodet") - -PROJECT (${project_name}) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) -# Find OpenCV, you may need to set OpenCV_DIR variable -# to the absolute path to the directory containing OpenCVConfig.cmake file -# via the command line or GUI - -file(GLOB SourceFile - "demo.cpp") -# If the package has been found, several variables will -# be set, you can find the full list with descriptions -# in the OpenCVConfig.cmake file. 
-# Print some message showing some of them -message(STATUS "OpenCV library status:") -message(STATUS " config: ${OpenCV_DIR}") -message(STATUS " version: ${OpenCV_VERSION}") -message(STATUS " libraries: ${OpenCV_LIBS}") -message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}") - -# Declare the executable target built from your sources -add_executable(${project_name} ${SourceFile}) - -# Set C++ compilation standard to C++11 -set(CMAKE_CXX_STANDARD 11) - -# Link your application with OpenCV libraries -target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS}) diff --git a/models/object_detection_nanodet/LICENSE b/models/object_detection_nanodet/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/models/object_detection_nanodet/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/models/object_detection_nanodet/README.md b/models/object_detection_nanodet/README.md deleted file mode 100644 index 533c0938..00000000 --- a/models/object_detection_nanodet/README.md +++ /dev/null @@ -1,142 +0,0 @@ -# Nanodet - -Nanodet: NanoDet is a FCOS-style one-stage anchor-free object detection model which using Generalized Focal Loss as classification and regression loss.In NanoDet-Plus, we propose a novel label assignment strategy with a simple assign guidance module (AGM) and a dynamic soft label assigner (DSLA) to solve the optimal label assignment problem in lightweight model training. - -**Note**: -- This version of nanodet: Nanodet-m-plus-1.5x_416 -- `object_detection_nanodet_2022nov_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. - - -## Demo - -### Python - -Run the following command to try the demo: -```shell -# detect on camera input -python demo.py -# detect on an image -python demo.py --input /path/to/image -v -``` -Note: -- image result saved as "result.jpg" - -### C++ - -Install latest OpenCV and CMake >= 3.24.0 to get started with: - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . 
-cmake --build build - -# detect on camera input -./build/opencv_zoo_object_detection_nanodet -# detect on an image -./build/opencv_zoo_object_detection_nanodet -i=/path/to/image -# get help messages -./build/opencv_zoo_object_detection_nanodet -h -``` - - -## Results - -Here are some of the sample results that were observed using the model, - -![test1_res.jpg](./example_outputs/1_res.jpg) -![test2_res.jpg](./example_outputs/2_res.jpg) - -Check [benchmark/download_data.py](../../benchmark/download_data.py) for the original images. - -Video inference result, -![WebCamR.gif](./example_outputs/WebCamR.gif) - -## Model metrics: - -The model is evaluated on [COCO 2017 val](https://cocodataset.org/#download). Results are showed below: - - - -
Average Precision Average Recall
- -| area | IoU | Average Precision(AP) | -|:-------|:------|:------------------------| -| all | 0.50:0.95 | 0.304 | -| all | 0.50 | 0.459 | -| all | 0.75 | 0.317 | -| small | 0.50:0.95 | 0.107 | -| medium | 0.50:0.95 | 0.322 | -| large | 0.50:0.95 | 0.478 | - - - - area | IoU | Average Recall | -|:-------|:------|:----------------| -| all | 0.50:0.95 | 0.278 | -| all | 0.50:0.95 | 0.434 | -| all | 0.50:0.95 | 0.462 | -| small | 0.50:0.95 | 0.198 | -| medium | 0.50:0.95 | 0.510 | -| large | 0.50:0.95 | 0.702 | -
- -| class | AP50 | mAP | class | AP50 | mAP | -|:--------------|:-------|:------|:---------------|:-------|:------| -| person | 67.5 | 41.8 | bicycle | 35.4 | 18.8 | -| car | 45.0 | 25.4 | motorcycle | 58.9 | 33.1 | -| airplane | 77.3 | 58.9 | bus | 68.8 | 56.4 | -| train | 81.1 | 60.5 | truck | 38.6 | 24.7 | -| boat | 35.5 | 16.7 | traffic light | 30.5 | 14.0 | -| fire hydrant | 69.8 | 54.5 | stop sign | 60.9 | 54.6 | -| parking meter | 55.1 | 38.5 | bench | 26.8 | 15.9 | -| bird | 38.3 | 23.6 | cat | 82.5 | 62.1 | -| dog | 67.0 | 51.4 | horse | 64.3 | 44.2 | -| sheep | 57.7 | 35.8 | cow | 61.2 | 39.9 | -| elephant | 79.9 | 56.2 | bear | 81.8 | 63.0 | -| zebra | 85.4 | 59.5 | giraffe | 84.1 | 59.9 | -| backpack | 12.4 | 5.9 | umbrella | 46.5 | 28.8 | -| handbag | 8.4 | 3.7 | tie | 35.2 | 19.6 | -| suitcase | 38.1 | 23.8 | frisbee | 60.7 | 43.9 | -| skis | 30.5 | 14.5 | snowboard | 32.3 | 18.2 | -| sports ball | 37.6 | 24.5 | kite | 51.1 | 30.4 | -| baseball bat | 28.9 | 13.6 | baseball glove | 40.1 | 21.6 | -| skateboard | 59.4 | 35.2 | surfboard | 47.9 | 26.6 | -| tennis racket | 55.2 | 30.5 | bottle | 34.7 | 20.2 | -| wine glass | 27.8 | 16.3 | cup | 35.5 | 23.7 | -| fork | 25.9 | 14.8 | knife | 10.9 | 5.6 | -| spoon | 8.7 | 4.1 | bowl | 42.8 | 29.4 | -| banana | 35.5 | 18.5 | apple | 19.4 | 12.9 | -| sandwich | 46.7 | 33.4 | orange | 35.2 | 25.9 | -| broccoli | 36.4 | 19.1 | carrot | 30.9 | 17.8 | -| hot dog | 42.7 | 29.3 | pizza | 61.0 | 44.9 | -| donut | 47.3 | 34.0 | cake | 39.9 | 24.4 | -| chair | 28.8 | 16.1 | couch | 60.5 | 42.6 | -| potted plant | 29.0 | 15.3 | bed | 63.3 | 46.0 | -| dining table | 39.6 | 27.5 | toilet | 71.3 | 55.3 | -| tv | 66.5 | 48.1 | laptop | 62.6 | 46.9 | -| mouse | 63.5 | 44.1 | remote | 19.8 | 10.3 | -| keyboard | 62.1 | 41.5 | cell phone | 33.7 | 22.8 | -| microwave | 54.9 | 39.6 | oven | 48.1 | 30.4 | -| toaster | 30.0 | 16.4 | sink | 44.5 | 27.8 | -| refrigerator | 63.2 | 46.1 | book | 18.4 | 7.3 | -| clock | 57.8 | 35.8 | 
vase | 33.7 | 22.1 | -| scissors | 27.8 | 17.8 | teddy bear | 54.1 | 35.4 | -| hair drier | 2.9 | 1.1 | toothbrush | 13.1 | 8.2 | - -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). - -#### Contributor Details - -- Google Summer of Code'22 -- Contributor: Sri Siddarth Chakaravarthy -- Github Profile: https://github.com/Sidd1609 -- Organisation: OpenCV -- Project: Lightweight object detection models using OpenCV - -## Reference - -- Nanodet: https://zhuanlan.zhihu.com/p/306530300 -- Nanodet Plus: https://zhuanlan.zhihu.com/p/449912627 -- Nanodet weight and scripts for training: https://github.com/RangiLyu/nanodet diff --git a/models/object_detection_nanodet/demo.cpp b/models/object_detection_nanodet/demo.cpp deleted file mode 100644 index cce9165c..00000000 --- a/models/object_detection_nanodet/demo.cpp +++ /dev/null @@ -1,503 +0,0 @@ -#include -#include -#include - -#include - -using namespace std; -using namespace cv; -using namespace dnn; - -const auto backendTargetPairs = vector> -{ - {DNN_BACKEND_OPENCV, DNN_TARGET_CPU}, - {DNN_BACKEND_CUDA, DNN_TARGET_CUDA}, - {DNN_BACKEND_CUDA, DNN_TARGET_CUDA_FP16}, - {DNN_BACKEND_TIMVX, DNN_TARGET_NPU}, - {DNN_BACKEND_CANN, DNN_TARGET_NPU} -}; - -const vector nanodetClassLabels = -{ - "person", "bicycle", "car", "motorcycle", "airplane", "bus", - "train", "truck", "boat", "traffic light", "fire hydrant", - "stop sign", "parking meter", "bench", "bird", "cat", "dog", - "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", - "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", - "skis", "snowboard", "sports ball", "kite", "baseball bat", - "baseball glove", "skateboard", "surfboard", "tennis racket", - "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", - "banana", "apple", "sandwich", "orange", "broccoli", "carrot", - "hot dog", "pizza", "donut", "cake", "chair", "couch", - "potted plant", "bed", "dining table", "toilet", "tv", "laptop", - 
"mouse", "remote", "keyboard", "cell phone", "microwave", - "oven", "toaster", "sink", "refrigerator", "book", "clock", - "vase", "scissors", "teddy bear", "hair drier", "toothbrush" -}; - -class NanoDet -{ -public: - NanoDet(const String& modelPath, const float probThresh = 0.35, const float iouThresh = 0.6, - const Backend bId = DNN_BACKEND_DEFAULT, const Target tId = DNN_TARGET_CPU) : - modelPath(modelPath), probThreshold(probThresh), - iouThreshold(iouThresh), backendId(bId), targetId(tId), - imageShape(416, 416), regMax(7) - { - this->strides = { 8, 16, 32, 64 }; - this->net = readNet(modelPath); - this->net.setPreferableBackend(bId); - this->net.setPreferableTarget(tId); - this->project = Mat::zeros(1, this->regMax + 1, CV_32F); - for (size_t i = 0; i <= this->regMax; ++i) - { - this->project.at(0, i) = static_cast(i); - } - this->mean = Scalar(103.53, 116.28, 123.675); - this->std = Scalar(1.0 / 57.375, 1.0 / 57.12, 1.0 / 58.395); - this->generateAnchors(); - } - - Mat preProcess(const Mat& inputImage) - { - Image2BlobParams paramNanodet; - paramNanodet.datalayout = DNN_LAYOUT_NCHW; - paramNanodet.ddepth = CV_32F; - paramNanodet.mean = this->mean; - paramNanodet.scalefactor = this->std; - paramNanodet.size = this->imageShape; - Mat blob; - blobFromImageWithParams(inputImage, blob, paramNanodet); - return blob; - } - - Mat infer(const Mat& sourceImage) - { - Mat blob = this->preProcess(sourceImage); - this->net.setInput(blob); - vector modelOutput; - this->net.forward(modelOutput, this->net.getUnconnectedOutLayersNames()); - Mat preds = this->postProcess(modelOutput); - return preds; - } - - Mat reshapeIfNeeded(const Mat& input) - { - if (input.dims == 3) - { - return input.reshape(0, input.size[1]); - } - return input; - } - - Mat softmaxActivation(const Mat& input) - { - Mat x_exp, x_sum, x_repeat_sum, result; - exp(input.reshape(0, input.total() / (this->regMax + 1)), x_exp); - reduce(x_exp, x_sum, 1, REDUCE_SUM, CV_32F); - repeat(x_sum, 1, this->regMax + 
1, x_repeat_sum); - divide(x_exp, x_repeat_sum, result); - return result; - } - - Mat applyProjection(Mat& input) - { - Mat repeat_project; - repeat(this->project, input.rows, 1, repeat_project); - multiply(input, repeat_project, input); - reduce(input, input, 1, REDUCE_SUM, CV_32F); - Mat projection = input.col(0).clone(); - return projection.reshape(0, projection.total() / 4); - } - - void preNMS(Mat& anchors, Mat& bbox_pred, Mat& cls_score, const int nms_pre = 1000) - { - Mat max_scores; - reduce(cls_score, max_scores, 1, REDUCE_MAX); - - Mat indices; - sortIdx(max_scores.t(), indices, SORT_DESCENDING); - - Mat indices_float = indices.colRange(0, nms_pre); - Mat selected_anchors, selected_bbox_pred, selected_cls_score; - for (int j = 0; j < indices_float.cols; ++j) - { - selected_anchors.push_back(anchors.row(indices_float.at(j))); - selected_bbox_pred.push_back(bbox_pred.row(indices_float.at(j))); - selected_cls_score.push_back(cls_score.row(indices_float.at(j))); - } - - anchors = selected_anchors; - bbox_pred = selected_bbox_pred; - cls_score = selected_cls_score; - } - - void clipBoundingBoxes(Mat& x1, Mat& y1, Mat& x2, Mat& y2) - { - Mat zeros = Mat::zeros(x1.size(), x1.type()); - x1 = min(max(x1, zeros), Scalar(this->imageShape.width - 1)); - y1 = min(max(y1, zeros), Scalar(this->imageShape.height - 1)); - x2 = min(max(x2, zeros), Scalar(this->imageShape.width - 1)); - y2 = min(max(y2, zeros), Scalar(this->imageShape.height - 1)); - } - - Mat calculateBoundingBoxes(const Mat& anchors, const Mat& bbox_pred) - { - Mat x1 = anchors.col(0) - bbox_pred.col(0); - Mat y1 = anchors.col(1) - bbox_pred.col(1); - Mat x2 = anchors.col(0) + bbox_pred.col(2); - Mat y2 = anchors.col(1) + bbox_pred.col(3); - - clipBoundingBoxes(x1, y1, x2, y2); - - Mat bboxes; - hconcat(vector{x1, y1, x2, y2}, bboxes); - - return bboxes; - } - - vector bboxMatToRect2d(const Mat& bboxes) - { - Mat bboxes_wh(bboxes.clone()); - bboxes_wh.colRange(2, 4) = bboxes_wh.colRange(2, 4) -= 
bboxes_wh.colRange(0, 2); - vector boxesXYXY; - for (size_t i = 0; i < bboxes_wh.rows; i++) - { - boxesXYXY.emplace_back(bboxes.at(i, 0), - bboxes.at(i, 1), - bboxes.at(i, 2), - bboxes.at(i, 3)); - } - return boxesXYXY; - } - - Mat postProcess(const vector& preds) - { - vector cls_scores, bbox_preds; - for (size_t i = 0; i < preds.size(); i += 2) - { - cls_scores.push_back(preds[i]); - bbox_preds.push_back(preds[i + 1]); - } - - vector bboxes_mlvl; - vector scores_mlvl; - - for (size_t i = 0; i < strides.size(); ++i) - { - if (i >= cls_scores.size() || i >= bbox_preds.size()) continue; - // Extract necessary data - int stride = strides[i]; - Mat cls_score = reshapeIfNeeded(cls_scores[i]); - Mat bbox_pred = reshapeIfNeeded(bbox_preds[i]); - Mat anchors = anchorsMlvl[i].t(); - - // Softmax activation, projection, and calculate bounding boxes - bbox_pred = softmaxActivation(bbox_pred); - bbox_pred = applyProjection(bbox_pred); - bbox_pred = stride * bbox_pred; - - const int nms_pre = 1000; - if (nms_pre > 0 && cls_score.rows > nms_pre) - { - preNMS(anchors, bbox_pred, cls_score, nms_pre); - } - - Mat bboxes = calculateBoundingBoxes(anchors, bbox_pred); - - - bboxes_mlvl.push_back(bboxes); - scores_mlvl.push_back(cls_score); - } - Mat bboxes; - Mat scores; - vconcat(bboxes_mlvl, bboxes); - vconcat(scores_mlvl, scores); - - vector boxesXYXY = bboxMatToRect2d(bboxes); - vector classIds; - vector confidences; - for (size_t i = 0; i < scores.rows; ++i) - { - Point maxLoc; - minMaxLoc(scores.row(i), nullptr, nullptr, nullptr, &maxLoc); - classIds.push_back(maxLoc.x); - confidences.push_back(scores.at(i, maxLoc.x)); - } - - vector indices; - NMSBoxesBatched(boxesXYXY, confidences, classIds, probThreshold, iouThreshold, indices); - Mat result(int(indices.size()), 6, CV_32FC1); - int row = 0; - for (auto idx : indices) - { - bboxes.rowRange(idx, idx + 1).copyTo(result(Rect(0, row, 4, 1))); - result.at(row, 4) = confidences[idx]; - result.at(row, 5) = 
static_cast(classIds[idx]); - row++; - } - if (indices.size() == 0) - { - return Mat(); - } - return result; - } - - void generateAnchors() - { - for (const int stride : strides) { - int feat_h = this->imageShape.height / stride; - int feat_w = this->imageShape.width / stride; - - vector anchors; - - for (int y = 0; y < feat_h; ++y) - { - for (int x = 0; x < feat_w; ++x) - { - float shift_x = x * stride; - float shift_y = y * stride; - float cx = shift_x + 0.5 * (stride - 1); - float cy = shift_y + 0.5 * (stride - 1); - Mat anchor_point = (Mat_(2, 1) << cx, cy); - anchors.push_back(anchor_point); - } - } - Mat anchors_mat; - hconcat(anchors, anchors_mat); - this->anchorsMlvl.push_back(anchors_mat); - } - } -private: - Net net; - String modelPath; - vector strides; - Size imageShape; - int regMax; - float probThreshold; - float iouThreshold; - Backend backendId; - Target targetId; - Mat project; - Scalar mean; - Scalar std; - vector anchorsMlvl; -}; - -// Function to resize and pad an image and return both the image and scale information -tuple> letterbox(const Mat& sourceImage, const Size& target_size = Size(416, 416)) -{ - Mat img = sourceImage.clone(); - - double top = 0, left = 0, newh = target_size.height, neww = target_size.width; - - if (img.rows != img.cols) - { - double hw_scale = static_cast(img.rows) / img.cols; - if (hw_scale > 1) - { - newh = target_size.height; - neww = static_cast(target_size.width / hw_scale); - resize(img, img, Size(neww, newh), 0, 0, INTER_AREA); - left = static_cast((target_size.width - neww) * 0.5); - copyMakeBorder(img, img, 0, 0, left, target_size.width - neww - left, BORDER_CONSTANT, Scalar(0)); - } - else - { - newh = static_cast(target_size.height * hw_scale); - neww = target_size.width; - resize(img, img, Size(neww, newh), 0, 0, INTER_AREA); - top = static_cast((target_size.height - newh) * 0.5); - copyMakeBorder(img, img, top, target_size.height - newh - top, 0, 0, BORDER_CONSTANT, Scalar(0)); - } - } - else - { - 
resize(img, img, target_size, 0, 0, INTER_AREA); - } - vector letterbox_scale = {top, left, newh, neww}; - - return make_tuple(img, letterbox_scale); -} - -// Function to scale bounding boxes back to original image coordinates -vector unletterbox(const Mat& bbox, const Size& original_image_shape, const vector& letterbox_scale) -{ - vector ret(bbox.cols); - - int h = original_image_shape.height; - int w = original_image_shape.width; - double top = letterbox_scale[0]; - double left = letterbox_scale[1]; - double newh = letterbox_scale[2]; - double neww = letterbox_scale[3]; - - if (h == w) - { - double ratio = static_cast(h) / newh; - for (int& val : ret) - { - val = static_cast(val * ratio); - } - return ret; - } - - double ratioh = static_cast(h) / newh; - double ratiow = static_cast(w) / neww; - ret[0] = max(static_cast((bbox.at(0) - left) * ratiow), 0); - ret[1] = max(static_cast((bbox.at(1) - top) * ratioh), 0); - ret[2] = min(static_cast((bbox.at(2) - left) * ratiow), w); - ret[3] = min(static_cast((bbox.at(3) - top) * ratioh), h); - - return ret; -} - -// Function to visualize predictions on an image -Mat visualize(const Mat& preds, const Mat& result_image, const vector& letterbox_scale, bool video, double fps = 0.0) -{ - Mat visualized_image = result_image.clone(); - - // Draw FPS if provided - if (fps > 0.0 && video) - { - std::ostringstream fps_stream; - fps_stream << "FPS: " << std::fixed << std::setprecision(2) << fps; - putText(visualized_image, fps_stream.str(), Point(10, 25), FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 0, 255), 2); - } - - // Draw bounding boxes and labels for each prediction - for (size_t i = 0; i < preds.rows; i++) - { - Mat pred = preds.row(i); - Mat bbox = pred.colRange(0, 4); - double conf = pred.at(4); - int classid = static_cast(pred.at(5)); - - // Convert bbox coordinates back to original image space - vector unnormalized_bbox = unletterbox(bbox, visualized_image.size(), letterbox_scale); - - // Draw bounding box - 
rectangle(visualized_image, Point(unnormalized_bbox[0], unnormalized_bbox[1]), - Point(unnormalized_bbox[2], unnormalized_bbox[3]), Scalar(0, 255, 0), 2); - - // Draw label - stringstream label; - label << nanodetClassLabels[classid] << ": " << fixed << setprecision(2) << conf; - putText(visualized_image, label.str(), Point(unnormalized_bbox[0], unnormalized_bbox[1] - 10), - FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 255, 0), 2); - } - - return visualized_image; -} - -void processImage(Mat& inputImage, NanoDet& nanodet, TickMeter& tm, bool save, bool vis, bool video) -{ - cvtColor(inputImage, inputImage, COLOR_BGR2RGB); - tuple> w = letterbox(inputImage); - Mat inputBlob = get<0>(w); - vector letterboxScale = get<1>(w); - - tm.start(); - Mat predictions = nanodet.infer(inputBlob); - tm.stop(); - if (!video) - { - cout << "Inference time: " << tm.getTimeMilli() << " ms\n"; - } - - Mat img = visualize(predictions, inputImage, letterboxScale, video, tm.getFPS()); - cvtColor(img, img, COLOR_BGR2RGB); - if (save) - { - static const string kOutputName = "result.jpg"; - imwrite(kOutputName, img); - if (!video) - { - cout << "Results saved to " + kOutputName << endl; - } - } - if (vis) - { - static const string kWinName = "model"; - imshow(kWinName, img); - } -} - - -const String keys = - "{ help h | | Print help message. }" - "{ model m | object_detection_nanodet_2022nov.onnx | Usage: Path to the model, defaults to object_detection_nanodet_2022nov.onnx }" - "{ input i | | Path to the input image. Omit for using the default camera.}" - "{ confidence | 0.35 | Class confidence }" - "{ nms | 0.6 | Enter nms IOU threshold }" - "{ save s | true | Specify to save results. This flag is invalid when using the camera. }" - "{ vis v | true | Specify to open a window for result visualization. This flag is invalid when using the camera. 
}" - "{ backend bt | 0 | Choose one of computation backends: " - "0: (default) OpenCV implementation + CPU, " - "1: CUDA + GPU (CUDA), " - "2: CUDA + GPU (CUDA FP16), " - "3: TIM-VX + NPU, " - "4: CANN + NPU}"; - -int main(int argc, char** argv) -{ - CommandLineParser parser(argc, argv, keys); - - parser.about("Use this script to run Nanodet inference using OpenCV, a contribution by Sri Siddarth Chakaravarthy as part of GSOC_2022."); - if (parser.has("help")) - { - parser.printMessage(); - return 0; - } - - string model = parser.get("model"); - string inputPath = parser.get("input"); - float confThreshold = parser.get("confidence"); - float nmsThreshold = parser.get("nms"); - bool save = parser.get("save"); - bool vis = parser.get("vis"); - int backendTargetid = parser.get("backend"); - - if (model.empty()) - { - CV_Error(Error::StsError, "Model file " + model + " not found"); - } - - NanoDet nanodet(model, confThreshold, nmsThreshold, - backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second); - - TickMeter tm; - if (parser.has("input")) - { - Mat inputImage = imread(samples::findFile(inputPath)); - static const bool kNotVideo = false; - processImage(inputImage, nanodet, tm, save, vis, kNotVideo); - waitKey(0); - } - else - { - VideoCapture cap; - cap.open(0); - if (!cap.isOpened()) - { - CV_Error(Error::StsError, "Cannot open video or file"); - } - - Mat frame; - while (waitKey(1) < 0) - { - cap >> frame; - if (frame.empty()) - { - cout << "Frame is empty" << endl; - waitKey(); - break; - } - tm.reset(); - static const bool kIsVideo = true; - processImage(frame, nanodet, tm, save, vis, kIsVideo); - } - cap.release(); - } - return 0; -} diff --git a/models/object_detection_nanodet/demo.py b/models/object_detection_nanodet/demo.py deleted file mode 100644 index ab6e980a..00000000 --- a/models/object_detection_nanodet/demo.py +++ /dev/null @@ -1,182 +0,0 @@ -import numpy as np -import cv2 as cv -import argparse - -# Check OpenCV version 
-opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from nanodet import NanoDet - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -classes = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', - 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', - 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', - 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', - 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', - 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', - 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', - 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', - 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', - 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', - 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', - 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush') - -def letterbox(srcimg, target_size=(416, 416)): - img = srcimg.copy() - - top, left, newh, neww = 0, 0, target_size[0], target_size[1] - if img.shape[0] != img.shape[1]: - hw_scale = img.shape[0] / img.shape[1] - if hw_scale > 1: - newh, neww = target_size[0], int(target_size[1] / hw_scale) - img = cv.resize(img, (neww, newh), interpolation=cv.INTER_AREA) - left = int((target_size[1] - neww) * 0.5) - img = cv.copyMakeBorder(img, 0, 0, left, 
target_size[1] - neww - left, cv.BORDER_CONSTANT, value=0) # add border - else: - newh, neww = int(target_size[0] * hw_scale), target_size[1] - img = cv.resize(img, (neww, newh), interpolation=cv.INTER_AREA) - top = int((target_size[0] - newh) * 0.5) - img = cv.copyMakeBorder(img, top, target_size[0] - newh - top, 0, 0, cv.BORDER_CONSTANT, value=0) - else: - img = cv.resize(img, target_size, interpolation=cv.INTER_AREA) - - letterbox_scale = [top, left, newh, neww] - return img, letterbox_scale - -def unletterbox(bbox, original_image_shape, letterbox_scale): - ret = bbox.copy() - - h, w = original_image_shape - top, left, newh, neww = letterbox_scale - - if h == w: - ratio = h / newh - ret = ret * ratio - return ret - - ratioh, ratiow = h / newh, w / neww - ret[0] = max((ret[0] - left) * ratiow, 0) - ret[1] = max((ret[1] - top) * ratioh, 0) - ret[2] = min((ret[2] - left) * ratiow, w) - ret[3] = min((ret[3] - top) * ratioh, h) - - return ret.astype(np.int32) - -def vis(preds, res_img, letterbox_scale, fps=None): - ret = res_img.copy() - - # draw FPS - if fps is not None: - fps_label = "FPS: %.2f" % fps - cv.putText(ret, fps_label, (10, 25), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) - - # draw bboxes and labels - for pred in preds: - bbox = pred[:4] - conf = pred[-2] - classid = pred[-1].astype(np.int32) - - # bbox - xmin, ymin, xmax, ymax = unletterbox(bbox, ret.shape[:2], letterbox_scale) - cv.rectangle(ret, (xmin, ymin), (xmax, ymax), (0, 255, 0), thickness=2) - - # label - label = "{:s}: {:.2f}".format(classes[classid], conf) - cv.putText(ret, label, (xmin, ymin - 10), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), thickness=2) - - return ret - -if __name__=='__main__': - parser = argparse.ArgumentParser(description='Nanodet inference using OpenCV an contribution by Sri Siddarth Chakaravarthy part of GSOC_2022') - parser.add_argument('--input', '-i', type=str, - help='Path to the input image. 
Omit for using default camera.') - parser.add_argument('--model', '-m', type=str, - default='object_detection_nanodet_2022nov.onnx', help="Path to the model") - parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) - parser.add_argument('--confidence', default=0.35, type=float, - help='Class confidence') - parser.add_argument('--nms', default=0.6, type=float, - help='Enter nms IOU threshold') - parser.add_argument('--save', '-s', action='store_true', - help='Specify to save results. This flag is invalid when using camera.') - parser.add_argument('--vis', '-v', action='store_true', - help='Specify to open a window for result visualization. This flag is invalid when using camera.') - args = parser.parse_args() - - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - - model = NanoDet(modelPath= args.model, - prob_threshold=args.confidence, - iou_threshold=args.nms, - backend_id=backend_id, - target_id=target_id) - - tm = cv.TickMeter() - tm.reset() - if args.input is not None: - image = cv.imread(args.input) - input_blob = cv.cvtColor(image, cv.COLOR_BGR2RGB) - - # Letterbox transformation - input_blob, letterbox_scale = letterbox(input_blob) - - # Inference - tm.start() - preds = model.infer(input_blob) - tm.stop() - print("Inference time: {:.2f} ms".format(tm.getTimeMilli())) - - img = vis(preds, image, letterbox_scale) - - if args.save: - print('Results saved to result.jpg\n') - cv.imwrite('result.jpg', img) - - if args.vis: - cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) - cv.imshow(args.input, img) - cv.waitKey(0) - - else: - print("Press any key to stop video capture") - deviceId = 0 - cap = 
cv.VideoCapture(deviceId) - - while cv.waitKey(1) < 0: - hasFrame, frame = cap.read() - if not hasFrame: - print('No frames grabbed!') - break - - input_blob = cv.cvtColor(frame, cv.COLOR_BGR2RGB) - input_blob, letterbox_scale = letterbox(input_blob) - # Inference - tm.start() - preds = model.infer(input_blob) - tm.stop() - - img = vis(preds, frame, letterbox_scale, fps=tm.getFPS()) - - cv.imshow("NanoDet Demo", img) - - tm.reset() diff --git a/models/object_detection_nanodet/example_outputs/1_res.jpg b/models/object_detection_nanodet/example_outputs/1_res.jpg deleted file mode 100644 index 642fd18c..00000000 --- a/models/object_detection_nanodet/example_outputs/1_res.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:920fb925f17720c68476fe77b396b87504081be0372662d33df0c0dcf9fc9562 -size 128531 diff --git a/models/object_detection_nanodet/example_outputs/2_res.jpg b/models/object_detection_nanodet/example_outputs/2_res.jpg deleted file mode 100644 index 1949805b..00000000 --- a/models/object_detection_nanodet/example_outputs/2_res.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f0138234ef80f63ecb51f51f651248a0157f5dda81d5b3fe390cbec42951bf99 -size 419826 diff --git a/models/object_detection_nanodet/example_outputs/3_res.jpg b/models/object_detection_nanodet/example_outputs/3_res.jpg deleted file mode 100644 index 675f320d..00000000 --- a/models/object_detection_nanodet/example_outputs/3_res.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cdf23ae1ceb2cde982c83763d74ea8317fceb3c5a091331cd5c7d39a08dda840 -size 114182 diff --git a/models/object_detection_nanodet/example_outputs/WebCamR.gif b/models/object_detection_nanodet/example_outputs/WebCamR.gif deleted file mode 100644 index 3b4dd944..00000000 --- a/models/object_detection_nanodet/example_outputs/WebCamR.gif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:169d6f224a20bef4c7a7c889d1b4e9168adf114da981edc3f9c087b7a3ec40ad -size 4814729 diff --git a/models/object_detection_nanodet/nanodet.py b/models/object_detection_nanodet/nanodet.py deleted file mode 100644 index 53af96b5..00000000 --- a/models/object_detection_nanodet/nanodet.py +++ /dev/null @@ -1,122 +0,0 @@ -import numpy as np -import cv2 - -class NanoDet: - def __init__(self, modelPath, prob_threshold=0.35, iou_threshold=0.6, backend_id=0, target_id=0): - self.strides = (8, 16, 32, 64) - self.image_shape = (416, 416) - self.reg_max = 7 - self.prob_threshold = prob_threshold - self.iou_threshold = iou_threshold - self.backend_id = backend_id - self.target_id = target_id - self.project = np.arange(self.reg_max + 1) - self.mean = np.array([103.53, 116.28, 123.675], dtype=np.float32).reshape(1, 1, 3) - self.std = np.array([57.375, 57.12, 58.395], dtype=np.float32).reshape(1, 1, 3) - self.net = cv2.dnn.readNet(modelPath) - self.net.setPreferableBackend(self.backend_id) - self.net.setPreferableTarget(self.target_id) - - self.anchors_mlvl = [] - for i in range(len(self.strides)): - featmap_size = (int(self.image_shape[0] / self.strides[i]), int(self.image_shape[1] / self.strides[i])) - stride = self.strides[i] - feat_h, feat_w = featmap_size - shift_x = np.arange(0, feat_w) * stride - shift_y = np.arange(0, feat_h) * stride - xv, yv = np.meshgrid(shift_x, shift_y) - xv = xv.flatten() - yv = yv.flatten() - cx = xv + 0.5 * (stride-1) - cy = yv + 0.5 * (stride - 1) - #anchors = np.stack((cx, cy), axis=-1) - anchors = np.column_stack((cx, cy)) - self.anchors_mlvl.append(anchors) - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self.backend_id = backendId - self.target_id = targetId - self.net.setPreferableBackend(self.backend_id) - self.net.setPreferableTarget(self.target_id) - - def pre_process(self, img): - img = img.astype(np.float32) - img = (img - self.mean) / self.std - blob = 
cv2.dnn.blobFromImage(img) - return blob - - def infer(self, srcimg): - blob = self.pre_process(srcimg) - self.net.setInput(blob) - outs = self.net.forward(self.net.getUnconnectedOutLayersNames()) - preds = self.post_process(outs) - return preds - - def post_process(self, preds): - cls_scores, bbox_preds = preds[::2], preds[1::2] - rescale = False - scale_factor = 1 - bboxes_mlvl = [] - scores_mlvl = [] - for stride, cls_score, bbox_pred, anchors in zip(self.strides, cls_scores, bbox_preds, self.anchors_mlvl): - if cls_score.ndim==3: - cls_score = cls_score.squeeze(axis=0) - if bbox_pred.ndim==3: - bbox_pred = bbox_pred.squeeze(axis=0) - - x_exp = np.exp(bbox_pred.reshape(-1, self.reg_max + 1)) - x_sum = np.sum(x_exp, axis=1, keepdims=True) - bbox_pred = x_exp / x_sum - bbox_pred = np.dot(bbox_pred, self.project).reshape(-1,4) - bbox_pred *= stride - - nms_pre = 1000 - if nms_pre > 0 and cls_score.shape[0] > nms_pre: - max_scores = cls_score.max(axis=1) - topk_inds = max_scores.argsort()[::-1][0:nms_pre] - anchors = anchors[topk_inds, :] - bbox_pred = bbox_pred[topk_inds, :] - cls_score = cls_score[topk_inds, :] - - points = anchors - distance = bbox_pred - max_shape=self.image_shape - x1 = points[:, 0] - distance[:, 0] - y1 = points[:, 1] - distance[:, 1] - x2 = points[:, 0] + distance[:, 2] - y2 = points[:, 1] + distance[:, 3] - - if max_shape is not None: - x1 = np.clip(x1, 0, max_shape[1]) - y1 = np.clip(y1, 0, max_shape[0]) - x2 = np.clip(x2, 0, max_shape[1]) - y2 = np.clip(y2, 0, max_shape[0]) - - #bboxes = np.stack([x1, y1, x2, y2], axis=-1) - bboxes = np.column_stack([x1, y1, x2, y2]) - bboxes_mlvl.append(bboxes) - scores_mlvl.append(cls_score) - - bboxes_mlvl = np.concatenate(bboxes_mlvl, axis=0) - if rescale: - bboxes_mlvl /= scale_factor - scores_mlvl = np.concatenate(scores_mlvl, axis=0) - bboxes_wh = bboxes_mlvl.copy() - bboxes_wh[:, 2:4] = bboxes_wh[:, 2:4] - bboxes_wh[:, 0:2] - classIds = np.argmax(scores_mlvl, axis=1) - confidences = 
np.max(scores_mlvl, axis=1) - - indices = cv2.dnn.NMSBoxes(bboxes_wh.tolist(), confidences.tolist(), self.prob_threshold, self.iou_threshold) - - if len(indices)>0: - det_bboxes = bboxes_mlvl[indices] - det_conf = confidences[indices] - det_classid = classIds[indices] - - return np.concatenate([det_bboxes, det_conf.reshape(-1, 1), det_classid.reshape(-1, 1)], axis=1) - else: - return np.array([]) diff --git a/models/object_detection_nanodet/object_detection_nanodet_2022nov.onnx b/models/object_detection_nanodet/object_detection_nanodet_2022nov.onnx deleted file mode 100644 index 066cb860..00000000 --- a/models/object_detection_nanodet/object_detection_nanodet_2022nov.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4b82da9944b88577175ee23a459dce2e26e6e4be573def65b1055dc2d9720186 -size 3800954 diff --git a/models/object_detection_nanodet/object_detection_nanodet_2022nov_int8.onnx b/models/object_detection_nanodet/object_detection_nanodet_2022nov_int8.onnx deleted file mode 100644 index 430fd0b4..00000000 --- a/models/object_detection_nanodet/object_detection_nanodet_2022nov_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8dd32b85f2d273e9047f1d6b59e0b2fd008b1076338107bb547ac28942cdf90b -size 1031424 diff --git a/models/object_detection_nanodet/object_detection_nanodet_2022nov_int8bq.onnx b/models/object_detection_nanodet/object_detection_nanodet_2022nov_int8bq.onnx deleted file mode 100644 index 95880ff3..00000000 --- a/models/object_detection_nanodet/object_detection_nanodet_2022nov_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8a2c877cc6f09e7dfac7a9066e33ee5ae68de530b3b994f6ee9125cff6e34d3f -size 1123958 diff --git a/models/object_detection_yolox/CMakeLists.txt b/models/object_detection_yolox/CMakeLists.txt deleted file mode 100644 index 5c373e40..00000000 --- a/models/object_detection_yolox/CMakeLists.txt +++ /dev/null @@ 
-1,29 +0,0 @@ -cmake_minimum_required(VERSION 3.24) -set(project_name "opencv_zoo_object_detection_yolox") - -PROJECT (${project_name}) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) -# Find OpenCV, you may need to set OpenCV_DIR variable -# to the absolute path to the directory containing OpenCVConfig.cmake file -# via the command line or GUI - -file(GLOB SourceFile - "demo.cpp") -# If the package has been found, several variables will -# be set, you can find the full list with descriptions -# in the OpenCVConfig.cmake file. -# Print some message showing some of them -message(STATUS "OpenCV library status:") -message(STATUS " config: ${OpenCV_DIR}") -message(STATUS " version: ${OpenCV_VERSION}") -message(STATUS " libraries: ${OpenCV_LIBS}") -message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}") - -# Declare the executable target built from your sources -add_executable(${project_name} ${SourceFile}) - -# Link your application with OpenCV libraries -target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS}) diff --git a/models/object_detection_yolox/LICENSE b/models/object_detection_yolox/LICENSE deleted file mode 100644 index 1d4dc763..00000000 --- a/models/object_detection_yolox/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright (c) 2021-2022 Megvii Inc. All rights reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/models/object_detection_yolox/README.md b/models/object_detection_yolox/README.md deleted file mode 100644 index e902660c..00000000 --- a/models/object_detection_yolox/README.md +++ /dev/null @@ -1,135 +0,0 @@ -# YOLOX - -Nanodet: YOLOX is an anchor-free version of YOLO, with a simpler design but better performance! It aims to bridge the gap between research and industrial communities. YOLOX is a high-performing object detector, an improvement to the existing YOLO series. YOLO series are in constant exploration of techniques to improve the object detection techniques for optimal speed and accuracy trade-off for real-time applications. - -Key features of the YOLOX object detector -- **Anchor-free detectors** significantly reduce the number of design parameters -- **A decoupled head for classification, regression, and localization** improves the convergence speed -- **SimOTA advanced label assignment strategy** reduces training time and avoids additional solver hyperparameters -- **Strong data augmentations like MixUp and Mosiac** to boost YOLOX performance - -**Note**: -- This version of YoloX: YoloX_s -- `object_detection_yolox_2022nov_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. 
- - -## Demo - -### Python - -Run the following command to try the demo: -```shell -# detect on camera input -python demo.py -# detect on an image -python demo.py --input /path/to/image -v -``` -Note: -- image result saved as "result.jpg" -- this model requires `opencv-python>=4.8.0` - -### C++ - -Install latest OpenCV and CMake >= 3.24.0 to get started with: - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# detect on camera input -./build/opencv_zoo_object_detection_yolox -# detect on an image -./build/opencv_zoo_object_detection_yolox -m=/path/to/model -i=/path/to/image -v -# get help messages -./build/opencv_zoo_object_detection_yolox -h -``` - - -## Results - -Here are some of the sample results that were observed using the model (**yolox_s.onnx**), - -![1_res.jpg](./example_outputs/1_res.jpg) -![2_res.jpg](./example_outputs/2_res.jpg) -![3_res.jpg](./example_outputs/3_res.jpg) - -Check [benchmark/download_data.py](../../benchmark/download_data.py) for the original images. - -## Model metrics: - -The model is evaluated on [COCO 2017 val](https://cocodataset.org/#download). Results are showed below: - - - -
Average Precision Average Recall
- -| area | IoU | Average Precision(AP) | -|:-------|:------|:------------------------| -| all | 0.50:0.95 | 0.405 | -| all | 0.50 | 0.593 | -| all | 0.75 | 0.437 | -| small | 0.50:0.95 | 0.232 | -| medium | 0.50:0.95 | 0.448 | -| large | 0.50:0.95 | 0.541 | - - - -| area | IoU | Average Recall(AR) | -|:-------|:------|:----------------| -| all | 0.50:0.95 | 0.326 | -| all | 0.50:0.95 | 0.531 | -| all | 0.50:0.95 | 0.574 | -| small | 0.50:0.95 | 0.365 | -| medium | 0.50:0.95 | 0.634 | -| large | 0.50:0.95 | 0.724 | -
- -| class | AP | class | AP | class | AP | -|:--------------|:-------|:-------------|:-------|:---------------|:-------| -| person | 54.109 | bicycle | 31.580 | car | 40.447 | -| motorcycle | 43.477 | airplane | 66.070 | bus | 64.183 | -| train | 64.483 | truck | 35.110 | boat | 24.681 | -| traffic light | 25.068 | fire hydrant | 64.382 | stop sign | 65.333 | -| parking meter | 48.439 | bench | 22.653 | bird | 33.324 | -| cat | 66.394 | dog | 60.096 | horse | 58.080 | -| sheep | 49.456 | cow | 53.596 | elephant | 65.574 | -| bear | 70.541 | zebra | 66.461 | giraffe | 66.780 | -| backpack | 13.095 | umbrella | 41.614 | handbag | 12.865 | -| tie | 29.453 | suitcase | 39.089 | frisbee | 61.712 | -| skis | 21.623 | snowboard | 31.326 | sports ball | 39.820 | -| kite | 41.410 | baseball bat | 27.311 | baseball glove | 36.661 | -| skateboard | 49.374 | surfboard | 35.524 | tennis racket | 45.569 | -| bottle | 37.270 | wine glass | 33.088 | cup | 39.835 | -| fork | 31.620 | knife | 15.265 | spoon | 14.918 | -| bowl | 43.251 | banana | 27.904 | apple | 17.630 | -| sandwich | 32.789 | orange | 29.388 | broccoli | 23.187 | -| carrot | 23.114 | hot dog | 33.716 | pizza | 52.541 | -| donut | 47.980 | cake | 36.160 | chair | 29.707 | -| couch | 46.175 | potted plant | 24.781 | bed | 44.323 | -| dining table | 30.022 | toilet | 64.237 | tv | 57.301 | -| laptop | 58.362 | mouse | 57.774 | remote | 24.271 | -| keyboard | 48.020 | cell phone | 32.376 | microwave | 57.220 | -| oven | 36.168 | toaster | 28.735 | sink | 38.159 | -| refrigerator | 52.876 | book | 15.030 | clock | 48.622 | -| vase | 37.013 | scissors | 26.307 | teddy bear | 45.676 | -| hair drier | 7.255 | toothbrush | 19.374 | | | - -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). 
- -#### Contributor Details - -- Google Summer of Code'22 -- Contributor: Sri Siddarth Chakaravarthy -- Github Profile: https://github.com/Sidd1609 -- Organisation: OpenCV -- Project: Lightweight object detection models using OpenCV - -## Reference - -- YOLOX article: https://arxiv.org/abs/2107.08430 -- YOLOX weight and scripts for training: https://github.com/Megvii-BaseDetection/YOLOX -- YOLOX blog: https://arshren.medium.com/yolox-new-improved-yolo-d430c0e4cf20 -- YOLOX-lite: https://github.com/TexasInstruments/edgeai-yolox diff --git a/models/object_detection_yolox/demo.cpp b/models/object_detection_yolox/demo.cpp deleted file mode 100644 index 0239b1c7..00000000 --- a/models/object_detection_yolox/demo.cpp +++ /dev/null @@ -1,311 +0,0 @@ -#include -#include -#include - -#include - -using namespace std; -using namespace cv; -using namespace dnn; - -vector< pair > backendTargetPairs = { - std::make_pair(dnn::DNN_BACKEND_OPENCV, dnn::DNN_TARGET_CPU), - std::make_pair(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA), - std::make_pair(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA_FP16), - std::make_pair(dnn::DNN_BACKEND_TIMVX, dnn::DNN_TARGET_NPU), - std::make_pair(dnn::DNN_BACKEND_CANN, dnn::DNN_TARGET_NPU) }; - -vector labelYolox = { - "person", "bicycle", "car", "motorcycle", "airplane", "bus", - "train", "truck", "boat", "traffic light", "fire hydrant", - "stop sign", "parking meter", "bench", "bird", "cat", "dog", - "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", - "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", - "skis", "snowboard", "sports ball", "kite", "baseball bat", - "baseball glove", "skateboard", "surfboard", "tennis racket", - "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", - "banana", "apple", "sandwich", "orange", "broccoli", "carrot", - "hot dog", "pizza", "donut", "cake", "chair", "couch", - "potted plant", "bed", "dining table", "toilet", "tv", "laptop", - "mouse", "remote", "keyboard", "cell phone", 
"microwave", - "oven", "toaster", "sink", "refrigerator", "book", "clock", - "vase", "scissors", "teddy bear", "hair drier", "toothbrush" }; - -class YoloX { -private: - Net net; - string modelPath; - Size inputSize; - float confThreshold; - float nmsThreshold; - float objThreshold; - dnn::Backend backendId; - dnn::Target targetId; - int num_classes; - vector strides; - Mat expandedStrides; - Mat grids; - -public: - YoloX(string modPath, float confThresh = 0.35, float nmsThresh = 0.5, float objThresh = 0.5, dnn::Backend bId = DNN_BACKEND_DEFAULT, dnn::Target tId = DNN_TARGET_CPU) : - modelPath(modPath), confThreshold(confThresh), - nmsThreshold(nmsThresh), objThreshold(objThresh), - backendId(bId), targetId(tId) - { - this->num_classes = int(labelYolox.size()); - this->net = readNet(modelPath); - this->inputSize = Size(640, 640); - this->strides = vector{ 8, 16, 32 }; - this->net.setPreferableBackend(this->backendId); - this->net.setPreferableTarget(this->targetId); - this->generateAnchors(); - } - - Mat preprocess(Mat img) - { - Mat blob; - Image2BlobParams paramYolox; - paramYolox.datalayout = DNN_LAYOUT_NCHW; - paramYolox.ddepth = CV_32F; - paramYolox.mean = Scalar::all(0); - paramYolox.scalefactor = Scalar::all(1); - paramYolox.size = Size(img.cols, img.rows); - paramYolox.swapRB = true; - - blob = blobFromImageWithParams(img, paramYolox); - return blob; - } - - Mat infer(Mat srcimg) - { - Mat inputBlob = this->preprocess(srcimg); - - this->net.setInput(inputBlob); - vector outs; - this->net.forward(outs, this->net.getUnconnectedOutLayersNames()); - - Mat predictions = this->postprocess(outs[0]); - return predictions; - } - - Mat postprocess(Mat outputs) - { - Mat dets = outputs.reshape(0,outputs.size[1]); - Mat col01; - add(dets.colRange(0, 2), this->grids, col01); - Mat col23; - exp(dets.colRange(2, 4), col23); - vector col = { col01, col23 }; - Mat boxes; - hconcat(col, boxes); - float* ptr = this->expandedStrides.ptr(0); - for (int r = 0; r < boxes.rows; 
r++, ptr++) - { - boxes.rowRange(r, r + 1) = *ptr * boxes.rowRange(r, r + 1); - } - // get boxes - Mat boxes_xywh(boxes.rows, boxes.cols, CV_32FC1, Scalar(1)); - Mat scores = dets.colRange(5, dets.cols).clone(); - vector maxScores(dets.rows); - vector maxScoreIdx(dets.rows); - vector boxesXYWH(dets.rows); - - for (int r = 0; r < boxes_xywh.rows; r++, ptr++) - { - boxes_xywh.at(r, 0) = boxes.at(r, 0) - boxes.at(r, 2) / 2.f; - boxes_xywh.at(r, 1) = boxes.at(r, 1) - boxes.at(r, 3) / 2.f; - boxes_xywh.at(r, 2) = boxes.at(r, 2); - boxes_xywh.at(r, 3) = boxes.at(r, 3); - // get scores and class indices - scores.rowRange(r, r + 1) = scores.rowRange(r, r + 1) * dets.at(r, 4); - double minVal, maxVal; - Point maxIdx; - minMaxLoc(scores.rowRange(r, r+1), &minVal, &maxVal, nullptr, &maxIdx); - maxScoreIdx[r] = maxIdx.x; - maxScores[r] = float(maxVal); - boxesXYWH[r].x = boxes_xywh.at(r, 0); - boxesXYWH[r].y = boxes_xywh.at(r, 1); - boxesXYWH[r].width = boxes_xywh.at(r, 2); - boxesXYWH[r].height = boxes_xywh.at(r, 3); - } - - vector keep; - NMSBoxesBatched(boxesXYWH, maxScores, maxScoreIdx, this->confThreshold, this->nmsThreshold, keep); - Mat candidates(int(keep.size()), 6, CV_32FC1); - int row = 0; - for (auto idx : keep) - { - boxes_xywh.rowRange(idx, idx + 1).copyTo(candidates(Rect(0, row, 4, 1))); - candidates.at(row, 4) = maxScores[idx]; - candidates.at(row, 5) = float(maxScoreIdx[idx]); - row++; - } - if (keep.size() == 0) - return Mat(); - return candidates; - - } - - - void generateAnchors() - { - vector< tuple > nb; - int total = 0; - - for (auto v : this->strides) - { - int w = this->inputSize.width / v; - int h = this->inputSize.height / v; - nb.push_back(tuple(w * h, w, v)); - total += w * h; - } - this->grids = Mat(total, 2, CV_32FC1); - this->expandedStrides = Mat(total, 1, CV_32FC1); - float* ptrGrids = this->grids.ptr(0); - float* ptrStrides = this->expandedStrides.ptr(0); - int pos = 0; - for (auto le : nb) - { - int r = get<1>(le); - for (int i = 0; i < 
get<0>(le); i++, pos++) - { - *ptrGrids++ = float(i % r); - *ptrGrids++ = float(i / r); - *ptrStrides++ = float((get<2>(le))); - } - } - } -}; - -std::string keys = -"{ help h | | Print help message. }" -"{ model m | object_detection_yolox_2022nov.onnx | Usage: Path to the model, defaults to object_detection_yolox_2022nov.onnx }" -"{ input i | | Path to input image or video file. Skip this argument to capture frames from a camera.}" -"{ confidence | 0.5 | Class confidence }" -"{ obj | 0.5 | Enter object threshold }" -"{ nms | 0.5 | Enter nms IOU threshold }" -"{ save s | true | Specify to save results. This flag is invalid when using camera. }" -"{ vis v | 1 | Specify to open a window for result visualization. This flag is invalid when using camera. }" -"{ backend bt | 0 | Choose one of computation backends: " -"0: (default) OpenCV implementation + CPU, " -"1: CUDA + GPU (CUDA), " -"2: CUDA + GPU (CUDA FP16), " -"3: TIM-VX + NPU, " -"4: CANN + NPU}"; - -pair letterBox(Mat srcimg, Size targetSize = Size(640, 640)) -{ - Mat paddedImg(targetSize.height, targetSize.width, CV_32FC3, Scalar::all(114.0)); - Mat resizeImg; - - double ratio = min(targetSize.height / double(srcimg.rows), targetSize.width / double(srcimg.cols)); - resize(srcimg, resizeImg, Size(int(srcimg.cols * ratio), int(srcimg.rows * ratio)), INTER_LINEAR); - resizeImg.copyTo(paddedImg(Rect(0, 0, int(srcimg.cols * ratio), int(srcimg.rows * ratio)))); - return pair(paddedImg, ratio); -} - -Mat unLetterBox(Mat bbox, double letterboxScale) -{ - return bbox / letterboxScale; -} - -Mat visualize(Mat dets, Mat srcimg, double letterbox_scale, double fps = -1) -{ - Mat resImg = srcimg.clone(); - - if (fps > 0) - putText(resImg, format("FPS: %.2f", fps), Size(10, 25), FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 0, 255), 2); - - for (int row = 0; row < dets.rows; row++) - { - Mat boxF = unLetterBox(dets(Rect(0, row, 4, 1)), letterbox_scale); - Mat box; - boxF.convertTo(box, CV_32S); - float score = dets.at(row, 4); - int 
clsId = int(dets.at(row, 5)); - - int x0 = box.at(0, 0); - int y0 = box.at(0, 1); - int x1 = box.at(0, 2); - int y1 = box.at(0, 3); - - string text = format("%s : %f", labelYolox[clsId].c_str(), score * 100); - int font = FONT_HERSHEY_SIMPLEX; - int baseLine = 0; - Size txtSize = getTextSize(text, font, 0.4, 1, &baseLine); - rectangle(resImg, Point(x0, y0), Point(x1, y1), Scalar(0, 255, 0), 2); - rectangle(resImg, Point(x0, y0 + 1), Point(x0 + txtSize.width + 1, y0 + int(1.5 * txtSize.height)), Scalar(255, 255, 255), -1); - putText(resImg, text, Point(x0, y0 + txtSize.height), font, 0.4, Scalar(0, 0, 0), 1); - } - - return resImg; -} - -int main(int argc, char** argv) -{ - CommandLineParser parser(argc, argv, keys); - - parser.about("Use this script to run Yolox deep learning networks in opencv_zoo using OpenCV."); - if (parser.has("help")) - { - parser.printMessage(); - return 0; - } - - string model = parser.get("model"); - float confThreshold = parser.get("confidence"); - float objThreshold = parser.get("obj"); - float nmsThreshold = parser.get("nms"); - bool vis = parser.get("vis"); - bool save = parser.get("save"); - int backendTargetid = parser.get("backend"); - - if (model.empty()) - { - CV_Error(Error::StsError, "Model file " + model + " not found"); - } - - YoloX modelNet(model, confThreshold, nmsThreshold, objThreshold, - backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second); - //! 
[Open a video file or an image file or a camera stream] - VideoCapture cap; - if (parser.has("input")) - cap.open(samples::findFile(parser.get("input"))); - else - cap.open(0); - if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot open video or file"); - Mat frame, inputBlob; - double letterboxScale; - - static const std::string kWinName = model; - int nbInference = 0; - while (waitKey(1) < 0) - { - cap >> frame; - if (frame.empty()) - { - cout << "Frame is empty" << endl; - waitKey(); - break; - } - pair w = letterBox(frame); - inputBlob = get<0>(w); - letterboxScale = get<1>(w); - TickMeter tm; - tm.start(); - Mat predictions = modelNet.infer(inputBlob); - tm.stop(); - cout << "Inference time: " << tm.getTimeMilli() << " ms\n"; - Mat img = visualize(predictions, frame, letterboxScale, tm.getFPS()); - if (save && parser.has("input")) - { - imwrite("result.jpg", img); - } - if (vis) - { - imshow(kWinName, img); - } - } - return 0; -} diff --git a/models/object_detection_yolox/demo.py b/models/object_detection_yolox/demo.py deleted file mode 100644 index a05f777e..00000000 --- a/models/object_detection_yolox/demo.py +++ /dev/null @@ -1,155 +0,0 @@ -import numpy as np -import cv2 as cv -import argparse - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from yolox import YoloX - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -classes = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', - 'train', 'truck', 'boat', 
'traffic light', 'fire hydrant', - 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', - 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', - 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', - 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', - 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', - 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', - 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', - 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', - 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', - 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush') - -def letterbox(srcimg, target_size=(640, 640)): - padded_img = np.ones((target_size[0], target_size[1], 3)).astype(np.float32) * 114.0 - ratio = min(target_size[0] / srcimg.shape[0], target_size[1] / srcimg.shape[1]) - resized_img = cv.resize( - srcimg, (int(srcimg.shape[1] * ratio), int(srcimg.shape[0] * ratio)), interpolation=cv.INTER_LINEAR - ).astype(np.float32) - padded_img[: int(srcimg.shape[0] * ratio), : int(srcimg.shape[1] * ratio)] = resized_img - - return padded_img, ratio - -def unletterbox(bbox, letterbox_scale): - return bbox / letterbox_scale - -def vis(dets, srcimg, letterbox_scale, fps=None): - res_img = srcimg.copy() - - if fps is not None: - fps_label = "FPS: %.2f" % fps - cv.putText(res_img, fps_label, (10, 25), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) - - for det in dets: - box = unletterbox(det[:4], letterbox_scale).astype(np.int32) - score = det[-2] - cls_id = int(det[-1]) - - x0, y0, w, h = box - - text = '{}:{:.1f}%'.format(classes[cls_id], score * 100) - font = cv.FONT_HERSHEY_SIMPLEX - txt_size = cv.getTextSize(text, font, 0.4, 1)[0] - cv.rectangle(res_img, (x0, y0 , w, h), (0, 255, 0), 2) - cv.rectangle(res_img, (x0, y0 + 1), (x0 + txt_size[0] + 1, 
y0 + int(1.5 * txt_size[1])), (255, 255, 255), -1) - cv.putText(res_img, text, (x0, y0 + txt_size[1]), font, 0.4, (0, 0, 0), thickness=1) - - return res_img - -if __name__=='__main__': - parser = argparse.ArgumentParser(description='Nanodet inference using OpenCV an contribution by Sri Siddarth Chakaravarthy part of GSOC_2022') - parser.add_argument('--input', '-i', type=str, - help='Path to the input image. Omit for using default camera.') - parser.add_argument('--model', '-m', type=str, default='object_detection_yolox_2022nov.onnx', - help="Path to the model") - parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) - parser.add_argument('--confidence', default=0.5, type=float, - help='Class confidence') - parser.add_argument('--nms', default=0.5, type=float, - help='Enter nms IOU threshold') - parser.add_argument('--obj', default=0.5, type=float, - help='Enter object threshold') - parser.add_argument('--save', '-s', action='store_true', - help='Specify to save results. This flag is invalid when using camera.') - parser.add_argument('--vis', '-v', action='store_true', - help='Specify to open a window for result visualization. 
This flag is invalid when using camera.') - args = parser.parse_args() - - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - - model_net = YoloX(modelPath= args.model, - confThreshold=args.confidence, - nmsThreshold=args.nms, - objThreshold=args.obj, - backendId=backend_id, - targetId=target_id) - - tm = cv.TickMeter() - tm.reset() - if args.input is not None: - image = cv.imread(args.input) - input_blob = cv.cvtColor(image, cv.COLOR_BGR2RGB) - input_blob, letterbox_scale = letterbox(input_blob) - - # Inference - tm.start() - preds = model_net.infer(input_blob) - tm.stop() - print("Inference time: {:.2f} ms".format(tm.getTimeMilli())) - - img = vis(preds, image, letterbox_scale) - - if args.save: - print('Results saved to result.jpg\n') - cv.imwrite('result.jpg', img) - - if args.vis: - cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) - cv.imshow(args.input, img) - cv.waitKey(0) - - else: - print("Press any key to stop video capture") - deviceId = 0 - cap = cv.VideoCapture(deviceId) - - while cv.waitKey(1) < 0: - hasFrame, frame = cap.read() - if not hasFrame: - print('No frames grabbed!') - break - - input_blob = cv.cvtColor(frame, cv.COLOR_BGR2RGB) - input_blob, letterbox_scale = letterbox(input_blob) - - # Inference - tm.start() - preds = model_net.infer(input_blob) - tm.stop() - - img = vis(preds, frame, letterbox_scale, fps=tm.getFPS()) - - cv.imshow("YoloX Demo", img) - - tm.reset() diff --git a/models/object_detection_yolox/example_outputs/1_res.jpg b/models/object_detection_yolox/example_outputs/1_res.jpg deleted file mode 100644 index aab8c964..00000000 --- a/models/object_detection_yolox/example_outputs/1_res.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e03d69d6e1420bb84f3426b7d5f607cdfc86e522e33ee646cfc970e9ff53d9ea -size 124300 diff --git a/models/object_detection_yolox/example_outputs/2_res.jpg 
b/models/object_detection_yolox/example_outputs/2_res.jpg deleted file mode 100644 index a13b5f3e..00000000 --- a/models/object_detection_yolox/example_outputs/2_res.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4a54924474eeb1c2d8cbb4f2245e003c28aff8256eebfa1bf2653f4af41eaa66 -size 402200 diff --git a/models/object_detection_yolox/example_outputs/3_res.jpg b/models/object_detection_yolox/example_outputs/3_res.jpg deleted file mode 100644 index 42ac0a1f..00000000 --- a/models/object_detection_yolox/example_outputs/3_res.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e26d3b5cb7ac326fd2d431611f397ad710d210a22109fa6d876dacee262db63c -size 119379 diff --git a/models/object_detection_yolox/object_detection_yolox_2022nov.onnx b/models/object_detection_yolox/object_detection_yolox_2022nov.onnx deleted file mode 100644 index 0a22cdd5..00000000 --- a/models/object_detection_yolox/object_detection_yolox_2022nov.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c5c2d13e59ae883e6af3b45daea64af4833a4951c92d116ec270d9ddbe998063 -size 35858002 diff --git a/models/object_detection_yolox/object_detection_yolox_2022nov_int8.onnx b/models/object_detection_yolox/object_detection_yolox_2022nov_int8.onnx deleted file mode 100644 index af996081..00000000 --- a/models/object_detection_yolox/object_detection_yolox_2022nov_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:01a3b0f400b30bc1e45230e991b2e499ab42622485a330021947333fbaf03935 -size 9079452 diff --git a/models/object_detection_yolox/object_detection_yolox_2022nov_int8bq.onnx b/models/object_detection_yolox/object_detection_yolox_2022nov_int8bq.onnx deleted file mode 100644 index ef4f2d2d..00000000 --- a/models/object_detection_yolox/object_detection_yolox_2022nov_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:dcaae0aaa2fea4167f89235ee340eb869d3707b25712218d4c7ce921ac90e2ba -size 9744418 diff --git a/models/object_detection_yolox/yolox.py b/models/object_detection_yolox/yolox.py deleted file mode 100644 index 503b3fce..00000000 --- a/models/object_detection_yolox/yolox.py +++ /dev/null @@ -1,85 +0,0 @@ -import numpy as np -import cv2 - -class YoloX: - def __init__(self, modelPath, confThreshold=0.35, nmsThreshold=0.5, objThreshold=0.5, backendId=0, targetId=0): - self.num_classes = 80 - self.net = cv2.dnn.readNet(modelPath) - self.input_size = (640, 640) - self.mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 1, 3) - self.std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 1, 3) - self.strides = [8, 16, 32] - self.confThreshold = confThreshold - self.nmsThreshold = nmsThreshold - self.objThreshold = objThreshold - self.backendId = backendId - self.targetId = targetId - self.net.setPreferableBackend(self.backendId) - self.net.setPreferableTarget(self.targetId) - - self.generateAnchors() - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self.backendId = backendId - self.targetId = targetId - self.net.setPreferableBackend(self.backendId) - self.net.setPreferableTarget(self.targetId) - - def preprocess(self, img): - blob = np.transpose(img, (2, 0, 1)) - return blob[np.newaxis, :, :, :] - - def infer(self, srcimg): - input_blob = self.preprocess(srcimg) - - self.net.setInput(input_blob) - outs = self.net.forward(self.net.getUnconnectedOutLayersNames()) - - predictions = self.postprocess(outs[0]) - return predictions - - def postprocess(self, outputs): - dets = outputs[0] - - dets[:, :2] = (dets[:, :2] + self.grids) * self.expanded_strides - dets[:, 2:4] = np.exp(dets[:, 2:4]) * self.expanded_strides - - # get boxes - boxes = dets[:, :4] - boxes_xywh = np.ones_like(boxes) - boxes_xywh[:, 0] = boxes[:, 0] - boxes[:, 2] / 2. 
- boxes_xywh[:, 1] = boxes[:, 1] - boxes[:, 3] / 2. - boxes_xywh[:, 2] = boxes[:, 2] - boxes_xywh[:, 3] = boxes[:, 3] - - # get scores and class indices - scores = dets[:, 4:5] * dets[:, 5:] - max_scores = np.amax(scores, axis=1) - max_scores_idx = np.argmax(scores, axis=1) - - keep = cv2.dnn.NMSBoxesBatched(boxes_xywh.tolist(), max_scores.tolist(), max_scores_idx.tolist(), self.confThreshold, self.nmsThreshold) - - candidates = np.concatenate([boxes_xywh, max_scores[:, None], max_scores_idx[:, None]], axis=1) - if len(keep) == 0: - return np.array([]) - return candidates[keep] - - def generateAnchors(self): - self.grids = [] - self.expanded_strides = [] - hsizes = [self.input_size[0] // stride for stride in self.strides] - wsizes = [self.input_size[1] // stride for stride in self.strides] - - for hsize, wsize, stride in zip(hsizes, wsizes, self.strides): - xv, yv = np.meshgrid(np.arange(hsize), np.arange(wsize)) - grid = np.stack((xv, yv), 2).reshape(1, -1, 2) - self.grids.append(grid) - shape = grid.shape[:2] - self.expanded_strides.append(np.full((*shape, 1), stride)) - - self.grids = np.concatenate(self.grids, 1) - self.expanded_strides = np.concatenate(self.expanded_strides, 1) diff --git a/models/object_tracking_vittrack/CMakeLists.txt b/models/object_tracking_vittrack/CMakeLists.txt deleted file mode 100644 index 51644e83..00000000 --- a/models/object_tracking_vittrack/CMakeLists.txt +++ /dev/null @@ -1,32 +0,0 @@ -cmake_minimum_required(VERSION 3.24) -set(project_name "opencv_zoo_object_tracking_vittrack") - -PROJECT (${project_name}) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) -# Find OpenCV, you may need to set OpenCV_DIR variable -# to the absolute path to the directory containing OpenCVConfig.cmake file -# via the command line or GUI - -file(GLOB SourceFile - "demo.cpp") -# If the package has been 
found, several variables will -# be set, you can find the full list with descriptions -# in the OpenCVConfig.cmake file. -# Print some message showing some of them -message(STATUS "OpenCV library status:") -message(STATUS " config: ${OpenCV_DIR}") -message(STATUS " version: ${OpenCV_VERSION}") -message(STATUS " libraries: ${OpenCV_LIBS}") -message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}") - -# Declare the executable target built from your sources -add_executable(${project_name} ${SourceFile}) - -# Set C++ compilation standard to C++11 -set(CMAKE_CXX_STANDARD 11) - -# Link your application with OpenCV libraries -target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS}) diff --git a/models/object_tracking_vittrack/LICENSE b/models/object_tracking_vittrack/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/models/object_tracking_vittrack/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/models/object_tracking_vittrack/README.md b/models/object_tracking_vittrack/README.md deleted file mode 100644 index 0da588ad..00000000 --- a/models/object_tracking_vittrack/README.md +++ /dev/null @@ -1,86 +0,0 @@ -# VIT tracker - -VIT tracker(vision transformer tracker) is a much better model for real-time object tracking. VIT tracker can achieve speeds exceeding nanotrack by 20% in single-threaded mode with ARM chip, and the advantage becomes even more pronounced in multi-threaded mode. In addition, on the dataset, vit tracker demonstrates better performance compared to nanotrack. Moreover, vit trackerprovides confidence values during the tracking process, which can be used to determine if the tracking is currently lost. - -In target tracking tasks, the score is an important indicator that can indicate whether the current target is lost. In the video, vit tracker can track the target and display the current score in the upper left corner of the video. When the target is lost, the score drops significantly. While nanotrack will only return 0.9 score in any situation, so that we cannot determine whether the target is lost. 
- -Video demo: https://youtu.be/MJiPnu1ZQRI - -This model is contributed by [Pengyu Liu](https://github.com/lpylpy0514) in GSoC 2023 project [**Realtime object tracking models**](https://github.com/opencv/opencv/wiki/GSoC_2023#idea-realtime-object-tracking-models) - -**Note**: -- OpenCV > 4.8.0 is required. Build from source with instructions from https://opencv.org/get-started/.** -- `object_tracking_vittrack_2023sep_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. - - -# Demo -## Python -```bash -# tracking on camera input -python demo.py - -# tracking on video -python demo.py --input /path/to/video - -# get help regarding various parameters -python demo.py --help -``` -## C++ -Install latest OpenCV and CMake >= 3.24.0 to get started. - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# tracking on camera input -./build/opencv_zoo_object_tracking_vittrack - -# tracking on video -./build/opencv_zoo_object_tracking_vittrack -i=/path/to/video - -# get help messages -./build/opencv_zoo_object_tracking_vittrack -h -``` - -# Example outputs - - - - -# Speed test - -NOTE: The speed below is tested by **onnxruntime** because opencv has poor support for the transformer architecture for now. 
- -ONNX speed test on ARM platform(apple M2)(ms): - -| thread nums | 1 | 2 | 3 | 4 | -| ----------- | ---- | ---- | ---- | ------------- | -| nanotrack | 5.25 | 4.86 | 4.72 | 4.49 | -| vit tracker | 4.18 | 2.41 | 1.97 | **1.46 (3X)** | - -ONNX speed test on x86 platform(intel i3 10105)(ms): - -| thread nums | 1 | 2 | 3 | 4 | -| ----------- | ---- | ---- | ---- | ---- | -| nanotrack | 3.20 | 2.75 | 2.46 | 2.55 | -| vit tracker | 3.84 | 2.37 | 2.10 | 2.01 | - -# Performance test - -preformance test on lasot dataset(AUC is the most important data. Higher AUC means better tracker): - -| LASOT | AUC | P | Pnorm | -| ----------- | ---- | ---- | ----- | -| nanotrack | 46.8 | 45.0 | 43.3 | -| vit tracker | 48.6 | 44.8 | 54.7 | - -# License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). - -# Reference: - -OSTrack: https://github.com/botaoye/OSTrack - -OpenCV Sample: https://github.com/opencv/opencv/blob/4.x/samples/dnn/vit_tracker.cpp diff --git a/models/object_tracking_vittrack/demo.cpp b/models/object_tracking_vittrack/demo.cpp deleted file mode 100644 index c8ccb26b..00000000 --- a/models/object_tracking_vittrack/demo.cpp +++ /dev/null @@ -1,210 +0,0 @@ -#include -#include - -using namespace std; -using namespace cv; -using namespace dnn; - -struct TrackingResult -{ - bool isLocated; - Rect bbox; - float score; -}; - -class VitTrack -{ -public: - - VitTrack(const string& model_path, int backend_id = 0, int target_id = 0) - { - params.net = model_path; - params.backend = backend_id; - params.target = target_id; - model = TrackerVit::create(params); - } - - void init(const Mat& image, const Rect& roi) - { - model->init(image, roi); - } - - TrackingResult infer(const Mat& image) - { - TrackingResult result; - result.isLocated = model->update(image, result.bbox); - result.score = model->getTrackingScore(); - return result; - } - -private: - TrackerVit::Params params; - Ptr model; -}; - -Mat visualize(const Mat& image, const Rect& bbox, 
float score, bool isLocated, double fps = -1.0, - const Scalar& box_color = Scalar(0, 255, 0), const Scalar& text_color = Scalar(0, 255, 0), - double fontScale = 1.0, int fontSize = 1) -{ - Mat output = image.clone(); - int h = output.rows; - int w = output.cols; - - if (fps >= 0) - { - putText(output, "FPS: " + to_string(fps), Point(0, 30), FONT_HERSHEY_DUPLEX, fontScale, text_color, fontSize); - } - - if (isLocated && score >= 0.3) - { - rectangle(output, bbox, box_color, 2); - putText(output, format("%.2f", score), Point(bbox.x, bbox.y + 25), - FONT_HERSHEY_DUPLEX, fontScale, text_color, fontSize); - } - else - { - Size text_size = getTextSize("Target lost!", FONT_HERSHEY_DUPLEX, fontScale, fontSize, nullptr); - int text_x = (w - text_size.width) / 2; - int text_y = (h - text_size.height) / 2; - putText(output, "Target lost!", Point(text_x, text_y), FONT_HERSHEY_DUPLEX, fontScale, Scalar(0, 0, 255), fontSize); - } - - return output; -} - -int main(int argc, char** argv) -{ - CommandLineParser parser(argc, argv, - "{help h | | Print help message. }" - "{input i | |Set path to the input video. 
Omit for using default camera.}" - "{model_path |object_tracking_vittrack_2023sep.onnx |Set model path}" - "{backend_target bt |0 |Choose backend-target pair: 0 - OpenCV implementation + CPU, 1 - CUDA + GPU (CUDA), 2 - CUDA + GPU (CUDA FP16), 3 - TIM-VX + NPU, 4 - CANN + NPU}" - "{save s |false |Specify to save a file with results.}" - "{vis v |true |Specify to open a new window to show results.}"); - if (parser.has("help")) - { - parser.printMessage(); - return 0; - } - - string input = parser.get("input"); - string model_path = parser.get("model_path"); - int backend_target = parser.get("backend_target"); - bool save = parser.get("save"); - bool vis = parser.get("vis"); - - vector> backend_target_pairs = - { - {DNN_BACKEND_OPENCV, DNN_TARGET_CPU}, - {DNN_BACKEND_CUDA, DNN_TARGET_CUDA}, - {DNN_BACKEND_CUDA, DNN_TARGET_CUDA_FP16}, - {DNN_BACKEND_TIMVX, DNN_TARGET_NPU}, - {DNN_BACKEND_CANN, DNN_TARGET_NPU} - }; - - int backend_id = backend_target_pairs[backend_target][0]; - int target_id = backend_target_pairs[backend_target][1]; - - // Create VitTrack tracker - VitTrack tracker(model_path, backend_id, target_id); - - // Open video capture - VideoCapture video; - if (input.empty()) - { - video.open(0); // Default camera - } - else - { - video.open(input); - } - - if (!video.isOpened()) - { - cerr << "Error: Could not open video source" << endl; - return -1; - } - - // Select an object - Mat first_frame; - video >> first_frame; - - if (first_frame.empty()) - { - cerr << "No frames grabbed!" << endl; - return -1; - } - - Mat first_frame_copy = first_frame.clone(); - putText(first_frame_copy, "1. Drag a bounding box to track.", Point(0, 25), FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 255, 0)); - putText(first_frame_copy, "2. Press ENTER to confirm", Point(0, 50), FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 255, 0)); - Rect roi = selectROI("VitTrack Demo", first_frame_copy); - - if (roi.area() == 0) - { - cerr << "No ROI is selected! Exiting..." 
<< endl; - return -1; - } - else - { - cout << "Selected ROI: " << roi << endl; - } - - // Create VideoWriter if save option is specified - VideoWriter output_video; - if (save) - { - Size frame_size = first_frame.size(); - output_video.open("output.mp4", VideoWriter::fourcc('m', 'p', '4', 'v'), video.get(CAP_PROP_FPS), frame_size); - if (!output_video.isOpened()) - { - cerr << "Error: Could not create output video stream" << endl; - return -1; - } - } - - // Initialize tracker with ROI - tracker.init(first_frame, roi); - - // Track frame by frame - TickMeter tm; - while (waitKey(1) < 0) - { - video >> first_frame; - if (first_frame.empty()) - { - cout << "End of video" << endl; - break; - } - - // Inference - tm.start(); - TrackingResult result = tracker.infer(first_frame); - tm.stop(); - - // Visualize - Mat frame = first_frame.clone(); - frame = visualize(frame, result.bbox, result.score, result.isLocated, tm.getFPS()); - - if (save) - { - output_video.write(frame); - } - - if (vis) - { - imshow("VitTrack Demo", frame); - } - tm.reset(); - } - - if (save) - { - output_video.release(); - } - - video.release(); - destroyAllWindows(); - - return 0; -} diff --git a/models/object_tracking_vittrack/demo.py b/models/object_tracking_vittrack/demo.py deleted file mode 100644 index 21d65878..00000000 --- a/models/object_tracking_vittrack/demo.py +++ /dev/null @@ -1,125 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. 
- -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from vittrack import VitTrack - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser( - description="VIT track opencv API") -parser.add_argument('--input', '-i', type=str, - help='Usage: Set path to the input video. Omit for using default camera.') -parser.add_argument('--model_path', type=str, default='object_tracking_vittrack_2023sep.onnx', - help='Usage: Set model path') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--save', '-s', action='store_true', default=False, - help='Usage: Specify to save a file with results.') -parser.add_argument('--vis', '-v', action='store_true', default=True, - help='Usage: Specify to open a new window to show results.') -args = parser.parse_args() -def visualize(image, bbox, score, isLocated, fps=None, box_color=(0, 255, 0),text_color=(0, 255, 0), fontScale = 1, fontSize = 1): - output = image.copy() - h, w, _ = output.shape - - if fps is not None: - cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 30), cv.FONT_HERSHEY_DUPLEX, fontScale, 
text_color, fontSize) - - if isLocated and score >= 0.3: - # bbox: Tuple of length 4 - x, y, w, h = bbox - cv.rectangle(output, (x, y), (x+w, y+h), box_color, 2) - cv.putText(output, '{:.2f}'.format(score), (x, y+25), cv.FONT_HERSHEY_DUPLEX, fontScale, text_color, fontSize) - else: - text_size, baseline = cv.getTextSize('Target lost!', cv.FONT_HERSHEY_DUPLEX, fontScale, fontSize) - text_x = int((w - text_size[0]) / 2) - text_y = int((h - text_size[1]) / 2) - cv.putText(output, 'Target lost!', (text_x, text_y), cv.FONT_HERSHEY_DUPLEX, fontScale, (0, 0, 255), fontSize) - - return output - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - - model = VitTrack( - model_path=args.model_path, - backend_id=backend_id, - target_id=target_id) - - # Read from args.input - _input = 0 if args.input is None else args.input - video = cv.VideoCapture(_input) - - # Select an object - has_frame, first_frame = video.read() - if not has_frame: - print('No frames grabbed!') - exit() - first_frame_copy = first_frame.copy() - cv.putText(first_frame_copy, "1. Drag a bounding box to track.", (0, 25), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0)) - cv.putText(first_frame_copy, "2. Press ENTER to confirm", (0, 50), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0)) - roi = cv.selectROI('VitTrack Demo', first_frame_copy) - - if np.all(np.array(roi) == 0): - print("No ROI is selected! 
Exiting ...") - exit() - else: - print("Selected ROI: {}".format(roi)) - - if args.save: - fps = video.get(cv.CAP_PROP_FPS) - frame_size = (first_frame.shape[1], first_frame.shape[0]) - output_video = cv.VideoWriter('output.mp4', cv.VideoWriter_fourcc(*'mp4v'), fps, frame_size) - - # Init tracker with ROI - model.init(first_frame, roi) - - # Track frame by frame - tm = cv.TickMeter() - while cv.waitKey(1) < 0: - has_frame, frame = video.read() - if not has_frame: - print('End of video') - break - # Inference - tm.start() - isLocated, bbox, score = model.infer(frame) - tm.stop() - # Visualize - frame = visualize(frame, bbox, score, isLocated, fps=tm.getFPS()) - if args.save: - output_video.write(frame) - - if args.vis: - cv.imshow('VitTrack Demo', frame) - tm.reset() - - if args.save: - output_video.release() - - video.release() - cv.destroyAllWindows() diff --git a/models/object_tracking_vittrack/example_outputs/vittrack_demo.gif b/models/object_tracking_vittrack/example_outputs/vittrack_demo.gif deleted file mode 100644 index 3aa482c3..00000000 --- a/models/object_tracking_vittrack/example_outputs/vittrack_demo.gif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:57e5a2497186f8171809fccc3cd84473aefc6fcd2ce2f0d5b92a1382e92a7768 -size 615598 diff --git a/models/object_tracking_vittrack/object_tracking_vittrack_2023sep.onnx b/models/object_tracking_vittrack/object_tracking_vittrack_2023sep.onnx deleted file mode 100644 index 7df76db6..00000000 --- a/models/object_tracking_vittrack/object_tracking_vittrack_2023sep.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2990f0b7cd44d92afa48cd97db6de7be113fc1d9594fddb74e2725c10478e91d -size 714726 diff --git a/models/object_tracking_vittrack/object_tracking_vittrack_2023sep_int8bq.onnx b/models/object_tracking_vittrack/object_tracking_vittrack_2023sep_int8bq.onnx deleted file mode 100644 index 23b52694..00000000 --- 
a/models/object_tracking_vittrack/object_tracking_vittrack_2023sep_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:54e8d58892a49de71fadf6673ba10193f7899324a4c1b8fe8c2f2d8d5d661fb4 -size 271327 diff --git a/models/object_tracking_vittrack/vittrack.py b/models/object_tracking_vittrack/vittrack.py deleted file mode 100644 index e710e995..00000000 --- a/models/object_tracking_vittrack/vittrack.py +++ /dev/null @@ -1,39 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. - -import numpy as np -import cv2 as cv - -class VitTrack: - def __init__(self, model_path, backend_id=0, target_id=0): - self.model_path = model_path - self.backend_id = backend_id - self.target_id = target_id - - self.params = cv.TrackerVit_Params() - self.params.net = self.model_path - self.params.backend = self.backend_id - self.params.target = self.target_id - - self.model = cv.TrackerVit_create(self.params) - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backend_id, target_id): - self.backend_id = backend_id - self.target_id = target_id - - self.params.backend = self.backend_id - self.params.target = self.target_id - - self.model = cv.TrackerVit_create(self.params) - - def init(self, image, roi): - self.model.init(image, roi) - - def infer(self, image): - is_located, bbox = self.model.update(image) - score = self.model.getTrackingScore() - return is_located, bbox, score diff --git a/models/optical_flow_estimation_raft/BSD-3-LICENSE.txt b/models/optical_flow_estimation_raft/BSD-3-LICENSE.txt deleted file mode 100644 index ed13d840..00000000 --- a/models/optical_flow_estimation_raft/BSD-3-LICENSE.txt +++ /dev/null @@ -1,29 +0,0 @@ -BSD 3-Clause License - -Copyright (c) 2020, princeton-vl -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/models/optical_flow_estimation_raft/MITLICENSE.txt b/models/optical_flow_estimation_raft/MITLICENSE.txt deleted file mode 100644 index 2aa0e4e0..00000000 --- a/models/optical_flow_estimation_raft/MITLICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2021 Jeong-gi Kwak - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/models/optical_flow_estimation_raft/README.md b/models/optical_flow_estimation_raft/README.md deleted file mode 100644 index bc4f8406..00000000 --- a/models/optical_flow_estimation_raft/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# RAFT -This model is originally created by Zachary Teed and Jia Deng of Princeton University. The source code for the model is at [their repository on GitHub](https://github.com/princeton-vl/RAFT), and the original [research paper](https://arxiv.org/abs/2003.12039) is published on [Arxiv](https://arxiv.org/abs/2003.12039). 
The model was converted to ONNX by [PINTO0309](https://github.com/PINTO0309) in his [model zoo](https://github.com/PINTO0309/PINTO_model_zoo/tree/main/252_RAFT). The ONNX model has several variations depending on the training dataset and input dimesnions. The model used in this demo is trained on Sintel dataset with input size of 360 $\times$ 480. - -**Note**: -- `optical_flow_estimation_raft_2023aug_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. - -## Demo - -Run any of the following commands to try the demo: - -```shell -# run on camera input -python demo.py - -# run on two images and visualize result -python demo.py --input1 /path/to/image1 --input2 /path/to/image2 -vis - -# run on two images and save result -python demo.py --input1 /path/to/image1 --input2 /path/to/image2 -s - -# run on two images and both save and visualize result -python demo.py --input1 /path/to/image1 --input2 /path/to/image2 -s -vis - -# run on one video and visualize result -python demo.py --video /path/to/video -vis - -# run on one video and save result -python demo.py --video /path/to/video -s - -# run on one video and both save and visualize result -python demo.py --video /path/to/video -s -vis - -# get help regarding various parameters -python demo.py --help -``` - -While running on video, you can press q anytime to stop. The model demo runs on camera input, video input, or takes two images to compute optical flow across frames. The save and vis arguments of the shell command are only valid in the case of using video or two images as input. To run a different variation of the model, such as a model trained on a different dataset or with a different input size, refer to [RAFT ONNX in PINTO Model Zoo](https://github.com/PINTO0309/PINTO_model_zoo/tree/main/252_RAFT) to download your chosen model. 
And if your chosen model has different input shape from 360 $\times$ 480, **change the input shape in raft.py line 15 to the new input shape**. Then, add the model path to the --model argument of the shell command, such as in the following example commands: - -```shell -# run on camera input -python demo.py --model /path/to/model -# run on two images -python demo.py --input1 /path/to/image1 --input2 /path/to/image2 --model /path/to/model -# run on video -python demo.py --video /path/to/video --model /path/to/model -``` - -### Example outputs -The visualization argument displays both image inputs as well as out result. - -![Visualization example](./example_outputs/vis.png) - -The save argument saves the result only. - -![Output example](./example_outputs/result.jpg) - - - -## License - -The original RAFT model is under [BSD-3-Clause license](./BSD-3-LICENSE.txt).
-The conversion of the RAFT model to the ONNX format by [PINTO0309](https://github.com/PINTO0309/PINTO_model_zoo/tree/main/252_RAFT) is under [MIT License](./MITLICENSE.txt).
-Some of the code in demo.py and raft.py is adapted from [ibaiGorordo's repository](https://github.com/ibaiGorordo/ONNX-RAFT-Optical-Flow-Estimation/tree/main) under [BSD-3-Clause license](./BSD-3-LICENSE.txt).
- -## Reference - -- https://arxiv.org/abs/2003.12039 -- https://github.com/princeton-vl/RAFT -- https://github.com/ibaiGorordo/ONNX-RAFT-Optical-Flow-Estimation/tree/main -- https://github.com/PINTO0309/PINTO_model_zoo/tree/main/252_RAFT diff --git a/models/optical_flow_estimation_raft/demo.py b/models/optical_flow_estimation_raft/demo.py deleted file mode 100644 index 821be26c..00000000 --- a/models/optical_flow_estimation_raft/demo.py +++ /dev/null @@ -1,315 +0,0 @@ -import argparse - -import cv2 as cv -import numpy as np - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from raft import Raft - -parser = argparse.ArgumentParser(description='RAFT (https://github.com/princeton-vl/RAFT)') -parser.add_argument('--input1', '-i1', type=str, - help='Usage: Set input1 path to first image, omit if using camera or video.') -parser.add_argument('--input2', '-i2', type=str, - help='Usage: Set input2 path to second image, omit if using camera or video.') -parser.add_argument('--video', '-vid', type=str, - help='Usage: Set video path to desired input video, omit if using camera or two image inputs.') -parser.add_argument('--model', '-m', type=str, default='optical_flow_estimation_raft_2023aug.onnx', - help='Usage: Set model path, defaults to optical_flow_estimation_raft_2023aug.onnx.') -parser.add_argument('--save', '-s', action='store_true', - help='Usage: Specify to save a file with results. Invalid in case of camera input.') -parser.add_argument('--visual', '-vis', action='store_true', - help='Usage: Specify to open a new window to show results. Invalid in case of camera input.') -args = parser.parse_args() - -UNKNOWN_FLOW_THRESH = 1e7 - -def make_color_wheel(): - """ Generate color wheel according Middlebury color code. 
- - Returns: - Color wheel(numpy.ndarray): Color wheel - """ - RY = 15 - YG = 6 - GC = 4 - CB = 11 - BM = 13 - MR = 6 - - ncols = RY + YG + GC + CB + BM + MR - - colorwheel = np.zeros([ncols, 3]) - - col = 0 - - # RY - colorwheel[0:RY, 0] = 255 - colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY)) - col += RY - - # YG - colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG)) - colorwheel[col:col+YG, 1] = 255 - col += YG - - # GC - colorwheel[col:col+GC, 1] = 255 - colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC)) - col += GC - - # CB - colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB)) - colorwheel[col:col+CB, 2] = 255 - col += CB - - # BM - colorwheel[col:col+BM, 2] = 255 - colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM)) - col += + BM - - # MR - colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR)) - colorwheel[col:col+MR, 0] = 255 - - return colorwheel - -colorwheel = make_color_wheel() - -def compute_color(u, v): - """ Compute optical flow color map - - Args: - u(numpy.ndarray): Optical flow horizontal map - v(numpy.ndarray): Optical flow vertical map - - Returns: - img (numpy.ndarray): Optical flow in color code - """ - [h, w] = u.shape - img = np.zeros([h, w, 3]) - nanIdx = np.isnan(u) | np.isnan(v) - u[nanIdx] = 0 - v[nanIdx] = 0 - - ncols = np.size(colorwheel, 0) - - rad = np.sqrt(u**2+v**2) - - a = np.arctan2(-v, -u) / np.pi - - fk = (a+1) / 2 * (ncols - 1) + 1 - - k0 = np.floor(fk).astype(int) - - k1 = k0 + 1 - k1[k1 == ncols+1] = 1 - f = fk - k0 - - for i in range(0, np.size(colorwheel,1)): - tmp = colorwheel[:, i] - col0 = tmp[k0-1] / 255 - col1 = tmp[k1-1] / 255 - col = (1-f) * col0 + f * col1 - - idx = rad <= 1 - col[idx] = 1-rad[idx]*(1-col[idx]) - notidx = np.logical_not(idx) - - col[notidx] *= 0.75 - img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx))) - - return img - -def 
flow_to_image(flow): - """Convert flow into middlebury color code image - - Args: - flow (np.ndarray): The computed flow map - - Returns: - (np.ndarray): Image corresponding to the flow map. - """ - u = flow[:, :, 0] - v = flow[:, :, 1] - - maxu = -999. - maxv = -999. - minu = 999. - minv = 999. - - idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH) - u[idxUnknow] = 0 - v[idxUnknow] = 0 - - maxu = max(maxu, np.max(u)) - minu = min(minu, np.min(u)) - - maxv = max(maxv, np.max(v)) - minv = min(minv, np.min(v)) - - rad = np.sqrt(u ** 2 + v ** 2) - maxrad = max(-1, np.max(rad)) - - u = u/(maxrad + np.finfo(float).eps) - v = v/(maxrad + np.finfo(float).eps) - - img = compute_color(u, v) - - idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2) - img[idx] = 0 - - return np.uint8(img) - - -def draw_flow(flow_map, img_width, img_height): - """Convert flow map to image - - Args: - flow_map (np.ndarray): The computed flow map - img_width (int): The width of the first input photo - img_height (int): The height of the first input photo - - Returns: - (np.ndarray): Image corresponding to the flow map. - """ - # Convert flow to image - flow_img = flow_to_image(flow_map) - # Convert to BGR - flow_img = cv.cvtColor(flow_img, cv.COLOR_RGB2BGR) - # Resize the depth map to match the input image shape - return cv.resize(flow_img, (img_width, img_height)) - - -def visualize(image1, image2, flow_img): - """ - Combine two input images with resulting flow img and display them together - - Args: - image1 (np.ndarray): The first input image. - imag2 (np.ndarray): The second input image. - flow_img (np.ndarray): The output flow map drawn as an image - - Returns: - combined_img (np.ndarray): The visualized result. 
- """ - combined_img = np.hstack((image1, image2, flow_img)) - cv.namedWindow("Estimated flow", cv.WINDOW_NORMAL) - cv.imshow("Estimated flow", combined_img) - cv.waitKey(0) - return combined_img - - -if __name__ == '__main__': - # Instantiate RAFT - model = Raft(modelPath=args.model) - - if args.input1 is not None and args.input2 is not None: - # Read image - image1 = cv.imread(args.input1) - image2 = cv.imread(args.input2) - img_height, img_width, img_channels = image1.shape - - # Inference - result = model.infer(image1, image2) - - # Create flow image based on the result flow map - flow_image = draw_flow(result, img_width, img_height) - - # Save results if save is true - if args.save: - print('Results saved to result.jpg\n') - cv.imwrite('result.jpg', flow_image) - - # Visualize results in a new window - if args.visual: - input_output_visualization = visualize(image1, image2, flow_image) - - - elif args.video is not None: - cap = cv.VideoCapture(args.video) - FLOW_FRAME_OFFSET = 3 # Number of frame difference to estimate the optical flow - - if args.visual: - cv.namedWindow("Estimated flow", cv.WINDOW_NORMAL) - - frame_list = [] - img_array = [] - frame_num = 0 - while cap.isOpened(): - try: - # Read frame from the video - ret, prev_frame = cap.read() - frame_list.append(prev_frame) - if not ret: - break - except: - continue - - frame_num += 1 - if frame_num <= FLOW_FRAME_OFFSET: - continue - else: - frame_num = 0 - - result = model.infer(frame_list[0], frame_list[-1]) - img_height, img_width, img_channels = frame_list[0].shape - flow_img = draw_flow(result, img_width, img_height) - - alpha = 0.6 - combined_img = cv.addWeighted(frame_list[0], alpha, flow_img, (1-alpha),0) - - if args.visual: - cv.imshow("Estimated flow", combined_img) - img_array.append(combined_img) - # Remove the oldest frame - frame_list.pop(0) - - # Press key q to stop - if cv.waitKey(1) == ord('q'): - break - - cap.release() - - if args.save: - fourcc = cv.VideoWriter_fourcc(*'mp4v') - 
height,width,layers= img_array[0].shape - video = cv.VideoWriter('result.mp4', fourcc, 30.0, (width, height), isColor=True) - for img in img_array: - video.write(img) - video.release() - - cv.destroyAllWindows() - - - else: # Omit input to call default camera - deviceId = 0 - cap = cv.VideoCapture(deviceId) - w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH)) - h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) - - tm = cv.TickMeter() - while cv.waitKey(30) < 0: - hasFrame1, frame1 = cap.read() - hasFrame2, frame2 = cap.read() - if not hasFrame1: - print('First frame was not grabbed!') - break - - if not hasFrame2: - print('Second frame was not grabbed!') - break - - # Inference - tm.start() - result = model.infer(frame1, frame2) - tm.stop() - result = draw_flow(result, w, h) - - # Draw results on the input image - frame = visualize(frame1, frame2, result) - - tm.reset() diff --git a/models/optical_flow_estimation_raft/example_outputs/result.jpg b/models/optical_flow_estimation_raft/example_outputs/result.jpg deleted file mode 100644 index 893c7b15..00000000 --- a/models/optical_flow_estimation_raft/example_outputs/result.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b5b83f6dfeefaffc7e181f343fe134168f713ad01a74b34afb7fecccfa67268d -size 35139 diff --git a/models/optical_flow_estimation_raft/example_outputs/vis.png b/models/optical_flow_estimation_raft/example_outputs/vis.png deleted file mode 100644 index a9860bc2..00000000 --- a/models/optical_flow_estimation_raft/example_outputs/vis.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ecc4e01e285eeaddd195c14095d71a7a89c1c1c41fa45764f702931b62bcaa5d -size 1827108 diff --git a/models/optical_flow_estimation_raft/optical_flow_estimation_raft_2023aug.onnx b/models/optical_flow_estimation_raft/optical_flow_estimation_raft_2023aug.onnx deleted file mode 100644 index b084b822..00000000 --- 
a/models/optical_flow_estimation_raft/optical_flow_estimation_raft_2023aug.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3b479e0447894245b822bf0b7392155d31ccd22bb9b0ee55503066bf6f6f869a -size 64119337 diff --git a/models/optical_flow_estimation_raft/optical_flow_estimation_raft_2023aug_int8bq.onnx b/models/optical_flow_estimation_raft/optical_flow_estimation_raft_2023aug_int8bq.onnx deleted file mode 100644 index f16aaed3..00000000 --- a/models/optical_flow_estimation_raft/optical_flow_estimation_raft_2023aug_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d54cf6e44be6da7efa8843b696b24cf24a8cf03e877dd5207ae5556cfa6f11ec -size 48845109 diff --git a/models/optical_flow_estimation_raft/raft.py b/models/optical_flow_estimation_raft/raft.py deleted file mode 100644 index e0557cce..00000000 --- a/models/optical_flow_estimation_raft/raft.py +++ /dev/null @@ -1,53 +0,0 @@ -# This file is part of OpenCV Zoo project. 
- -import cv2 as cv -import numpy as np - - -class Raft: - def __init__(self, modelPath): - self._modelPath = modelPath - self.model = cv.dnn.readNet(self._modelPath) - - self.input_names = ['0', '1'] - self.first_input_name = self.input_names[0] - self.second_input_name = self.input_names[1] - self.input_shape = [360, 480] # change if going to use different model with different input shape - self.input_height = self.input_shape[0] - self.input_width = self.input_shape[1] - - @property - def name(self): - return self.__class__.__name__ - - def _preprocess(self, image): - - image = cv.cvtColor(image, cv.COLOR_BGR2RGB) - img_input = cv.resize(image, (self.input_width,self.input_height)) - img_input = img_input.transpose(2, 0, 1) - img_input = img_input[np.newaxis,:,:,:] - img_input = img_input.astype(np.float32) - return img_input - - def infer(self, image1, image2): - - # Preprocess - input_1 = self._preprocess(image1) - input_2 = self._preprocess(image2) - - # Forward - self.model.setInput(input_1, self.first_input_name) - self.model.setInput(input_2, self.second_input_name) - layer_names = self.model.getLayerNames() - outputlayers = [layer_names[i-1] for i in self.model.getUnconnectedOutLayers()] - output = self.model.forward(outputlayers) - - # Postprocess - results = self._postprocess(output) - - return results - - def _postprocess(self, output): - - flow_map = output[1][0].transpose(1, 2, 0) - return flow_map \ No newline at end of file diff --git a/models/palm_detection_mediapipe/CMakeLists.txt b/models/palm_detection_mediapipe/CMakeLists.txt deleted file mode 100644 index 823d7e17..00000000 --- a/models/palm_detection_mediapipe/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ -cmake_minimum_required(VERSION 3.24.0) -project(opencv_zoo_qrcode_wechatqrcode) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") - -# Find OpenCV -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS 
${OPENCV_INSTALLATION_PATH}) - -add_executable(demo demo.cpp) -target_link_libraries(demo ${OpenCV_LIBS}) diff --git a/models/palm_detection_mediapipe/LICENSE b/models/palm_detection_mediapipe/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/models/palm_detection_mediapipe/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/models/palm_detection_mediapipe/README.md b/models/palm_detection_mediapipe/README.md deleted file mode 100644 index ff889c13..00000000 --- a/models/palm_detection_mediapipe/README.md +++ /dev/null @@ -1,60 +0,0 @@ -# Palm detector from MediaPipe Handpose - -This model detects palm bounding boxes and palm landmarks, and is converted from TFLite to ONNX using following tools: - -- TFLite model to ONNX: https://github.com/onnx/tensorflow-onnx -- simplified by [onnx-simplifier](https://github.com/daquexian/onnx-simplifier) - -SSD Anchors are generated from [GenMediaPipePalmDectionSSDAnchors](https://github.com/VimalMollyn/GenMediaPipePalmDectionSSDAnchors) - -**Note**: -- Visit https://github.com/google/mediapipe/blob/master/docs/solutions/models.md#hands for models of larger scale. -- `palm_detection_mediapipe_2023feb_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. 
- -## Demo - -### Python - -Run the following commands to try the demo: - -```bash -# detect on camera input -python demo.py -# detect on an image -python demo.py -i /path/to/image -v - -# get help regarding various parameters -python demo.py --help -``` - -### C++ - -Install latest OpenCV (with opencv_contrib) and CMake >= 3.24.0 to get started with: - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# detect on camera input -./build/demo -# detect on an image -./build/demo -i=/path/to/image -v -# get help messages -./build/demo -h -``` - -### Example outputs - -![webcam demo](./example_outputs/mppalmdet_demo.gif) - -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). - -## Reference - -- MediaPipe Handpose: https://developers.google.com/mediapipe/solutions/vision/hand_landmarker -- MediaPipe hands model and model card: https://github.com/google/mediapipe/blob/master/docs/solutions/models.md#hands -- Handpose TFJS:https://github.com/tensorflow/tfjs-models/tree/master/handpose -- Int8 model quantized with rgb evaluation set of FreiHAND: https://lmb.informatik.uni-freiburg.de/resources/datasets/FreihandDataset.en.html \ No newline at end of file diff --git a/models/palm_detection_mediapipe/demo.cpp b/models/palm_detection_mediapipe/demo.cpp deleted file mode 100644 index 96bd07ee..00000000 --- a/models/palm_detection_mediapipe/demo.cpp +++ /dev/null @@ -1,2379 +0,0 @@ -#include -#include -#include -#include -#include - -const std::vector> - backend_target_pairs = { - {cv::dnn::DNN_BACKEND_OPENCV, cv::dnn::DNN_TARGET_CPU}, - {cv::dnn::DNN_BACKEND_CUDA, cv::dnn::DNN_TARGET_CUDA}, - {cv::dnn::DNN_BACKEND_CUDA, cv::dnn::DNN_TARGET_CUDA_FP16}, - {cv::dnn::DNN_BACKEND_TIMVX, cv::dnn::DNN_TARGET_NPU}, - {cv::dnn::DNN_BACKEND_CANN, cv::dnn::DNN_TARGET_NPU}}; - -class MPPalmDet { - private: - std::string 
model_path; - float nms_threshold; - float score_threshold; - int topK; - int backend_id; - int target_id; - cv::Size input_size; - cv::dnn::Net model; - std::vector anchors; - - std::vector loadAnchors(); - - std::pair preprocess(const cv::Mat& image) { - cv::Point2i pad_bias(0, 0); - float ratio = - std::min(static_cast(input_size.width) / image.cols, - static_cast(input_size.height) / image.rows); - - cv::Mat processed_image; - if (image.rows != input_size.height || image.cols != input_size.width) { - cv::Size ratio_size(static_cast(image.cols * ratio), - static_cast(image.rows * ratio)); - cv::resize(image, processed_image, ratio_size); - - int pad_h = input_size.height - ratio_size.height; - int pad_w = input_size.width - ratio_size.width; - pad_bias.x = pad_w / 2; - pad_bias.y = pad_h / 2; - - cv::copyMakeBorder(processed_image, processed_image, pad_bias.y, - pad_h - pad_bias.y, pad_bias.x, - pad_w - pad_bias.x, cv::BORDER_CONSTANT, - cv::Scalar(0, 0, 0)); - } else { - processed_image = image.clone(); - } - - // Create blob with correct parameters - cv::Mat blob; - cv::dnn::Image2BlobParams params; - params.datalayout = cv::dnn::DNN_LAYOUT_NHWC; - params.ddepth = CV_32F; - params.mean = cv::Scalar::all(0); - params.scalefactor = cv::Scalar::all(1.0 / 255.0); - params.size = input_size; - params.swapRB = true; - params.paddingmode = cv::dnn::DNN_PMODE_NULL; - - blob = cv::dnn::blobFromImageWithParams(processed_image, params); - - pad_bias.x = static_cast(pad_bias.x / ratio); - pad_bias.y = static_cast(pad_bias.y / ratio); - - return {blob, pad_bias}; - } - - std::vector> postprocess( - const std::vector& output_blobs, const cv::Size& original_size, - const cv::Point2i& pad_bias) { - cv::Mat scores = - output_blobs[1].reshape(1, output_blobs[1].total() / 1); - cv::Mat boxes = - output_blobs[0].reshape(1, output_blobs[0].total() / 18); - - std::vector score_vec; - std::vector boxes_vec; - std::vector> landmarks_vec; - - float scale = 
std::max(original_size.height, original_size.width); - - // Process all detections first - for (int i = 0; i < scores.rows; i++) { - float score = 1.0f / (1.0f + std::exp(-scores.at(i, 0))); - - // Extract box and landmark deltas - cv::Mat box_delta = boxes.row(i).colRange(0, 4); - cv::Mat landmark_delta = boxes.row(i).colRange(4, 18); - cv::Point2f anchor = anchors[i]; - - // Normalize box deltas by input size - cv::Point2f cxy_delta(box_delta.at(0) / input_size.width, - box_delta.at(1) / input_size.height); - cv::Point2f wh_delta(box_delta.at(2) / input_size.width, - box_delta.at(3) / input_size.height); - - // Calculate box coordinates (scale first, then subtract pad_bias) - cv::Point2f xy1( - (cxy_delta.x - wh_delta.x / 2 + anchor.x) * scale - pad_bias.x, - (cxy_delta.y - wh_delta.y / 2 + anchor.y) * scale - pad_bias.y); - cv::Point2f xy2( - (cxy_delta.x + wh_delta.x / 2 + anchor.x) * scale - pad_bias.x, - (cxy_delta.y + wh_delta.y / 2 + anchor.y) * scale - pad_bias.y); - - if (score > score_threshold) { - score_vec.push_back(score); - boxes_vec.push_back( - cv::Rect2f(xy1.x, xy1.y, xy2.x - xy1.x, xy2.y - xy1.y)); - - // Process landmarks - std::vector landmarks; - for (int j = 0; j < 7; j++) { - // Normalize by input size - float dx = - landmark_delta.at(j * 2) / input_size.width; - float dy = - landmark_delta.at(j * 2 + 1) / input_size.height; - - // Add anchor - dx += anchor.x; - dy += anchor.y; - - // Scale and subtract pad_bias in one step - dx = dx * scale - pad_bias.x; - dy = dy * scale - pad_bias.y; - - landmarks.push_back(cv::Point2f(dx, dy)); - } - landmarks_vec.push_back(landmarks); - } - } - - // Perform NMS - std::vector indices; - std::vector boxes_int; - for (const auto& box : boxes_vec) { - boxes_int.push_back(cv::Rect( - static_cast(box.x), static_cast(box.y), - static_cast(box.width), static_cast(box.height))); - } - cv::dnn::NMSBoxes(boxes_int, score_vec, score_threshold, nms_threshold, - indices); - - // Prepare results - std::vector> 
results; - for (int idx : indices) { - std::vector result; - result.push_back(boxes_vec[idx].x); - result.push_back(boxes_vec[idx].y); - result.push_back(boxes_vec[idx].x + boxes_vec[idx].width); - result.push_back(boxes_vec[idx].y + boxes_vec[idx].height); - - for (const auto& point : landmarks_vec[idx]) { - result.push_back(point.x); - result.push_back(point.y); - } - result.push_back(score_vec[idx]); - results.push_back(result); - } - - return results; - } - - public: - MPPalmDet(const std::string& modelPath, float nmsThreshold = 0.3f, - float scoreThreshold = 0.5f, int topK = 5000, - int backendId = cv::dnn::DNN_BACKEND_DEFAULT, - int targetId = cv::dnn::DNN_TARGET_CPU) - : model_path(modelPath), - nms_threshold(nmsThreshold), - score_threshold(scoreThreshold), - topK(topK), - backend_id(backendId), - target_id(targetId), - input_size(192, 192) { - model = cv::dnn::readNet(model_path); - model.setPreferableBackend(backend_id); - model.setPreferableTarget(target_id); - anchors = loadAnchors(); - } - - void setBackendAndTarget(int backendId, int targetId) { - backend_id = backendId; - target_id = targetId; - model.setPreferableBackend(backend_id); - model.setPreferableTarget(target_id); - } - - std::vector> infer(const cv::Mat& image) { - std::pair preprocess_result = preprocess(image); - cv::Mat preprocessed_image = preprocess_result.first; - cv::Point2i pad_bias = preprocess_result.second; - model.setInput(preprocessed_image); - std::vector outputs; - model.forward(outputs, model.getUnconnectedOutLayersNames()); - return postprocess(outputs, image.size(), pad_bias); - } -}; - -cv::Mat visualize(const cv::Mat& image, - const std::vector>& results, - bool print_results = false, float fps = 0.0f) { - cv::Mat output = image.clone(); - - if (fps > 0) { - cv::putText(output, cv::format("FPS: %.2f", fps), cv::Point(0, 15), - cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 255)); - } - - for (size_t i = 0; i < results.size(); i++) { - const std::vector& result = 
results[i]; - float score = result.back(); - - // Draw box - using direct coordinates - cv::rectangle( - output, - cv::Point(static_cast(result[0]), static_cast(result[1])), - cv::Point(static_cast(result[2]), static_cast(result[3])), - cv::Scalar(0, 255, 0), 2); - - // Put score - using first coordinate of box - cv::putText(output, cv::format("%.4f", score), - cv::Point(static_cast(result[0]), - static_cast(result[1]) + 12), - cv::FONT_HERSHEY_DUPLEX, 0.5, cv::Scalar(0, 255, 0)); - - // Draw landmarks - for (size_t j = 0; j < 7; j++) { - cv::Point point(static_cast(result[4 + j * 2]), - static_cast(result[4 + j * 2 + 1])); - cv::circle(output, point, 2, cv::Scalar(0, 0, 255), 2); - } - - if (print_results) { - std::cout << "-----------palm " << i + 1 << "-----------\n"; - std::cout << "score: " << score << "\n"; - std::cout << "palm box: [" << result[0] << ", " << result[1] << ", " - << result[2] << ", " << result[3] << "]\n"; - std::cout << "palm landmarks:\n"; - for (size_t j = 0; j < 7; j++) { - std::cout << "\t(" << result[4 + j * 2] << ", " - << result[4 + j * 2 + 1] << ")\n"; - } - } - } - - return output; -} - -int main(int argc, char** argv) { - cv::CommandLineParser parser( - argc, argv, - "{help h usage ? 
| | print this message }" - "{input i | | path to input image }" - "{model m | palm_detection_mediapipe_2023feb.onnx | path to " - "model file }" - "{backend_target bt | 0 | backend-target pair (0:OpenCV CPU, 1:CUDA, " - "2:CUDA FP16, 3:TIM-VX NPU, 4:CANN NPU) }" - "{score_threshold | 0.8 | minimum confidence threshold }" - "{nms_threshold | 0.3 | NMS threshold }" - "{save s | | save results to file }" - "{vis v | | visualize results }"); - - if (parser.has("help")) { - parser.printMessage(); - return 0; - } - - int backend_target = parser.get("backend_target"); - if (backend_target < 0 || backend_target >= backend_target_pairs.size()) { - std::cerr << "Error: Invalid backend_target value" << std::endl; - return -1; - } - - int backend_id = backend_target_pairs[backend_target].first; - int target_id = backend_target_pairs[backend_target].second; - - // Create detector directly - MPPalmDet detector(parser.get("model"), - parser.get("nms_threshold"), - parser.get("score_threshold"), - 5000, // topK - backend_id, target_id); - - // Process image if input is provided - if (parser.has("input")) { - cv::Mat image = cv::imread(parser.get("input")); - if (image.empty()) { - std::cerr << "Error: Could not read image: " - << parser.get("input") << std::endl; - return -1; - } - - std::vector> results = detector.infer(image); - if (results.empty()) { - std::cout << "Hand not detected" << std::endl; - } - - cv::Mat output = visualize(image, results, true); - - if (parser.has("save")) { - cv::imwrite("result.jpg", output); - std::cout << "Results saved to result.jpg\n" << std::endl; - } - - if (parser.has("vis")) { - cv::namedWindow(parser.get("input"), - cv::WINDOW_AUTOSIZE); - cv::imshow(parser.get("input"), output); - cv::waitKey(0); - } - } - // Process camera feed if no input image - else { - cv::VideoCapture cap(0); - if (!cap.isOpened()) { - std::cerr << "Error: Could not open camera" << std::endl; - return -1; - } - - std::chrono::steady_clock::time_point start_time = - 
std::chrono::steady_clock::now(); - int frame_count = 0; - - while (true) { - cv::Mat frame; - cap >> frame; - if (frame.empty()) { - std::cout << "No frames grabbed!" << std::endl; - break; - } - - std::vector> results = detector.infer(frame); - frame_count++; - - std::chrono::steady_clock::time_point current_time = - std::chrono::steady_clock::now(); - float fps = - frame_count / (std::chrono::duration_cast( - current_time - start_time) - .count() + - 1); - - cv::Mat output = visualize(frame, results, false, fps); - cv::imshow("MPPalmDet Demo", output); - - if (cv::waitKey(1) >= 0) break; - } - } - - return 0; -} - -std::vector MPPalmDet::loadAnchors() { - std::vector anchors = { -{0.02083333f, 0.02083333f}, - {0.02083333f, 0.02083333f}, - {0.06250000f, 0.02083333f}, - {0.06250000f, 0.02083333f}, - {0.10416666f, 0.02083333f}, - {0.10416666f, 0.02083333f}, - {0.14583333f, 0.02083333f}, - {0.14583333f, 0.02083333f}, - {0.18750000f, 0.02083333f}, - {0.18750000f, 0.02083333f}, - {0.22916667f, 0.02083333f}, - {0.22916667f, 0.02083333f}, - {0.27083334f, 0.02083333f}, - {0.27083334f, 0.02083333f}, - {0.31250000f, 0.02083333f}, - {0.31250000f, 0.02083333f}, - {0.35416666f, 0.02083333f}, - {0.35416666f, 0.02083333f}, - {0.39583334f, 0.02083333f}, - {0.39583334f, 0.02083333f}, - {0.43750000f, 0.02083333f}, - {0.43750000f, 0.02083333f}, - {0.47916666f, 0.02083333f}, - {0.47916666f, 0.02083333f}, - {0.52083330f, 0.02083333f}, - {0.52083330f, 0.02083333f}, - {0.56250000f, 0.02083333f}, - {0.56250000f, 0.02083333f}, - {0.60416670f, 0.02083333f}, - {0.60416670f, 0.02083333f}, - {0.64583330f, 0.02083333f}, - {0.64583330f, 0.02083333f}, - {0.68750000f, 0.02083333f}, - {0.68750000f, 0.02083333f}, - {0.72916670f, 0.02083333f}, - {0.72916670f, 0.02083333f}, - {0.77083330f, 0.02083333f}, - {0.77083330f, 0.02083333f}, - {0.81250000f, 0.02083333f}, - {0.81250000f, 0.02083333f}, - {0.85416670f, 0.02083333f}, - {0.85416670f, 0.02083333f}, - {0.89583330f, 0.02083333f}, - {0.89583330f, 
0.02083333f}, - {0.93750000f, 0.02083333f}, - {0.93750000f, 0.02083333f}, - {0.97916670f, 0.02083333f}, - {0.97916670f, 0.02083333f}, - {0.02083333f, 0.06250000f}, - {0.02083333f, 0.06250000f}, - {0.06250000f, 0.06250000f}, - {0.06250000f, 0.06250000f}, - {0.10416666f, 0.06250000f}, - {0.10416666f, 0.06250000f}, - {0.14583333f, 0.06250000f}, - {0.14583333f, 0.06250000f}, - {0.18750000f, 0.06250000f}, - {0.18750000f, 0.06250000f}, - {0.22916667f, 0.06250000f}, - {0.22916667f, 0.06250000f}, - {0.27083334f, 0.06250000f}, - {0.27083334f, 0.06250000f}, - {0.31250000f, 0.06250000f}, - {0.31250000f, 0.06250000f}, - {0.35416666f, 0.06250000f}, - {0.35416666f, 0.06250000f}, - {0.39583334f, 0.06250000f}, - {0.39583334f, 0.06250000f}, - {0.43750000f, 0.06250000f}, - {0.43750000f, 0.06250000f}, - {0.47916666f, 0.06250000f}, - {0.47916666f, 0.06250000f}, - {0.52083330f, 0.06250000f}, - {0.52083330f, 0.06250000f}, - {0.56250000f, 0.06250000f}, - {0.56250000f, 0.06250000f}, - {0.60416670f, 0.06250000f}, - {0.60416670f, 0.06250000f}, - {0.64583330f, 0.06250000f}, - {0.64583330f, 0.06250000f}, - {0.68750000f, 0.06250000f}, - {0.68750000f, 0.06250000f}, - {0.72916670f, 0.06250000f}, - {0.72916670f, 0.06250000f}, - {0.77083330f, 0.06250000f}, - {0.77083330f, 0.06250000f}, - {0.81250000f, 0.06250000f}, - {0.81250000f, 0.06250000f}, - {0.85416670f, 0.06250000f}, - {0.85416670f, 0.06250000f}, - {0.89583330f, 0.06250000f}, - {0.89583330f, 0.06250000f}, - {0.93750000f, 0.06250000f}, - {0.93750000f, 0.06250000f}, - {0.97916670f, 0.06250000f}, - {0.97916670f, 0.06250000f}, - {0.02083333f, 0.10416666f}, - {0.02083333f, 0.10416666f}, - {0.06250000f, 0.10416666f}, - {0.06250000f, 0.10416666f}, - {0.10416666f, 0.10416666f}, - {0.10416666f, 0.10416666f}, - {0.14583333f, 0.10416666f}, - {0.14583333f, 0.10416666f}, - {0.18750000f, 0.10416666f}, - {0.18750000f, 0.10416666f}, - {0.22916667f, 0.10416666f}, - {0.22916667f, 0.10416666f}, - {0.27083334f, 0.10416666f}, - {0.27083334f, 0.10416666f}, - 
{0.31250000f, 0.10416666f}, - {0.31250000f, 0.10416666f}, - {0.35416666f, 0.10416666f}, - {0.35416666f, 0.10416666f}, - {0.39583334f, 0.10416666f}, - {0.39583334f, 0.10416666f}, - {0.43750000f, 0.10416666f}, - {0.43750000f, 0.10416666f}, - {0.47916666f, 0.10416666f}, - {0.47916666f, 0.10416666f}, - {0.52083330f, 0.10416666f}, - {0.52083330f, 0.10416666f}, - {0.56250000f, 0.10416666f}, - {0.56250000f, 0.10416666f}, - {0.60416670f, 0.10416666f}, - {0.60416670f, 0.10416666f}, - {0.64583330f, 0.10416666f}, - {0.64583330f, 0.10416666f}, - {0.68750000f, 0.10416666f}, - {0.68750000f, 0.10416666f}, - {0.72916670f, 0.10416666f}, - {0.72916670f, 0.10416666f}, - {0.77083330f, 0.10416666f}, - {0.77083330f, 0.10416666f}, - {0.81250000f, 0.10416666f}, - {0.81250000f, 0.10416666f}, - {0.85416670f, 0.10416666f}, - {0.85416670f, 0.10416666f}, - {0.89583330f, 0.10416666f}, - {0.89583330f, 0.10416666f}, - {0.93750000f, 0.10416666f}, - {0.93750000f, 0.10416666f}, - {0.97916670f, 0.10416666f}, - {0.97916670f, 0.10416666f}, - {0.02083333f, 0.14583333f}, - {0.02083333f, 0.14583333f}, - {0.06250000f, 0.14583333f}, - {0.06250000f, 0.14583333f}, - {0.10416666f, 0.14583333f}, - {0.10416666f, 0.14583333f}, - {0.14583333f, 0.14583333f}, - {0.14583333f, 0.14583333f}, - {0.18750000f, 0.14583333f}, - {0.18750000f, 0.14583333f}, - {0.22916667f, 0.14583333f}, - {0.22916667f, 0.14583333f}, - {0.27083334f, 0.14583333f}, - {0.27083334f, 0.14583333f}, - {0.31250000f, 0.14583333f}, - {0.31250000f, 0.14583333f}, - {0.35416666f, 0.14583333f}, - {0.35416666f, 0.14583333f}, - {0.39583334f, 0.14583333f}, - {0.39583334f, 0.14583333f}, - {0.43750000f, 0.14583333f}, - {0.43750000f, 0.14583333f}, - {0.47916666f, 0.14583333f}, - {0.47916666f, 0.14583333f}, - {0.52083330f, 0.14583333f}, - {0.52083330f, 0.14583333f}, - {0.56250000f, 0.14583333f}, - {0.56250000f, 0.14583333f}, - {0.60416670f, 0.14583333f}, - {0.60416670f, 0.14583333f}, - {0.64583330f, 0.14583333f}, - {0.64583330f, 0.14583333f}, - {0.68750000f, 
0.14583333f}, - {0.68750000f, 0.14583333f}, - {0.72916670f, 0.14583333f}, - {0.72916670f, 0.14583333f}, - {0.77083330f, 0.14583333f}, - {0.77083330f, 0.14583333f}, - {0.81250000f, 0.14583333f}, - {0.81250000f, 0.14583333f}, - {0.85416670f, 0.14583333f}, - {0.85416670f, 0.14583333f}, - {0.89583330f, 0.14583333f}, - {0.89583330f, 0.14583333f}, - {0.93750000f, 0.14583333f}, - {0.93750000f, 0.14583333f}, - {0.97916670f, 0.14583333f}, - {0.97916670f, 0.14583333f}, - {0.02083333f, 0.18750000f}, - {0.02083333f, 0.18750000f}, - {0.06250000f, 0.18750000f}, - {0.06250000f, 0.18750000f}, - {0.10416666f, 0.18750000f}, - {0.10416666f, 0.18750000f}, - {0.14583333f, 0.18750000f}, - {0.14583333f, 0.18750000f}, - {0.18750000f, 0.18750000f}, - {0.18750000f, 0.18750000f}, - {0.22916667f, 0.18750000f}, - {0.22916667f, 0.18750000f}, - {0.27083334f, 0.18750000f}, - {0.27083334f, 0.18750000f}, - {0.31250000f, 0.18750000f}, - {0.31250000f, 0.18750000f}, - {0.35416666f, 0.18750000f}, - {0.35416666f, 0.18750000f}, - {0.39583334f, 0.18750000f}, - {0.39583334f, 0.18750000f}, - {0.43750000f, 0.18750000f}, - {0.43750000f, 0.18750000f}, - {0.47916666f, 0.18750000f}, - {0.47916666f, 0.18750000f}, - {0.52083330f, 0.18750000f}, - {0.52083330f, 0.18750000f}, - {0.56250000f, 0.18750000f}, - {0.56250000f, 0.18750000f}, - {0.60416670f, 0.18750000f}, - {0.60416670f, 0.18750000f}, - {0.64583330f, 0.18750000f}, - {0.64583330f, 0.18750000f}, - {0.68750000f, 0.18750000f}, - {0.68750000f, 0.18750000f}, - {0.72916670f, 0.18750000f}, - {0.72916670f, 0.18750000f}, - {0.77083330f, 0.18750000f}, - {0.77083330f, 0.18750000f}, - {0.81250000f, 0.18750000f}, - {0.81250000f, 0.18750000f}, - {0.85416670f, 0.18750000f}, - {0.85416670f, 0.18750000f}, - {0.89583330f, 0.18750000f}, - {0.89583330f, 0.18750000f}, - {0.93750000f, 0.18750000f}, - {0.93750000f, 0.18750000f}, - {0.97916670f, 0.18750000f}, - {0.97916670f, 0.18750000f}, - {0.02083333f, 0.22916667f}, - {0.02083333f, 0.22916667f}, - {0.06250000f, 0.22916667f}, - 
{0.06250000f, 0.22916667f}, - {0.10416666f, 0.22916667f}, - {0.10416666f, 0.22916667f}, - {0.14583333f, 0.22916667f}, - {0.14583333f, 0.22916667f}, - {0.18750000f, 0.22916667f}, - {0.18750000f, 0.22916667f}, - {0.22916667f, 0.22916667f}, - {0.22916667f, 0.22916667f}, - {0.27083334f, 0.22916667f}, - {0.27083334f, 0.22916667f}, - {0.31250000f, 0.22916667f}, - {0.31250000f, 0.22916667f}, - {0.35416666f, 0.22916667f}, - {0.35416666f, 0.22916667f}, - {0.39583334f, 0.22916667f}, - {0.39583334f, 0.22916667f}, - {0.43750000f, 0.22916667f}, - {0.43750000f, 0.22916667f}, - {0.47916666f, 0.22916667f}, - {0.47916666f, 0.22916667f}, - {0.52083330f, 0.22916667f}, - {0.52083330f, 0.22916667f}, - {0.56250000f, 0.22916667f}, - {0.56250000f, 0.22916667f}, - {0.60416670f, 0.22916667f}, - {0.60416670f, 0.22916667f}, - {0.64583330f, 0.22916667f}, - {0.64583330f, 0.22916667f}, - {0.68750000f, 0.22916667f}, - {0.68750000f, 0.22916667f}, - {0.72916670f, 0.22916667f}, - {0.72916670f, 0.22916667f}, - {0.77083330f, 0.22916667f}, - {0.77083330f, 0.22916667f}, - {0.81250000f, 0.22916667f}, - {0.81250000f, 0.22916667f}, - {0.85416670f, 0.22916667f}, - {0.85416670f, 0.22916667f}, - {0.89583330f, 0.22916667f}, - {0.89583330f, 0.22916667f}, - {0.93750000f, 0.22916667f}, - {0.93750000f, 0.22916667f}, - {0.97916670f, 0.22916667f}, - {0.97916670f, 0.22916667f}, - {0.02083333f, 0.27083334f}, - {0.02083333f, 0.27083334f}, - {0.06250000f, 0.27083334f}, - {0.06250000f, 0.27083334f}, - {0.10416666f, 0.27083334f}, - {0.10416666f, 0.27083334f}, - {0.14583333f, 0.27083334f}, - {0.14583333f, 0.27083334f}, - {0.18750000f, 0.27083334f}, - {0.18750000f, 0.27083334f}, - {0.22916667f, 0.27083334f}, - {0.22916667f, 0.27083334f}, - {0.27083334f, 0.27083334f}, - {0.27083334f, 0.27083334f}, - {0.31250000f, 0.27083334f}, - {0.31250000f, 0.27083334f}, - {0.35416666f, 0.27083334f}, - {0.35416666f, 0.27083334f}, - {0.39583334f, 0.27083334f}, - {0.39583334f, 0.27083334f}, - {0.43750000f, 0.27083334f}, - {0.43750000f, 
0.27083334f}, - {0.47916666f, 0.27083334f}, - {0.47916666f, 0.27083334f}, - {0.52083330f, 0.27083334f}, - {0.52083330f, 0.27083334f}, - {0.56250000f, 0.27083334f}, - {0.56250000f, 0.27083334f}, - {0.60416670f, 0.27083334f}, - {0.60416670f, 0.27083334f}, - {0.64583330f, 0.27083334f}, - {0.64583330f, 0.27083334f}, - {0.68750000f, 0.27083334f}, - {0.68750000f, 0.27083334f}, - {0.72916670f, 0.27083334f}, - {0.72916670f, 0.27083334f}, - {0.77083330f, 0.27083334f}, - {0.77083330f, 0.27083334f}, - {0.81250000f, 0.27083334f}, - {0.81250000f, 0.27083334f}, - {0.85416670f, 0.27083334f}, - {0.85416670f, 0.27083334f}, - {0.89583330f, 0.27083334f}, - {0.89583330f, 0.27083334f}, - {0.93750000f, 0.27083334f}, - {0.93750000f, 0.27083334f}, - {0.97916670f, 0.27083334f}, - {0.97916670f, 0.27083334f}, - {0.02083333f, 0.31250000f}, - {0.02083333f, 0.31250000f}, - {0.06250000f, 0.31250000f}, - {0.06250000f, 0.31250000f}, - {0.10416666f, 0.31250000f}, - {0.10416666f, 0.31250000f}, - {0.14583333f, 0.31250000f}, - {0.14583333f, 0.31250000f}, - {0.18750000f, 0.31250000f}, - {0.18750000f, 0.31250000f}, - {0.22916667f, 0.31250000f}, - {0.22916667f, 0.31250000f}, - {0.27083334f, 0.31250000f}, - {0.27083334f, 0.31250000f}, - {0.31250000f, 0.31250000f}, - {0.31250000f, 0.31250000f}, - {0.35416666f, 0.31250000f}, - {0.35416666f, 0.31250000f}, - {0.39583334f, 0.31250000f}, - {0.39583334f, 0.31250000f}, - {0.43750000f, 0.31250000f}, - {0.43750000f, 0.31250000f}, - {0.47916666f, 0.31250000f}, - {0.47916666f, 0.31250000f}, - {0.52083330f, 0.31250000f}, - {0.52083330f, 0.31250000f}, - {0.56250000f, 0.31250000f}, - {0.56250000f, 0.31250000f}, - {0.60416670f, 0.31250000f}, - {0.60416670f, 0.31250000f}, - {0.64583330f, 0.31250000f}, - {0.64583330f, 0.31250000f}, - {0.68750000f, 0.31250000f}, - {0.68750000f, 0.31250000f}, - {0.72916670f, 0.31250000f}, - {0.72916670f, 0.31250000f}, - {0.77083330f, 0.31250000f}, - {0.77083330f, 0.31250000f}, - {0.81250000f, 0.31250000f}, - {0.81250000f, 0.31250000f}, - 
{0.85416670f, 0.31250000f}, - {0.85416670f, 0.31250000f}, - {0.89583330f, 0.31250000f}, - {0.89583330f, 0.31250000f}, - {0.93750000f, 0.31250000f}, - {0.93750000f, 0.31250000f}, - {0.97916670f, 0.31250000f}, - {0.97916670f, 0.31250000f}, - {0.02083333f, 0.35416666f}, - {0.02083333f, 0.35416666f}, - {0.06250000f, 0.35416666f}, - {0.06250000f, 0.35416666f}, - {0.10416666f, 0.35416666f}, - {0.10416666f, 0.35416666f}, - {0.14583333f, 0.35416666f}, - {0.14583333f, 0.35416666f}, - {0.18750000f, 0.35416666f}, - {0.18750000f, 0.35416666f}, - {0.22916667f, 0.35416666f}, - {0.22916667f, 0.35416666f}, - {0.27083334f, 0.35416666f}, - {0.27083334f, 0.35416666f}, - {0.31250000f, 0.35416666f}, - {0.31250000f, 0.35416666f}, - {0.35416666f, 0.35416666f}, - {0.35416666f, 0.35416666f}, - {0.39583334f, 0.35416666f}, - {0.39583334f, 0.35416666f}, - {0.43750000f, 0.35416666f}, - {0.43750000f, 0.35416666f}, - {0.47916666f, 0.35416666f}, - {0.47916666f, 0.35416666f}, - {0.52083330f, 0.35416666f}, - {0.52083330f, 0.35416666f}, - {0.56250000f, 0.35416666f}, - {0.56250000f, 0.35416666f}, - {0.60416670f, 0.35416666f}, - {0.60416670f, 0.35416666f}, - {0.64583330f, 0.35416666f}, - {0.64583330f, 0.35416666f}, - {0.68750000f, 0.35416666f}, - {0.68750000f, 0.35416666f}, - {0.72916670f, 0.35416666f}, - {0.72916670f, 0.35416666f}, - {0.77083330f, 0.35416666f}, - {0.77083330f, 0.35416666f}, - {0.81250000f, 0.35416666f}, - {0.81250000f, 0.35416666f}, - {0.85416670f, 0.35416666f}, - {0.85416670f, 0.35416666f}, - {0.89583330f, 0.35416666f}, - {0.89583330f, 0.35416666f}, - {0.93750000f, 0.35416666f}, - {0.93750000f, 0.35416666f}, - {0.97916670f, 0.35416666f}, - {0.97916670f, 0.35416666f}, - {0.02083333f, 0.39583334f}, - {0.02083333f, 0.39583334f}, - {0.06250000f, 0.39583334f}, - {0.06250000f, 0.39583334f}, - {0.10416666f, 0.39583334f}, - {0.10416666f, 0.39583334f}, - {0.14583333f, 0.39583334f}, - {0.14583333f, 0.39583334f}, - {0.18750000f, 0.39583334f}, - {0.18750000f, 0.39583334f}, - {0.22916667f, 
0.39583334f}, - {0.22916667f, 0.39583334f}, - {0.27083334f, 0.39583334f}, - {0.27083334f, 0.39583334f}, - {0.31250000f, 0.39583334f}, - {0.31250000f, 0.39583334f}, - {0.35416666f, 0.39583334f}, - {0.35416666f, 0.39583334f}, - {0.39583334f, 0.39583334f}, - {0.39583334f, 0.39583334f}, - {0.43750000f, 0.39583334f}, - {0.43750000f, 0.39583334f}, - {0.47916666f, 0.39583334f}, - {0.47916666f, 0.39583334f}, - {0.52083330f, 0.39583334f}, - {0.52083330f, 0.39583334f}, - {0.56250000f, 0.39583334f}, - {0.56250000f, 0.39583334f}, - {0.60416670f, 0.39583334f}, - {0.60416670f, 0.39583334f}, - {0.64583330f, 0.39583334f}, - {0.64583330f, 0.39583334f}, - {0.68750000f, 0.39583334f}, - {0.68750000f, 0.39583334f}, - {0.72916670f, 0.39583334f}, - {0.72916670f, 0.39583334f}, - {0.77083330f, 0.39583334f}, - {0.77083330f, 0.39583334f}, - {0.81250000f, 0.39583334f}, - {0.81250000f, 0.39583334f}, - {0.85416670f, 0.39583334f}, - {0.85416670f, 0.39583334f}, - {0.89583330f, 0.39583334f}, - {0.89583330f, 0.39583334f}, - {0.93750000f, 0.39583334f}, - {0.93750000f, 0.39583334f}, - {0.97916670f, 0.39583334f}, - {0.97916670f, 0.39583334f}, - {0.02083333f, 0.43750000f}, - {0.02083333f, 0.43750000f}, - {0.06250000f, 0.43750000f}, - {0.06250000f, 0.43750000f}, - {0.10416666f, 0.43750000f}, - {0.10416666f, 0.43750000f}, - {0.14583333f, 0.43750000f}, - {0.14583333f, 0.43750000f}, - {0.18750000f, 0.43750000f}, - {0.18750000f, 0.43750000f}, - {0.22916667f, 0.43750000f}, - {0.22916667f, 0.43750000f}, - {0.27083334f, 0.43750000f}, - {0.27083334f, 0.43750000f}, - {0.31250000f, 0.43750000f}, - {0.31250000f, 0.43750000f}, - {0.35416666f, 0.43750000f}, - {0.35416666f, 0.43750000f}, - {0.39583334f, 0.43750000f}, - {0.39583334f, 0.43750000f}, - {0.43750000f, 0.43750000f}, - {0.43750000f, 0.43750000f}, - {0.47916666f, 0.43750000f}, - {0.47916666f, 0.43750000f}, - {0.52083330f, 0.43750000f}, - {0.52083330f, 0.43750000f}, - {0.56250000f, 0.43750000f}, - {0.56250000f, 0.43750000f}, - {0.60416670f, 0.43750000f}, - 
{0.60416670f, 0.43750000f}, - {0.64583330f, 0.43750000f}, - {0.64583330f, 0.43750000f}, - {0.68750000f, 0.43750000f}, - {0.68750000f, 0.43750000f}, - {0.72916670f, 0.43750000f}, - {0.72916670f, 0.43750000f}, - {0.77083330f, 0.43750000f}, - {0.77083330f, 0.43750000f}, - {0.81250000f, 0.43750000f}, - {0.81250000f, 0.43750000f}, - {0.85416670f, 0.43750000f}, - {0.85416670f, 0.43750000f}, - {0.89583330f, 0.43750000f}, - {0.89583330f, 0.43750000f}, - {0.93750000f, 0.43750000f}, - {0.93750000f, 0.43750000f}, - {0.97916670f, 0.43750000f}, - {0.97916670f, 0.43750000f}, - {0.02083333f, 0.47916666f}, - {0.02083333f, 0.47916666f}, - {0.06250000f, 0.47916666f}, - {0.06250000f, 0.47916666f}, - {0.10416666f, 0.47916666f}, - {0.10416666f, 0.47916666f}, - {0.14583333f, 0.47916666f}, - {0.14583333f, 0.47916666f}, - {0.18750000f, 0.47916666f}, - {0.18750000f, 0.47916666f}, - {0.22916667f, 0.47916666f}, - {0.22916667f, 0.47916666f}, - {0.27083334f, 0.47916666f}, - {0.27083334f, 0.47916666f}, - {0.31250000f, 0.47916666f}, - {0.31250000f, 0.47916666f}, - {0.35416666f, 0.47916666f}, - {0.35416666f, 0.47916666f}, - {0.39583334f, 0.47916666f}, - {0.39583334f, 0.47916666f}, - {0.43750000f, 0.47916666f}, - {0.43750000f, 0.47916666f}, - {0.47916666f, 0.47916666f}, - {0.47916666f, 0.47916666f}, - {0.52083330f, 0.47916666f}, - {0.52083330f, 0.47916666f}, - {0.56250000f, 0.47916666f}, - {0.56250000f, 0.47916666f}, - {0.60416670f, 0.47916666f}, - {0.60416670f, 0.47916666f}, - {0.64583330f, 0.47916666f}, - {0.64583330f, 0.47916666f}, - {0.68750000f, 0.47916666f}, - {0.68750000f, 0.47916666f}, - {0.72916670f, 0.47916666f}, - {0.72916670f, 0.47916666f}, - {0.77083330f, 0.47916666f}, - {0.77083330f, 0.47916666f}, - {0.81250000f, 0.47916666f}, - {0.81250000f, 0.47916666f}, - {0.85416670f, 0.47916666f}, - {0.85416670f, 0.47916666f}, - {0.89583330f, 0.47916666f}, - {0.89583330f, 0.47916666f}, - {0.93750000f, 0.47916666f}, - {0.93750000f, 0.47916666f}, - {0.97916670f, 0.47916666f}, - {0.97916670f, 
0.47916666f}, - {0.02083333f, 0.52083330f}, - {0.02083333f, 0.52083330f}, - {0.06250000f, 0.52083330f}, - {0.06250000f, 0.52083330f}, - {0.10416666f, 0.52083330f}, - {0.10416666f, 0.52083330f}, - {0.14583333f, 0.52083330f}, - {0.14583333f, 0.52083330f}, - {0.18750000f, 0.52083330f}, - {0.18750000f, 0.52083330f}, - {0.22916667f, 0.52083330f}, - {0.22916667f, 0.52083330f}, - {0.27083334f, 0.52083330f}, - {0.27083334f, 0.52083330f}, - {0.31250000f, 0.52083330f}, - {0.31250000f, 0.52083330f}, - {0.35416666f, 0.52083330f}, - {0.35416666f, 0.52083330f}, - {0.39583334f, 0.52083330f}, - {0.39583334f, 0.52083330f}, - {0.43750000f, 0.52083330f}, - {0.43750000f, 0.52083330f}, - {0.47916666f, 0.52083330f}, - {0.47916666f, 0.52083330f}, - {0.52083330f, 0.52083330f}, - {0.52083330f, 0.52083330f}, - {0.56250000f, 0.52083330f}, - {0.56250000f, 0.52083330f}, - {0.60416670f, 0.52083330f}, - {0.60416670f, 0.52083330f}, - {0.64583330f, 0.52083330f}, - {0.64583330f, 0.52083330f}, - {0.68750000f, 0.52083330f}, - {0.68750000f, 0.52083330f}, - {0.72916670f, 0.52083330f}, - {0.72916670f, 0.52083330f}, - {0.77083330f, 0.52083330f}, - {0.77083330f, 0.52083330f}, - {0.81250000f, 0.52083330f}, - {0.81250000f, 0.52083330f}, - {0.85416670f, 0.52083330f}, - {0.85416670f, 0.52083330f}, - {0.89583330f, 0.52083330f}, - {0.89583330f, 0.52083330f}, - {0.93750000f, 0.52083330f}, - {0.93750000f, 0.52083330f}, - {0.97916670f, 0.52083330f}, - {0.97916670f, 0.52083330f}, - {0.02083333f, 0.56250000f}, - {0.02083333f, 0.56250000f}, - {0.06250000f, 0.56250000f}, - {0.06250000f, 0.56250000f}, - {0.10416666f, 0.56250000f}, - {0.10416666f, 0.56250000f}, - {0.14583333f, 0.56250000f}, - {0.14583333f, 0.56250000f}, - {0.18750000f, 0.56250000f}, - {0.18750000f, 0.56250000f}, - {0.22916667f, 0.56250000f}, - {0.22916667f, 0.56250000f}, - {0.27083334f, 0.56250000f}, - {0.27083334f, 0.56250000f}, - {0.31250000f, 0.56250000f}, - {0.31250000f, 0.56250000f}, - {0.35416666f, 0.56250000f}, - {0.35416666f, 0.56250000f}, - 
{0.39583334f, 0.56250000f}, - {0.39583334f, 0.56250000f}, - {0.43750000f, 0.56250000f}, - {0.43750000f, 0.56250000f}, - {0.47916666f, 0.56250000f}, - {0.47916666f, 0.56250000f}, - {0.52083330f, 0.56250000f}, - {0.52083330f, 0.56250000f}, - {0.56250000f, 0.56250000f}, - {0.56250000f, 0.56250000f}, - {0.60416670f, 0.56250000f}, - {0.60416670f, 0.56250000f}, - {0.64583330f, 0.56250000f}, - {0.64583330f, 0.56250000f}, - {0.68750000f, 0.56250000f}, - {0.68750000f, 0.56250000f}, - {0.72916670f, 0.56250000f}, - {0.72916670f, 0.56250000f}, - {0.77083330f, 0.56250000f}, - {0.77083330f, 0.56250000f}, - {0.81250000f, 0.56250000f}, - {0.81250000f, 0.56250000f}, - {0.85416670f, 0.56250000f}, - {0.85416670f, 0.56250000f}, - {0.89583330f, 0.56250000f}, - {0.89583330f, 0.56250000f}, - {0.93750000f, 0.56250000f}, - {0.93750000f, 0.56250000f}, - {0.97916670f, 0.56250000f}, - {0.97916670f, 0.56250000f}, - {0.02083333f, 0.60416670f}, - {0.02083333f, 0.60416670f}, - {0.06250000f, 0.60416670f}, - {0.06250000f, 0.60416670f}, - {0.10416666f, 0.60416670f}, - {0.10416666f, 0.60416670f}, - {0.14583333f, 0.60416670f}, - {0.14583333f, 0.60416670f}, - {0.18750000f, 0.60416670f}, - {0.18750000f, 0.60416670f}, - {0.22916667f, 0.60416670f}, - {0.22916667f, 0.60416670f}, - {0.27083334f, 0.60416670f}, - {0.27083334f, 0.60416670f}, - {0.31250000f, 0.60416670f}, - {0.31250000f, 0.60416670f}, - {0.35416666f, 0.60416670f}, - {0.35416666f, 0.60416670f}, - {0.39583334f, 0.60416670f}, - {0.39583334f, 0.60416670f}, - {0.43750000f, 0.60416670f}, - {0.43750000f, 0.60416670f}, - {0.47916666f, 0.60416670f}, - {0.47916666f, 0.60416670f}, - {0.52083330f, 0.60416670f}, - {0.52083330f, 0.60416670f}, - {0.56250000f, 0.60416670f}, - {0.56250000f, 0.60416670f}, - {0.60416670f, 0.60416670f}, - {0.60416670f, 0.60416670f}, - {0.64583330f, 0.60416670f}, - {0.64583330f, 0.60416670f}, - {0.68750000f, 0.60416670f}, - {0.68750000f, 0.60416670f}, - {0.72916670f, 0.60416670f}, - {0.72916670f, 0.60416670f}, - {0.77083330f, 
0.60416670f}, - {0.77083330f, 0.60416670f}, - {0.81250000f, 0.60416670f}, - {0.81250000f, 0.60416670f}, - {0.85416670f, 0.60416670f}, - {0.85416670f, 0.60416670f}, - {0.89583330f, 0.60416670f}, - {0.89583330f, 0.60416670f}, - {0.93750000f, 0.60416670f}, - {0.93750000f, 0.60416670f}, - {0.97916670f, 0.60416670f}, - {0.97916670f, 0.60416670f}, - {0.02083333f, 0.64583330f}, - {0.02083333f, 0.64583330f}, - {0.06250000f, 0.64583330f}, - {0.06250000f, 0.64583330f}, - {0.10416666f, 0.64583330f}, - {0.10416666f, 0.64583330f}, - {0.14583333f, 0.64583330f}, - {0.14583333f, 0.64583330f}, - {0.18750000f, 0.64583330f}, - {0.18750000f, 0.64583330f}, - {0.22916667f, 0.64583330f}, - {0.22916667f, 0.64583330f}, - {0.27083334f, 0.64583330f}, - {0.27083334f, 0.64583330f}, - {0.31250000f, 0.64583330f}, - {0.31250000f, 0.64583330f}, - {0.35416666f, 0.64583330f}, - {0.35416666f, 0.64583330f}, - {0.39583334f, 0.64583330f}, - {0.39583334f, 0.64583330f}, - {0.43750000f, 0.64583330f}, - {0.43750000f, 0.64583330f}, - {0.47916666f, 0.64583330f}, - {0.47916666f, 0.64583330f}, - {0.52083330f, 0.64583330f}, - {0.52083330f, 0.64583330f}, - {0.56250000f, 0.64583330f}, - {0.56250000f, 0.64583330f}, - {0.60416670f, 0.64583330f}, - {0.60416670f, 0.64583330f}, - {0.64583330f, 0.64583330f}, - {0.64583330f, 0.64583330f}, - {0.68750000f, 0.64583330f}, - {0.68750000f, 0.64583330f}, - {0.72916670f, 0.64583330f}, - {0.72916670f, 0.64583330f}, - {0.77083330f, 0.64583330f}, - {0.77083330f, 0.64583330f}, - {0.81250000f, 0.64583330f}, - {0.81250000f, 0.64583330f}, - {0.85416670f, 0.64583330f}, - {0.85416670f, 0.64583330f}, - {0.89583330f, 0.64583330f}, - {0.89583330f, 0.64583330f}, - {0.93750000f, 0.64583330f}, - {0.93750000f, 0.64583330f}, - {0.97916670f, 0.64583330f}, - {0.97916670f, 0.64583330f}, - {0.02083333f, 0.68750000f}, - {0.02083333f, 0.68750000f}, - {0.06250000f, 0.68750000f}, - {0.06250000f, 0.68750000f}, - {0.10416666f, 0.68750000f}, - {0.10416666f, 0.68750000f}, - {0.14583333f, 0.68750000f}, - 
{0.14583333f, 0.68750000f}, - {0.18750000f, 0.68750000f}, - {0.18750000f, 0.68750000f}, - {0.22916667f, 0.68750000f}, - {0.22916667f, 0.68750000f}, - {0.27083334f, 0.68750000f}, - {0.27083334f, 0.68750000f}, - {0.31250000f, 0.68750000f}, - {0.31250000f, 0.68750000f}, - {0.35416666f, 0.68750000f}, - {0.35416666f, 0.68750000f}, - {0.39583334f, 0.68750000f}, - {0.39583334f, 0.68750000f}, - {0.43750000f, 0.68750000f}, - {0.43750000f, 0.68750000f}, - {0.47916666f, 0.68750000f}, - {0.47916666f, 0.68750000f}, - {0.52083330f, 0.68750000f}, - {0.52083330f, 0.68750000f}, - {0.56250000f, 0.68750000f}, - {0.56250000f, 0.68750000f}, - {0.60416670f, 0.68750000f}, - {0.60416670f, 0.68750000f}, - {0.64583330f, 0.68750000f}, - {0.64583330f, 0.68750000f}, - {0.68750000f, 0.68750000f}, - {0.68750000f, 0.68750000f}, - {0.72916670f, 0.68750000f}, - {0.72916670f, 0.68750000f}, - {0.77083330f, 0.68750000f}, - {0.77083330f, 0.68750000f}, - {0.81250000f, 0.68750000f}, - {0.81250000f, 0.68750000f}, - {0.85416670f, 0.68750000f}, - {0.85416670f, 0.68750000f}, - {0.89583330f, 0.68750000f}, - {0.89583330f, 0.68750000f}, - {0.93750000f, 0.68750000f}, - {0.93750000f, 0.68750000f}, - {0.97916670f, 0.68750000f}, - {0.97916670f, 0.68750000f}, - {0.02083333f, 0.72916670f}, - {0.02083333f, 0.72916670f}, - {0.06250000f, 0.72916670f}, - {0.06250000f, 0.72916670f}, - {0.10416666f, 0.72916670f}, - {0.10416666f, 0.72916670f}, - {0.14583333f, 0.72916670f}, - {0.14583333f, 0.72916670f}, - {0.18750000f, 0.72916670f}, - {0.18750000f, 0.72916670f}, - {0.22916667f, 0.72916670f}, - {0.22916667f, 0.72916670f}, - {0.27083334f, 0.72916670f}, - {0.27083334f, 0.72916670f}, - {0.31250000f, 0.72916670f}, - {0.31250000f, 0.72916670f}, - {0.35416666f, 0.72916670f}, - {0.35416666f, 0.72916670f}, - {0.39583334f, 0.72916670f}, - {0.39583334f, 0.72916670f}, - {0.43750000f, 0.72916670f}, - {0.43750000f, 0.72916670f}, - {0.47916666f, 0.72916670f}, - {0.47916666f, 0.72916670f}, - {0.52083330f, 0.72916670f}, - {0.52083330f, 
0.72916670f}, - {0.56250000f, 0.72916670f}, - {0.56250000f, 0.72916670f}, - {0.60416670f, 0.72916670f}, - {0.60416670f, 0.72916670f}, - {0.64583330f, 0.72916670f}, - {0.64583330f, 0.72916670f}, - {0.68750000f, 0.72916670f}, - {0.68750000f, 0.72916670f}, - {0.72916670f, 0.72916670f}, - {0.72916670f, 0.72916670f}, - {0.77083330f, 0.72916670f}, - {0.77083330f, 0.72916670f}, - {0.81250000f, 0.72916670f}, - {0.81250000f, 0.72916670f}, - {0.85416670f, 0.72916670f}, - {0.85416670f, 0.72916670f}, - {0.89583330f, 0.72916670f}, - {0.89583330f, 0.72916670f}, - {0.93750000f, 0.72916670f}, - {0.93750000f, 0.72916670f}, - {0.97916670f, 0.72916670f}, - {0.97916670f, 0.72916670f}, - {0.02083333f, 0.77083330f}, - {0.02083333f, 0.77083330f}, - {0.06250000f, 0.77083330f}, - {0.06250000f, 0.77083330f}, - {0.10416666f, 0.77083330f}, - {0.10416666f, 0.77083330f}, - {0.14583333f, 0.77083330f}, - {0.14583333f, 0.77083330f}, - {0.18750000f, 0.77083330f}, - {0.18750000f, 0.77083330f}, - {0.22916667f, 0.77083330f}, - {0.22916667f, 0.77083330f}, - {0.27083334f, 0.77083330f}, - {0.27083334f, 0.77083330f}, - {0.31250000f, 0.77083330f}, - {0.31250000f, 0.77083330f}, - {0.35416666f, 0.77083330f}, - {0.35416666f, 0.77083330f}, - {0.39583334f, 0.77083330f}, - {0.39583334f, 0.77083330f}, - {0.43750000f, 0.77083330f}, - {0.43750000f, 0.77083330f}, - {0.47916666f, 0.77083330f}, - {0.47916666f, 0.77083330f}, - {0.52083330f, 0.77083330f}, - {0.52083330f, 0.77083330f}, - {0.56250000f, 0.77083330f}, - {0.56250000f, 0.77083330f}, - {0.60416670f, 0.77083330f}, - {0.60416670f, 0.77083330f}, - {0.64583330f, 0.77083330f}, - {0.64583330f, 0.77083330f}, - {0.68750000f, 0.77083330f}, - {0.68750000f, 0.77083330f}, - {0.72916670f, 0.77083330f}, - {0.72916670f, 0.77083330f}, - {0.77083330f, 0.77083330f}, - {0.77083330f, 0.77083330f}, - {0.81250000f, 0.77083330f}, - {0.81250000f, 0.77083330f}, - {0.85416670f, 0.77083330f}, - {0.85416670f, 0.77083330f}, - {0.89583330f, 0.77083330f}, - {0.89583330f, 0.77083330f}, - 
{0.93750000f, 0.77083330f}, - {0.93750000f, 0.77083330f}, - {0.97916670f, 0.77083330f}, - {0.97916670f, 0.77083330f}, - {0.02083333f, 0.81250000f}, - {0.02083333f, 0.81250000f}, - {0.06250000f, 0.81250000f}, - {0.06250000f, 0.81250000f}, - {0.10416666f, 0.81250000f}, - {0.10416666f, 0.81250000f}, - {0.14583333f, 0.81250000f}, - {0.14583333f, 0.81250000f}, - {0.18750000f, 0.81250000f}, - {0.18750000f, 0.81250000f}, - {0.22916667f, 0.81250000f}, - {0.22916667f, 0.81250000f}, - {0.27083334f, 0.81250000f}, - {0.27083334f, 0.81250000f}, - {0.31250000f, 0.81250000f}, - {0.31250000f, 0.81250000f}, - {0.35416666f, 0.81250000f}, - {0.35416666f, 0.81250000f}, - {0.39583334f, 0.81250000f}, - {0.39583334f, 0.81250000f}, - {0.43750000f, 0.81250000f}, - {0.43750000f, 0.81250000f}, - {0.47916666f, 0.81250000f}, - {0.47916666f, 0.81250000f}, - {0.52083330f, 0.81250000f}, - {0.52083330f, 0.81250000f}, - {0.56250000f, 0.81250000f}, - {0.56250000f, 0.81250000f}, - {0.60416670f, 0.81250000f}, - {0.60416670f, 0.81250000f}, - {0.64583330f, 0.81250000f}, - {0.64583330f, 0.81250000f}, - {0.68750000f, 0.81250000f}, - {0.68750000f, 0.81250000f}, - {0.72916670f, 0.81250000f}, - {0.72916670f, 0.81250000f}, - {0.77083330f, 0.81250000f}, - {0.77083330f, 0.81250000f}, - {0.81250000f, 0.81250000f}, - {0.81250000f, 0.81250000f}, - {0.85416670f, 0.81250000f}, - {0.85416670f, 0.81250000f}, - {0.89583330f, 0.81250000f}, - {0.89583330f, 0.81250000f}, - {0.93750000f, 0.81250000f}, - {0.93750000f, 0.81250000f}, - {0.97916670f, 0.81250000f}, - {0.97916670f, 0.81250000f}, - {0.02083333f, 0.85416670f}, - {0.02083333f, 0.85416670f}, - {0.06250000f, 0.85416670f}, - {0.06250000f, 0.85416670f}, - {0.10416666f, 0.85416670f}, - {0.10416666f, 0.85416670f}, - {0.14583333f, 0.85416670f}, - {0.14583333f, 0.85416670f}, - {0.18750000f, 0.85416670f}, - {0.18750000f, 0.85416670f}, - {0.22916667f, 0.85416670f}, - {0.22916667f, 0.85416670f}, - {0.27083334f, 0.85416670f}, - {0.27083334f, 0.85416670f}, - {0.31250000f, 
0.85416670f}, - {0.31250000f, 0.85416670f}, - {0.35416666f, 0.85416670f}, - {0.35416666f, 0.85416670f}, - {0.39583334f, 0.85416670f}, - {0.39583334f, 0.85416670f}, - {0.43750000f, 0.85416670f}, - {0.43750000f, 0.85416670f}, - {0.47916666f, 0.85416670f}, - {0.47916666f, 0.85416670f}, - {0.52083330f, 0.85416670f}, - {0.52083330f, 0.85416670f}, - {0.56250000f, 0.85416670f}, - {0.56250000f, 0.85416670f}, - {0.60416670f, 0.85416670f}, - {0.60416670f, 0.85416670f}, - {0.64583330f, 0.85416670f}, - {0.64583330f, 0.85416670f}, - {0.68750000f, 0.85416670f}, - {0.68750000f, 0.85416670f}, - {0.72916670f, 0.85416670f}, - {0.72916670f, 0.85416670f}, - {0.77083330f, 0.85416670f}, - {0.77083330f, 0.85416670f}, - {0.81250000f, 0.85416670f}, - {0.81250000f, 0.85416670f}, - {0.85416670f, 0.85416670f}, - {0.85416670f, 0.85416670f}, - {0.89583330f, 0.85416670f}, - {0.89583330f, 0.85416670f}, - {0.93750000f, 0.85416670f}, - {0.93750000f, 0.85416670f}, - {0.97916670f, 0.85416670f}, - {0.97916670f, 0.85416670f}, - {0.02083333f, 0.89583330f}, - {0.02083333f, 0.89583330f}, - {0.06250000f, 0.89583330f}, - {0.06250000f, 0.89583330f}, - {0.10416666f, 0.89583330f}, - {0.10416666f, 0.89583330f}, - {0.14583333f, 0.89583330f}, - {0.14583333f, 0.89583330f}, - {0.18750000f, 0.89583330f}, - {0.18750000f, 0.89583330f}, - {0.22916667f, 0.89583330f}, - {0.22916667f, 0.89583330f}, - {0.27083334f, 0.89583330f}, - {0.27083334f, 0.89583330f}, - {0.31250000f, 0.89583330f}, - {0.31250000f, 0.89583330f}, - {0.35416666f, 0.89583330f}, - {0.35416666f, 0.89583330f}, - {0.39583334f, 0.89583330f}, - {0.39583334f, 0.89583330f}, - {0.43750000f, 0.89583330f}, - {0.43750000f, 0.89583330f}, - {0.47916666f, 0.89583330f}, - {0.47916666f, 0.89583330f}, - {0.52083330f, 0.89583330f}, - {0.52083330f, 0.89583330f}, - {0.56250000f, 0.89583330f}, - {0.56250000f, 0.89583330f}, - {0.60416670f, 0.89583330f}, - {0.60416670f, 0.89583330f}, - {0.64583330f, 0.89583330f}, - {0.64583330f, 0.89583330f}, - {0.68750000f, 0.89583330f}, - 
{0.68750000f, 0.89583330f}, - {0.72916670f, 0.89583330f}, - {0.72916670f, 0.89583330f}, - {0.77083330f, 0.89583330f}, - {0.77083330f, 0.89583330f}, - {0.81250000f, 0.89583330f}, - {0.81250000f, 0.89583330f}, - {0.85416670f, 0.89583330f}, - {0.85416670f, 0.89583330f}, - {0.89583330f, 0.89583330f}, - {0.89583330f, 0.89583330f}, - {0.93750000f, 0.89583330f}, - {0.93750000f, 0.89583330f}, - {0.97916670f, 0.89583330f}, - {0.97916670f, 0.89583330f}, - {0.02083333f, 0.93750000f}, - {0.02083333f, 0.93750000f}, - {0.06250000f, 0.93750000f}, - {0.06250000f, 0.93750000f}, - {0.10416666f, 0.93750000f}, - {0.10416666f, 0.93750000f}, - {0.14583333f, 0.93750000f}, - {0.14583333f, 0.93750000f}, - {0.18750000f, 0.93750000f}, - {0.18750000f, 0.93750000f}, - {0.22916667f, 0.93750000f}, - {0.22916667f, 0.93750000f}, - {0.27083334f, 0.93750000f}, - {0.27083334f, 0.93750000f}, - {0.31250000f, 0.93750000f}, - {0.31250000f, 0.93750000f}, - {0.35416666f, 0.93750000f}, - {0.35416666f, 0.93750000f}, - {0.39583334f, 0.93750000f}, - {0.39583334f, 0.93750000f}, - {0.43750000f, 0.93750000f}, - {0.43750000f, 0.93750000f}, - {0.47916666f, 0.93750000f}, - {0.47916666f, 0.93750000f}, - {0.52083330f, 0.93750000f}, - {0.52083330f, 0.93750000f}, - {0.56250000f, 0.93750000f}, - {0.56250000f, 0.93750000f}, - {0.60416670f, 0.93750000f}, - {0.60416670f, 0.93750000f}, - {0.64583330f, 0.93750000f}, - {0.64583330f, 0.93750000f}, - {0.68750000f, 0.93750000f}, - {0.68750000f, 0.93750000f}, - {0.72916670f, 0.93750000f}, - {0.72916670f, 0.93750000f}, - {0.77083330f, 0.93750000f}, - {0.77083330f, 0.93750000f}, - {0.81250000f, 0.93750000f}, - {0.81250000f, 0.93750000f}, - {0.85416670f, 0.93750000f}, - {0.85416670f, 0.93750000f}, - {0.89583330f, 0.93750000f}, - {0.89583330f, 0.93750000f}, - {0.93750000f, 0.93750000f}, - {0.93750000f, 0.93750000f}, - {0.97916670f, 0.93750000f}, - {0.97916670f, 0.93750000f}, - {0.02083333f, 0.97916670f}, - {0.02083333f, 0.97916670f}, - {0.06250000f, 0.97916670f}, - {0.06250000f, 
0.97916670f}, - {0.10416666f, 0.97916670f}, - {0.10416666f, 0.97916670f}, - {0.14583333f, 0.97916670f}, - {0.14583333f, 0.97916670f}, - {0.18750000f, 0.97916670f}, - {0.18750000f, 0.97916670f}, - {0.22916667f, 0.97916670f}, - {0.22916667f, 0.97916670f}, - {0.27083334f, 0.97916670f}, - {0.27083334f, 0.97916670f}, - {0.31250000f, 0.97916670f}, - {0.31250000f, 0.97916670f}, - {0.35416666f, 0.97916670f}, - {0.35416666f, 0.97916670f}, - {0.39583334f, 0.97916670f}, - {0.39583334f, 0.97916670f}, - {0.43750000f, 0.97916670f}, - {0.43750000f, 0.97916670f}, - {0.47916666f, 0.97916670f}, - {0.47916666f, 0.97916670f}, - {0.52083330f, 0.97916670f}, - {0.52083330f, 0.97916670f}, - {0.56250000f, 0.97916670f}, - {0.56250000f, 0.97916670f}, - {0.60416670f, 0.97916670f}, - {0.60416670f, 0.97916670f}, - {0.64583330f, 0.97916670f}, - {0.64583330f, 0.97916670f}, - {0.68750000f, 0.97916670f}, - {0.68750000f, 0.97916670f}, - {0.72916670f, 0.97916670f}, - {0.72916670f, 0.97916670f}, - {0.77083330f, 0.97916670f}, - {0.77083330f, 0.97916670f}, - {0.81250000f, 0.97916670f}, - {0.81250000f, 0.97916670f}, - {0.85416670f, 0.97916670f}, - {0.85416670f, 0.97916670f}, - {0.89583330f, 0.97916670f}, - {0.89583330f, 0.97916670f}, - {0.93750000f, 0.97916670f}, - {0.93750000f, 0.97916670f}, - {0.97916670f, 0.97916670f}, - {0.97916670f, 0.97916670f}, - {0.04166667f, 0.04166667f}, - {0.04166667f, 0.04166667f}, - {0.04166667f, 0.04166667f}, - {0.04166667f, 0.04166667f}, - {0.04166667f, 0.04166667f}, - {0.04166667f, 0.04166667f}, - {0.12500000f, 0.04166667f}, - {0.12500000f, 0.04166667f}, - {0.12500000f, 0.04166667f}, - {0.12500000f, 0.04166667f}, - {0.12500000f, 0.04166667f}, - {0.12500000f, 0.04166667f}, - {0.20833333f, 0.04166667f}, - {0.20833333f, 0.04166667f}, - {0.20833333f, 0.04166667f}, - {0.20833333f, 0.04166667f}, - {0.20833333f, 0.04166667f}, - {0.20833333f, 0.04166667f}, - {0.29166666f, 0.04166667f}, - {0.29166666f, 0.04166667f}, - {0.29166666f, 0.04166667f}, - {0.29166666f, 0.04166667f}, - 
{0.29166666f, 0.04166667f}, - {0.29166666f, 0.04166667f}, - {0.37500000f, 0.04166667f}, - {0.37500000f, 0.04166667f}, - {0.37500000f, 0.04166667f}, - {0.37500000f, 0.04166667f}, - {0.37500000f, 0.04166667f}, - {0.37500000f, 0.04166667f}, - {0.45833334f, 0.04166667f}, - {0.45833334f, 0.04166667f}, - {0.45833334f, 0.04166667f}, - {0.45833334f, 0.04166667f}, - {0.45833334f, 0.04166667f}, - {0.45833334f, 0.04166667f}, - {0.54166670f, 0.04166667f}, - {0.54166670f, 0.04166667f}, - {0.54166670f, 0.04166667f}, - {0.54166670f, 0.04166667f}, - {0.54166670f, 0.04166667f}, - {0.54166670f, 0.04166667f}, - {0.62500000f, 0.04166667f}, - {0.62500000f, 0.04166667f}, - {0.62500000f, 0.04166667f}, - {0.62500000f, 0.04166667f}, - {0.62500000f, 0.04166667f}, - {0.62500000f, 0.04166667f}, - {0.70833330f, 0.04166667f}, - {0.70833330f, 0.04166667f}, - {0.70833330f, 0.04166667f}, - {0.70833330f, 0.04166667f}, - {0.70833330f, 0.04166667f}, - {0.70833330f, 0.04166667f}, - {0.79166670f, 0.04166667f}, - {0.79166670f, 0.04166667f}, - {0.79166670f, 0.04166667f}, - {0.79166670f, 0.04166667f}, - {0.79166670f, 0.04166667f}, - {0.79166670f, 0.04166667f}, - {0.87500000f, 0.04166667f}, - {0.87500000f, 0.04166667f}, - {0.87500000f, 0.04166667f}, - {0.87500000f, 0.04166667f}, - {0.87500000f, 0.04166667f}, - {0.87500000f, 0.04166667f}, - {0.95833330f, 0.04166667f}, - {0.95833330f, 0.04166667f}, - {0.95833330f, 0.04166667f}, - {0.95833330f, 0.04166667f}, - {0.95833330f, 0.04166667f}, - {0.95833330f, 0.04166667f}, - {0.04166667f, 0.12500000f}, - {0.04166667f, 0.12500000f}, - {0.04166667f, 0.12500000f}, - {0.04166667f, 0.12500000f}, - {0.04166667f, 0.12500000f}, - {0.04166667f, 0.12500000f}, - {0.12500000f, 0.12500000f}, - {0.12500000f, 0.12500000f}, - {0.12500000f, 0.12500000f}, - {0.12500000f, 0.12500000f}, - {0.12500000f, 0.12500000f}, - {0.12500000f, 0.12500000f}, - {0.20833333f, 0.12500000f}, - {0.20833333f, 0.12500000f}, - {0.20833333f, 0.12500000f}, - {0.20833333f, 0.12500000f}, - {0.20833333f, 
0.12500000f}, - {0.20833333f, 0.12500000f}, - {0.29166666f, 0.12500000f}, - {0.29166666f, 0.12500000f}, - {0.29166666f, 0.12500000f}, - {0.29166666f, 0.12500000f}, - {0.29166666f, 0.12500000f}, - {0.29166666f, 0.12500000f}, - {0.37500000f, 0.12500000f}, - {0.37500000f, 0.12500000f}, - {0.37500000f, 0.12500000f}, - {0.37500000f, 0.12500000f}, - {0.37500000f, 0.12500000f}, - {0.37500000f, 0.12500000f}, - {0.45833334f, 0.12500000f}, - {0.45833334f, 0.12500000f}, - {0.45833334f, 0.12500000f}, - {0.45833334f, 0.12500000f}, - {0.45833334f, 0.12500000f}, - {0.45833334f, 0.12500000f}, - {0.54166670f, 0.12500000f}, - {0.54166670f, 0.12500000f}, - {0.54166670f, 0.12500000f}, - {0.54166670f, 0.12500000f}, - {0.54166670f, 0.12500000f}, - {0.54166670f, 0.12500000f}, - {0.62500000f, 0.12500000f}, - {0.62500000f, 0.12500000f}, - {0.62500000f, 0.12500000f}, - {0.62500000f, 0.12500000f}, - {0.62500000f, 0.12500000f}, - {0.62500000f, 0.12500000f}, - {0.70833330f, 0.12500000f}, - {0.70833330f, 0.12500000f}, - {0.70833330f, 0.12500000f}, - {0.70833330f, 0.12500000f}, - {0.70833330f, 0.12500000f}, - {0.70833330f, 0.12500000f}, - {0.79166670f, 0.12500000f}, - {0.79166670f, 0.12500000f}, - {0.79166670f, 0.12500000f}, - {0.79166670f, 0.12500000f}, - {0.79166670f, 0.12500000f}, - {0.79166670f, 0.12500000f}, - {0.87500000f, 0.12500000f}, - {0.87500000f, 0.12500000f}, - {0.87500000f, 0.12500000f}, - {0.87500000f, 0.12500000f}, - {0.87500000f, 0.12500000f}, - {0.87500000f, 0.12500000f}, - {0.95833330f, 0.12500000f}, - {0.95833330f, 0.12500000f}, - {0.95833330f, 0.12500000f}, - {0.95833330f, 0.12500000f}, - {0.95833330f, 0.12500000f}, - {0.95833330f, 0.12500000f}, - {0.04166667f, 0.20833333f}, - {0.04166667f, 0.20833333f}, - {0.04166667f, 0.20833333f}, - {0.04166667f, 0.20833333f}, - {0.04166667f, 0.20833333f}, - {0.04166667f, 0.20833333f}, - {0.12500000f, 0.20833333f}, - {0.12500000f, 0.20833333f}, - {0.12500000f, 0.20833333f}, - {0.12500000f, 0.20833333f}, - {0.12500000f, 0.20833333f}, - 
{0.12500000f, 0.20833333f}, - {0.20833333f, 0.20833333f}, - {0.20833333f, 0.20833333f}, - {0.20833333f, 0.20833333f}, - {0.20833333f, 0.20833333f}, - {0.20833333f, 0.20833333f}, - {0.20833333f, 0.20833333f}, - {0.29166666f, 0.20833333f}, - {0.29166666f, 0.20833333f}, - {0.29166666f, 0.20833333f}, - {0.29166666f, 0.20833333f}, - {0.29166666f, 0.20833333f}, - {0.29166666f, 0.20833333f}, - {0.37500000f, 0.20833333f}, - {0.37500000f, 0.20833333f}, - {0.37500000f, 0.20833333f}, - {0.37500000f, 0.20833333f}, - {0.37500000f, 0.20833333f}, - {0.37500000f, 0.20833333f}, - {0.45833334f, 0.20833333f}, - {0.45833334f, 0.20833333f}, - {0.45833334f, 0.20833333f}, - {0.45833334f, 0.20833333f}, - {0.45833334f, 0.20833333f}, - {0.45833334f, 0.20833333f}, - {0.54166670f, 0.20833333f}, - {0.54166670f, 0.20833333f}, - {0.54166670f, 0.20833333f}, - {0.54166670f, 0.20833333f}, - {0.54166670f, 0.20833333f}, - {0.54166670f, 0.20833333f}, - {0.62500000f, 0.20833333f}, - {0.62500000f, 0.20833333f}, - {0.62500000f, 0.20833333f}, - {0.62500000f, 0.20833333f}, - {0.62500000f, 0.20833333f}, - {0.62500000f, 0.20833333f}, - {0.70833330f, 0.20833333f}, - {0.70833330f, 0.20833333f}, - {0.70833330f, 0.20833333f}, - {0.70833330f, 0.20833333f}, - {0.70833330f, 0.20833333f}, - {0.70833330f, 0.20833333f}, - {0.79166670f, 0.20833333f}, - {0.79166670f, 0.20833333f}, - {0.79166670f, 0.20833333f}, - {0.79166670f, 0.20833333f}, - {0.79166670f, 0.20833333f}, - {0.79166670f, 0.20833333f}, - {0.87500000f, 0.20833333f}, - {0.87500000f, 0.20833333f}, - {0.87500000f, 0.20833333f}, - {0.87500000f, 0.20833333f}, - {0.87500000f, 0.20833333f}, - {0.87500000f, 0.20833333f}, - {0.95833330f, 0.20833333f}, - {0.95833330f, 0.20833333f}, - {0.95833330f, 0.20833333f}, - {0.95833330f, 0.20833333f}, - {0.95833330f, 0.20833333f}, - {0.95833330f, 0.20833333f}, - {0.04166667f, 0.29166666f}, - {0.04166667f, 0.29166666f}, - {0.04166667f, 0.29166666f}, - {0.04166667f, 0.29166666f}, - {0.04166667f, 0.29166666f}, - {0.04166667f, 
0.29166666f}, - {0.12500000f, 0.29166666f}, - {0.12500000f, 0.29166666f}, - {0.12500000f, 0.29166666f}, - {0.12500000f, 0.29166666f}, - {0.12500000f, 0.29166666f}, - {0.12500000f, 0.29166666f}, - {0.20833333f, 0.29166666f}, - {0.20833333f, 0.29166666f}, - {0.20833333f, 0.29166666f}, - {0.20833333f, 0.29166666f}, - {0.20833333f, 0.29166666f}, - {0.20833333f, 0.29166666f}, - {0.29166666f, 0.29166666f}, - {0.29166666f, 0.29166666f}, - {0.29166666f, 0.29166666f}, - {0.29166666f, 0.29166666f}, - {0.29166666f, 0.29166666f}, - {0.29166666f, 0.29166666f}, - {0.37500000f, 0.29166666f}, - {0.37500000f, 0.29166666f}, - {0.37500000f, 0.29166666f}, - {0.37500000f, 0.29166666f}, - {0.37500000f, 0.29166666f}, - {0.37500000f, 0.29166666f}, - {0.45833334f, 0.29166666f}, - {0.45833334f, 0.29166666f}, - {0.45833334f, 0.29166666f}, - {0.45833334f, 0.29166666f}, - {0.45833334f, 0.29166666f}, - {0.45833334f, 0.29166666f}, - {0.54166670f, 0.29166666f}, - {0.54166670f, 0.29166666f}, - {0.54166670f, 0.29166666f}, - {0.54166670f, 0.29166666f}, - {0.54166670f, 0.29166666f}, - {0.54166670f, 0.29166666f}, - {0.62500000f, 0.29166666f}, - {0.62500000f, 0.29166666f}, - {0.62500000f, 0.29166666f}, - {0.62500000f, 0.29166666f}, - {0.62500000f, 0.29166666f}, - {0.62500000f, 0.29166666f}, - {0.70833330f, 0.29166666f}, - {0.70833330f, 0.29166666f}, - {0.70833330f, 0.29166666f}, - {0.70833330f, 0.29166666f}, - {0.70833330f, 0.29166666f}, - {0.70833330f, 0.29166666f}, - {0.79166670f, 0.29166666f}, - {0.79166670f, 0.29166666f}, - {0.79166670f, 0.29166666f}, - {0.79166670f, 0.29166666f}, - {0.79166670f, 0.29166666f}, - {0.79166670f, 0.29166666f}, - {0.87500000f, 0.29166666f}, - {0.87500000f, 0.29166666f}, - {0.87500000f, 0.29166666f}, - {0.87500000f, 0.29166666f}, - {0.87500000f, 0.29166666f}, - {0.87500000f, 0.29166666f}, - {0.95833330f, 0.29166666f}, - {0.95833330f, 0.29166666f}, - {0.95833330f, 0.29166666f}, - {0.95833330f, 0.29166666f}, - {0.95833330f, 0.29166666f}, - {0.95833330f, 0.29166666f}, - 
{0.04166667f, 0.37500000f}, - {0.04166667f, 0.37500000f}, - {0.04166667f, 0.37500000f}, - {0.04166667f, 0.37500000f}, - {0.04166667f, 0.37500000f}, - {0.04166667f, 0.37500000f}, - {0.12500000f, 0.37500000f}, - {0.12500000f, 0.37500000f}, - {0.12500000f, 0.37500000f}, - {0.12500000f, 0.37500000f}, - {0.12500000f, 0.37500000f}, - {0.12500000f, 0.37500000f}, - {0.20833333f, 0.37500000f}, - {0.20833333f, 0.37500000f}, - {0.20833333f, 0.37500000f}, - {0.20833333f, 0.37500000f}, - {0.20833333f, 0.37500000f}, - {0.20833333f, 0.37500000f}, - {0.29166666f, 0.37500000f}, - {0.29166666f, 0.37500000f}, - {0.29166666f, 0.37500000f}, - {0.29166666f, 0.37500000f}, - {0.29166666f, 0.37500000f}, - {0.29166666f, 0.37500000f}, - {0.37500000f, 0.37500000f}, - {0.37500000f, 0.37500000f}, - {0.37500000f, 0.37500000f}, - {0.37500000f, 0.37500000f}, - {0.37500000f, 0.37500000f}, - {0.37500000f, 0.37500000f}, - {0.45833334f, 0.37500000f}, - {0.45833334f, 0.37500000f}, - {0.45833334f, 0.37500000f}, - {0.45833334f, 0.37500000f}, - {0.45833334f, 0.37500000f}, - {0.45833334f, 0.37500000f}, - {0.54166670f, 0.37500000f}, - {0.54166670f, 0.37500000f}, - {0.54166670f, 0.37500000f}, - {0.54166670f, 0.37500000f}, - {0.54166670f, 0.37500000f}, - {0.54166670f, 0.37500000f}, - {0.62500000f, 0.37500000f}, - {0.62500000f, 0.37500000f}, - {0.62500000f, 0.37500000f}, - {0.62500000f, 0.37500000f}, - {0.62500000f, 0.37500000f}, - {0.62500000f, 0.37500000f}, - {0.70833330f, 0.37500000f}, - {0.70833330f, 0.37500000f}, - {0.70833330f, 0.37500000f}, - {0.70833330f, 0.37500000f}, - {0.70833330f, 0.37500000f}, - {0.70833330f, 0.37500000f}, - {0.79166670f, 0.37500000f}, - {0.79166670f, 0.37500000f}, - {0.79166670f, 0.37500000f}, - {0.79166670f, 0.37500000f}, - {0.79166670f, 0.37500000f}, - {0.79166670f, 0.37500000f}, - {0.87500000f, 0.37500000f}, - {0.87500000f, 0.37500000f}, - {0.87500000f, 0.37500000f}, - {0.87500000f, 0.37500000f}, - {0.87500000f, 0.37500000f}, - {0.87500000f, 0.37500000f}, - {0.95833330f, 
0.37500000f}, - {0.95833330f, 0.37500000f}, - {0.95833330f, 0.37500000f}, - {0.95833330f, 0.37500000f}, - {0.95833330f, 0.37500000f}, - {0.95833330f, 0.37500000f}, - {0.04166667f, 0.45833334f}, - {0.04166667f, 0.45833334f}, - {0.04166667f, 0.45833334f}, - {0.04166667f, 0.45833334f}, - {0.04166667f, 0.45833334f}, - {0.04166667f, 0.45833334f}, - {0.12500000f, 0.45833334f}, - {0.12500000f, 0.45833334f}, - {0.12500000f, 0.45833334f}, - {0.12500000f, 0.45833334f}, - {0.12500000f, 0.45833334f}, - {0.12500000f, 0.45833334f}, - {0.20833333f, 0.45833334f}, - {0.20833333f, 0.45833334f}, - {0.20833333f, 0.45833334f}, - {0.20833333f, 0.45833334f}, - {0.20833333f, 0.45833334f}, - {0.20833333f, 0.45833334f}, - {0.29166666f, 0.45833334f}, - {0.29166666f, 0.45833334f}, - {0.29166666f, 0.45833334f}, - {0.29166666f, 0.45833334f}, - {0.29166666f, 0.45833334f}, - {0.29166666f, 0.45833334f}, - {0.37500000f, 0.45833334f}, - {0.37500000f, 0.45833334f}, - {0.37500000f, 0.45833334f}, - {0.37500000f, 0.45833334f}, - {0.37500000f, 0.45833334f}, - {0.37500000f, 0.45833334f}, - {0.45833334f, 0.45833334f}, - {0.45833334f, 0.45833334f}, - {0.45833334f, 0.45833334f}, - {0.45833334f, 0.45833334f}, - {0.45833334f, 0.45833334f}, - {0.45833334f, 0.45833334f}, - {0.54166670f, 0.45833334f}, - {0.54166670f, 0.45833334f}, - {0.54166670f, 0.45833334f}, - {0.54166670f, 0.45833334f}, - {0.54166670f, 0.45833334f}, - {0.54166670f, 0.45833334f}, - {0.62500000f, 0.45833334f}, - {0.62500000f, 0.45833334f}, - {0.62500000f, 0.45833334f}, - {0.62500000f, 0.45833334f}, - {0.62500000f, 0.45833334f}, - {0.62500000f, 0.45833334f}, - {0.70833330f, 0.45833334f}, - {0.70833330f, 0.45833334f}, - {0.70833330f, 0.45833334f}, - {0.70833330f, 0.45833334f}, - {0.70833330f, 0.45833334f}, - {0.70833330f, 0.45833334f}, - {0.79166670f, 0.45833334f}, - {0.79166670f, 0.45833334f}, - {0.79166670f, 0.45833334f}, - {0.79166670f, 0.45833334f}, - {0.79166670f, 0.45833334f}, - {0.79166670f, 0.45833334f}, - {0.87500000f, 0.45833334f}, - 
{0.87500000f, 0.45833334f}, - {0.87500000f, 0.45833334f}, - {0.87500000f, 0.45833334f}, - {0.87500000f, 0.45833334f}, - {0.87500000f, 0.45833334f}, - {0.95833330f, 0.45833334f}, - {0.95833330f, 0.45833334f}, - {0.95833330f, 0.45833334f}, - {0.95833330f, 0.45833334f}, - {0.95833330f, 0.45833334f}, - {0.95833330f, 0.45833334f}, - {0.04166667f, 0.54166670f}, - {0.04166667f, 0.54166670f}, - {0.04166667f, 0.54166670f}, - {0.04166667f, 0.54166670f}, - {0.04166667f, 0.54166670f}, - {0.04166667f, 0.54166670f}, - {0.12500000f, 0.54166670f}, - {0.12500000f, 0.54166670f}, - {0.12500000f, 0.54166670f}, - {0.12500000f, 0.54166670f}, - {0.12500000f, 0.54166670f}, - {0.12500000f, 0.54166670f}, - {0.20833333f, 0.54166670f}, - {0.20833333f, 0.54166670f}, - {0.20833333f, 0.54166670f}, - {0.20833333f, 0.54166670f}, - {0.20833333f, 0.54166670f}, - {0.20833333f, 0.54166670f}, - {0.29166666f, 0.54166670f}, - {0.29166666f, 0.54166670f}, - {0.29166666f, 0.54166670f}, - {0.29166666f, 0.54166670f}, - {0.29166666f, 0.54166670f}, - {0.29166666f, 0.54166670f}, - {0.37500000f, 0.54166670f}, - {0.37500000f, 0.54166670f}, - {0.37500000f, 0.54166670f}, - {0.37500000f, 0.54166670f}, - {0.37500000f, 0.54166670f}, - {0.37500000f, 0.54166670f}, - {0.45833334f, 0.54166670f}, - {0.45833334f, 0.54166670f}, - {0.45833334f, 0.54166670f}, - {0.45833334f, 0.54166670f}, - {0.45833334f, 0.54166670f}, - {0.45833334f, 0.54166670f}, - {0.54166670f, 0.54166670f}, - {0.54166670f, 0.54166670f}, - {0.54166670f, 0.54166670f}, - {0.54166670f, 0.54166670f}, - {0.54166670f, 0.54166670f}, - {0.54166670f, 0.54166670f}, - {0.62500000f, 0.54166670f}, - {0.62500000f, 0.54166670f}, - {0.62500000f, 0.54166670f}, - {0.62500000f, 0.54166670f}, - {0.62500000f, 0.54166670f}, - {0.62500000f, 0.54166670f}, - {0.70833330f, 0.54166670f}, - {0.70833330f, 0.54166670f}, - {0.70833330f, 0.54166670f}, - {0.70833330f, 0.54166670f}, - {0.70833330f, 0.54166670f}, - {0.70833330f, 0.54166670f}, - {0.79166670f, 0.54166670f}, - {0.79166670f, 
0.54166670f}, - {0.79166670f, 0.54166670f}, - {0.79166670f, 0.54166670f}, - {0.79166670f, 0.54166670f}, - {0.79166670f, 0.54166670f}, - {0.87500000f, 0.54166670f}, - {0.87500000f, 0.54166670f}, - {0.87500000f, 0.54166670f}, - {0.87500000f, 0.54166670f}, - {0.87500000f, 0.54166670f}, - {0.87500000f, 0.54166670f}, - {0.95833330f, 0.54166670f}, - {0.95833330f, 0.54166670f}, - {0.95833330f, 0.54166670f}, - {0.95833330f, 0.54166670f}, - {0.95833330f, 0.54166670f}, - {0.95833330f, 0.54166670f}, - {0.04166667f, 0.62500000f}, - {0.04166667f, 0.62500000f}, - {0.04166667f, 0.62500000f}, - {0.04166667f, 0.62500000f}, - {0.04166667f, 0.62500000f}, - {0.04166667f, 0.62500000f}, - {0.12500000f, 0.62500000f}, - {0.12500000f, 0.62500000f}, - {0.12500000f, 0.62500000f}, - {0.12500000f, 0.62500000f}, - {0.12500000f, 0.62500000f}, - {0.12500000f, 0.62500000f}, - {0.20833333f, 0.62500000f}, - {0.20833333f, 0.62500000f}, - {0.20833333f, 0.62500000f}, - {0.20833333f, 0.62500000f}, - {0.20833333f, 0.62500000f}, - {0.20833333f, 0.62500000f}, - {0.29166666f, 0.62500000f}, - {0.29166666f, 0.62500000f}, - {0.29166666f, 0.62500000f}, - {0.29166666f, 0.62500000f}, - {0.29166666f, 0.62500000f}, - {0.29166666f, 0.62500000f}, - {0.37500000f, 0.62500000f}, - {0.37500000f, 0.62500000f}, - {0.37500000f, 0.62500000f}, - {0.37500000f, 0.62500000f}, - {0.37500000f, 0.62500000f}, - {0.37500000f, 0.62500000f}, - {0.45833334f, 0.62500000f}, - {0.45833334f, 0.62500000f}, - {0.45833334f, 0.62500000f}, - {0.45833334f, 0.62500000f}, - {0.45833334f, 0.62500000f}, - {0.45833334f, 0.62500000f}, - {0.54166670f, 0.62500000f}, - {0.54166670f, 0.62500000f}, - {0.54166670f, 0.62500000f}, - {0.54166670f, 0.62500000f}, - {0.54166670f, 0.62500000f}, - {0.54166670f, 0.62500000f}, - {0.62500000f, 0.62500000f}, - {0.62500000f, 0.62500000f}, - {0.62500000f, 0.62500000f}, - {0.62500000f, 0.62500000f}, - {0.62500000f, 0.62500000f}, - {0.62500000f, 0.62500000f}, - {0.70833330f, 0.62500000f}, - {0.70833330f, 0.62500000f}, - 
{0.70833330f, 0.62500000f}, - {0.70833330f, 0.62500000f}, - {0.70833330f, 0.62500000f}, - {0.70833330f, 0.62500000f}, - {0.79166670f, 0.62500000f}, - {0.79166670f, 0.62500000f}, - {0.79166670f, 0.62500000f}, - {0.79166670f, 0.62500000f}, - {0.79166670f, 0.62500000f}, - {0.79166670f, 0.62500000f}, - {0.87500000f, 0.62500000f}, - {0.87500000f, 0.62500000f}, - {0.87500000f, 0.62500000f}, - {0.87500000f, 0.62500000f}, - {0.87500000f, 0.62500000f}, - {0.87500000f, 0.62500000f}, - {0.95833330f, 0.62500000f}, - {0.95833330f, 0.62500000f}, - {0.95833330f, 0.62500000f}, - {0.95833330f, 0.62500000f}, - {0.95833330f, 0.62500000f}, - {0.95833330f, 0.62500000f}, - {0.04166667f, 0.70833330f}, - {0.04166667f, 0.70833330f}, - {0.04166667f, 0.70833330f}, - {0.04166667f, 0.70833330f}, - {0.04166667f, 0.70833330f}, - {0.04166667f, 0.70833330f}, - {0.12500000f, 0.70833330f}, - {0.12500000f, 0.70833330f}, - {0.12500000f, 0.70833330f}, - {0.12500000f, 0.70833330f}, - {0.12500000f, 0.70833330f}, - {0.12500000f, 0.70833330f}, - {0.20833333f, 0.70833330f}, - {0.20833333f, 0.70833330f}, - {0.20833333f, 0.70833330f}, - {0.20833333f, 0.70833330f}, - {0.20833333f, 0.70833330f}, - {0.20833333f, 0.70833330f}, - {0.29166666f, 0.70833330f}, - {0.29166666f, 0.70833330f}, - {0.29166666f, 0.70833330f}, - {0.29166666f, 0.70833330f}, - {0.29166666f, 0.70833330f}, - {0.29166666f, 0.70833330f}, - {0.37500000f, 0.70833330f}, - {0.37500000f, 0.70833330f}, - {0.37500000f, 0.70833330f}, - {0.37500000f, 0.70833330f}, - {0.37500000f, 0.70833330f}, - {0.37500000f, 0.70833330f}, - {0.45833334f, 0.70833330f}, - {0.45833334f, 0.70833330f}, - {0.45833334f, 0.70833330f}, - {0.45833334f, 0.70833330f}, - {0.45833334f, 0.70833330f}, - {0.45833334f, 0.70833330f}, - {0.54166670f, 0.70833330f}, - {0.54166670f, 0.70833330f}, - {0.54166670f, 0.70833330f}, - {0.54166670f, 0.70833330f}, - {0.54166670f, 0.70833330f}, - {0.54166670f, 0.70833330f}, - {0.62500000f, 0.70833330f}, - {0.62500000f, 0.70833330f}, - {0.62500000f, 
0.70833330f}, - {0.62500000f, 0.70833330f}, - {0.62500000f, 0.70833330f}, - {0.62500000f, 0.70833330f}, - {0.70833330f, 0.70833330f}, - {0.70833330f, 0.70833330f}, - {0.70833330f, 0.70833330f}, - {0.70833330f, 0.70833330f}, - {0.70833330f, 0.70833330f}, - {0.70833330f, 0.70833330f}, - {0.79166670f, 0.70833330f}, - {0.79166670f, 0.70833330f}, - {0.79166670f, 0.70833330f}, - {0.79166670f, 0.70833330f}, - {0.79166670f, 0.70833330f}, - {0.79166670f, 0.70833330f}, - {0.87500000f, 0.70833330f}, - {0.87500000f, 0.70833330f}, - {0.87500000f, 0.70833330f}, - {0.87500000f, 0.70833330f}, - {0.87500000f, 0.70833330f}, - {0.87500000f, 0.70833330f}, - {0.95833330f, 0.70833330f}, - {0.95833330f, 0.70833330f}, - {0.95833330f, 0.70833330f}, - {0.95833330f, 0.70833330f}, - {0.95833330f, 0.70833330f}, - {0.95833330f, 0.70833330f}, - {0.04166667f, 0.79166670f}, - {0.04166667f, 0.79166670f}, - {0.04166667f, 0.79166670f}, - {0.04166667f, 0.79166670f}, - {0.04166667f, 0.79166670f}, - {0.04166667f, 0.79166670f}, - {0.12500000f, 0.79166670f}, - {0.12500000f, 0.79166670f}, - {0.12500000f, 0.79166670f}, - {0.12500000f, 0.79166670f}, - {0.12500000f, 0.79166670f}, - {0.12500000f, 0.79166670f}, - {0.20833333f, 0.79166670f}, - {0.20833333f, 0.79166670f}, - {0.20833333f, 0.79166670f}, - {0.20833333f, 0.79166670f}, - {0.20833333f, 0.79166670f}, - {0.20833333f, 0.79166670f}, - {0.29166666f, 0.79166670f}, - {0.29166666f, 0.79166670f}, - {0.29166666f, 0.79166670f}, - {0.29166666f, 0.79166670f}, - {0.29166666f, 0.79166670f}, - {0.29166666f, 0.79166670f}, - {0.37500000f, 0.79166670f}, - {0.37500000f, 0.79166670f}, - {0.37500000f, 0.79166670f}, - {0.37500000f, 0.79166670f}, - {0.37500000f, 0.79166670f}, - {0.37500000f, 0.79166670f}, - {0.45833334f, 0.79166670f}, - {0.45833334f, 0.79166670f}, - {0.45833334f, 0.79166670f}, - {0.45833334f, 0.79166670f}, - {0.45833334f, 0.79166670f}, - {0.45833334f, 0.79166670f}, - {0.54166670f, 0.79166670f}, - {0.54166670f, 0.79166670f}, - {0.54166670f, 0.79166670f}, - 
{0.54166670f, 0.79166670f}, - {0.54166670f, 0.79166670f}, - {0.54166670f, 0.79166670f}, - {0.62500000f, 0.79166670f}, - {0.62500000f, 0.79166670f}, - {0.62500000f, 0.79166670f}, - {0.62500000f, 0.79166670f}, - {0.62500000f, 0.79166670f}, - {0.62500000f, 0.79166670f}, - {0.70833330f, 0.79166670f}, - {0.70833330f, 0.79166670f}, - {0.70833330f, 0.79166670f}, - {0.70833330f, 0.79166670f}, - {0.70833330f, 0.79166670f}, - {0.70833330f, 0.79166670f}, - {0.79166670f, 0.79166670f}, - {0.79166670f, 0.79166670f}, - {0.79166670f, 0.79166670f}, - {0.79166670f, 0.79166670f}, - {0.79166670f, 0.79166670f}, - {0.79166670f, 0.79166670f}, - {0.87500000f, 0.79166670f}, - {0.87500000f, 0.79166670f}, - {0.87500000f, 0.79166670f}, - {0.87500000f, 0.79166670f}, - {0.87500000f, 0.79166670f}, - {0.87500000f, 0.79166670f}, - {0.95833330f, 0.79166670f}, - {0.95833330f, 0.79166670f}, - {0.95833330f, 0.79166670f}, - {0.95833330f, 0.79166670f}, - {0.95833330f, 0.79166670f}, - {0.95833330f, 0.79166670f}, - {0.04166667f, 0.87500000f}, - {0.04166667f, 0.87500000f}, - {0.04166667f, 0.87500000f}, - {0.04166667f, 0.87500000f}, - {0.04166667f, 0.87500000f}, - {0.04166667f, 0.87500000f}, - {0.12500000f, 0.87500000f}, - {0.12500000f, 0.87500000f}, - {0.12500000f, 0.87500000f}, - {0.12500000f, 0.87500000f}, - {0.12500000f, 0.87500000f}, - {0.12500000f, 0.87500000f}, - {0.20833333f, 0.87500000f}, - {0.20833333f, 0.87500000f}, - {0.20833333f, 0.87500000f}, - {0.20833333f, 0.87500000f}, - {0.20833333f, 0.87500000f}, - {0.20833333f, 0.87500000f}, - {0.29166666f, 0.87500000f}, - {0.29166666f, 0.87500000f}, - {0.29166666f, 0.87500000f}, - {0.29166666f, 0.87500000f}, - {0.29166666f, 0.87500000f}, - {0.29166666f, 0.87500000f}, - {0.37500000f, 0.87500000f}, - {0.37500000f, 0.87500000f}, - {0.37500000f, 0.87500000f}, - {0.37500000f, 0.87500000f}, - {0.37500000f, 0.87500000f}, - {0.37500000f, 0.87500000f}, - {0.45833334f, 0.87500000f}, - {0.45833334f, 0.87500000f}, - {0.45833334f, 0.87500000f}, - {0.45833334f, 
0.87500000f}, - {0.45833334f, 0.87500000f}, - {0.45833334f, 0.87500000f}, - {0.54166670f, 0.87500000f}, - {0.54166670f, 0.87500000f}, - {0.54166670f, 0.87500000f}, - {0.54166670f, 0.87500000f}, - {0.54166670f, 0.87500000f}, - {0.54166670f, 0.87500000f}, - {0.62500000f, 0.87500000f}, - {0.62500000f, 0.87500000f}, - {0.62500000f, 0.87500000f}, - {0.62500000f, 0.87500000f}, - {0.62500000f, 0.87500000f}, - {0.62500000f, 0.87500000f}, - {0.70833330f, 0.87500000f}, - {0.70833330f, 0.87500000f}, - {0.70833330f, 0.87500000f}, - {0.70833330f, 0.87500000f}, - {0.70833330f, 0.87500000f}, - {0.70833330f, 0.87500000f}, - {0.79166670f, 0.87500000f}, - {0.79166670f, 0.87500000f}, - {0.79166670f, 0.87500000f}, - {0.79166670f, 0.87500000f}, - {0.79166670f, 0.87500000f}, - {0.79166670f, 0.87500000f}, - {0.87500000f, 0.87500000f}, - {0.87500000f, 0.87500000f}, - {0.87500000f, 0.87500000f}, - {0.87500000f, 0.87500000f}, - {0.87500000f, 0.87500000f}, - {0.87500000f, 0.87500000f}, - {0.95833330f, 0.87500000f}, - {0.95833330f, 0.87500000f}, - {0.95833330f, 0.87500000f}, - {0.95833330f, 0.87500000f}, - {0.95833330f, 0.87500000f}, - {0.95833330f, 0.87500000f}, - {0.04166667f, 0.95833330f}, - {0.04166667f, 0.95833330f}, - {0.04166667f, 0.95833330f}, - {0.04166667f, 0.95833330f}, - {0.04166667f, 0.95833330f}, - {0.04166667f, 0.95833330f}, - {0.12500000f, 0.95833330f}, - {0.12500000f, 0.95833330f}, - {0.12500000f, 0.95833330f}, - {0.12500000f, 0.95833330f}, - {0.12500000f, 0.95833330f}, - {0.12500000f, 0.95833330f}, - {0.20833333f, 0.95833330f}, - {0.20833333f, 0.95833330f}, - {0.20833333f, 0.95833330f}, - {0.20833333f, 0.95833330f}, - {0.20833333f, 0.95833330f}, - {0.20833333f, 0.95833330f}, - {0.29166666f, 0.95833330f}, - {0.29166666f, 0.95833330f}, - {0.29166666f, 0.95833330f}, - {0.29166666f, 0.95833330f}, - {0.29166666f, 0.95833330f}, - {0.29166666f, 0.95833330f}, - {0.37500000f, 0.95833330f}, - {0.37500000f, 0.95833330f}, - {0.37500000f, 0.95833330f}, - {0.37500000f, 0.95833330f}, - 
{0.37500000f, 0.95833330f}, - {0.37500000f, 0.95833330f}, - {0.45833334f, 0.95833330f}, - {0.45833334f, 0.95833330f}, - {0.45833334f, 0.95833330f}, - {0.45833334f, 0.95833330f}, - {0.45833334f, 0.95833330f}, - {0.45833334f, 0.95833330f}, - {0.54166670f, 0.95833330f}, - {0.54166670f, 0.95833330f}, - {0.54166670f, 0.95833330f}, - {0.54166670f, 0.95833330f}, - {0.54166670f, 0.95833330f}, - {0.54166670f, 0.95833330f}, - {0.62500000f, 0.95833330f}, - {0.62500000f, 0.95833330f}, - {0.62500000f, 0.95833330f}, - {0.62500000f, 0.95833330f}, - {0.62500000f, 0.95833330f}, - {0.62500000f, 0.95833330f}, - {0.70833330f, 0.95833330f}, - {0.70833330f, 0.95833330f}, - {0.70833330f, 0.95833330f}, - {0.70833330f, 0.95833330f}, - {0.70833330f, 0.95833330f}, - {0.70833330f, 0.95833330f}, - {0.79166670f, 0.95833330f}, - {0.79166670f, 0.95833330f}, - {0.79166670f, 0.95833330f}, - {0.79166670f, 0.95833330f}, - {0.79166670f, 0.95833330f}, - {0.79166670f, 0.95833330f}, - {0.87500000f, 0.95833330f}, - {0.87500000f, 0.95833330f}, - {0.87500000f, 0.95833330f}, - {0.87500000f, 0.95833330f}, - {0.87500000f, 0.95833330f}, - {0.87500000f, 0.95833330f}, - {0.95833330f, 0.95833330f}, - {0.95833330f, 0.95833330f}, - {0.95833330f, 0.95833330f}, - {0.95833330f, 0.95833330f}, - {0.95833330f, 0.95833330f}, - {0.95833330f, 0.95833330f}, - }; - return anchors; -} diff --git a/models/palm_detection_mediapipe/demo.py b/models/palm_detection_mediapipe/demo.py deleted file mode 100644 index 98fdf694..00000000 --- a/models/palm_detection_mediapipe/demo.py +++ /dev/null @@ -1,134 +0,0 @@ -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from mp_palmdet import MPPalmDet - -# Valid combinations of backends and 
targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser(description='Hand Detector from MediaPipe') -parser.add_argument('--input', '-i', type=str, - help='Usage: Set path to the input image. Omit for using default camera.') -parser.add_argument('--model', '-m', type=str, default='./palm_detection_mediapipe_2023feb.onnx', - help='Usage: Set model path, defaults to palm_detection_mediapipe_2023feb.onnx.') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--score_threshold', type=float, default=0.8, - help='Usage: Set the minimum needed confidence for the model to identify a palm, defaults to 0.8. Smaller values may result in faster detection, but will limit accuracy. Filter out faces of confidence < conf_threshold. An empirical score threshold for the quantized model is 0.49.') -parser.add_argument('--nms_threshold', type=float, default=0.3, - help='Usage: Suppress bounding boxes of iou >= nms_threshold. Default = 0.3.') -parser.add_argument('--save', '-s', action='store_true', - help='Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.') -parser.add_argument('--vis', '-v', action='store_true', - help='Usage: Specify to open a new window to show results. 
Invalid in case of camera input.') -args = parser.parse_args() - -def visualize(image, results, print_results=False, fps=None): - output = image.copy() - - if fps is not None: - cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) - - for idx, palm in enumerate(results): - score = palm[-1] - palm_box = palm[0:4] - palm_landmarks = palm[4:-1].reshape(7, 2) - - # put score - palm_box = palm_box.astype(np.int32) - cv.putText(output, '{:.4f}'.format(score), (palm_box[0], palm_box[1]+12), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 255, 0)) - - # draw box - cv.rectangle(output, (palm_box[0], palm_box[1]), (palm_box[2], palm_box[3]), (0, 255, 0), 2) - - # draw points - palm_landmarks = palm_landmarks.astype(np.int32) - for p in palm_landmarks: - cv.circle(output, p, 2, (0, 0, 255), 2) - - # Print results - if print_results: - print('-----------palm {}-----------'.format(idx + 1)) - print('score: {:.2f}'.format(score)) - print('palm box: {}'.format(palm_box)) - print('palm landmarks: ') - for plm in palm_landmarks: - print('\t{}'.format(plm)) - - return output - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - - # Instantiate MPPalmDet - model = MPPalmDet(modelPath=args.model, - nmsThreshold=args.nms_threshold, - scoreThreshold=args.score_threshold, - backendId=backend_id, - targetId=target_id) - - # If input is an image - if args.input is not None: - image = cv.imread(args.input) - - # Inference - results = model.infer(image) - if len(results) == 0: - print('Hand not detected') - - # Draw results on the input image - image = visualize(image, results, print_results=True) - - # Save results if save is true - if args.save: - print('Resutls saved to result.jpg\n') - cv.imwrite('result.jpg', image) - - # Visualize results in a new window - if args.vis: - cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) - cv.imshow(args.input, image) - 
cv.waitKey(0) - else: # Omit input to call default camera - deviceId = 0 - cap = cv.VideoCapture(deviceId) - - tm = cv.TickMeter() - while cv.waitKey(1) < 0: - hasFrame, frame = cap.read() - if not hasFrame: - print('No frames grabbed!') - break - - # Inference - tm.start() - results = model.infer(frame) - tm.stop() - - # Draw results on the input image - frame = visualize(frame, results, fps=tm.getFPS()) - - # Visualize results in a new Window - cv.imshow('MPPalmDet Demo', frame) - - tm.reset() diff --git a/models/palm_detection_mediapipe/example_outputs/mppalmdet_demo.gif b/models/palm_detection_mediapipe/example_outputs/mppalmdet_demo.gif deleted file mode 100644 index 98dae4d4..00000000 --- a/models/palm_detection_mediapipe/example_outputs/mppalmdet_demo.gif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e4a6e6ff306117f575807ea05bf06c67190bf16fe2d315873acd0824a678dfaf -size 2178521 diff --git a/models/palm_detection_mediapipe/mp_palmdet.py b/models/palm_detection_mediapipe/mp_palmdet.py deleted file mode 100644 index fd1f3bad..00000000 --- a/models/palm_detection_mediapipe/mp_palmdet.py +++ /dev/null @@ -1,2121 +0,0 @@ -import numpy as np -import cv2 as cv - -class MPPalmDet: - def __init__(self, modelPath, nmsThreshold=0.3, scoreThreshold=0.5, topK=5000, backendId=0, targetId=0): - self.model_path = modelPath - self.nms_threshold = nmsThreshold - self.score_threshold = scoreThreshold - self.topK = topK - self.backend_id = backendId - self.target_id = targetId - - self.input_size = np.array([192, 192]) # wh - - self.model = cv.dnn.readNet(self.model_path) - self.model.setPreferableBackend(self.backend_id) - self.model.setPreferableTarget(self.target_id) - - self.anchors = self._load_anchors() - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self.backend_id = backendId - self.target_id = targetId - self.model.setPreferableBackend(self.backend_id) - 
self.model.setPreferableTarget(self.target_id) - - def _preprocess(self, image): - pad_bias = np.array([0., 0.]) # left, top - ratio = min(self.input_size / image.shape[:2]) - if image.shape[0] != self.input_size[0] or image.shape[1] != self.input_size[1]: - # keep aspect ratio when resize - ratio_size = (np.array(image.shape[:2]) * ratio).astype(np.int32) - image = cv.resize(image, (ratio_size[1], ratio_size[0])) - pad_h = self.input_size[0] - ratio_size[0] - pad_w = self.input_size[1] - ratio_size[1] - pad_bias[0] = left = pad_w // 2 - pad_bias[1] = top = pad_h // 2 - right = pad_w - left - bottom = pad_h - top - image = cv.copyMakeBorder(image, top, bottom, left, right, cv.BORDER_CONSTANT, None, (0, 0, 0)) - image = cv.cvtColor(image, cv.COLOR_BGR2RGB) - image = image.astype(np.float32) / 255.0 # norm - pad_bias = (pad_bias / ratio).astype(np.int32) - return image[np.newaxis, :, :, :], pad_bias # hwc -> nhwc - - def infer(self, image): - h, w, _ = image.shape - - # Preprocess - input_blob, pad_bias = self._preprocess(image) - - # Forward - self.model.setInput(input_blob) - output_blob = self.model.forward(self.model.getUnconnectedOutLayersNames()) - - # Postprocess - results = self._postprocess(output_blob, np.array([w, h]), pad_bias) - - return results - - def _postprocess(self, output_blob, original_shape, pad_bias): - score = output_blob[1][0, :, 0] - box_delta = output_blob[0][0, :, 0:4] - landmark_delta = output_blob[0][0, :, 4:] - scale = max(original_shape) - - # get scores - score = score.astype(np.float64) - score = 1 / (1 + np.exp(-score)) - - # get boxes - cxy_delta = box_delta[:, :2] / self.input_size - wh_delta = box_delta[:, 2:] / self.input_size - xy1 = (cxy_delta - wh_delta / 2 + self.anchors) * scale - xy2 = (cxy_delta + wh_delta / 2 + self.anchors) * scale - boxes = np.concatenate([xy1, xy2], axis=1) - boxes -= [pad_bias[0], pad_bias[1], pad_bias[0], pad_bias[1]] - # NMS - keep_idx = cv.dnn.NMSBoxes(boxes, score, self.score_threshold, 
self.nms_threshold, top_k=self.topK) - if len(keep_idx) == 0: - return np.empty(shape=(0, 19)) - selected_score = score[keep_idx] - selected_box = boxes[keep_idx] - - # get landmarks - selected_landmarks = landmark_delta[keep_idx].reshape(-1, 7, 2) - selected_landmarks = selected_landmarks / self.input_size - selected_anchors = self.anchors[keep_idx] - for idx, landmark in enumerate(selected_landmarks): - landmark += selected_anchors[idx] - selected_landmarks *= scale - selected_landmarks -= pad_bias - - # [ - # [bbox_coords, landmarks_coords, score] - # ... - # [bbox_coords, landmarks_coords, score] - # ] - return np.c_[selected_box.reshape(-1, 4), selected_landmarks.reshape(-1, 14), selected_score.reshape(-1, 1)] - - def _load_anchors(self): - return np.array([[0.02083333, 0.02083333], - [0.02083333, 0.02083333], - [0.0625, 0.02083333], - [0.0625, 0.02083333], - [0.10416666, 0.02083333], - [0.10416666, 0.02083333], - [0.14583333, 0.02083333], - [0.14583333, 0.02083333], - [0.1875, 0.02083333], - [0.1875, 0.02083333], - [0.22916667, 0.02083333], - [0.22916667, 0.02083333], - [0.27083334, 0.02083333], - [0.27083334, 0.02083333], - [0.3125, 0.02083333], - [0.3125, 0.02083333], - [0.35416666, 0.02083333], - [0.35416666, 0.02083333], - [0.39583334, 0.02083333], - [0.39583334, 0.02083333], - [0.4375, 0.02083333], - [0.4375, 0.02083333], - [0.47916666, 0.02083333], - [0.47916666, 0.02083333], - [0.5208333, 0.02083333], - [0.5208333, 0.02083333], - [0.5625, 0.02083333], - [0.5625, 0.02083333], - [0.6041667, 0.02083333], - [0.6041667, 0.02083333], - [0.6458333, 0.02083333], - [0.6458333, 0.02083333], - [0.6875, 0.02083333], - [0.6875, 0.02083333], - [0.7291667, 0.02083333], - [0.7291667, 0.02083333], - [0.7708333, 0.02083333], - [0.7708333, 0.02083333], - [0.8125, 0.02083333], - [0.8125, 0.02083333], - [0.8541667, 0.02083333], - [0.8541667, 0.02083333], - [0.8958333, 0.02083333], - [0.8958333, 0.02083333], - [0.9375, 0.02083333], - [0.9375, 0.02083333], - [0.9791667, 
0.02083333], - [0.9791667, 0.02083333], - [0.02083333, 0.0625], - [0.02083333, 0.0625], - [0.0625, 0.0625], - [0.0625, 0.0625], - [0.10416666, 0.0625], - [0.10416666, 0.0625], - [0.14583333, 0.0625], - [0.14583333, 0.0625], - [0.1875, 0.0625], - [0.1875, 0.0625], - [0.22916667, 0.0625], - [0.22916667, 0.0625], - [0.27083334, 0.0625], - [0.27083334, 0.0625], - [0.3125, 0.0625], - [0.3125, 0.0625], - [0.35416666, 0.0625], - [0.35416666, 0.0625], - [0.39583334, 0.0625], - [0.39583334, 0.0625], - [0.4375, 0.0625], - [0.4375, 0.0625], - [0.47916666, 0.0625], - [0.47916666, 0.0625], - [0.5208333, 0.0625], - [0.5208333, 0.0625], - [0.5625, 0.0625], - [0.5625, 0.0625], - [0.6041667, 0.0625], - [0.6041667, 0.0625], - [0.6458333, 0.0625], - [0.6458333, 0.0625], - [0.6875, 0.0625], - [0.6875, 0.0625], - [0.7291667, 0.0625], - [0.7291667, 0.0625], - [0.7708333, 0.0625], - [0.7708333, 0.0625], - [0.8125, 0.0625], - [0.8125, 0.0625], - [0.8541667, 0.0625], - [0.8541667, 0.0625], - [0.8958333, 0.0625], - [0.8958333, 0.0625], - [0.9375, 0.0625], - [0.9375, 0.0625], - [0.9791667, 0.0625], - [0.9791667, 0.0625], - [0.02083333, 0.10416666], - [0.02083333, 0.10416666], - [0.0625, 0.10416666], - [0.0625, 0.10416666], - [0.10416666, 0.10416666], - [0.10416666, 0.10416666], - [0.14583333, 0.10416666], - [0.14583333, 0.10416666], - [0.1875, 0.10416666], - [0.1875, 0.10416666], - [0.22916667, 0.10416666], - [0.22916667, 0.10416666], - [0.27083334, 0.10416666], - [0.27083334, 0.10416666], - [0.3125, 0.10416666], - [0.3125, 0.10416666], - [0.35416666, 0.10416666], - [0.35416666, 0.10416666], - [0.39583334, 0.10416666], - [0.39583334, 0.10416666], - [0.4375, 0.10416666], - [0.4375, 0.10416666], - [0.47916666, 0.10416666], - [0.47916666, 0.10416666], - [0.5208333, 0.10416666], - [0.5208333, 0.10416666], - [0.5625, 0.10416666], - [0.5625, 0.10416666], - [0.6041667, 0.10416666], - [0.6041667, 0.10416666], - [0.6458333, 0.10416666], - [0.6458333, 0.10416666], - [0.6875, 0.10416666], - [0.6875, 
0.10416666], - [0.7291667, 0.10416666], - [0.7291667, 0.10416666], - [0.7708333, 0.10416666], - [0.7708333, 0.10416666], - [0.8125, 0.10416666], - [0.8125, 0.10416666], - [0.8541667, 0.10416666], - [0.8541667, 0.10416666], - [0.8958333, 0.10416666], - [0.8958333, 0.10416666], - [0.9375, 0.10416666], - [0.9375, 0.10416666], - [0.9791667, 0.10416666], - [0.9791667, 0.10416666], - [0.02083333, 0.14583333], - [0.02083333, 0.14583333], - [0.0625, 0.14583333], - [0.0625, 0.14583333], - [0.10416666, 0.14583333], - [0.10416666, 0.14583333], - [0.14583333, 0.14583333], - [0.14583333, 0.14583333], - [0.1875, 0.14583333], - [0.1875, 0.14583333], - [0.22916667, 0.14583333], - [0.22916667, 0.14583333], - [0.27083334, 0.14583333], - [0.27083334, 0.14583333], - [0.3125, 0.14583333], - [0.3125, 0.14583333], - [0.35416666, 0.14583333], - [0.35416666, 0.14583333], - [0.39583334, 0.14583333], - [0.39583334, 0.14583333], - [0.4375, 0.14583333], - [0.4375, 0.14583333], - [0.47916666, 0.14583333], - [0.47916666, 0.14583333], - [0.5208333, 0.14583333], - [0.5208333, 0.14583333], - [0.5625, 0.14583333], - [0.5625, 0.14583333], - [0.6041667, 0.14583333], - [0.6041667, 0.14583333], - [0.6458333, 0.14583333], - [0.6458333, 0.14583333], - [0.6875, 0.14583333], - [0.6875, 0.14583333], - [0.7291667, 0.14583333], - [0.7291667, 0.14583333], - [0.7708333, 0.14583333], - [0.7708333, 0.14583333], - [0.8125, 0.14583333], - [0.8125, 0.14583333], - [0.8541667, 0.14583333], - [0.8541667, 0.14583333], - [0.8958333, 0.14583333], - [0.8958333, 0.14583333], - [0.9375, 0.14583333], - [0.9375, 0.14583333], - [0.9791667, 0.14583333], - [0.9791667, 0.14583333], - [0.02083333, 0.1875], - [0.02083333, 0.1875], - [0.0625, 0.1875], - [0.0625, 0.1875], - [0.10416666, 0.1875], - [0.10416666, 0.1875], - [0.14583333, 0.1875], - [0.14583333, 0.1875], - [0.1875, 0.1875], - [0.1875, 0.1875], - [0.22916667, 0.1875], - [0.22916667, 0.1875], - [0.27083334, 0.1875], - [0.27083334, 0.1875], - [0.3125, 0.1875], - [0.3125, 
0.1875], - [0.35416666, 0.1875], - [0.35416666, 0.1875], - [0.39583334, 0.1875], - [0.39583334, 0.1875], - [0.4375, 0.1875], - [0.4375, 0.1875], - [0.47916666, 0.1875], - [0.47916666, 0.1875], - [0.5208333, 0.1875], - [0.5208333, 0.1875], - [0.5625, 0.1875], - [0.5625, 0.1875], - [0.6041667, 0.1875], - [0.6041667, 0.1875], - [0.6458333, 0.1875], - [0.6458333, 0.1875], - [0.6875, 0.1875], - [0.6875, 0.1875], - [0.7291667, 0.1875], - [0.7291667, 0.1875], - [0.7708333, 0.1875], - [0.7708333, 0.1875], - [0.8125, 0.1875], - [0.8125, 0.1875], - [0.8541667, 0.1875], - [0.8541667, 0.1875], - [0.8958333, 0.1875], - [0.8958333, 0.1875], - [0.9375, 0.1875], - [0.9375, 0.1875], - [0.9791667, 0.1875], - [0.9791667, 0.1875], - [0.02083333, 0.22916667], - [0.02083333, 0.22916667], - [0.0625, 0.22916667], - [0.0625, 0.22916667], - [0.10416666, 0.22916667], - [0.10416666, 0.22916667], - [0.14583333, 0.22916667], - [0.14583333, 0.22916667], - [0.1875, 0.22916667], - [0.1875, 0.22916667], - [0.22916667, 0.22916667], - [0.22916667, 0.22916667], - [0.27083334, 0.22916667], - [0.27083334, 0.22916667], - [0.3125, 0.22916667], - [0.3125, 0.22916667], - [0.35416666, 0.22916667], - [0.35416666, 0.22916667], - [0.39583334, 0.22916667], - [0.39583334, 0.22916667], - [0.4375, 0.22916667], - [0.4375, 0.22916667], - [0.47916666, 0.22916667], - [0.47916666, 0.22916667], - [0.5208333, 0.22916667], - [0.5208333, 0.22916667], - [0.5625, 0.22916667], - [0.5625, 0.22916667], - [0.6041667, 0.22916667], - [0.6041667, 0.22916667], - [0.6458333, 0.22916667], - [0.6458333, 0.22916667], - [0.6875, 0.22916667], - [0.6875, 0.22916667], - [0.7291667, 0.22916667], - [0.7291667, 0.22916667], - [0.7708333, 0.22916667], - [0.7708333, 0.22916667], - [0.8125, 0.22916667], - [0.8125, 0.22916667], - [0.8541667, 0.22916667], - [0.8541667, 0.22916667], - [0.8958333, 0.22916667], - [0.8958333, 0.22916667], - [0.9375, 0.22916667], - [0.9375, 0.22916667], - [0.9791667, 0.22916667], - [0.9791667, 0.22916667], - [0.02083333, 
0.27083334], - [0.02083333, 0.27083334], - [0.0625, 0.27083334], - [0.0625, 0.27083334], - [0.10416666, 0.27083334], - [0.10416666, 0.27083334], - [0.14583333, 0.27083334], - [0.14583333, 0.27083334], - [0.1875, 0.27083334], - [0.1875, 0.27083334], - [0.22916667, 0.27083334], - [0.22916667, 0.27083334], - [0.27083334, 0.27083334], - [0.27083334, 0.27083334], - [0.3125, 0.27083334], - [0.3125, 0.27083334], - [0.35416666, 0.27083334], - [0.35416666, 0.27083334], - [0.39583334, 0.27083334], - [0.39583334, 0.27083334], - [0.4375, 0.27083334], - [0.4375, 0.27083334], - [0.47916666, 0.27083334], - [0.47916666, 0.27083334], - [0.5208333, 0.27083334], - [0.5208333, 0.27083334], - [0.5625, 0.27083334], - [0.5625, 0.27083334], - [0.6041667, 0.27083334], - [0.6041667, 0.27083334], - [0.6458333, 0.27083334], - [0.6458333, 0.27083334], - [0.6875, 0.27083334], - [0.6875, 0.27083334], - [0.7291667, 0.27083334], - [0.7291667, 0.27083334], - [0.7708333, 0.27083334], - [0.7708333, 0.27083334], - [0.8125, 0.27083334], - [0.8125, 0.27083334], - [0.8541667, 0.27083334], - [0.8541667, 0.27083334], - [0.8958333, 0.27083334], - [0.8958333, 0.27083334], - [0.9375, 0.27083334], - [0.9375, 0.27083334], - [0.9791667, 0.27083334], - [0.9791667, 0.27083334], - [0.02083333, 0.3125], - [0.02083333, 0.3125], - [0.0625, 0.3125], - [0.0625, 0.3125], - [0.10416666, 0.3125], - [0.10416666, 0.3125], - [0.14583333, 0.3125], - [0.14583333, 0.3125], - [0.1875, 0.3125], - [0.1875, 0.3125], - [0.22916667, 0.3125], - [0.22916667, 0.3125], - [0.27083334, 0.3125], - [0.27083334, 0.3125], - [0.3125, 0.3125], - [0.3125, 0.3125], - [0.35416666, 0.3125], - [0.35416666, 0.3125], - [0.39583334, 0.3125], - [0.39583334, 0.3125], - [0.4375, 0.3125], - [0.4375, 0.3125], - [0.47916666, 0.3125], - [0.47916666, 0.3125], - [0.5208333, 0.3125], - [0.5208333, 0.3125], - [0.5625, 0.3125], - [0.5625, 0.3125], - [0.6041667, 0.3125], - [0.6041667, 0.3125], - [0.6458333, 0.3125], - [0.6458333, 0.3125], - [0.6875, 0.3125], - 
[0.6875, 0.3125], - [0.7291667, 0.3125], - [0.7291667, 0.3125], - [0.7708333, 0.3125], - [0.7708333, 0.3125], - [0.8125, 0.3125], - [0.8125, 0.3125], - [0.8541667, 0.3125], - [0.8541667, 0.3125], - [0.8958333, 0.3125], - [0.8958333, 0.3125], - [0.9375, 0.3125], - [0.9375, 0.3125], - [0.9791667, 0.3125], - [0.9791667, 0.3125], - [0.02083333, 0.35416666], - [0.02083333, 0.35416666], - [0.0625, 0.35416666], - [0.0625, 0.35416666], - [0.10416666, 0.35416666], - [0.10416666, 0.35416666], - [0.14583333, 0.35416666], - [0.14583333, 0.35416666], - [0.1875, 0.35416666], - [0.1875, 0.35416666], - [0.22916667, 0.35416666], - [0.22916667, 0.35416666], - [0.27083334, 0.35416666], - [0.27083334, 0.35416666], - [0.3125, 0.35416666], - [0.3125, 0.35416666], - [0.35416666, 0.35416666], - [0.35416666, 0.35416666], - [0.39583334, 0.35416666], - [0.39583334, 0.35416666], - [0.4375, 0.35416666], - [0.4375, 0.35416666], - [0.47916666, 0.35416666], - [0.47916666, 0.35416666], - [0.5208333, 0.35416666], - [0.5208333, 0.35416666], - [0.5625, 0.35416666], - [0.5625, 0.35416666], - [0.6041667, 0.35416666], - [0.6041667, 0.35416666], - [0.6458333, 0.35416666], - [0.6458333, 0.35416666], - [0.6875, 0.35416666], - [0.6875, 0.35416666], - [0.7291667, 0.35416666], - [0.7291667, 0.35416666], - [0.7708333, 0.35416666], - [0.7708333, 0.35416666], - [0.8125, 0.35416666], - [0.8125, 0.35416666], - [0.8541667, 0.35416666], - [0.8541667, 0.35416666], - [0.8958333, 0.35416666], - [0.8958333, 0.35416666], - [0.9375, 0.35416666], - [0.9375, 0.35416666], - [0.9791667, 0.35416666], - [0.9791667, 0.35416666], - [0.02083333, 0.39583334], - [0.02083333, 0.39583334], - [0.0625, 0.39583334], - [0.0625, 0.39583334], - [0.10416666, 0.39583334], - [0.10416666, 0.39583334], - [0.14583333, 0.39583334], - [0.14583333, 0.39583334], - [0.1875, 0.39583334], - [0.1875, 0.39583334], - [0.22916667, 0.39583334], - [0.22916667, 0.39583334], - [0.27083334, 0.39583334], - [0.27083334, 0.39583334], - [0.3125, 0.39583334], - 
[0.3125, 0.39583334], - [0.35416666, 0.39583334], - [0.35416666, 0.39583334], - [0.39583334, 0.39583334], - [0.39583334, 0.39583334], - [0.4375, 0.39583334], - [0.4375, 0.39583334], - [0.47916666, 0.39583334], - [0.47916666, 0.39583334], - [0.5208333, 0.39583334], - [0.5208333, 0.39583334], - [0.5625, 0.39583334], - [0.5625, 0.39583334], - [0.6041667, 0.39583334], - [0.6041667, 0.39583334], - [0.6458333, 0.39583334], - [0.6458333, 0.39583334], - [0.6875, 0.39583334], - [0.6875, 0.39583334], - [0.7291667, 0.39583334], - [0.7291667, 0.39583334], - [0.7708333, 0.39583334], - [0.7708333, 0.39583334], - [0.8125, 0.39583334], - [0.8125, 0.39583334], - [0.8541667, 0.39583334], - [0.8541667, 0.39583334], - [0.8958333, 0.39583334], - [0.8958333, 0.39583334], - [0.9375, 0.39583334], - [0.9375, 0.39583334], - [0.9791667, 0.39583334], - [0.9791667, 0.39583334], - [0.02083333, 0.4375], - [0.02083333, 0.4375], - [0.0625, 0.4375], - [0.0625, 0.4375], - [0.10416666, 0.4375], - [0.10416666, 0.4375], - [0.14583333, 0.4375], - [0.14583333, 0.4375], - [0.1875, 0.4375], - [0.1875, 0.4375], - [0.22916667, 0.4375], - [0.22916667, 0.4375], - [0.27083334, 0.4375], - [0.27083334, 0.4375], - [0.3125, 0.4375], - [0.3125, 0.4375], - [0.35416666, 0.4375], - [0.35416666, 0.4375], - [0.39583334, 0.4375], - [0.39583334, 0.4375], - [0.4375, 0.4375], - [0.4375, 0.4375], - [0.47916666, 0.4375], - [0.47916666, 0.4375], - [0.5208333, 0.4375], - [0.5208333, 0.4375], - [0.5625, 0.4375], - [0.5625, 0.4375], - [0.6041667, 0.4375], - [0.6041667, 0.4375], - [0.6458333, 0.4375], - [0.6458333, 0.4375], - [0.6875, 0.4375], - [0.6875, 0.4375], - [0.7291667, 0.4375], - [0.7291667, 0.4375], - [0.7708333, 0.4375], - [0.7708333, 0.4375], - [0.8125, 0.4375], - [0.8125, 0.4375], - [0.8541667, 0.4375], - [0.8541667, 0.4375], - [0.8958333, 0.4375], - [0.8958333, 0.4375], - [0.9375, 0.4375], - [0.9375, 0.4375], - [0.9791667, 0.4375], - [0.9791667, 0.4375], - [0.02083333, 0.47916666], - [0.02083333, 0.47916666], - 
[0.0625, 0.47916666], - [0.0625, 0.47916666], - [0.10416666, 0.47916666], - [0.10416666, 0.47916666], - [0.14583333, 0.47916666], - [0.14583333, 0.47916666], - [0.1875, 0.47916666], - [0.1875, 0.47916666], - [0.22916667, 0.47916666], - [0.22916667, 0.47916666], - [0.27083334, 0.47916666], - [0.27083334, 0.47916666], - [0.3125, 0.47916666], - [0.3125, 0.47916666], - [0.35416666, 0.47916666], - [0.35416666, 0.47916666], - [0.39583334, 0.47916666], - [0.39583334, 0.47916666], - [0.4375, 0.47916666], - [0.4375, 0.47916666], - [0.47916666, 0.47916666], - [0.47916666, 0.47916666], - [0.5208333, 0.47916666], - [0.5208333, 0.47916666], - [0.5625, 0.47916666], - [0.5625, 0.47916666], - [0.6041667, 0.47916666], - [0.6041667, 0.47916666], - [0.6458333, 0.47916666], - [0.6458333, 0.47916666], - [0.6875, 0.47916666], - [0.6875, 0.47916666], - [0.7291667, 0.47916666], - [0.7291667, 0.47916666], - [0.7708333, 0.47916666], - [0.7708333, 0.47916666], - [0.8125, 0.47916666], - [0.8125, 0.47916666], - [0.8541667, 0.47916666], - [0.8541667, 0.47916666], - [0.8958333, 0.47916666], - [0.8958333, 0.47916666], - [0.9375, 0.47916666], - [0.9375, 0.47916666], - [0.9791667, 0.47916666], - [0.9791667, 0.47916666], - [0.02083333, 0.5208333], - [0.02083333, 0.5208333], - [0.0625, 0.5208333], - [0.0625, 0.5208333], - [0.10416666, 0.5208333], - [0.10416666, 0.5208333], - [0.14583333, 0.5208333], - [0.14583333, 0.5208333], - [0.1875, 0.5208333], - [0.1875, 0.5208333], - [0.22916667, 0.5208333], - [0.22916667, 0.5208333], - [0.27083334, 0.5208333], - [0.27083334, 0.5208333], - [0.3125, 0.5208333], - [0.3125, 0.5208333], - [0.35416666, 0.5208333], - [0.35416666, 0.5208333], - [0.39583334, 0.5208333], - [0.39583334, 0.5208333], - [0.4375, 0.5208333], - [0.4375, 0.5208333], - [0.47916666, 0.5208333], - [0.47916666, 0.5208333], - [0.5208333, 0.5208333], - [0.5208333, 0.5208333], - [0.5625, 0.5208333], - [0.5625, 0.5208333], - [0.6041667, 0.5208333], - [0.6041667, 0.5208333], - [0.6458333, 0.5208333], - 
[0.6458333, 0.5208333], - [0.6875, 0.5208333], - [0.6875, 0.5208333], - [0.7291667, 0.5208333], - [0.7291667, 0.5208333], - [0.7708333, 0.5208333], - [0.7708333, 0.5208333], - [0.8125, 0.5208333], - [0.8125, 0.5208333], - [0.8541667, 0.5208333], - [0.8541667, 0.5208333], - [0.8958333, 0.5208333], - [0.8958333, 0.5208333], - [0.9375, 0.5208333], - [0.9375, 0.5208333], - [0.9791667, 0.5208333], - [0.9791667, 0.5208333], - [0.02083333, 0.5625], - [0.02083333, 0.5625], - [0.0625, 0.5625], - [0.0625, 0.5625], - [0.10416666, 0.5625], - [0.10416666, 0.5625], - [0.14583333, 0.5625], - [0.14583333, 0.5625], - [0.1875, 0.5625], - [0.1875, 0.5625], - [0.22916667, 0.5625], - [0.22916667, 0.5625], - [0.27083334, 0.5625], - [0.27083334, 0.5625], - [0.3125, 0.5625], - [0.3125, 0.5625], - [0.35416666, 0.5625], - [0.35416666, 0.5625], - [0.39583334, 0.5625], - [0.39583334, 0.5625], - [0.4375, 0.5625], - [0.4375, 0.5625], - [0.47916666, 0.5625], - [0.47916666, 0.5625], - [0.5208333, 0.5625], - [0.5208333, 0.5625], - [0.5625, 0.5625], - [0.5625, 0.5625], - [0.6041667, 0.5625], - [0.6041667, 0.5625], - [0.6458333, 0.5625], - [0.6458333, 0.5625], - [0.6875, 0.5625], - [0.6875, 0.5625], - [0.7291667, 0.5625], - [0.7291667, 0.5625], - [0.7708333, 0.5625], - [0.7708333, 0.5625], - [0.8125, 0.5625], - [0.8125, 0.5625], - [0.8541667, 0.5625], - [0.8541667, 0.5625], - [0.8958333, 0.5625], - [0.8958333, 0.5625], - [0.9375, 0.5625], - [0.9375, 0.5625], - [0.9791667, 0.5625], - [0.9791667, 0.5625], - [0.02083333, 0.6041667], - [0.02083333, 0.6041667], - [0.0625, 0.6041667], - [0.0625, 0.6041667], - [0.10416666, 0.6041667], - [0.10416666, 0.6041667], - [0.14583333, 0.6041667], - [0.14583333, 0.6041667], - [0.1875, 0.6041667], - [0.1875, 0.6041667], - [0.22916667, 0.6041667], - [0.22916667, 0.6041667], - [0.27083334, 0.6041667], - [0.27083334, 0.6041667], - [0.3125, 0.6041667], - [0.3125, 0.6041667], - [0.35416666, 0.6041667], - [0.35416666, 0.6041667], - [0.39583334, 0.6041667], - [0.39583334, 
0.6041667], - [0.4375, 0.6041667], - [0.4375, 0.6041667], - [0.47916666, 0.6041667], - [0.47916666, 0.6041667], - [0.5208333, 0.6041667], - [0.5208333, 0.6041667], - [0.5625, 0.6041667], - [0.5625, 0.6041667], - [0.6041667, 0.6041667], - [0.6041667, 0.6041667], - [0.6458333, 0.6041667], - [0.6458333, 0.6041667], - [0.6875, 0.6041667], - [0.6875, 0.6041667], - [0.7291667, 0.6041667], - [0.7291667, 0.6041667], - [0.7708333, 0.6041667], - [0.7708333, 0.6041667], - [0.8125, 0.6041667], - [0.8125, 0.6041667], - [0.8541667, 0.6041667], - [0.8541667, 0.6041667], - [0.8958333, 0.6041667], - [0.8958333, 0.6041667], - [0.9375, 0.6041667], - [0.9375, 0.6041667], - [0.9791667, 0.6041667], - [0.9791667, 0.6041667], - [0.02083333, 0.6458333], - [0.02083333, 0.6458333], - [0.0625, 0.6458333], - [0.0625, 0.6458333], - [0.10416666, 0.6458333], - [0.10416666, 0.6458333], - [0.14583333, 0.6458333], - [0.14583333, 0.6458333], - [0.1875, 0.6458333], - [0.1875, 0.6458333], - [0.22916667, 0.6458333], - [0.22916667, 0.6458333], - [0.27083334, 0.6458333], - [0.27083334, 0.6458333], - [0.3125, 0.6458333], - [0.3125, 0.6458333], - [0.35416666, 0.6458333], - [0.35416666, 0.6458333], - [0.39583334, 0.6458333], - [0.39583334, 0.6458333], - [0.4375, 0.6458333], - [0.4375, 0.6458333], - [0.47916666, 0.6458333], - [0.47916666, 0.6458333], - [0.5208333, 0.6458333], - [0.5208333, 0.6458333], - [0.5625, 0.6458333], - [0.5625, 0.6458333], - [0.6041667, 0.6458333], - [0.6041667, 0.6458333], - [0.6458333, 0.6458333], - [0.6458333, 0.6458333], - [0.6875, 0.6458333], - [0.6875, 0.6458333], - [0.7291667, 0.6458333], - [0.7291667, 0.6458333], - [0.7708333, 0.6458333], - [0.7708333, 0.6458333], - [0.8125, 0.6458333], - [0.8125, 0.6458333], - [0.8541667, 0.6458333], - [0.8541667, 0.6458333], - [0.8958333, 0.6458333], - [0.8958333, 0.6458333], - [0.9375, 0.6458333], - [0.9375, 0.6458333], - [0.9791667, 0.6458333], - [0.9791667, 0.6458333], - [0.02083333, 0.6875], - [0.02083333, 0.6875], - [0.0625, 0.6875], - 
[0.0625, 0.6875], - [0.10416666, 0.6875], - [0.10416666, 0.6875], - [0.14583333, 0.6875], - [0.14583333, 0.6875], - [0.1875, 0.6875], - [0.1875, 0.6875], - [0.22916667, 0.6875], - [0.22916667, 0.6875], - [0.27083334, 0.6875], - [0.27083334, 0.6875], - [0.3125, 0.6875], - [0.3125, 0.6875], - [0.35416666, 0.6875], - [0.35416666, 0.6875], - [0.39583334, 0.6875], - [0.39583334, 0.6875], - [0.4375, 0.6875], - [0.4375, 0.6875], - [0.47916666, 0.6875], - [0.47916666, 0.6875], - [0.5208333, 0.6875], - [0.5208333, 0.6875], - [0.5625, 0.6875], - [0.5625, 0.6875], - [0.6041667, 0.6875], - [0.6041667, 0.6875], - [0.6458333, 0.6875], - [0.6458333, 0.6875], - [0.6875, 0.6875], - [0.6875, 0.6875], - [0.7291667, 0.6875], - [0.7291667, 0.6875], - [0.7708333, 0.6875], - [0.7708333, 0.6875], - [0.8125, 0.6875], - [0.8125, 0.6875], - [0.8541667, 0.6875], - [0.8541667, 0.6875], - [0.8958333, 0.6875], - [0.8958333, 0.6875], - [0.9375, 0.6875], - [0.9375, 0.6875], - [0.9791667, 0.6875], - [0.9791667, 0.6875], - [0.02083333, 0.7291667], - [0.02083333, 0.7291667], - [0.0625, 0.7291667], - [0.0625, 0.7291667], - [0.10416666, 0.7291667], - [0.10416666, 0.7291667], - [0.14583333, 0.7291667], - [0.14583333, 0.7291667], - [0.1875, 0.7291667], - [0.1875, 0.7291667], - [0.22916667, 0.7291667], - [0.22916667, 0.7291667], - [0.27083334, 0.7291667], - [0.27083334, 0.7291667], - [0.3125, 0.7291667], - [0.3125, 0.7291667], - [0.35416666, 0.7291667], - [0.35416666, 0.7291667], - [0.39583334, 0.7291667], - [0.39583334, 0.7291667], - [0.4375, 0.7291667], - [0.4375, 0.7291667], - [0.47916666, 0.7291667], - [0.47916666, 0.7291667], - [0.5208333, 0.7291667], - [0.5208333, 0.7291667], - [0.5625, 0.7291667], - [0.5625, 0.7291667], - [0.6041667, 0.7291667], - [0.6041667, 0.7291667], - [0.6458333, 0.7291667], - [0.6458333, 0.7291667], - [0.6875, 0.7291667], - [0.6875, 0.7291667], - [0.7291667, 0.7291667], - [0.7291667, 0.7291667], - [0.7708333, 0.7291667], - [0.7708333, 0.7291667], - [0.8125, 0.7291667], - 
[0.8125, 0.7291667], - [0.8541667, 0.7291667], - [0.8541667, 0.7291667], - [0.8958333, 0.7291667], - [0.8958333, 0.7291667], - [0.9375, 0.7291667], - [0.9375, 0.7291667], - [0.9791667, 0.7291667], - [0.9791667, 0.7291667], - [0.02083333, 0.7708333], - [0.02083333, 0.7708333], - [0.0625, 0.7708333], - [0.0625, 0.7708333], - [0.10416666, 0.7708333], - [0.10416666, 0.7708333], - [0.14583333, 0.7708333], - [0.14583333, 0.7708333], - [0.1875, 0.7708333], - [0.1875, 0.7708333], - [0.22916667, 0.7708333], - [0.22916667, 0.7708333], - [0.27083334, 0.7708333], - [0.27083334, 0.7708333], - [0.3125, 0.7708333], - [0.3125, 0.7708333], - [0.35416666, 0.7708333], - [0.35416666, 0.7708333], - [0.39583334, 0.7708333], - [0.39583334, 0.7708333], - [0.4375, 0.7708333], - [0.4375, 0.7708333], - [0.47916666, 0.7708333], - [0.47916666, 0.7708333], - [0.5208333, 0.7708333], - [0.5208333, 0.7708333], - [0.5625, 0.7708333], - [0.5625, 0.7708333], - [0.6041667, 0.7708333], - [0.6041667, 0.7708333], - [0.6458333, 0.7708333], - [0.6458333, 0.7708333], - [0.6875, 0.7708333], - [0.6875, 0.7708333], - [0.7291667, 0.7708333], - [0.7291667, 0.7708333], - [0.7708333, 0.7708333], - [0.7708333, 0.7708333], - [0.8125, 0.7708333], - [0.8125, 0.7708333], - [0.8541667, 0.7708333], - [0.8541667, 0.7708333], - [0.8958333, 0.7708333], - [0.8958333, 0.7708333], - [0.9375, 0.7708333], - [0.9375, 0.7708333], - [0.9791667, 0.7708333], - [0.9791667, 0.7708333], - [0.02083333, 0.8125], - [0.02083333, 0.8125], - [0.0625, 0.8125], - [0.0625, 0.8125], - [0.10416666, 0.8125], - [0.10416666, 0.8125], - [0.14583333, 0.8125], - [0.14583333, 0.8125], - [0.1875, 0.8125], - [0.1875, 0.8125], - [0.22916667, 0.8125], - [0.22916667, 0.8125], - [0.27083334, 0.8125], - [0.27083334, 0.8125], - [0.3125, 0.8125], - [0.3125, 0.8125], - [0.35416666, 0.8125], - [0.35416666, 0.8125], - [0.39583334, 0.8125], - [0.39583334, 0.8125], - [0.4375, 0.8125], - [0.4375, 0.8125], - [0.47916666, 0.8125], - [0.47916666, 0.8125], - [0.5208333, 
0.8125], - [0.5208333, 0.8125], - [0.5625, 0.8125], - [0.5625, 0.8125], - [0.6041667, 0.8125], - [0.6041667, 0.8125], - [0.6458333, 0.8125], - [0.6458333, 0.8125], - [0.6875, 0.8125], - [0.6875, 0.8125], - [0.7291667, 0.8125], - [0.7291667, 0.8125], - [0.7708333, 0.8125], - [0.7708333, 0.8125], - [0.8125, 0.8125], - [0.8125, 0.8125], - [0.8541667, 0.8125], - [0.8541667, 0.8125], - [0.8958333, 0.8125], - [0.8958333, 0.8125], - [0.9375, 0.8125], - [0.9375, 0.8125], - [0.9791667, 0.8125], - [0.9791667, 0.8125], - [0.02083333, 0.8541667], - [0.02083333, 0.8541667], - [0.0625, 0.8541667], - [0.0625, 0.8541667], - [0.10416666, 0.8541667], - [0.10416666, 0.8541667], - [0.14583333, 0.8541667], - [0.14583333, 0.8541667], - [0.1875, 0.8541667], - [0.1875, 0.8541667], - [0.22916667, 0.8541667], - [0.22916667, 0.8541667], - [0.27083334, 0.8541667], - [0.27083334, 0.8541667], - [0.3125, 0.8541667], - [0.3125, 0.8541667], - [0.35416666, 0.8541667], - [0.35416666, 0.8541667], - [0.39583334, 0.8541667], - [0.39583334, 0.8541667], - [0.4375, 0.8541667], - [0.4375, 0.8541667], - [0.47916666, 0.8541667], - [0.47916666, 0.8541667], - [0.5208333, 0.8541667], - [0.5208333, 0.8541667], - [0.5625, 0.8541667], - [0.5625, 0.8541667], - [0.6041667, 0.8541667], - [0.6041667, 0.8541667], - [0.6458333, 0.8541667], - [0.6458333, 0.8541667], - [0.6875, 0.8541667], - [0.6875, 0.8541667], - [0.7291667, 0.8541667], - [0.7291667, 0.8541667], - [0.7708333, 0.8541667], - [0.7708333, 0.8541667], - [0.8125, 0.8541667], - [0.8125, 0.8541667], - [0.8541667, 0.8541667], - [0.8541667, 0.8541667], - [0.8958333, 0.8541667], - [0.8958333, 0.8541667], - [0.9375, 0.8541667], - [0.9375, 0.8541667], - [0.9791667, 0.8541667], - [0.9791667, 0.8541667], - [0.02083333, 0.8958333], - [0.02083333, 0.8958333], - [0.0625, 0.8958333], - [0.0625, 0.8958333], - [0.10416666, 0.8958333], - [0.10416666, 0.8958333], - [0.14583333, 0.8958333], - [0.14583333, 0.8958333], - [0.1875, 0.8958333], - [0.1875, 0.8958333], - [0.22916667, 
0.8958333], - [0.22916667, 0.8958333], - [0.27083334, 0.8958333], - [0.27083334, 0.8958333], - [0.3125, 0.8958333], - [0.3125, 0.8958333], - [0.35416666, 0.8958333], - [0.35416666, 0.8958333], - [0.39583334, 0.8958333], - [0.39583334, 0.8958333], - [0.4375, 0.8958333], - [0.4375, 0.8958333], - [0.47916666, 0.8958333], - [0.47916666, 0.8958333], - [0.5208333, 0.8958333], - [0.5208333, 0.8958333], - [0.5625, 0.8958333], - [0.5625, 0.8958333], - [0.6041667, 0.8958333], - [0.6041667, 0.8958333], - [0.6458333, 0.8958333], - [0.6458333, 0.8958333], - [0.6875, 0.8958333], - [0.6875, 0.8958333], - [0.7291667, 0.8958333], - [0.7291667, 0.8958333], - [0.7708333, 0.8958333], - [0.7708333, 0.8958333], - [0.8125, 0.8958333], - [0.8125, 0.8958333], - [0.8541667, 0.8958333], - [0.8541667, 0.8958333], - [0.8958333, 0.8958333], - [0.8958333, 0.8958333], - [0.9375, 0.8958333], - [0.9375, 0.8958333], - [0.9791667, 0.8958333], - [0.9791667, 0.8958333], - [0.02083333, 0.9375], - [0.02083333, 0.9375], - [0.0625, 0.9375], - [0.0625, 0.9375], - [0.10416666, 0.9375], - [0.10416666, 0.9375], - [0.14583333, 0.9375], - [0.14583333, 0.9375], - [0.1875, 0.9375], - [0.1875, 0.9375], - [0.22916667, 0.9375], - [0.22916667, 0.9375], - [0.27083334, 0.9375], - [0.27083334, 0.9375], - [0.3125, 0.9375], - [0.3125, 0.9375], - [0.35416666, 0.9375], - [0.35416666, 0.9375], - [0.39583334, 0.9375], - [0.39583334, 0.9375], - [0.4375, 0.9375], - [0.4375, 0.9375], - [0.47916666, 0.9375], - [0.47916666, 0.9375], - [0.5208333, 0.9375], - [0.5208333, 0.9375], - [0.5625, 0.9375], - [0.5625, 0.9375], - [0.6041667, 0.9375], - [0.6041667, 0.9375], - [0.6458333, 0.9375], - [0.6458333, 0.9375], - [0.6875, 0.9375], - [0.6875, 0.9375], - [0.7291667, 0.9375], - [0.7291667, 0.9375], - [0.7708333, 0.9375], - [0.7708333, 0.9375], - [0.8125, 0.9375], - [0.8125, 0.9375], - [0.8541667, 0.9375], - [0.8541667, 0.9375], - [0.8958333, 0.9375], - [0.8958333, 0.9375], - [0.9375, 0.9375], - [0.9375, 0.9375], - [0.9791667, 0.9375], - 
[0.9791667, 0.9375], - [0.02083333, 0.9791667], - [0.02083333, 0.9791667], - [0.0625, 0.9791667], - [0.0625, 0.9791667], - [0.10416666, 0.9791667], - [0.10416666, 0.9791667], - [0.14583333, 0.9791667], - [0.14583333, 0.9791667], - [0.1875, 0.9791667], - [0.1875, 0.9791667], - [0.22916667, 0.9791667], - [0.22916667, 0.9791667], - [0.27083334, 0.9791667], - [0.27083334, 0.9791667], - [0.3125, 0.9791667], - [0.3125, 0.9791667], - [0.35416666, 0.9791667], - [0.35416666, 0.9791667], - [0.39583334, 0.9791667], - [0.39583334, 0.9791667], - [0.4375, 0.9791667], - [0.4375, 0.9791667], - [0.47916666, 0.9791667], - [0.47916666, 0.9791667], - [0.5208333, 0.9791667], - [0.5208333, 0.9791667], - [0.5625, 0.9791667], - [0.5625, 0.9791667], - [0.6041667, 0.9791667], - [0.6041667, 0.9791667], - [0.6458333, 0.9791667], - [0.6458333, 0.9791667], - [0.6875, 0.9791667], - [0.6875, 0.9791667], - [0.7291667, 0.9791667], - [0.7291667, 0.9791667], - [0.7708333, 0.9791667], - [0.7708333, 0.9791667], - [0.8125, 0.9791667], - [0.8125, 0.9791667], - [0.8541667, 0.9791667], - [0.8541667, 0.9791667], - [0.8958333, 0.9791667], - [0.8958333, 0.9791667], - [0.9375, 0.9791667], - [0.9375, 0.9791667], - [0.9791667, 0.9791667], - [0.9791667, 0.9791667], - [0.04166667, 0.04166667], - [0.04166667, 0.04166667], - [0.04166667, 0.04166667], - [0.04166667, 0.04166667], - [0.04166667, 0.04166667], - [0.04166667, 0.04166667], - [0.125, 0.04166667], - [0.125, 0.04166667], - [0.125, 0.04166667], - [0.125, 0.04166667], - [0.125, 0.04166667], - [0.125, 0.04166667], - [0.20833333, 0.04166667], - [0.20833333, 0.04166667], - [0.20833333, 0.04166667], - [0.20833333, 0.04166667], - [0.20833333, 0.04166667], - [0.20833333, 0.04166667], - [0.29166666, 0.04166667], - [0.29166666, 0.04166667], - [0.29166666, 0.04166667], - [0.29166666, 0.04166667], - [0.29166666, 0.04166667], - [0.29166666, 0.04166667], - [0.375, 0.04166667], - [0.375, 0.04166667], - [0.375, 0.04166667], - [0.375, 0.04166667], - [0.375, 0.04166667], - 
[0.375, 0.04166667], - [0.45833334, 0.04166667], - [0.45833334, 0.04166667], - [0.45833334, 0.04166667], - [0.45833334, 0.04166667], - [0.45833334, 0.04166667], - [0.45833334, 0.04166667], - [0.5416667, 0.04166667], - [0.5416667, 0.04166667], - [0.5416667, 0.04166667], - [0.5416667, 0.04166667], - [0.5416667, 0.04166667], - [0.5416667, 0.04166667], - [0.625, 0.04166667], - [0.625, 0.04166667], - [0.625, 0.04166667], - [0.625, 0.04166667], - [0.625, 0.04166667], - [0.625, 0.04166667], - [0.7083333, 0.04166667], - [0.7083333, 0.04166667], - [0.7083333, 0.04166667], - [0.7083333, 0.04166667], - [0.7083333, 0.04166667], - [0.7083333, 0.04166667], - [0.7916667, 0.04166667], - [0.7916667, 0.04166667], - [0.7916667, 0.04166667], - [0.7916667, 0.04166667], - [0.7916667, 0.04166667], - [0.7916667, 0.04166667], - [0.875, 0.04166667], - [0.875, 0.04166667], - [0.875, 0.04166667], - [0.875, 0.04166667], - [0.875, 0.04166667], - [0.875, 0.04166667], - [0.9583333, 0.04166667], - [0.9583333, 0.04166667], - [0.9583333, 0.04166667], - [0.9583333, 0.04166667], - [0.9583333, 0.04166667], - [0.9583333, 0.04166667], - [0.04166667, 0.125], - [0.04166667, 0.125], - [0.04166667, 0.125], - [0.04166667, 0.125], - [0.04166667, 0.125], - [0.04166667, 0.125], - [0.125, 0.125], - [0.125, 0.125], - [0.125, 0.125], - [0.125, 0.125], - [0.125, 0.125], - [0.125, 0.125], - [0.20833333, 0.125], - [0.20833333, 0.125], - [0.20833333, 0.125], - [0.20833333, 0.125], - [0.20833333, 0.125], - [0.20833333, 0.125], - [0.29166666, 0.125], - [0.29166666, 0.125], - [0.29166666, 0.125], - [0.29166666, 0.125], - [0.29166666, 0.125], - [0.29166666, 0.125], - [0.375, 0.125], - [0.375, 0.125], - [0.375, 0.125], - [0.375, 0.125], - [0.375, 0.125], - [0.375, 0.125], - [0.45833334, 0.125], - [0.45833334, 0.125], - [0.45833334, 0.125], - [0.45833334, 0.125], - [0.45833334, 0.125], - [0.45833334, 0.125], - [0.5416667, 0.125], - [0.5416667, 0.125], - [0.5416667, 0.125], - [0.5416667, 0.125], - [0.5416667, 0.125], - 
[0.5416667, 0.125], - [0.625, 0.125], - [0.625, 0.125], - [0.625, 0.125], - [0.625, 0.125], - [0.625, 0.125], - [0.625, 0.125], - [0.7083333, 0.125], - [0.7083333, 0.125], - [0.7083333, 0.125], - [0.7083333, 0.125], - [0.7083333, 0.125], - [0.7083333, 0.125], - [0.7916667, 0.125], - [0.7916667, 0.125], - [0.7916667, 0.125], - [0.7916667, 0.125], - [0.7916667, 0.125], - [0.7916667, 0.125], - [0.875, 0.125], - [0.875, 0.125], - [0.875, 0.125], - [0.875, 0.125], - [0.875, 0.125], - [0.875, 0.125], - [0.9583333, 0.125], - [0.9583333, 0.125], - [0.9583333, 0.125], - [0.9583333, 0.125], - [0.9583333, 0.125], - [0.9583333, 0.125], - [0.04166667, 0.20833333], - [0.04166667, 0.20833333], - [0.04166667, 0.20833333], - [0.04166667, 0.20833333], - [0.04166667, 0.20833333], - [0.04166667, 0.20833333], - [0.125, 0.20833333], - [0.125, 0.20833333], - [0.125, 0.20833333], - [0.125, 0.20833333], - [0.125, 0.20833333], - [0.125, 0.20833333], - [0.20833333, 0.20833333], - [0.20833333, 0.20833333], - [0.20833333, 0.20833333], - [0.20833333, 0.20833333], - [0.20833333, 0.20833333], - [0.20833333, 0.20833333], - [0.29166666, 0.20833333], - [0.29166666, 0.20833333], - [0.29166666, 0.20833333], - [0.29166666, 0.20833333], - [0.29166666, 0.20833333], - [0.29166666, 0.20833333], - [0.375, 0.20833333], - [0.375, 0.20833333], - [0.375, 0.20833333], - [0.375, 0.20833333], - [0.375, 0.20833333], - [0.375, 0.20833333], - [0.45833334, 0.20833333], - [0.45833334, 0.20833333], - [0.45833334, 0.20833333], - [0.45833334, 0.20833333], - [0.45833334, 0.20833333], - [0.45833334, 0.20833333], - [0.5416667, 0.20833333], - [0.5416667, 0.20833333], - [0.5416667, 0.20833333], - [0.5416667, 0.20833333], - [0.5416667, 0.20833333], - [0.5416667, 0.20833333], - [0.625, 0.20833333], - [0.625, 0.20833333], - [0.625, 0.20833333], - [0.625, 0.20833333], - [0.625, 0.20833333], - [0.625, 0.20833333], - [0.7083333, 0.20833333], - [0.7083333, 0.20833333], - [0.7083333, 0.20833333], - [0.7083333, 0.20833333], - 
[0.7083333, 0.20833333], - [0.7083333, 0.20833333], - [0.7916667, 0.20833333], - [0.7916667, 0.20833333], - [0.7916667, 0.20833333], - [0.7916667, 0.20833333], - [0.7916667, 0.20833333], - [0.7916667, 0.20833333], - [0.875, 0.20833333], - [0.875, 0.20833333], - [0.875, 0.20833333], - [0.875, 0.20833333], - [0.875, 0.20833333], - [0.875, 0.20833333], - [0.9583333, 0.20833333], - [0.9583333, 0.20833333], - [0.9583333, 0.20833333], - [0.9583333, 0.20833333], - [0.9583333, 0.20833333], - [0.9583333, 0.20833333], - [0.04166667, 0.29166666], - [0.04166667, 0.29166666], - [0.04166667, 0.29166666], - [0.04166667, 0.29166666], - [0.04166667, 0.29166666], - [0.04166667, 0.29166666], - [0.125, 0.29166666], - [0.125, 0.29166666], - [0.125, 0.29166666], - [0.125, 0.29166666], - [0.125, 0.29166666], - [0.125, 0.29166666], - [0.20833333, 0.29166666], - [0.20833333, 0.29166666], - [0.20833333, 0.29166666], - [0.20833333, 0.29166666], - [0.20833333, 0.29166666], - [0.20833333, 0.29166666], - [0.29166666, 0.29166666], - [0.29166666, 0.29166666], - [0.29166666, 0.29166666], - [0.29166666, 0.29166666], - [0.29166666, 0.29166666], - [0.29166666, 0.29166666], - [0.375, 0.29166666], - [0.375, 0.29166666], - [0.375, 0.29166666], - [0.375, 0.29166666], - [0.375, 0.29166666], - [0.375, 0.29166666], - [0.45833334, 0.29166666], - [0.45833334, 0.29166666], - [0.45833334, 0.29166666], - [0.45833334, 0.29166666], - [0.45833334, 0.29166666], - [0.45833334, 0.29166666], - [0.5416667, 0.29166666], - [0.5416667, 0.29166666], - [0.5416667, 0.29166666], - [0.5416667, 0.29166666], - [0.5416667, 0.29166666], - [0.5416667, 0.29166666], - [0.625, 0.29166666], - [0.625, 0.29166666], - [0.625, 0.29166666], - [0.625, 0.29166666], - [0.625, 0.29166666], - [0.625, 0.29166666], - [0.7083333, 0.29166666], - [0.7083333, 0.29166666], - [0.7083333, 0.29166666], - [0.7083333, 0.29166666], - [0.7083333, 0.29166666], - [0.7083333, 0.29166666], - [0.7916667, 0.29166666], - [0.7916667, 0.29166666], - [0.7916667, 
0.29166666], - [0.7916667, 0.29166666], - [0.7916667, 0.29166666], - [0.7916667, 0.29166666], - [0.875, 0.29166666], - [0.875, 0.29166666], - [0.875, 0.29166666], - [0.875, 0.29166666], - [0.875, 0.29166666], - [0.875, 0.29166666], - [0.9583333, 0.29166666], - [0.9583333, 0.29166666], - [0.9583333, 0.29166666], - [0.9583333, 0.29166666], - [0.9583333, 0.29166666], - [0.9583333, 0.29166666], - [0.04166667, 0.375], - [0.04166667, 0.375], - [0.04166667, 0.375], - [0.04166667, 0.375], - [0.04166667, 0.375], - [0.04166667, 0.375], - [0.125, 0.375], - [0.125, 0.375], - [0.125, 0.375], - [0.125, 0.375], - [0.125, 0.375], - [0.125, 0.375], - [0.20833333, 0.375], - [0.20833333, 0.375], - [0.20833333, 0.375], - [0.20833333, 0.375], - [0.20833333, 0.375], - [0.20833333, 0.375], - [0.29166666, 0.375], - [0.29166666, 0.375], - [0.29166666, 0.375], - [0.29166666, 0.375], - [0.29166666, 0.375], - [0.29166666, 0.375], - [0.375, 0.375], - [0.375, 0.375], - [0.375, 0.375], - [0.375, 0.375], - [0.375, 0.375], - [0.375, 0.375], - [0.45833334, 0.375], - [0.45833334, 0.375], - [0.45833334, 0.375], - [0.45833334, 0.375], - [0.45833334, 0.375], - [0.45833334, 0.375], - [0.5416667, 0.375], - [0.5416667, 0.375], - [0.5416667, 0.375], - [0.5416667, 0.375], - [0.5416667, 0.375], - [0.5416667, 0.375], - [0.625, 0.375], - [0.625, 0.375], - [0.625, 0.375], - [0.625, 0.375], - [0.625, 0.375], - [0.625, 0.375], - [0.7083333, 0.375], - [0.7083333, 0.375], - [0.7083333, 0.375], - [0.7083333, 0.375], - [0.7083333, 0.375], - [0.7083333, 0.375], - [0.7916667, 0.375], - [0.7916667, 0.375], - [0.7916667, 0.375], - [0.7916667, 0.375], - [0.7916667, 0.375], - [0.7916667, 0.375], - [0.875, 0.375], - [0.875, 0.375], - [0.875, 0.375], - [0.875, 0.375], - [0.875, 0.375], - [0.875, 0.375], - [0.9583333, 0.375], - [0.9583333, 0.375], - [0.9583333, 0.375], - [0.9583333, 0.375], - [0.9583333, 0.375], - [0.9583333, 0.375], - [0.04166667, 0.45833334], - [0.04166667, 0.45833334], - [0.04166667, 0.45833334], - 
[0.04166667, 0.45833334], - [0.04166667, 0.45833334], - [0.04166667, 0.45833334], - [0.125, 0.45833334], - [0.125, 0.45833334], - [0.125, 0.45833334], - [0.125, 0.45833334], - [0.125, 0.45833334], - [0.125, 0.45833334], - [0.20833333, 0.45833334], - [0.20833333, 0.45833334], - [0.20833333, 0.45833334], - [0.20833333, 0.45833334], - [0.20833333, 0.45833334], - [0.20833333, 0.45833334], - [0.29166666, 0.45833334], - [0.29166666, 0.45833334], - [0.29166666, 0.45833334], - [0.29166666, 0.45833334], - [0.29166666, 0.45833334], - [0.29166666, 0.45833334], - [0.375, 0.45833334], - [0.375, 0.45833334], - [0.375, 0.45833334], - [0.375, 0.45833334], - [0.375, 0.45833334], - [0.375, 0.45833334], - [0.45833334, 0.45833334], - [0.45833334, 0.45833334], - [0.45833334, 0.45833334], - [0.45833334, 0.45833334], - [0.45833334, 0.45833334], - [0.45833334, 0.45833334], - [0.5416667, 0.45833334], - [0.5416667, 0.45833334], - [0.5416667, 0.45833334], - [0.5416667, 0.45833334], - [0.5416667, 0.45833334], - [0.5416667, 0.45833334], - [0.625, 0.45833334], - [0.625, 0.45833334], - [0.625, 0.45833334], - [0.625, 0.45833334], - [0.625, 0.45833334], - [0.625, 0.45833334], - [0.7083333, 0.45833334], - [0.7083333, 0.45833334], - [0.7083333, 0.45833334], - [0.7083333, 0.45833334], - [0.7083333, 0.45833334], - [0.7083333, 0.45833334], - [0.7916667, 0.45833334], - [0.7916667, 0.45833334], - [0.7916667, 0.45833334], - [0.7916667, 0.45833334], - [0.7916667, 0.45833334], - [0.7916667, 0.45833334], - [0.875, 0.45833334], - [0.875, 0.45833334], - [0.875, 0.45833334], - [0.875, 0.45833334], - [0.875, 0.45833334], - [0.875, 0.45833334], - [0.9583333, 0.45833334], - [0.9583333, 0.45833334], - [0.9583333, 0.45833334], - [0.9583333, 0.45833334], - [0.9583333, 0.45833334], - [0.9583333, 0.45833334], - [0.04166667, 0.5416667], - [0.04166667, 0.5416667], - [0.04166667, 0.5416667], - [0.04166667, 0.5416667], - [0.04166667, 0.5416667], - [0.04166667, 0.5416667], - [0.125, 0.5416667], - [0.125, 0.5416667], - 
[0.125, 0.5416667], - [0.125, 0.5416667], - [0.125, 0.5416667], - [0.125, 0.5416667], - [0.20833333, 0.5416667], - [0.20833333, 0.5416667], - [0.20833333, 0.5416667], - [0.20833333, 0.5416667], - [0.20833333, 0.5416667], - [0.20833333, 0.5416667], - [0.29166666, 0.5416667], - [0.29166666, 0.5416667], - [0.29166666, 0.5416667], - [0.29166666, 0.5416667], - [0.29166666, 0.5416667], - [0.29166666, 0.5416667], - [0.375, 0.5416667], - [0.375, 0.5416667], - [0.375, 0.5416667], - [0.375, 0.5416667], - [0.375, 0.5416667], - [0.375, 0.5416667], - [0.45833334, 0.5416667], - [0.45833334, 0.5416667], - [0.45833334, 0.5416667], - [0.45833334, 0.5416667], - [0.45833334, 0.5416667], - [0.45833334, 0.5416667], - [0.5416667, 0.5416667], - [0.5416667, 0.5416667], - [0.5416667, 0.5416667], - [0.5416667, 0.5416667], - [0.5416667, 0.5416667], - [0.5416667, 0.5416667], - [0.625, 0.5416667], - [0.625, 0.5416667], - [0.625, 0.5416667], - [0.625, 0.5416667], - [0.625, 0.5416667], - [0.625, 0.5416667], - [0.7083333, 0.5416667], - [0.7083333, 0.5416667], - [0.7083333, 0.5416667], - [0.7083333, 0.5416667], - [0.7083333, 0.5416667], - [0.7083333, 0.5416667], - [0.7916667, 0.5416667], - [0.7916667, 0.5416667], - [0.7916667, 0.5416667], - [0.7916667, 0.5416667], - [0.7916667, 0.5416667], - [0.7916667, 0.5416667], - [0.875, 0.5416667], - [0.875, 0.5416667], - [0.875, 0.5416667], - [0.875, 0.5416667], - [0.875, 0.5416667], - [0.875, 0.5416667], - [0.9583333, 0.5416667], - [0.9583333, 0.5416667], - [0.9583333, 0.5416667], - [0.9583333, 0.5416667], - [0.9583333, 0.5416667], - [0.9583333, 0.5416667], - [0.04166667, 0.625], - [0.04166667, 0.625], - [0.04166667, 0.625], - [0.04166667, 0.625], - [0.04166667, 0.625], - [0.04166667, 0.625], - [0.125, 0.625], - [0.125, 0.625], - [0.125, 0.625], - [0.125, 0.625], - [0.125, 0.625], - [0.125, 0.625], - [0.20833333, 0.625], - [0.20833333, 0.625], - [0.20833333, 0.625], - [0.20833333, 0.625], - [0.20833333, 0.625], - [0.20833333, 0.625], - [0.29166666, 0.625], 
- [0.29166666, 0.625], - [0.29166666, 0.625], - [0.29166666, 0.625], - [0.29166666, 0.625], - [0.29166666, 0.625], - [0.375, 0.625], - [0.375, 0.625], - [0.375, 0.625], - [0.375, 0.625], - [0.375, 0.625], - [0.375, 0.625], - [0.45833334, 0.625], - [0.45833334, 0.625], - [0.45833334, 0.625], - [0.45833334, 0.625], - [0.45833334, 0.625], - [0.45833334, 0.625], - [0.5416667, 0.625], - [0.5416667, 0.625], - [0.5416667, 0.625], - [0.5416667, 0.625], - [0.5416667, 0.625], - [0.5416667, 0.625], - [0.625, 0.625], - [0.625, 0.625], - [0.625, 0.625], - [0.625, 0.625], - [0.625, 0.625], - [0.625, 0.625], - [0.7083333, 0.625], - [0.7083333, 0.625], - [0.7083333, 0.625], - [0.7083333, 0.625], - [0.7083333, 0.625], - [0.7083333, 0.625], - [0.7916667, 0.625], - [0.7916667, 0.625], - [0.7916667, 0.625], - [0.7916667, 0.625], - [0.7916667, 0.625], - [0.7916667, 0.625], - [0.875, 0.625], - [0.875, 0.625], - [0.875, 0.625], - [0.875, 0.625], - [0.875, 0.625], - [0.875, 0.625], - [0.9583333, 0.625], - [0.9583333, 0.625], - [0.9583333, 0.625], - [0.9583333, 0.625], - [0.9583333, 0.625], - [0.9583333, 0.625], - [0.04166667, 0.7083333], - [0.04166667, 0.7083333], - [0.04166667, 0.7083333], - [0.04166667, 0.7083333], - [0.04166667, 0.7083333], - [0.04166667, 0.7083333], - [0.125, 0.7083333], - [0.125, 0.7083333], - [0.125, 0.7083333], - [0.125, 0.7083333], - [0.125, 0.7083333], - [0.125, 0.7083333], - [0.20833333, 0.7083333], - [0.20833333, 0.7083333], - [0.20833333, 0.7083333], - [0.20833333, 0.7083333], - [0.20833333, 0.7083333], - [0.20833333, 0.7083333], - [0.29166666, 0.7083333], - [0.29166666, 0.7083333], - [0.29166666, 0.7083333], - [0.29166666, 0.7083333], - [0.29166666, 0.7083333], - [0.29166666, 0.7083333], - [0.375, 0.7083333], - [0.375, 0.7083333], - [0.375, 0.7083333], - [0.375, 0.7083333], - [0.375, 0.7083333], - [0.375, 0.7083333], - [0.45833334, 0.7083333], - [0.45833334, 0.7083333], - [0.45833334, 0.7083333], - [0.45833334, 0.7083333], - [0.45833334, 0.7083333], - 
[0.45833334, 0.7083333], - [0.5416667, 0.7083333], - [0.5416667, 0.7083333], - [0.5416667, 0.7083333], - [0.5416667, 0.7083333], - [0.5416667, 0.7083333], - [0.5416667, 0.7083333], - [0.625, 0.7083333], - [0.625, 0.7083333], - [0.625, 0.7083333], - [0.625, 0.7083333], - [0.625, 0.7083333], - [0.625, 0.7083333], - [0.7083333, 0.7083333], - [0.7083333, 0.7083333], - [0.7083333, 0.7083333], - [0.7083333, 0.7083333], - [0.7083333, 0.7083333], - [0.7083333, 0.7083333], - [0.7916667, 0.7083333], - [0.7916667, 0.7083333], - [0.7916667, 0.7083333], - [0.7916667, 0.7083333], - [0.7916667, 0.7083333], - [0.7916667, 0.7083333], - [0.875, 0.7083333], - [0.875, 0.7083333], - [0.875, 0.7083333], - [0.875, 0.7083333], - [0.875, 0.7083333], - [0.875, 0.7083333], - [0.9583333, 0.7083333], - [0.9583333, 0.7083333], - [0.9583333, 0.7083333], - [0.9583333, 0.7083333], - [0.9583333, 0.7083333], - [0.9583333, 0.7083333], - [0.04166667, 0.7916667], - [0.04166667, 0.7916667], - [0.04166667, 0.7916667], - [0.04166667, 0.7916667], - [0.04166667, 0.7916667], - [0.04166667, 0.7916667], - [0.125, 0.7916667], - [0.125, 0.7916667], - [0.125, 0.7916667], - [0.125, 0.7916667], - [0.125, 0.7916667], - [0.125, 0.7916667], - [0.20833333, 0.7916667], - [0.20833333, 0.7916667], - [0.20833333, 0.7916667], - [0.20833333, 0.7916667], - [0.20833333, 0.7916667], - [0.20833333, 0.7916667], - [0.29166666, 0.7916667], - [0.29166666, 0.7916667], - [0.29166666, 0.7916667], - [0.29166666, 0.7916667], - [0.29166666, 0.7916667], - [0.29166666, 0.7916667], - [0.375, 0.7916667], - [0.375, 0.7916667], - [0.375, 0.7916667], - [0.375, 0.7916667], - [0.375, 0.7916667], - [0.375, 0.7916667], - [0.45833334, 0.7916667], - [0.45833334, 0.7916667], - [0.45833334, 0.7916667], - [0.45833334, 0.7916667], - [0.45833334, 0.7916667], - [0.45833334, 0.7916667], - [0.5416667, 0.7916667], - [0.5416667, 0.7916667], - [0.5416667, 0.7916667], - [0.5416667, 0.7916667], - [0.5416667, 0.7916667], - [0.5416667, 0.7916667], - [0.625, 
0.7916667], - [0.625, 0.7916667], - [0.625, 0.7916667], - [0.625, 0.7916667], - [0.625, 0.7916667], - [0.625, 0.7916667], - [0.7083333, 0.7916667], - [0.7083333, 0.7916667], - [0.7083333, 0.7916667], - [0.7083333, 0.7916667], - [0.7083333, 0.7916667], - [0.7083333, 0.7916667], - [0.7916667, 0.7916667], - [0.7916667, 0.7916667], - [0.7916667, 0.7916667], - [0.7916667, 0.7916667], - [0.7916667, 0.7916667], - [0.7916667, 0.7916667], - [0.875, 0.7916667], - [0.875, 0.7916667], - [0.875, 0.7916667], - [0.875, 0.7916667], - [0.875, 0.7916667], - [0.875, 0.7916667], - [0.9583333, 0.7916667], - [0.9583333, 0.7916667], - [0.9583333, 0.7916667], - [0.9583333, 0.7916667], - [0.9583333, 0.7916667], - [0.9583333, 0.7916667], - [0.04166667, 0.875], - [0.04166667, 0.875], - [0.04166667, 0.875], - [0.04166667, 0.875], - [0.04166667, 0.875], - [0.04166667, 0.875], - [0.125, 0.875], - [0.125, 0.875], - [0.125, 0.875], - [0.125, 0.875], - [0.125, 0.875], - [0.125, 0.875], - [0.20833333, 0.875], - [0.20833333, 0.875], - [0.20833333, 0.875], - [0.20833333, 0.875], - [0.20833333, 0.875], - [0.20833333, 0.875], - [0.29166666, 0.875], - [0.29166666, 0.875], - [0.29166666, 0.875], - [0.29166666, 0.875], - [0.29166666, 0.875], - [0.29166666, 0.875], - [0.375, 0.875], - [0.375, 0.875], - [0.375, 0.875], - [0.375, 0.875], - [0.375, 0.875], - [0.375, 0.875], - [0.45833334, 0.875], - [0.45833334, 0.875], - [0.45833334, 0.875], - [0.45833334, 0.875], - [0.45833334, 0.875], - [0.45833334, 0.875], - [0.5416667, 0.875], - [0.5416667, 0.875], - [0.5416667, 0.875], - [0.5416667, 0.875], - [0.5416667, 0.875], - [0.5416667, 0.875], - [0.625, 0.875], - [0.625, 0.875], - [0.625, 0.875], - [0.625, 0.875], - [0.625, 0.875], - [0.625, 0.875], - [0.7083333, 0.875], - [0.7083333, 0.875], - [0.7083333, 0.875], - [0.7083333, 0.875], - [0.7083333, 0.875], - [0.7083333, 0.875], - [0.7916667, 0.875], - [0.7916667, 0.875], - [0.7916667, 0.875], - [0.7916667, 0.875], - [0.7916667, 0.875], - [0.7916667, 0.875], - 
[0.875, 0.875], - [0.875, 0.875], - [0.875, 0.875], - [0.875, 0.875], - [0.875, 0.875], - [0.875, 0.875], - [0.9583333, 0.875], - [0.9583333, 0.875], - [0.9583333, 0.875], - [0.9583333, 0.875], - [0.9583333, 0.875], - [0.9583333, 0.875], - [0.04166667, 0.9583333], - [0.04166667, 0.9583333], - [0.04166667, 0.9583333], - [0.04166667, 0.9583333], - [0.04166667, 0.9583333], - [0.04166667, 0.9583333], - [0.125, 0.9583333], - [0.125, 0.9583333], - [0.125, 0.9583333], - [0.125, 0.9583333], - [0.125, 0.9583333], - [0.125, 0.9583333], - [0.20833333, 0.9583333], - [0.20833333, 0.9583333], - [0.20833333, 0.9583333], - [0.20833333, 0.9583333], - [0.20833333, 0.9583333], - [0.20833333, 0.9583333], - [0.29166666, 0.9583333], - [0.29166666, 0.9583333], - [0.29166666, 0.9583333], - [0.29166666, 0.9583333], - [0.29166666, 0.9583333], - [0.29166666, 0.9583333], - [0.375, 0.9583333], - [0.375, 0.9583333], - [0.375, 0.9583333], - [0.375, 0.9583333], - [0.375, 0.9583333], - [0.375, 0.9583333], - [0.45833334, 0.9583333], - [0.45833334, 0.9583333], - [0.45833334, 0.9583333], - [0.45833334, 0.9583333], - [0.45833334, 0.9583333], - [0.45833334, 0.9583333], - [0.5416667, 0.9583333], - [0.5416667, 0.9583333], - [0.5416667, 0.9583333], - [0.5416667, 0.9583333], - [0.5416667, 0.9583333], - [0.5416667, 0.9583333], - [0.625, 0.9583333], - [0.625, 0.9583333], - [0.625, 0.9583333], - [0.625, 0.9583333], - [0.625, 0.9583333], - [0.625, 0.9583333], - [0.7083333, 0.9583333], - [0.7083333, 0.9583333], - [0.7083333, 0.9583333], - [0.7083333, 0.9583333], - [0.7083333, 0.9583333], - [0.7083333, 0.9583333], - [0.7916667, 0.9583333], - [0.7916667, 0.9583333], - [0.7916667, 0.9583333], - [0.7916667, 0.9583333], - [0.7916667, 0.9583333], - [0.7916667, 0.9583333], - [0.875, 0.9583333], - [0.875, 0.9583333], - [0.875, 0.9583333], - [0.875, 0.9583333], - [0.875, 0.9583333], - [0.875, 0.9583333], - [0.9583333, 0.9583333], - [0.9583333, 0.9583333], - [0.9583333, 0.9583333], - [0.9583333, 0.9583333], - [0.9583333, 
0.9583333], - [0.9583333, 0.9583333]], dtype=np.float32) diff --git a/models/palm_detection_mediapipe/palm_detection_mediapipe_2023feb.onnx b/models/palm_detection_mediapipe/palm_detection_mediapipe_2023feb.onnx deleted file mode 100644 index a899f870..00000000 --- a/models/palm_detection_mediapipe/palm_detection_mediapipe_2023feb.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:78ff51c38496b7fc8b8ebdb6cc8c1abb02fa6c38427c6848254cdaba57fcce7c -size 3905734 diff --git a/models/palm_detection_mediapipe/palm_detection_mediapipe_2023feb_int8.onnx b/models/palm_detection_mediapipe/palm_detection_mediapipe_2023feb_int8.onnx deleted file mode 100644 index 8e4c39d8..00000000 --- a/models/palm_detection_mediapipe/palm_detection_mediapipe_2023feb_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9f014de96ef5b6816b3eb9a5fed21a7371ef0f104ea440aa19ce9129fe2af5f6 -size 1157004 diff --git a/models/palm_detection_mediapipe/palm_detection_mediapipe_2023feb_int8bq.onnx b/models/palm_detection_mediapipe/palm_detection_mediapipe_2023feb_int8bq.onnx deleted file mode 100644 index a19254c6..00000000 --- a/models/palm_detection_mediapipe/palm_detection_mediapipe_2023feb_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d0096a81cf82349b00d0c4cb965973662a86967a8d44ccd8134da2a2f408ee5c -size 1169351 diff --git a/models/person_detection_mediapipe/CMakeLists.txt b/models/person_detection_mediapipe/CMakeLists.txt deleted file mode 100644 index e3f4b051..00000000 --- a/models/person_detection_mediapipe/CMakeLists.txt +++ /dev/null @@ -1,29 +0,0 @@ -cmake_minimum_required(VERSION 3.24.0) -set(project_name "opencv_zoo_person_detection_mediapipe") - -PROJECT (${project_name}) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS 
${OPENCV_INSTALLATION_PATH}) -# Find OpenCV, you may need to set OpenCV_DIR variable -# to the absolute path to the directory containing OpenCVConfig.cmake file -# via the command line or GUI - -file(GLOB SourceFile - "demo.cpp") -# If the package has been found, several variables will -# be set, you can find the full list with descriptions -# in the OpenCVConfig.cmake file. -# Print some message showing some of them -message(STATUS "OpenCV library status:") -message(STATUS " config: ${OpenCV_DIR}") -message(STATUS " version: ${OpenCV_VERSION}") -message(STATUS " libraries: ${OpenCV_LIBS}") -message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}") - -# Declare the executable target built from your sources -add_executable(${project_name} ${SourceFile}) - -# Link your application with OpenCV libraries -target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS}) diff --git a/models/person_detection_mediapipe/LICENSE b/models/person_detection_mediapipe/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/models/person_detection_mediapipe/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/models/person_detection_mediapipe/README.md b/models/person_detection_mediapipe/README.md deleted file mode 100644 index bba8bd44..00000000 --- a/models/person_detection_mediapipe/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# Person detector from MediaPipe Pose - -This model detects upper body and full body keypoints of a person, and is downloaded from https://github.com/PINTO0309/PINTO_model_zoo/blob/main/053_BlazePose/20_densify_pose_detection/download.sh or converted from TFLite to ONNX using following tools: - -- TFLite model to ONNX with MediaPipe custom `densify` op: https://github.com/PINTO0309/tflite2tensorflow -- simplified by [onnx-simplifier](https://github.com/daquexian/onnx-simplifier) - -SSD Anchors are generated from [GenMediaPipePalmDectionSSDAnchors](https://github.com/VimalMollyn/GenMediaPipePalmDectionSSDAnchors) - -**Note**: -- `person_detection_mediapipe_2023mar_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. 
- -## Demo - -### Python - -Run the following commands to try the demo: - -```bash -# detect on camera input -python demo.py -# detect on an image -python demo.py -i /path/to/image -v - -# get help regarding various parameters -python demo.py --help -``` - -### C++ - -Install latest OpenCV and CMake >= 3.24.0 to get started with: - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# detect on camera input -./build/opencv_zoo_person_detection_mediapipe -# detect on an image -./build/opencv_zoo_person_detection_mediapipe -m=/path/to/model -i=/path/to/image -v -# get help messages -./build/opencv_zoo_person_detection_mediapipe -h -``` - -### Example outputs - -![webcam demo](./example_outputs/mppersondet_demo.webp) - -## License - -All files in this directory are licensed under [Apache 2.0 License](LICENSE). - -## Reference -- MediaPipe Pose: https://developers.google.com/mediapipe/solutions/vision/pose_landmarker -- MediaPipe pose model and model card: https://github.com/google/mediapipe/blob/master/docs/solutions/models.md#pose -- BlazePose TFJS: https://github.com/tensorflow/tfjs-models/tree/master/pose-detection/src/blazepose_tfjs diff --git a/models/person_detection_mediapipe/demo.cpp b/models/person_detection_mediapipe/demo.cpp deleted file mode 100644 index 59149fd3..00000000 --- a/models/person_detection_mediapipe/demo.cpp +++ /dev/null @@ -1,2522 +0,0 @@ -#include -#include -#include - -#include - -using namespace std; -using namespace cv; -using namespace dnn; - -vector< pair > backendTargetPairs = { - std::make_pair(dnn::DNN_BACKEND_OPENCV, dnn::DNN_TARGET_CPU), - std::make_pair(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA), - std::make_pair(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA_FP16), - std::make_pair(dnn::DNN_BACKEND_TIMVX, dnn::DNN_TARGET_NPU), - std::make_pair(dnn::DNN_BACKEND_CANN, dnn::DNN_TARGET_NPU) }; - - -Mat 
getMediapipeAnchor(); - -class MPPersonDet { -private: - Net net; - string modelPath; - Size inputSize; - float scoreThreshold; - float nmsThreshold; - dnn::Backend backendId; - dnn::Target targetId; - int topK; - Mat anchors; - -public: - MPPersonDet(string modPath, float nmsThresh = 0.3, float scoreThresh = 0.5, int tok = 5000, dnn::Backend bId = DNN_BACKEND_DEFAULT, dnn::Target tId = DNN_TARGET_CPU) : - modelPath(modPath), nmsThreshold(nmsThresh), - scoreThreshold(scoreThresh), topK(tok), - backendId(bId), targetId(tId) - { - this->inputSize = Size(224, 224); - this->net = readNet(this->modelPath); - this->net.setPreferableBackend(this->backendId); - this->net.setPreferableTarget(this->targetId); - this->anchors = getMediapipeAnchor(); - } - - pair preprocess(Mat img) - { - Mat blob; - Image2BlobParams paramMediapipe; - paramMediapipe.datalayout = DNN_LAYOUT_NCHW; - paramMediapipe.ddepth = CV_32F; - paramMediapipe.mean = Scalar::all(127.5); - paramMediapipe.scalefactor = Scalar::all(1 / 127.5); - paramMediapipe.size = this->inputSize; - paramMediapipe.swapRB = true; - paramMediapipe.paddingmode = DNN_PMODE_LETTERBOX; - - double ratio = min(this->inputSize.height / double(img.rows), this->inputSize.width / double(img.cols)); - Size padBias(0, 0); - if (img.rows != this->inputSize.height || img.cols != this->inputSize.width) - { - // keep aspect ratio when resize - Size ratioSize(int(img.cols * ratio), int(img.rows * ratio)); - int padH = this->inputSize.height - ratioSize.height; - int padW = this->inputSize.width - ratioSize.width; - padBias.width = padW / 2; - padBias.height = padH / 2; - } - blob = blobFromImageWithParams(img, paramMediapipe); - padBias = Size(int(padBias.width / ratio), int(padBias.height / ratio)); - return pair(blob, padBias); - } - - Mat infer(Mat srcimg) - { - pair w = this->preprocess(srcimg); - Mat inputBlob = get<0>(w); - Size padBias = get<1>(w); - this->net.setInput(inputBlob); - vector outs; - this->net.forward(outs, 
this->net.getUnconnectedOutLayersNames()); - Mat predictions = this->postprocess(outs, Size(srcimg.cols, srcimg.rows), padBias); - return predictions; - } - - Mat postprocess(vector outputs, Size orgSize, Size padBias) - { - Mat score = outputs[1].reshape(0, outputs[1].size[0]); - Mat boxLandDelta = outputs[0].reshape(outputs[0].size[0], outputs[0].size[1]); - Mat boxDelta = boxLandDelta.colRange(0, 4); - Mat landmarkDelta = boxLandDelta.colRange(4, boxLandDelta.cols); - double scale = max(orgSize.height, orgSize.width); - Mat mask = score < -100; - score.setTo(-100, mask); - mask = score > 100; - score.setTo(100, mask); - Mat deno; - exp(-score, deno); - divide(1.0, 1 + deno, score); - boxDelta.colRange(0, 1) = boxDelta.colRange(0, 1) / this->inputSize.width; - boxDelta.colRange(1, 2) = boxDelta.colRange(1, 2) / this->inputSize.height; - boxDelta.colRange(2, 3) = boxDelta.colRange(2, 3) / this->inputSize.width; - boxDelta.colRange(3, 4) = boxDelta.colRange(3, 4) / this->inputSize.height; - Mat xy1 = (boxDelta.colRange(0, 2) - boxDelta.colRange(2, 4) / 2 + this->anchors) * scale; - Mat xy2 = (boxDelta.colRange(0, 2) + boxDelta.colRange(2, 4) / 2 + this->anchors) * scale; - Mat boxes; - hconcat(xy1, xy2, boxes); - vector< Rect2d > rotBoxes(boxes.rows); - boxes.colRange(0, 1) = boxes.colRange(0, 1) - padBias.width; - boxes.colRange(1, 2) = boxes.colRange(1, 2) - padBias.height; - boxes.colRange(2, 3) = boxes.colRange(2, 3) - padBias.width; - boxes.colRange(3, 4) = boxes.colRange(3, 4) - padBias.height; - for (int i = 0; i < boxes.rows; i++) - { - rotBoxes[i] = Rect2d(Point2d(boxes.at(i, 0), boxes.at(i, 1)), Point2d(boxes.at(i, 2), boxes.at(i, 3))); - } - vector< int > keep; - NMSBoxes(rotBoxes, score, this->scoreThreshold, this->nmsThreshold, keep, this->topK); - if (keep.size() == 0) - return Mat(); - int nbCols = landmarkDelta.cols + boxes.cols + 1; - Mat candidates(int(keep.size()), nbCols, CV_32FC1); - int row = 0; - for (auto idx : keep) - { - candidates.at(row, 
nbCols - 1) = score.at(idx); - boxes.row(idx).copyTo(candidates.row(row).colRange(0, 4)); - candidates.at(row, 4) = (landmarkDelta.at(idx, 0) / this->inputSize.width + this->anchors.at(idx, 0)) * scale - padBias.width; - candidates.at(row, 5) = (landmarkDelta.at(idx, 1) / this->inputSize.height + this->anchors.at(idx, 1)) * scale - padBias.height; - candidates.at(row, 6) = (landmarkDelta.at(idx, 2) / this->inputSize.width + this->anchors.at(idx, 0)) * scale - padBias.width; - candidates.at(row, 7) = (landmarkDelta.at(idx, 3) / this->inputSize.height + this->anchors.at(idx, 1)) * scale - padBias.height; - candidates.at(row, 8) = (landmarkDelta.at(idx, 4) / this->inputSize.width + this->anchors.at(idx, 0)) * scale - padBias.width; - candidates.at(row, 9) = (landmarkDelta.at(idx, 5) / this->inputSize.height + this->anchors.at(idx, 1)) * scale - padBias.height; - candidates.at(row, 10) = (landmarkDelta.at(idx, 6) / this->inputSize.width + this->anchors.at(idx, 0)) * scale - padBias.width; - candidates.at(row, 11) = (landmarkDelta.at(idx, 7) / this->inputSize.height + this->anchors.at(idx, 1)) * scale - padBias.height; - row++; - } - return candidates; - - } - - -}; -std::string keys = -"{ help h | | Print help message. }" -"{ model m | person_detection_mediapipe_2023mar.onnx | Usage: Path to the model, defaults to person_detection_mediapipe_2023mar.onnx }" -"{ input i | | Path to input image or video file. Skip this argument to capture frames from a camera.}" -"{ score_threshold | 0.5 | Usage: Set the minimum needed confidence for the model to identify a person, defaults to 0.5. Smaller values may result in faster detection, but will limit accuracy. Filter out persons of confidence < conf_threshold. }" -"{ nms_threshold | 0.3 | Usage: Suppress bounding boxes of iou >= nms_threshold. Default = 0.3. }" -"{ top_k | 1 | Usage: Keep top_k bounding boxes before NMS. }" -"{ save s | 0 | Usage: Specify to save file with results (i.e. bounding box, confidence level). 
Invalid in case of camera input. }" -"{ vis v | 1 | Usage: Specify to open a new window to show results. Invalid in case of camera input. }" -"{ backend bt | 0 | Choose one of computation backends: " -"0: (default) OpenCV implementation + CPU, " -"1: CUDA + GPU (CUDA), " -"2: CUDA + GPU (CUDA FP16), " -"3: TIM-VX + NPU, " -"4: CANN + NPU}"; - -Mat visualize(Mat img, Mat results, double fps = -1) -{ - Mat resImg = img.clone(); - if (fps > 0) - putText(resImg, format("FPS: %2f", fps), Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 255)); - - for (int row = 0; row < results.rows; row++) - { - float score = results.at(row, results.cols - 1); - Mat personLandmarks; - results.row(row).colRange(4, results.cols - 1).reshape(0, 4).convertTo(personLandmarks, CV_32S); - - Point hipPoint = Point(personLandmarks.row(0)); - Point fullBody = Point(personLandmarks.row(1)); - Point shoulderPoint = Point(personLandmarks.row(2)); - Point upperBody = Point(personLandmarks.row(3)); - - // draw circle for full body - int radius = int(norm(hipPoint - fullBody)); - circle(resImg, hipPoint, radius, Scalar(255, 0, 0), 2); - - // draw circle for upper body - radius = int(norm(shoulderPoint - upperBody)); - circle(resImg, shoulderPoint, radius, Scalar(0, 255, 255), 2); - - // draw points for each keypoint - for (int iRow=0; iRow < personLandmarks.rows; iRow++) - circle(resImg, Point(personLandmarks.row(iRow)), 2, Scalar(0, 0, 255), 2); - putText(resImg, format("Score: %4f", score), Point(0, resImg.rows - 48), FONT_HERSHEY_DUPLEX, 0.5, Scalar(0, 255, 0)); - } - // put score - putText(resImg, string("Yellow: upper body circle"), Point(0, resImg.rows - 36), FONT_HERSHEY_DUPLEX, 0.5, Scalar(0, 255, 255)); - putText(resImg, string("Blue: full body circle"), Point(0, resImg.rows - 24), FONT_HERSHEY_DUPLEX, 0.5, Scalar(255, 0, 0)); - putText(resImg, string("Red: keypoint"), Point(0, resImg.rows - 12), FONT_HERSHEY_DUPLEX, 0.5, Scalar(0, 0, 255)); - - return resImg; -} - -int main(int argc, 
char** argv) -{ - CommandLineParser parser(argc, argv, keys); - - parser.about("Person Detector from MediaPipe"); - if (parser.has("help")) - { - parser.printMessage(); - return 0; - } - - string model = parser.get("model"); - float scoreThreshold = parser.get("score_threshold"); - float nmsThreshold = parser.get("nms_threshold"); - int topK = parser.get("top_k"); - bool vis = parser.get("vis"); - bool save = parser.get("save"); - int backendTargetid = parser.get("backend"); - - if (model.empty()) - { - CV_Error(Error::StsError, "Model file " + model + " not found"); - } - VideoCapture cap; - if (parser.has("input")) - cap.open(samples::findFile(parser.get("input"))); - else - cap.open(0); - Mat frame; - - MPPersonDet modelNet(model, nmsThreshold, scoreThreshold, topK, - backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second); - //! [Open a video file or an image file or a camera stream] - if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot open video or file"); - - static const std::string kWinName = "MPPersonDet Demo"; - while (waitKey(1) < 0) - { - cap >> frame; - if (frame.empty()) - { - cout << "Frame is empty" << endl; - waitKey(); - break; - } - TickMeter tm; - tm.start(); - Mat results = modelNet.infer(frame); - tm.stop(); - cout << "Inference time: " << tm.getTimeMilli() << " ms\n"; - Mat img = visualize(frame, results, tm.getFPS()); - if (save && parser.has("input")) - { - cout << "Results saved to result.jpg\n"; - imwrite("result.jpg", img); - } - - if (vis || !parser.has("input")) - { - imshow(kWinName, img); - } - } - return 0; -} - - -Mat getMediapipeAnchor() -{ - Mat anchor= (Mat_(2254,2) << 0.017857142857142856, 0.017857142857142856, - 0.017857142857142856, 0.017857142857142856, - 0.05357142857142857, 0.017857142857142856, - 0.05357142857142857, 0.017857142857142856, - 0.08928571428571429, 0.017857142857142856, - 0.08928571428571429, 0.017857142857142856, - 0.125, 0.017857142857142856, - 0.125, 
0.017857142857142856, - 0.16071428571428573, 0.017857142857142856, - 0.16071428571428573, 0.017857142857142856, - 0.19642857142857142, 0.017857142857142856, - 0.19642857142857142, 0.017857142857142856, - 0.23214285714285715, 0.017857142857142856, - 0.23214285714285715, 0.017857142857142856, - 0.26785714285714285, 0.017857142857142856, - 0.26785714285714285, 0.017857142857142856, - 0.30357142857142855, 0.017857142857142856, - 0.30357142857142855, 0.017857142857142856, - 0.3392857142857143, 0.017857142857142856, - 0.3392857142857143, 0.017857142857142856, - 0.375, 0.017857142857142856, - 0.375, 0.017857142857142856, - 0.4107142857142857, 0.017857142857142856, - 0.4107142857142857, 0.017857142857142856, - 0.44642857142857145, 0.017857142857142856, - 0.44642857142857145, 0.017857142857142856, - 0.48214285714285715, 0.017857142857142856, - 0.48214285714285715, 0.017857142857142856, - 0.5178571428571429, 0.017857142857142856, - 0.5178571428571429, 0.017857142857142856, - 0.5535714285714286, 0.017857142857142856, - 0.5535714285714286, 0.017857142857142856, - 0.5892857142857143, 0.017857142857142856, - 0.5892857142857143, 0.017857142857142856, - 0.625, 0.017857142857142856, - 0.625, 0.017857142857142856, - 0.6607142857142857, 0.017857142857142856, - 0.6607142857142857, 0.017857142857142856, - 0.6964285714285714, 0.017857142857142856, - 0.6964285714285714, 0.017857142857142856, - 0.7321428571428571, 0.017857142857142856, - 0.7321428571428571, 0.017857142857142856, - 0.7678571428571429, 0.017857142857142856, - 0.7678571428571429, 0.017857142857142856, - 0.8035714285714286, 0.017857142857142856, - 0.8035714285714286, 0.017857142857142856, - 0.8392857142857143, 0.017857142857142856, - 0.8392857142857143, 0.017857142857142856, - 0.875, 0.017857142857142856, - 0.875, 0.017857142857142856, - 0.9107142857142857, 0.017857142857142856, - 0.9107142857142857, 0.017857142857142856, - 0.9464285714285714, 0.017857142857142856, - 0.9464285714285714, 0.017857142857142856, - 
0.9821428571428571, 0.017857142857142856, - 0.9821428571428571, 0.017857142857142856, - 0.017857142857142856, 0.05357142857142857, - 0.017857142857142856, 0.05357142857142857, - 0.05357142857142857, 0.05357142857142857, - 0.05357142857142857, 0.05357142857142857, - 0.08928571428571429, 0.05357142857142857, - 0.08928571428571429, 0.05357142857142857, - 0.125, 0.05357142857142857, - 0.125, 0.05357142857142857, - 0.16071428571428573, 0.05357142857142857, - 0.16071428571428573, 0.05357142857142857, - 0.19642857142857142, 0.05357142857142857, - 0.19642857142857142, 0.05357142857142857, - 0.23214285714285715, 0.05357142857142857, - 0.23214285714285715, 0.05357142857142857, - 0.26785714285714285, 0.05357142857142857, - 0.26785714285714285, 0.05357142857142857, - 0.30357142857142855, 0.05357142857142857, - 0.30357142857142855, 0.05357142857142857, - 0.3392857142857143, 0.05357142857142857, - 0.3392857142857143, 0.05357142857142857, - 0.375, 0.05357142857142857, - 0.375, 0.05357142857142857, - 0.4107142857142857, 0.05357142857142857, - 0.4107142857142857, 0.05357142857142857, - 0.44642857142857145, 0.05357142857142857, - 0.44642857142857145, 0.05357142857142857, - 0.48214285714285715, 0.05357142857142857, - 0.48214285714285715, 0.05357142857142857, - 0.5178571428571429, 0.05357142857142857, - 0.5178571428571429, 0.05357142857142857, - 0.5535714285714286, 0.05357142857142857, - 0.5535714285714286, 0.05357142857142857, - 0.5892857142857143, 0.05357142857142857, - 0.5892857142857143, 0.05357142857142857, - 0.625, 0.05357142857142857, - 0.625, 0.05357142857142857, - 0.6607142857142857, 0.05357142857142857, - 0.6607142857142857, 0.05357142857142857, - 0.6964285714285714, 0.05357142857142857, - 0.6964285714285714, 0.05357142857142857, - 0.7321428571428571, 0.05357142857142857, - 0.7321428571428571, 0.05357142857142857, - 0.7678571428571429, 0.05357142857142857, - 0.7678571428571429, 0.05357142857142857, - 0.8035714285714286, 0.05357142857142857, - 0.8035714285714286, 
0.05357142857142857, - 0.8392857142857143, 0.05357142857142857, - 0.8392857142857143, 0.05357142857142857, - 0.875, 0.05357142857142857, - 0.875, 0.05357142857142857, - 0.9107142857142857, 0.05357142857142857, - 0.9107142857142857, 0.05357142857142857, - 0.9464285714285714, 0.05357142857142857, - 0.9464285714285714, 0.05357142857142857, - 0.9821428571428571, 0.05357142857142857, - 0.9821428571428571, 0.05357142857142857, - 0.017857142857142856, 0.08928571428571429, - 0.017857142857142856, 0.08928571428571429, - 0.05357142857142857, 0.08928571428571429, - 0.05357142857142857, 0.08928571428571429, - 0.08928571428571429, 0.08928571428571429, - 0.08928571428571429, 0.08928571428571429, - 0.125, 0.08928571428571429, - 0.125, 0.08928571428571429, - 0.16071428571428573, 0.08928571428571429, - 0.16071428571428573, 0.08928571428571429, - 0.19642857142857142, 0.08928571428571429, - 0.19642857142857142, 0.08928571428571429, - 0.23214285714285715, 0.08928571428571429, - 0.23214285714285715, 0.08928571428571429, - 0.26785714285714285, 0.08928571428571429, - 0.26785714285714285, 0.08928571428571429, - 0.30357142857142855, 0.08928571428571429, - 0.30357142857142855, 0.08928571428571429, - 0.3392857142857143, 0.08928571428571429, - 0.3392857142857143, 0.08928571428571429, - 0.375, 0.08928571428571429, - 0.375, 0.08928571428571429, - 0.4107142857142857, 0.08928571428571429, - 0.4107142857142857, 0.08928571428571429, - 0.44642857142857145, 0.08928571428571429, - 0.44642857142857145, 0.08928571428571429, - 0.48214285714285715, 0.08928571428571429, - 0.48214285714285715, 0.08928571428571429, - 0.5178571428571429, 0.08928571428571429, - 0.5178571428571429, 0.08928571428571429, - 0.5535714285714286, 0.08928571428571429, - 0.5535714285714286, 0.08928571428571429, - 0.5892857142857143, 0.08928571428571429, - 0.5892857142857143, 0.08928571428571429, - 0.625, 0.08928571428571429, - 0.625, 0.08928571428571429, - 0.6607142857142857, 0.08928571428571429, - 0.6607142857142857, 
0.08928571428571429, - 0.6964285714285714, 0.08928571428571429, - 0.6964285714285714, 0.08928571428571429, - 0.7321428571428571, 0.08928571428571429, - 0.7321428571428571, 0.08928571428571429, - 0.7678571428571429, 0.08928571428571429, - 0.7678571428571429, 0.08928571428571429, - 0.8035714285714286, 0.08928571428571429, - 0.8035714285714286, 0.08928571428571429, - 0.8392857142857143, 0.08928571428571429, - 0.8392857142857143, 0.08928571428571429, - 0.875, 0.08928571428571429, - 0.875, 0.08928571428571429, - 0.9107142857142857, 0.08928571428571429, - 0.9107142857142857, 0.08928571428571429, - 0.9464285714285714, 0.08928571428571429, - 0.9464285714285714, 0.08928571428571429, - 0.9821428571428571, 0.08928571428571429, - 0.9821428571428571, 0.08928571428571429, - 0.017857142857142856, 0.125, - 0.017857142857142856, 0.125, - 0.05357142857142857, 0.125, - 0.05357142857142857, 0.125, - 0.08928571428571429, 0.125, - 0.08928571428571429, 0.125, - 0.125, 0.125, - 0.125, 0.125, - 0.16071428571428573, 0.125, - 0.16071428571428573, 0.125, - 0.19642857142857142, 0.125, - 0.19642857142857142, 0.125, - 0.23214285714285715, 0.125, - 0.23214285714285715, 0.125, - 0.26785714285714285, 0.125, - 0.26785714285714285, 0.125, - 0.30357142857142855, 0.125, - 0.30357142857142855, 0.125, - 0.3392857142857143, 0.125, - 0.3392857142857143, 0.125, - 0.375, 0.125, - 0.375, 0.125, - 0.4107142857142857, 0.125, - 0.4107142857142857, 0.125, - 0.44642857142857145, 0.125, - 0.44642857142857145, 0.125, - 0.48214285714285715, 0.125, - 0.48214285714285715, 0.125, - 0.5178571428571429, 0.125, - 0.5178571428571429, 0.125, - 0.5535714285714286, 0.125, - 0.5535714285714286, 0.125, - 0.5892857142857143, 0.125, - 0.5892857142857143, 0.125, - 0.625, 0.125, - 0.625, 0.125, - 0.6607142857142857, 0.125, - 0.6607142857142857, 0.125, - 0.6964285714285714, 0.125, - 0.6964285714285714, 0.125, - 0.7321428571428571, 0.125, - 0.7321428571428571, 0.125, - 0.7678571428571429, 0.125, - 0.7678571428571429, 0.125, - 
0.8035714285714286, 0.125, - 0.8035714285714286, 0.125, - 0.8392857142857143, 0.125, - 0.8392857142857143, 0.125, - 0.875, 0.125, - 0.875, 0.125, - 0.9107142857142857, 0.125, - 0.9107142857142857, 0.125, - 0.9464285714285714, 0.125, - 0.9464285714285714, 0.125, - 0.9821428571428571, 0.125, - 0.9821428571428571, 0.125, - 0.017857142857142856, 0.16071428571428573, - 0.017857142857142856, 0.16071428571428573, - 0.05357142857142857, 0.16071428571428573, - 0.05357142857142857, 0.16071428571428573, - 0.08928571428571429, 0.16071428571428573, - 0.08928571428571429, 0.16071428571428573, - 0.125, 0.16071428571428573, - 0.125, 0.16071428571428573, - 0.16071428571428573, 0.16071428571428573, - 0.16071428571428573, 0.16071428571428573, - 0.19642857142857142, 0.16071428571428573, - 0.19642857142857142, 0.16071428571428573, - 0.23214285714285715, 0.16071428571428573, - 0.23214285714285715, 0.16071428571428573, - 0.26785714285714285, 0.16071428571428573, - 0.26785714285714285, 0.16071428571428573, - 0.30357142857142855, 0.16071428571428573, - 0.30357142857142855, 0.16071428571428573, - 0.3392857142857143, 0.16071428571428573, - 0.3392857142857143, 0.16071428571428573, - 0.375, 0.16071428571428573, - 0.375, 0.16071428571428573, - 0.4107142857142857, 0.16071428571428573, - 0.4107142857142857, 0.16071428571428573, - 0.44642857142857145, 0.16071428571428573, - 0.44642857142857145, 0.16071428571428573, - 0.48214285714285715, 0.16071428571428573, - 0.48214285714285715, 0.16071428571428573, - 0.5178571428571429, 0.16071428571428573, - 0.5178571428571429, 0.16071428571428573, - 0.5535714285714286, 0.16071428571428573, - 0.5535714285714286, 0.16071428571428573, - 0.5892857142857143, 0.16071428571428573, - 0.5892857142857143, 0.16071428571428573, - 0.625, 0.16071428571428573, - 0.625, 0.16071428571428573, - 0.6607142857142857, 0.16071428571428573, - 0.6607142857142857, 0.16071428571428573, - 0.6964285714285714, 0.16071428571428573, - 0.6964285714285714, 0.16071428571428573, - 
0.7321428571428571, 0.16071428571428573, - 0.7321428571428571, 0.16071428571428573, - 0.7678571428571429, 0.16071428571428573, - 0.7678571428571429, 0.16071428571428573, - 0.8035714285714286, 0.16071428571428573, - 0.8035714285714286, 0.16071428571428573, - 0.8392857142857143, 0.16071428571428573, - 0.8392857142857143, 0.16071428571428573, - 0.875, 0.16071428571428573, - 0.875, 0.16071428571428573, - 0.9107142857142857, 0.16071428571428573, - 0.9107142857142857, 0.16071428571428573, - 0.9464285714285714, 0.16071428571428573, - 0.9464285714285714, 0.16071428571428573, - 0.9821428571428571, 0.16071428571428573, - 0.9821428571428571, 0.16071428571428573, - 0.017857142857142856, 0.19642857142857142, - 0.017857142857142856, 0.19642857142857142, - 0.05357142857142857, 0.19642857142857142, - 0.05357142857142857, 0.19642857142857142, - 0.08928571428571429, 0.19642857142857142, - 0.08928571428571429, 0.19642857142857142, - 0.125, 0.19642857142857142, - 0.125, 0.19642857142857142, - 0.16071428571428573, 0.19642857142857142, - 0.16071428571428573, 0.19642857142857142, - 0.19642857142857142, 0.19642857142857142, - 0.19642857142857142, 0.19642857142857142, - 0.23214285714285715, 0.19642857142857142, - 0.23214285714285715, 0.19642857142857142, - 0.26785714285714285, 0.19642857142857142, - 0.26785714285714285, 0.19642857142857142, - 0.30357142857142855, 0.19642857142857142, - 0.30357142857142855, 0.19642857142857142, - 0.3392857142857143, 0.19642857142857142, - 0.3392857142857143, 0.19642857142857142, - 0.375, 0.19642857142857142, - 0.375, 0.19642857142857142, - 0.4107142857142857, 0.19642857142857142, - 0.4107142857142857, 0.19642857142857142, - 0.44642857142857145, 0.19642857142857142, - 0.44642857142857145, 0.19642857142857142, - 0.48214285714285715, 0.19642857142857142, - 0.48214285714285715, 0.19642857142857142, - 0.5178571428571429, 0.19642857142857142, - 0.5178571428571429, 0.19642857142857142, - 0.5535714285714286, 0.19642857142857142, - 0.5535714285714286, 
0.19642857142857142, - 0.5892857142857143, 0.19642857142857142, - 0.5892857142857143, 0.19642857142857142, - 0.625, 0.19642857142857142, - 0.625, 0.19642857142857142, - 0.6607142857142857, 0.19642857142857142, - 0.6607142857142857, 0.19642857142857142, - 0.6964285714285714, 0.19642857142857142, - 0.6964285714285714, 0.19642857142857142, - 0.7321428571428571, 0.19642857142857142, - 0.7321428571428571, 0.19642857142857142, - 0.7678571428571429, 0.19642857142857142, - 0.7678571428571429, 0.19642857142857142, - 0.8035714285714286, 0.19642857142857142, - 0.8035714285714286, 0.19642857142857142, - 0.8392857142857143, 0.19642857142857142, - 0.8392857142857143, 0.19642857142857142, - 0.875, 0.19642857142857142, - 0.875, 0.19642857142857142, - 0.9107142857142857, 0.19642857142857142, - 0.9107142857142857, 0.19642857142857142, - 0.9464285714285714, 0.19642857142857142, - 0.9464285714285714, 0.19642857142857142, - 0.9821428571428571, 0.19642857142857142, - 0.9821428571428571, 0.19642857142857142, - 0.017857142857142856, 0.23214285714285715, - 0.017857142857142856, 0.23214285714285715, - 0.05357142857142857, 0.23214285714285715, - 0.05357142857142857, 0.23214285714285715, - 0.08928571428571429, 0.23214285714285715, - 0.08928571428571429, 0.23214285714285715, - 0.125, 0.23214285714285715, - 0.125, 0.23214285714285715, - 0.16071428571428573, 0.23214285714285715, - 0.16071428571428573, 0.23214285714285715, - 0.19642857142857142, 0.23214285714285715, - 0.19642857142857142, 0.23214285714285715, - 0.23214285714285715, 0.23214285714285715, - 0.23214285714285715, 0.23214285714285715, - 0.26785714285714285, 0.23214285714285715, - 0.26785714285714285, 0.23214285714285715, - 0.30357142857142855, 0.23214285714285715, - 0.30357142857142855, 0.23214285714285715, - 0.3392857142857143, 0.23214285714285715, - 0.3392857142857143, 0.23214285714285715, - 0.375, 0.23214285714285715, - 0.375, 0.23214285714285715, - 0.4107142857142857, 0.23214285714285715, - 0.4107142857142857, 0.23214285714285715, 
- 0.44642857142857145, 0.23214285714285715, - 0.44642857142857145, 0.23214285714285715, - 0.48214285714285715, 0.23214285714285715, - 0.48214285714285715, 0.23214285714285715, - 0.5178571428571429, 0.23214285714285715, - 0.5178571428571429, 0.23214285714285715, - 0.5535714285714286, 0.23214285714285715, - 0.5535714285714286, 0.23214285714285715, - 0.5892857142857143, 0.23214285714285715, - 0.5892857142857143, 0.23214285714285715, - 0.625, 0.23214285714285715, - 0.625, 0.23214285714285715, - 0.6607142857142857, 0.23214285714285715, - 0.6607142857142857, 0.23214285714285715, - 0.6964285714285714, 0.23214285714285715, - 0.6964285714285714, 0.23214285714285715, - 0.7321428571428571, 0.23214285714285715, - 0.7321428571428571, 0.23214285714285715, - 0.7678571428571429, 0.23214285714285715, - 0.7678571428571429, 0.23214285714285715, - 0.8035714285714286, 0.23214285714285715, - 0.8035714285714286, 0.23214285714285715, - 0.8392857142857143, 0.23214285714285715, - 0.8392857142857143, 0.23214285714285715, - 0.875, 0.23214285714285715, - 0.875, 0.23214285714285715, - 0.9107142857142857, 0.23214285714285715, - 0.9107142857142857, 0.23214285714285715, - 0.9464285714285714, 0.23214285714285715, - 0.9464285714285714, 0.23214285714285715, - 0.9821428571428571, 0.23214285714285715, - 0.9821428571428571, 0.23214285714285715, - 0.017857142857142856, 0.26785714285714285, - 0.017857142857142856, 0.26785714285714285, - 0.05357142857142857, 0.26785714285714285, - 0.05357142857142857, 0.26785714285714285, - 0.08928571428571429, 0.26785714285714285, - 0.08928571428571429, 0.26785714285714285, - 0.125, 0.26785714285714285, - 0.125, 0.26785714285714285, - 0.16071428571428573, 0.26785714285714285, - 0.16071428571428573, 0.26785714285714285, - 0.19642857142857142, 0.26785714285714285, - 0.19642857142857142, 0.26785714285714285, - 0.23214285714285715, 0.26785714285714285, - 0.23214285714285715, 0.26785714285714285, - 0.26785714285714285, 0.26785714285714285, - 0.26785714285714285, 
0.26785714285714285, - 0.30357142857142855, 0.26785714285714285, - 0.30357142857142855, 0.26785714285714285, - 0.3392857142857143, 0.26785714285714285, - 0.3392857142857143, 0.26785714285714285, - 0.375, 0.26785714285714285, - 0.375, 0.26785714285714285, - 0.4107142857142857, 0.26785714285714285, - 0.4107142857142857, 0.26785714285714285, - 0.44642857142857145, 0.26785714285714285, - 0.44642857142857145, 0.26785714285714285, - 0.48214285714285715, 0.26785714285714285, - 0.48214285714285715, 0.26785714285714285, - 0.5178571428571429, 0.26785714285714285, - 0.5178571428571429, 0.26785714285714285, - 0.5535714285714286, 0.26785714285714285, - 0.5535714285714286, 0.26785714285714285, - 0.5892857142857143, 0.26785714285714285, - 0.5892857142857143, 0.26785714285714285, - 0.625, 0.26785714285714285, - 0.625, 0.26785714285714285, - 0.6607142857142857, 0.26785714285714285, - 0.6607142857142857, 0.26785714285714285, - 0.6964285714285714, 0.26785714285714285, - 0.6964285714285714, 0.26785714285714285, - 0.7321428571428571, 0.26785714285714285, - 0.7321428571428571, 0.26785714285714285, - 0.7678571428571429, 0.26785714285714285, - 0.7678571428571429, 0.26785714285714285, - 0.8035714285714286, 0.26785714285714285, - 0.8035714285714286, 0.26785714285714285, - 0.8392857142857143, 0.26785714285714285, - 0.8392857142857143, 0.26785714285714285, - 0.875, 0.26785714285714285, - 0.875, 0.26785714285714285, - 0.9107142857142857, 0.26785714285714285, - 0.9107142857142857, 0.26785714285714285, - 0.9464285714285714, 0.26785714285714285, - 0.9464285714285714, 0.26785714285714285, - 0.9821428571428571, 0.26785714285714285, - 0.9821428571428571, 0.26785714285714285, - 0.017857142857142856, 0.30357142857142855, - 0.017857142857142856, 0.30357142857142855, - 0.05357142857142857, 0.30357142857142855, - 0.05357142857142857, 0.30357142857142855, - 0.08928571428571429, 0.30357142857142855, - 0.08928571428571429, 0.30357142857142855, - 0.125, 0.30357142857142855, - 0.125, 0.30357142857142855, - 
0.16071428571428573, 0.30357142857142855, - 0.16071428571428573, 0.30357142857142855, - 0.19642857142857142, 0.30357142857142855, - 0.19642857142857142, 0.30357142857142855, - 0.23214285714285715, 0.30357142857142855, - 0.23214285714285715, 0.30357142857142855, - 0.26785714285714285, 0.30357142857142855, - 0.26785714285714285, 0.30357142857142855, - 0.30357142857142855, 0.30357142857142855, - 0.30357142857142855, 0.30357142857142855, - 0.3392857142857143, 0.30357142857142855, - 0.3392857142857143, 0.30357142857142855, - 0.375, 0.30357142857142855, - 0.375, 0.30357142857142855, - 0.4107142857142857, 0.30357142857142855, - 0.4107142857142857, 0.30357142857142855, - 0.44642857142857145, 0.30357142857142855, - 0.44642857142857145, 0.30357142857142855, - 0.48214285714285715, 0.30357142857142855, - 0.48214285714285715, 0.30357142857142855, - 0.5178571428571429, 0.30357142857142855, - 0.5178571428571429, 0.30357142857142855, - 0.5535714285714286, 0.30357142857142855, - 0.5535714285714286, 0.30357142857142855, - 0.5892857142857143, 0.30357142857142855, - 0.5892857142857143, 0.30357142857142855, - 0.625, 0.30357142857142855, - 0.625, 0.30357142857142855, - 0.6607142857142857, 0.30357142857142855, - 0.6607142857142857, 0.30357142857142855, - 0.6964285714285714, 0.30357142857142855, - 0.6964285714285714, 0.30357142857142855, - 0.7321428571428571, 0.30357142857142855, - 0.7321428571428571, 0.30357142857142855, - 0.7678571428571429, 0.30357142857142855, - 0.7678571428571429, 0.30357142857142855, - 0.8035714285714286, 0.30357142857142855, - 0.8035714285714286, 0.30357142857142855, - 0.8392857142857143, 0.30357142857142855, - 0.8392857142857143, 0.30357142857142855, - 0.875, 0.30357142857142855, - 0.875, 0.30357142857142855, - 0.9107142857142857, 0.30357142857142855, - 0.9107142857142857, 0.30357142857142855, - 0.9464285714285714, 0.30357142857142855, - 0.9464285714285714, 0.30357142857142855, - 0.9821428571428571, 0.30357142857142855, - 0.9821428571428571, 0.30357142857142855, - 
0.017857142857142856, 0.3392857142857143, - 0.017857142857142856, 0.3392857142857143, - 0.05357142857142857, 0.3392857142857143, - 0.05357142857142857, 0.3392857142857143, - 0.08928571428571429, 0.3392857142857143, - 0.08928571428571429, 0.3392857142857143, - 0.125, 0.3392857142857143, - 0.125, 0.3392857142857143, - 0.16071428571428573, 0.3392857142857143, - 0.16071428571428573, 0.3392857142857143, - 0.19642857142857142, 0.3392857142857143, - 0.19642857142857142, 0.3392857142857143, - 0.23214285714285715, 0.3392857142857143, - 0.23214285714285715, 0.3392857142857143, - 0.26785714285714285, 0.3392857142857143, - 0.26785714285714285, 0.3392857142857143, - 0.30357142857142855, 0.3392857142857143, - 0.30357142857142855, 0.3392857142857143, - 0.3392857142857143, 0.3392857142857143, - 0.3392857142857143, 0.3392857142857143, - 0.375, 0.3392857142857143, - 0.375, 0.3392857142857143, - 0.4107142857142857, 0.3392857142857143, - 0.4107142857142857, 0.3392857142857143, - 0.44642857142857145, 0.3392857142857143, - 0.44642857142857145, 0.3392857142857143, - 0.48214285714285715, 0.3392857142857143, - 0.48214285714285715, 0.3392857142857143, - 0.5178571428571429, 0.3392857142857143, - 0.5178571428571429, 0.3392857142857143, - 0.5535714285714286, 0.3392857142857143, - 0.5535714285714286, 0.3392857142857143, - 0.5892857142857143, 0.3392857142857143, - 0.5892857142857143, 0.3392857142857143, - 0.625, 0.3392857142857143, - 0.625, 0.3392857142857143, - 0.6607142857142857, 0.3392857142857143, - 0.6607142857142857, 0.3392857142857143, - 0.6964285714285714, 0.3392857142857143, - 0.6964285714285714, 0.3392857142857143, - 0.7321428571428571, 0.3392857142857143, - 0.7321428571428571, 0.3392857142857143, - 0.7678571428571429, 0.3392857142857143, - 0.7678571428571429, 0.3392857142857143, - 0.8035714285714286, 0.3392857142857143, - 0.8035714285714286, 0.3392857142857143, - 0.8392857142857143, 0.3392857142857143, - 0.8392857142857143, 0.3392857142857143, - 0.875, 0.3392857142857143, - 0.875, 
0.3392857142857143, - 0.9107142857142857, 0.3392857142857143, - 0.9107142857142857, 0.3392857142857143, - 0.9464285714285714, 0.3392857142857143, - 0.9464285714285714, 0.3392857142857143, - 0.9821428571428571, 0.3392857142857143, - 0.9821428571428571, 0.3392857142857143, - 0.017857142857142856, 0.375, - 0.017857142857142856, 0.375, - 0.05357142857142857, 0.375, - 0.05357142857142857, 0.375, - 0.08928571428571429, 0.375, - 0.08928571428571429, 0.375, - 0.125, 0.375, - 0.125, 0.375, - 0.16071428571428573, 0.375, - 0.16071428571428573, 0.375, - 0.19642857142857142, 0.375, - 0.19642857142857142, 0.375, - 0.23214285714285715, 0.375, - 0.23214285714285715, 0.375, - 0.26785714285714285, 0.375, - 0.26785714285714285, 0.375, - 0.30357142857142855, 0.375, - 0.30357142857142855, 0.375, - 0.3392857142857143, 0.375, - 0.3392857142857143, 0.375, - 0.375, 0.375, - 0.375, 0.375, - 0.4107142857142857, 0.375, - 0.4107142857142857, 0.375, - 0.44642857142857145, 0.375, - 0.44642857142857145, 0.375, - 0.48214285714285715, 0.375, - 0.48214285714285715, 0.375, - 0.5178571428571429, 0.375, - 0.5178571428571429, 0.375, - 0.5535714285714286, 0.375, - 0.5535714285714286, 0.375, - 0.5892857142857143, 0.375, - 0.5892857142857143, 0.375, - 0.625, 0.375, - 0.625, 0.375, - 0.6607142857142857, 0.375, - 0.6607142857142857, 0.375, - 0.6964285714285714, 0.375, - 0.6964285714285714, 0.375, - 0.7321428571428571, 0.375, - 0.7321428571428571, 0.375, - 0.7678571428571429, 0.375, - 0.7678571428571429, 0.375, - 0.8035714285714286, 0.375, - 0.8035714285714286, 0.375, - 0.8392857142857143, 0.375, - 0.8392857142857143, 0.375, - 0.875, 0.375, - 0.875, 0.375, - 0.9107142857142857, 0.375, - 0.9107142857142857, 0.375, - 0.9464285714285714, 0.375, - 0.9464285714285714, 0.375, - 0.9821428571428571, 0.375, - 0.9821428571428571, 0.375, - 0.017857142857142856, 0.4107142857142857, - 0.017857142857142856, 0.4107142857142857, - 0.05357142857142857, 0.4107142857142857, - 0.05357142857142857, 0.4107142857142857, - 
0.08928571428571429, 0.4107142857142857, - 0.08928571428571429, 0.4107142857142857, - 0.125, 0.4107142857142857, - 0.125, 0.4107142857142857, - 0.16071428571428573, 0.4107142857142857, - 0.16071428571428573, 0.4107142857142857, - 0.19642857142857142, 0.4107142857142857, - 0.19642857142857142, 0.4107142857142857, - 0.23214285714285715, 0.4107142857142857, - 0.23214285714285715, 0.4107142857142857, - 0.26785714285714285, 0.4107142857142857, - 0.26785714285714285, 0.4107142857142857, - 0.30357142857142855, 0.4107142857142857, - 0.30357142857142855, 0.4107142857142857, - 0.3392857142857143, 0.4107142857142857, - 0.3392857142857143, 0.4107142857142857, - 0.375, 0.4107142857142857, - 0.375, 0.4107142857142857, - 0.4107142857142857, 0.4107142857142857, - 0.4107142857142857, 0.4107142857142857, - 0.44642857142857145, 0.4107142857142857, - 0.44642857142857145, 0.4107142857142857, - 0.48214285714285715, 0.4107142857142857, - 0.48214285714285715, 0.4107142857142857, - 0.5178571428571429, 0.4107142857142857, - 0.5178571428571429, 0.4107142857142857, - 0.5535714285714286, 0.4107142857142857, - 0.5535714285714286, 0.4107142857142857, - 0.5892857142857143, 0.4107142857142857, - 0.5892857142857143, 0.4107142857142857, - 0.625, 0.4107142857142857, - 0.625, 0.4107142857142857, - 0.6607142857142857, 0.4107142857142857, - 0.6607142857142857, 0.4107142857142857, - 0.6964285714285714, 0.4107142857142857, - 0.6964285714285714, 0.4107142857142857, - 0.7321428571428571, 0.4107142857142857, - 0.7321428571428571, 0.4107142857142857, - 0.7678571428571429, 0.4107142857142857, - 0.7678571428571429, 0.4107142857142857, - 0.8035714285714286, 0.4107142857142857, - 0.8035714285714286, 0.4107142857142857, - 0.8392857142857143, 0.4107142857142857, - 0.8392857142857143, 0.4107142857142857, - 0.875, 0.4107142857142857, - 0.875, 0.4107142857142857, - 0.9107142857142857, 0.4107142857142857, - 0.9107142857142857, 0.4107142857142857, - 0.9464285714285714, 0.4107142857142857, - 0.9464285714285714, 
0.4107142857142857, - 0.9821428571428571, 0.4107142857142857, - 0.9821428571428571, 0.4107142857142857, - 0.017857142857142856, 0.44642857142857145, - 0.017857142857142856, 0.44642857142857145, - 0.05357142857142857, 0.44642857142857145, - 0.05357142857142857, 0.44642857142857145, - 0.08928571428571429, 0.44642857142857145, - 0.08928571428571429, 0.44642857142857145, - 0.125, 0.44642857142857145, - 0.125, 0.44642857142857145, - 0.16071428571428573, 0.44642857142857145, - 0.16071428571428573, 0.44642857142857145, - 0.19642857142857142, 0.44642857142857145, - 0.19642857142857142, 0.44642857142857145, - 0.23214285714285715, 0.44642857142857145, - 0.23214285714285715, 0.44642857142857145, - 0.26785714285714285, 0.44642857142857145, - 0.26785714285714285, 0.44642857142857145, - 0.30357142857142855, 0.44642857142857145, - 0.30357142857142855, 0.44642857142857145, - 0.3392857142857143, 0.44642857142857145, - 0.3392857142857143, 0.44642857142857145, - 0.375, 0.44642857142857145, - 0.375, 0.44642857142857145, - 0.4107142857142857, 0.44642857142857145, - 0.4107142857142857, 0.44642857142857145, - 0.44642857142857145, 0.44642857142857145, - 0.44642857142857145, 0.44642857142857145, - 0.48214285714285715, 0.44642857142857145, - 0.48214285714285715, 0.44642857142857145, - 0.5178571428571429, 0.44642857142857145, - 0.5178571428571429, 0.44642857142857145, - 0.5535714285714286, 0.44642857142857145, - 0.5535714285714286, 0.44642857142857145, - 0.5892857142857143, 0.44642857142857145, - 0.5892857142857143, 0.44642857142857145, - 0.625, 0.44642857142857145, - 0.625, 0.44642857142857145, - 0.6607142857142857, 0.44642857142857145, - 0.6607142857142857, 0.44642857142857145, - 0.6964285714285714, 0.44642857142857145, - 0.6964285714285714, 0.44642857142857145, - 0.7321428571428571, 0.44642857142857145, - 0.7321428571428571, 0.44642857142857145, - 0.7678571428571429, 0.44642857142857145, - 0.7678571428571429, 0.44642857142857145, - 0.8035714285714286, 0.44642857142857145, - 
0.8035714285714286, 0.44642857142857145, - 0.8392857142857143, 0.44642857142857145, - 0.8392857142857143, 0.44642857142857145, - 0.875, 0.44642857142857145, - 0.875, 0.44642857142857145, - 0.9107142857142857, 0.44642857142857145, - 0.9107142857142857, 0.44642857142857145, - 0.9464285714285714, 0.44642857142857145, - 0.9464285714285714, 0.44642857142857145, - 0.9821428571428571, 0.44642857142857145, - 0.9821428571428571, 0.44642857142857145, - 0.017857142857142856, 0.48214285714285715, - 0.017857142857142856, 0.48214285714285715, - 0.05357142857142857, 0.48214285714285715, - 0.05357142857142857, 0.48214285714285715, - 0.08928571428571429, 0.48214285714285715, - 0.08928571428571429, 0.48214285714285715, - 0.125, 0.48214285714285715, - 0.125, 0.48214285714285715, - 0.16071428571428573, 0.48214285714285715, - 0.16071428571428573, 0.48214285714285715, - 0.19642857142857142, 0.48214285714285715, - 0.19642857142857142, 0.48214285714285715, - 0.23214285714285715, 0.48214285714285715, - 0.23214285714285715, 0.48214285714285715, - 0.26785714285714285, 0.48214285714285715, - 0.26785714285714285, 0.48214285714285715, - 0.30357142857142855, 0.48214285714285715, - 0.30357142857142855, 0.48214285714285715, - 0.3392857142857143, 0.48214285714285715, - 0.3392857142857143, 0.48214285714285715, - 0.375, 0.48214285714285715, - 0.375, 0.48214285714285715, - 0.4107142857142857, 0.48214285714285715, - 0.4107142857142857, 0.48214285714285715, - 0.44642857142857145, 0.48214285714285715, - 0.44642857142857145, 0.48214285714285715, - 0.48214285714285715, 0.48214285714285715, - 0.48214285714285715, 0.48214285714285715, - 0.5178571428571429, 0.48214285714285715, - 0.5178571428571429, 0.48214285714285715, - 0.5535714285714286, 0.48214285714285715, - 0.5535714285714286, 0.48214285714285715, - 0.5892857142857143, 0.48214285714285715, - 0.5892857142857143, 0.48214285714285715, - 0.625, 0.48214285714285715, - 0.625, 0.48214285714285715, - 0.6607142857142857, 0.48214285714285715, - 
0.6607142857142857, 0.48214285714285715, - 0.6964285714285714, 0.48214285714285715, - 0.6964285714285714, 0.48214285714285715, - 0.7321428571428571, 0.48214285714285715, - 0.7321428571428571, 0.48214285714285715, - 0.7678571428571429, 0.48214285714285715, - 0.7678571428571429, 0.48214285714285715, - 0.8035714285714286, 0.48214285714285715, - 0.8035714285714286, 0.48214285714285715, - 0.8392857142857143, 0.48214285714285715, - 0.8392857142857143, 0.48214285714285715, - 0.875, 0.48214285714285715, - 0.875, 0.48214285714285715, - 0.9107142857142857, 0.48214285714285715, - 0.9107142857142857, 0.48214285714285715, - 0.9464285714285714, 0.48214285714285715, - 0.9464285714285714, 0.48214285714285715, - 0.9821428571428571, 0.48214285714285715, - 0.9821428571428571, 0.48214285714285715, - 0.017857142857142856, 0.5178571428571429, - 0.017857142857142856, 0.5178571428571429, - 0.05357142857142857, 0.5178571428571429, - 0.05357142857142857, 0.5178571428571429, - 0.08928571428571429, 0.5178571428571429, - 0.08928571428571429, 0.5178571428571429, - 0.125, 0.5178571428571429, - 0.125, 0.5178571428571429, - 0.16071428571428573, 0.5178571428571429, - 0.16071428571428573, 0.5178571428571429, - 0.19642857142857142, 0.5178571428571429, - 0.19642857142857142, 0.5178571428571429, - 0.23214285714285715, 0.5178571428571429, - 0.23214285714285715, 0.5178571428571429, - 0.26785714285714285, 0.5178571428571429, - 0.26785714285714285, 0.5178571428571429, - 0.30357142857142855, 0.5178571428571429, - 0.30357142857142855, 0.5178571428571429, - 0.3392857142857143, 0.5178571428571429, - 0.3392857142857143, 0.5178571428571429, - 0.375, 0.5178571428571429, - 0.375, 0.5178571428571429, - 0.4107142857142857, 0.5178571428571429, - 0.4107142857142857, 0.5178571428571429, - 0.44642857142857145, 0.5178571428571429, - 0.44642857142857145, 0.5178571428571429, - 0.48214285714285715, 0.5178571428571429, - 0.48214285714285715, 0.5178571428571429, - 0.5178571428571429, 0.5178571428571429, - 0.5178571428571429, 
0.5178571428571429, - 0.5535714285714286, 0.5178571428571429, - 0.5535714285714286, 0.5178571428571429, - 0.5892857142857143, 0.5178571428571429, - 0.5892857142857143, 0.5178571428571429, - 0.625, 0.5178571428571429, - 0.625, 0.5178571428571429, - 0.6607142857142857, 0.5178571428571429, - 0.6607142857142857, 0.5178571428571429, - 0.6964285714285714, 0.5178571428571429, - 0.6964285714285714, 0.5178571428571429, - 0.7321428571428571, 0.5178571428571429, - 0.7321428571428571, 0.5178571428571429, - 0.7678571428571429, 0.5178571428571429, - 0.7678571428571429, 0.5178571428571429, - 0.8035714285714286, 0.5178571428571429, - 0.8035714285714286, 0.5178571428571429, - 0.8392857142857143, 0.5178571428571429, - 0.8392857142857143, 0.5178571428571429, - 0.875, 0.5178571428571429, - 0.875, 0.5178571428571429, - 0.9107142857142857, 0.5178571428571429, - 0.9107142857142857, 0.5178571428571429, - 0.9464285714285714, 0.5178571428571429, - 0.9464285714285714, 0.5178571428571429, - 0.9821428571428571, 0.5178571428571429, - 0.9821428571428571, 0.5178571428571429, - 0.017857142857142856, 0.5535714285714286, - 0.017857142857142856, 0.5535714285714286, - 0.05357142857142857, 0.5535714285714286, - 0.05357142857142857, 0.5535714285714286, - 0.08928571428571429, 0.5535714285714286, - 0.08928571428571429, 0.5535714285714286, - 0.125, 0.5535714285714286, - 0.125, 0.5535714285714286, - 0.16071428571428573, 0.5535714285714286, - 0.16071428571428573, 0.5535714285714286, - 0.19642857142857142, 0.5535714285714286, - 0.19642857142857142, 0.5535714285714286, - 0.23214285714285715, 0.5535714285714286, - 0.23214285714285715, 0.5535714285714286, - 0.26785714285714285, 0.5535714285714286, - 0.26785714285714285, 0.5535714285714286, - 0.30357142857142855, 0.5535714285714286, - 0.30357142857142855, 0.5535714285714286, - 0.3392857142857143, 0.5535714285714286, - 0.3392857142857143, 0.5535714285714286, - 0.375, 0.5535714285714286, - 0.375, 0.5535714285714286, - 0.4107142857142857, 0.5535714285714286, - 
0.4107142857142857, 0.5535714285714286, - 0.44642857142857145, 0.5535714285714286, - 0.44642857142857145, 0.5535714285714286, - 0.48214285714285715, 0.5535714285714286, - 0.48214285714285715, 0.5535714285714286, - 0.5178571428571429, 0.5535714285714286, - 0.5178571428571429, 0.5535714285714286, - 0.5535714285714286, 0.5535714285714286, - 0.5535714285714286, 0.5535714285714286, - 0.5892857142857143, 0.5535714285714286, - 0.5892857142857143, 0.5535714285714286, - 0.625, 0.5535714285714286, - 0.625, 0.5535714285714286, - 0.6607142857142857, 0.5535714285714286, - 0.6607142857142857, 0.5535714285714286, - 0.6964285714285714, 0.5535714285714286, - 0.6964285714285714, 0.5535714285714286, - 0.7321428571428571, 0.5535714285714286, - 0.7321428571428571, 0.5535714285714286, - 0.7678571428571429, 0.5535714285714286, - 0.7678571428571429, 0.5535714285714286, - 0.8035714285714286, 0.5535714285714286, - 0.8035714285714286, 0.5535714285714286, - 0.8392857142857143, 0.5535714285714286, - 0.8392857142857143, 0.5535714285714286, - 0.875, 0.5535714285714286, - 0.875, 0.5535714285714286, - 0.9107142857142857, 0.5535714285714286, - 0.9107142857142857, 0.5535714285714286, - 0.9464285714285714, 0.5535714285714286, - 0.9464285714285714, 0.5535714285714286, - 0.9821428571428571, 0.5535714285714286, - 0.9821428571428571, 0.5535714285714286, - 0.017857142857142856, 0.5892857142857143, - 0.017857142857142856, 0.5892857142857143, - 0.05357142857142857, 0.5892857142857143, - 0.05357142857142857, 0.5892857142857143, - 0.08928571428571429, 0.5892857142857143, - 0.08928571428571429, 0.5892857142857143, - 0.125, 0.5892857142857143, - 0.125, 0.5892857142857143, - 0.16071428571428573, 0.5892857142857143, - 0.16071428571428573, 0.5892857142857143, - 0.19642857142857142, 0.5892857142857143, - 0.19642857142857142, 0.5892857142857143, - 0.23214285714285715, 0.5892857142857143, - 0.23214285714285715, 0.5892857142857143, - 0.26785714285714285, 0.5892857142857143, - 0.26785714285714285, 0.5892857142857143, - 
0.30357142857142855, 0.5892857142857143, - 0.30357142857142855, 0.5892857142857143, - 0.3392857142857143, 0.5892857142857143, - 0.3392857142857143, 0.5892857142857143, - 0.375, 0.5892857142857143, - 0.375, 0.5892857142857143, - 0.4107142857142857, 0.5892857142857143, - 0.4107142857142857, 0.5892857142857143, - 0.44642857142857145, 0.5892857142857143, - 0.44642857142857145, 0.5892857142857143, - 0.48214285714285715, 0.5892857142857143, - 0.48214285714285715, 0.5892857142857143, - 0.5178571428571429, 0.5892857142857143, - 0.5178571428571429, 0.5892857142857143, - 0.5535714285714286, 0.5892857142857143, - 0.5535714285714286, 0.5892857142857143, - 0.5892857142857143, 0.5892857142857143, - 0.5892857142857143, 0.5892857142857143, - 0.625, 0.5892857142857143, - 0.625, 0.5892857142857143, - 0.6607142857142857, 0.5892857142857143, - 0.6607142857142857, 0.5892857142857143, - 0.6964285714285714, 0.5892857142857143, - 0.6964285714285714, 0.5892857142857143, - 0.7321428571428571, 0.5892857142857143, - 0.7321428571428571, 0.5892857142857143, - 0.7678571428571429, 0.5892857142857143, - 0.7678571428571429, 0.5892857142857143, - 0.8035714285714286, 0.5892857142857143, - 0.8035714285714286, 0.5892857142857143, - 0.8392857142857143, 0.5892857142857143, - 0.8392857142857143, 0.5892857142857143, - 0.875, 0.5892857142857143, - 0.875, 0.5892857142857143, - 0.9107142857142857, 0.5892857142857143, - 0.9107142857142857, 0.5892857142857143, - 0.9464285714285714, 0.5892857142857143, - 0.9464285714285714, 0.5892857142857143, - 0.9821428571428571, 0.5892857142857143, - 0.9821428571428571, 0.5892857142857143, - 0.017857142857142856, 0.625, - 0.017857142857142856, 0.625, - 0.05357142857142857, 0.625, - 0.05357142857142857, 0.625, - 0.08928571428571429, 0.625, - 0.08928571428571429, 0.625, - 0.125, 0.625, - 0.125, 0.625, - 0.16071428571428573, 0.625, - 0.16071428571428573, 0.625, - 0.19642857142857142, 0.625, - 0.19642857142857142, 0.625, - 0.23214285714285715, 0.625, - 0.23214285714285715, 0.625, 
- 0.26785714285714285, 0.625, - 0.26785714285714285, 0.625, - 0.30357142857142855, 0.625, - 0.30357142857142855, 0.625, - 0.3392857142857143, 0.625, - 0.3392857142857143, 0.625, - 0.375, 0.625, - 0.375, 0.625, - 0.4107142857142857, 0.625, - 0.4107142857142857, 0.625, - 0.44642857142857145, 0.625, - 0.44642857142857145, 0.625, - 0.48214285714285715, 0.625, - 0.48214285714285715, 0.625, - 0.5178571428571429, 0.625, - 0.5178571428571429, 0.625, - 0.5535714285714286, 0.625, - 0.5535714285714286, 0.625, - 0.5892857142857143, 0.625, - 0.5892857142857143, 0.625, - 0.625, 0.625, - 0.625, 0.625, - 0.6607142857142857, 0.625, - 0.6607142857142857, 0.625, - 0.6964285714285714, 0.625, - 0.6964285714285714, 0.625, - 0.7321428571428571, 0.625, - 0.7321428571428571, 0.625, - 0.7678571428571429, 0.625, - 0.7678571428571429, 0.625, - 0.8035714285714286, 0.625, - 0.8035714285714286, 0.625, - 0.8392857142857143, 0.625, - 0.8392857142857143, 0.625, - 0.875, 0.625, - 0.875, 0.625, - 0.9107142857142857, 0.625, - 0.9107142857142857, 0.625, - 0.9464285714285714, 0.625, - 0.9464285714285714, 0.625, - 0.9821428571428571, 0.625, - 0.9821428571428571, 0.625, - 0.017857142857142856, 0.6607142857142857, - 0.017857142857142856, 0.6607142857142857, - 0.05357142857142857, 0.6607142857142857, - 0.05357142857142857, 0.6607142857142857, - 0.08928571428571429, 0.6607142857142857, - 0.08928571428571429, 0.6607142857142857, - 0.125, 0.6607142857142857, - 0.125, 0.6607142857142857, - 0.16071428571428573, 0.6607142857142857, - 0.16071428571428573, 0.6607142857142857, - 0.19642857142857142, 0.6607142857142857, - 0.19642857142857142, 0.6607142857142857, - 0.23214285714285715, 0.6607142857142857, - 0.23214285714285715, 0.6607142857142857, - 0.26785714285714285, 0.6607142857142857, - 0.26785714285714285, 0.6607142857142857, - 0.30357142857142855, 0.6607142857142857, - 0.30357142857142855, 0.6607142857142857, - 0.3392857142857143, 0.6607142857142857, - 0.3392857142857143, 0.6607142857142857, - 0.375, 
0.6607142857142857, - 0.375, 0.6607142857142857, - 0.4107142857142857, 0.6607142857142857, - 0.4107142857142857, 0.6607142857142857, - 0.44642857142857145, 0.6607142857142857, - 0.44642857142857145, 0.6607142857142857, - 0.48214285714285715, 0.6607142857142857, - 0.48214285714285715, 0.6607142857142857, - 0.5178571428571429, 0.6607142857142857, - 0.5178571428571429, 0.6607142857142857, - 0.5535714285714286, 0.6607142857142857, - 0.5535714285714286, 0.6607142857142857, - 0.5892857142857143, 0.6607142857142857, - 0.5892857142857143, 0.6607142857142857, - 0.625, 0.6607142857142857, - 0.625, 0.6607142857142857, - 0.6607142857142857, 0.6607142857142857, - 0.6607142857142857, 0.6607142857142857, - 0.6964285714285714, 0.6607142857142857, - 0.6964285714285714, 0.6607142857142857, - 0.7321428571428571, 0.6607142857142857, - 0.7321428571428571, 0.6607142857142857, - 0.7678571428571429, 0.6607142857142857, - 0.7678571428571429, 0.6607142857142857, - 0.8035714285714286, 0.6607142857142857, - 0.8035714285714286, 0.6607142857142857, - 0.8392857142857143, 0.6607142857142857, - 0.8392857142857143, 0.6607142857142857, - 0.875, 0.6607142857142857, - 0.875, 0.6607142857142857, - 0.9107142857142857, 0.6607142857142857, - 0.9107142857142857, 0.6607142857142857, - 0.9464285714285714, 0.6607142857142857, - 0.9464285714285714, 0.6607142857142857, - 0.9821428571428571, 0.6607142857142857, - 0.9821428571428571, 0.6607142857142857, - 0.017857142857142856, 0.6964285714285714, - 0.017857142857142856, 0.6964285714285714, - 0.05357142857142857, 0.6964285714285714, - 0.05357142857142857, 0.6964285714285714, - 0.08928571428571429, 0.6964285714285714, - 0.08928571428571429, 0.6964285714285714, - 0.125, 0.6964285714285714, - 0.125, 0.6964285714285714, - 0.16071428571428573, 0.6964285714285714, - 0.16071428571428573, 0.6964285714285714, - 0.19642857142857142, 0.6964285714285714, - 0.19642857142857142, 0.6964285714285714, - 0.23214285714285715, 0.6964285714285714, - 0.23214285714285715, 
0.6964285714285714, - 0.26785714285714285, 0.6964285714285714, - 0.26785714285714285, 0.6964285714285714, - 0.30357142857142855, 0.6964285714285714, - 0.30357142857142855, 0.6964285714285714, - 0.3392857142857143, 0.6964285714285714, - 0.3392857142857143, 0.6964285714285714, - 0.375, 0.6964285714285714, - 0.375, 0.6964285714285714, - 0.4107142857142857, 0.6964285714285714, - 0.4107142857142857, 0.6964285714285714, - 0.44642857142857145, 0.6964285714285714, - 0.44642857142857145, 0.6964285714285714, - 0.48214285714285715, 0.6964285714285714, - 0.48214285714285715, 0.6964285714285714, - 0.5178571428571429, 0.6964285714285714, - 0.5178571428571429, 0.6964285714285714, - 0.5535714285714286, 0.6964285714285714, - 0.5535714285714286, 0.6964285714285714, - 0.5892857142857143, 0.6964285714285714, - 0.5892857142857143, 0.6964285714285714, - 0.625, 0.6964285714285714, - 0.625, 0.6964285714285714, - 0.6607142857142857, 0.6964285714285714, - 0.6607142857142857, 0.6964285714285714, - 0.6964285714285714, 0.6964285714285714, - 0.6964285714285714, 0.6964285714285714, - 0.7321428571428571, 0.6964285714285714, - 0.7321428571428571, 0.6964285714285714, - 0.7678571428571429, 0.6964285714285714, - 0.7678571428571429, 0.6964285714285714, - 0.8035714285714286, 0.6964285714285714, - 0.8035714285714286, 0.6964285714285714, - 0.8392857142857143, 0.6964285714285714, - 0.8392857142857143, 0.6964285714285714, - 0.875, 0.6964285714285714, - 0.875, 0.6964285714285714, - 0.9107142857142857, 0.6964285714285714, - 0.9107142857142857, 0.6964285714285714, - 0.9464285714285714, 0.6964285714285714, - 0.9464285714285714, 0.6964285714285714, - 0.9821428571428571, 0.6964285714285714, - 0.9821428571428571, 0.6964285714285714, - 0.017857142857142856, 0.7321428571428571, - 0.017857142857142856, 0.7321428571428571, - 0.05357142857142857, 0.7321428571428571, - 0.05357142857142857, 0.7321428571428571, - 0.08928571428571429, 0.7321428571428571, - 0.08928571428571429, 0.7321428571428571, - 0.125, 
0.7321428571428571, - 0.125, 0.7321428571428571, - 0.16071428571428573, 0.7321428571428571, - 0.16071428571428573, 0.7321428571428571, - 0.19642857142857142, 0.7321428571428571, - 0.19642857142857142, 0.7321428571428571, - 0.23214285714285715, 0.7321428571428571, - 0.23214285714285715, 0.7321428571428571, - 0.26785714285714285, 0.7321428571428571, - 0.26785714285714285, 0.7321428571428571, - 0.30357142857142855, 0.7321428571428571, - 0.30357142857142855, 0.7321428571428571, - 0.3392857142857143, 0.7321428571428571, - 0.3392857142857143, 0.7321428571428571, - 0.375, 0.7321428571428571, - 0.375, 0.7321428571428571, - 0.4107142857142857, 0.7321428571428571, - 0.4107142857142857, 0.7321428571428571, - 0.44642857142857145, 0.7321428571428571, - 0.44642857142857145, 0.7321428571428571, - 0.48214285714285715, 0.7321428571428571, - 0.48214285714285715, 0.7321428571428571, - 0.5178571428571429, 0.7321428571428571, - 0.5178571428571429, 0.7321428571428571, - 0.5535714285714286, 0.7321428571428571, - 0.5535714285714286, 0.7321428571428571, - 0.5892857142857143, 0.7321428571428571, - 0.5892857142857143, 0.7321428571428571, - 0.625, 0.7321428571428571, - 0.625, 0.7321428571428571, - 0.6607142857142857, 0.7321428571428571, - 0.6607142857142857, 0.7321428571428571, - 0.6964285714285714, 0.7321428571428571, - 0.6964285714285714, 0.7321428571428571, - 0.7321428571428571, 0.7321428571428571, - 0.7321428571428571, 0.7321428571428571, - 0.7678571428571429, 0.7321428571428571, - 0.7678571428571429, 0.7321428571428571, - 0.8035714285714286, 0.7321428571428571, - 0.8035714285714286, 0.7321428571428571, - 0.8392857142857143, 0.7321428571428571, - 0.8392857142857143, 0.7321428571428571, - 0.875, 0.7321428571428571, - 0.875, 0.7321428571428571, - 0.9107142857142857, 0.7321428571428571, - 0.9107142857142857, 0.7321428571428571, - 0.9464285714285714, 0.7321428571428571, - 0.9464285714285714, 0.7321428571428571, - 0.9821428571428571, 0.7321428571428571, - 0.9821428571428571, 
0.7321428571428571, - 0.017857142857142856, 0.7678571428571429, - 0.017857142857142856, 0.7678571428571429, - 0.05357142857142857, 0.7678571428571429, - 0.05357142857142857, 0.7678571428571429, - 0.08928571428571429, 0.7678571428571429, - 0.08928571428571429, 0.7678571428571429, - 0.125, 0.7678571428571429, - 0.125, 0.7678571428571429, - 0.16071428571428573, 0.7678571428571429, - 0.16071428571428573, 0.7678571428571429, - 0.19642857142857142, 0.7678571428571429, - 0.19642857142857142, 0.7678571428571429, - 0.23214285714285715, 0.7678571428571429, - 0.23214285714285715, 0.7678571428571429, - 0.26785714285714285, 0.7678571428571429, - 0.26785714285714285, 0.7678571428571429, - 0.30357142857142855, 0.7678571428571429, - 0.30357142857142855, 0.7678571428571429, - 0.3392857142857143, 0.7678571428571429, - 0.3392857142857143, 0.7678571428571429, - 0.375, 0.7678571428571429, - 0.375, 0.7678571428571429, - 0.4107142857142857, 0.7678571428571429, - 0.4107142857142857, 0.7678571428571429, - 0.44642857142857145, 0.7678571428571429, - 0.44642857142857145, 0.7678571428571429, - 0.48214285714285715, 0.7678571428571429, - 0.48214285714285715, 0.7678571428571429, - 0.5178571428571429, 0.7678571428571429, - 0.5178571428571429, 0.7678571428571429, - 0.5535714285714286, 0.7678571428571429, - 0.5535714285714286, 0.7678571428571429, - 0.5892857142857143, 0.7678571428571429, - 0.5892857142857143, 0.7678571428571429, - 0.625, 0.7678571428571429, - 0.625, 0.7678571428571429, - 0.6607142857142857, 0.7678571428571429, - 0.6607142857142857, 0.7678571428571429, - 0.6964285714285714, 0.7678571428571429, - 0.6964285714285714, 0.7678571428571429, - 0.7321428571428571, 0.7678571428571429, - 0.7321428571428571, 0.7678571428571429, - 0.7678571428571429, 0.7678571428571429, - 0.7678571428571429, 0.7678571428571429, - 0.8035714285714286, 0.7678571428571429, - 0.8035714285714286, 0.7678571428571429, - 0.8392857142857143, 0.7678571428571429, - 0.8392857142857143, 0.7678571428571429, - 0.875, 
0.7678571428571429, - 0.875, 0.7678571428571429, - 0.9107142857142857, 0.7678571428571429, - 0.9107142857142857, 0.7678571428571429, - 0.9464285714285714, 0.7678571428571429, - 0.9464285714285714, 0.7678571428571429, - 0.9821428571428571, 0.7678571428571429, - 0.9821428571428571, 0.7678571428571429, - 0.017857142857142856, 0.8035714285714286, - 0.017857142857142856, 0.8035714285714286, - 0.05357142857142857, 0.8035714285714286, - 0.05357142857142857, 0.8035714285714286, - 0.08928571428571429, 0.8035714285714286, - 0.08928571428571429, 0.8035714285714286, - 0.125, 0.8035714285714286, - 0.125, 0.8035714285714286, - 0.16071428571428573, 0.8035714285714286, - 0.16071428571428573, 0.8035714285714286, - 0.19642857142857142, 0.8035714285714286, - 0.19642857142857142, 0.8035714285714286, - 0.23214285714285715, 0.8035714285714286, - 0.23214285714285715, 0.8035714285714286, - 0.26785714285714285, 0.8035714285714286, - 0.26785714285714285, 0.8035714285714286, - 0.30357142857142855, 0.8035714285714286, - 0.30357142857142855, 0.8035714285714286, - 0.3392857142857143, 0.8035714285714286, - 0.3392857142857143, 0.8035714285714286, - 0.375, 0.8035714285714286, - 0.375, 0.8035714285714286, - 0.4107142857142857, 0.8035714285714286, - 0.4107142857142857, 0.8035714285714286, - 0.44642857142857145, 0.8035714285714286, - 0.44642857142857145, 0.8035714285714286, - 0.48214285714285715, 0.8035714285714286, - 0.48214285714285715, 0.8035714285714286, - 0.5178571428571429, 0.8035714285714286, - 0.5178571428571429, 0.8035714285714286, - 0.5535714285714286, 0.8035714285714286, - 0.5535714285714286, 0.8035714285714286, - 0.5892857142857143, 0.8035714285714286, - 0.5892857142857143, 0.8035714285714286, - 0.625, 0.8035714285714286, - 0.625, 0.8035714285714286, - 0.6607142857142857, 0.8035714285714286, - 0.6607142857142857, 0.8035714285714286, - 0.6964285714285714, 0.8035714285714286, - 0.6964285714285714, 0.8035714285714286, - 0.7321428571428571, 0.8035714285714286, - 0.7321428571428571, 
0.8035714285714286, - 0.7678571428571429, 0.8035714285714286, - 0.7678571428571429, 0.8035714285714286, - 0.8035714285714286, 0.8035714285714286, - 0.8035714285714286, 0.8035714285714286, - 0.8392857142857143, 0.8035714285714286, - 0.8392857142857143, 0.8035714285714286, - 0.875, 0.8035714285714286, - 0.875, 0.8035714285714286, - 0.9107142857142857, 0.8035714285714286, - 0.9107142857142857, 0.8035714285714286, - 0.9464285714285714, 0.8035714285714286, - 0.9464285714285714, 0.8035714285714286, - 0.9821428571428571, 0.8035714285714286, - 0.9821428571428571, 0.8035714285714286, - 0.017857142857142856, 0.8392857142857143, - 0.017857142857142856, 0.8392857142857143, - 0.05357142857142857, 0.8392857142857143, - 0.05357142857142857, 0.8392857142857143, - 0.08928571428571429, 0.8392857142857143, - 0.08928571428571429, 0.8392857142857143, - 0.125, 0.8392857142857143, - 0.125, 0.8392857142857143, - 0.16071428571428573, 0.8392857142857143, - 0.16071428571428573, 0.8392857142857143, - 0.19642857142857142, 0.8392857142857143, - 0.19642857142857142, 0.8392857142857143, - 0.23214285714285715, 0.8392857142857143, - 0.23214285714285715, 0.8392857142857143, - 0.26785714285714285, 0.8392857142857143, - 0.26785714285714285, 0.8392857142857143, - 0.30357142857142855, 0.8392857142857143, - 0.30357142857142855, 0.8392857142857143, - 0.3392857142857143, 0.8392857142857143, - 0.3392857142857143, 0.8392857142857143, - 0.375, 0.8392857142857143, - 0.375, 0.8392857142857143, - 0.4107142857142857, 0.8392857142857143, - 0.4107142857142857, 0.8392857142857143, - 0.44642857142857145, 0.8392857142857143, - 0.44642857142857145, 0.8392857142857143, - 0.48214285714285715, 0.8392857142857143, - 0.48214285714285715, 0.8392857142857143, - 0.5178571428571429, 0.8392857142857143, - 0.5178571428571429, 0.8392857142857143, - 0.5535714285714286, 0.8392857142857143, - 0.5535714285714286, 0.8392857142857143, - 0.5892857142857143, 0.8392857142857143, - 0.5892857142857143, 0.8392857142857143, - 0.625, 
0.8392857142857143, - 0.625, 0.8392857142857143, - 0.6607142857142857, 0.8392857142857143, - 0.6607142857142857, 0.8392857142857143, - 0.6964285714285714, 0.8392857142857143, - 0.6964285714285714, 0.8392857142857143, - 0.7321428571428571, 0.8392857142857143, - 0.7321428571428571, 0.8392857142857143, - 0.7678571428571429, 0.8392857142857143, - 0.7678571428571429, 0.8392857142857143, - 0.8035714285714286, 0.8392857142857143, - 0.8035714285714286, 0.8392857142857143, - 0.8392857142857143, 0.8392857142857143, - 0.8392857142857143, 0.8392857142857143, - 0.875, 0.8392857142857143, - 0.875, 0.8392857142857143, - 0.9107142857142857, 0.8392857142857143, - 0.9107142857142857, 0.8392857142857143, - 0.9464285714285714, 0.8392857142857143, - 0.9464285714285714, 0.8392857142857143, - 0.9821428571428571, 0.8392857142857143, - 0.9821428571428571, 0.8392857142857143, - 0.017857142857142856, 0.875, - 0.017857142857142856, 0.875, - 0.05357142857142857, 0.875, - 0.05357142857142857, 0.875, - 0.08928571428571429, 0.875, - 0.08928571428571429, 0.875, - 0.125, 0.875, - 0.125, 0.875, - 0.16071428571428573, 0.875, - 0.16071428571428573, 0.875, - 0.19642857142857142, 0.875, - 0.19642857142857142, 0.875, - 0.23214285714285715, 0.875, - 0.23214285714285715, 0.875, - 0.26785714285714285, 0.875, - 0.26785714285714285, 0.875, - 0.30357142857142855, 0.875, - 0.30357142857142855, 0.875, - 0.3392857142857143, 0.875, - 0.3392857142857143, 0.875, - 0.375, 0.875, - 0.375, 0.875, - 0.4107142857142857, 0.875, - 0.4107142857142857, 0.875, - 0.44642857142857145, 0.875, - 0.44642857142857145, 0.875, - 0.48214285714285715, 0.875, - 0.48214285714285715, 0.875, - 0.5178571428571429, 0.875, - 0.5178571428571429, 0.875, - 0.5535714285714286, 0.875, - 0.5535714285714286, 0.875, - 0.5892857142857143, 0.875, - 0.5892857142857143, 0.875, - 0.625, 0.875, - 0.625, 0.875, - 0.6607142857142857, 0.875, - 0.6607142857142857, 0.875, - 0.6964285714285714, 0.875, - 0.6964285714285714, 0.875, - 0.7321428571428571, 0.875, - 
0.7321428571428571, 0.875, - 0.7678571428571429, 0.875, - 0.7678571428571429, 0.875, - 0.8035714285714286, 0.875, - 0.8035714285714286, 0.875, - 0.8392857142857143, 0.875, - 0.8392857142857143, 0.875, - 0.875, 0.875, - 0.875, 0.875, - 0.9107142857142857, 0.875, - 0.9107142857142857, 0.875, - 0.9464285714285714, 0.875, - 0.9464285714285714, 0.875, - 0.9821428571428571, 0.875, - 0.9821428571428571, 0.875, - 0.017857142857142856, 0.9107142857142857, - 0.017857142857142856, 0.9107142857142857, - 0.05357142857142857, 0.9107142857142857, - 0.05357142857142857, 0.9107142857142857, - 0.08928571428571429, 0.9107142857142857, - 0.08928571428571429, 0.9107142857142857, - 0.125, 0.9107142857142857, - 0.125, 0.9107142857142857, - 0.16071428571428573, 0.9107142857142857, - 0.16071428571428573, 0.9107142857142857, - 0.19642857142857142, 0.9107142857142857, - 0.19642857142857142, 0.9107142857142857, - 0.23214285714285715, 0.9107142857142857, - 0.23214285714285715, 0.9107142857142857, - 0.26785714285714285, 0.9107142857142857, - 0.26785714285714285, 0.9107142857142857, - 0.30357142857142855, 0.9107142857142857, - 0.30357142857142855, 0.9107142857142857, - 0.3392857142857143, 0.9107142857142857, - 0.3392857142857143, 0.9107142857142857, - 0.375, 0.9107142857142857, - 0.375, 0.9107142857142857, - 0.4107142857142857, 0.9107142857142857, - 0.4107142857142857, 0.9107142857142857, - 0.44642857142857145, 0.9107142857142857, - 0.44642857142857145, 0.9107142857142857, - 0.48214285714285715, 0.9107142857142857, - 0.48214285714285715, 0.9107142857142857, - 0.5178571428571429, 0.9107142857142857, - 0.5178571428571429, 0.9107142857142857, - 0.5535714285714286, 0.9107142857142857, - 0.5535714285714286, 0.9107142857142857, - 0.5892857142857143, 0.9107142857142857, - 0.5892857142857143, 0.9107142857142857, - 0.625, 0.9107142857142857, - 0.625, 0.9107142857142857, - 0.6607142857142857, 0.9107142857142857, - 0.6607142857142857, 0.9107142857142857, - 0.6964285714285714, 0.9107142857142857, - 
0.6964285714285714, 0.9107142857142857, - 0.7321428571428571, 0.9107142857142857, - 0.7321428571428571, 0.9107142857142857, - 0.7678571428571429, 0.9107142857142857, - 0.7678571428571429, 0.9107142857142857, - 0.8035714285714286, 0.9107142857142857, - 0.8035714285714286, 0.9107142857142857, - 0.8392857142857143, 0.9107142857142857, - 0.8392857142857143, 0.9107142857142857, - 0.875, 0.9107142857142857, - 0.875, 0.9107142857142857, - 0.9107142857142857, 0.9107142857142857, - 0.9107142857142857, 0.9107142857142857, - 0.9464285714285714, 0.9107142857142857, - 0.9464285714285714, 0.9107142857142857, - 0.9821428571428571, 0.9107142857142857, - 0.9821428571428571, 0.9107142857142857, - 0.017857142857142856, 0.9464285714285714, - 0.017857142857142856, 0.9464285714285714, - 0.05357142857142857, 0.9464285714285714, - 0.05357142857142857, 0.9464285714285714, - 0.08928571428571429, 0.9464285714285714, - 0.08928571428571429, 0.9464285714285714, - 0.125, 0.9464285714285714, - 0.125, 0.9464285714285714, - 0.16071428571428573, 0.9464285714285714, - 0.16071428571428573, 0.9464285714285714, - 0.19642857142857142, 0.9464285714285714, - 0.19642857142857142, 0.9464285714285714, - 0.23214285714285715, 0.9464285714285714, - 0.23214285714285715, 0.9464285714285714, - 0.26785714285714285, 0.9464285714285714, - 0.26785714285714285, 0.9464285714285714, - 0.30357142857142855, 0.9464285714285714, - 0.30357142857142855, 0.9464285714285714, - 0.3392857142857143, 0.9464285714285714, - 0.3392857142857143, 0.9464285714285714, - 0.375, 0.9464285714285714, - 0.375, 0.9464285714285714, - 0.4107142857142857, 0.9464285714285714, - 0.4107142857142857, 0.9464285714285714, - 0.44642857142857145, 0.9464285714285714, - 0.44642857142857145, 0.9464285714285714, - 0.48214285714285715, 0.9464285714285714, - 0.48214285714285715, 0.9464285714285714, - 0.5178571428571429, 0.9464285714285714, - 0.5178571428571429, 0.9464285714285714, - 0.5535714285714286, 0.9464285714285714, - 0.5535714285714286, 0.9464285714285714, 
- 0.5892857142857143, 0.9464285714285714, - 0.5892857142857143, 0.9464285714285714, - 0.625, 0.9464285714285714, - 0.625, 0.9464285714285714, - 0.6607142857142857, 0.9464285714285714, - 0.6607142857142857, 0.9464285714285714, - 0.6964285714285714, 0.9464285714285714, - 0.6964285714285714, 0.9464285714285714, - 0.7321428571428571, 0.9464285714285714, - 0.7321428571428571, 0.9464285714285714, - 0.7678571428571429, 0.9464285714285714, - 0.7678571428571429, 0.9464285714285714, - 0.8035714285714286, 0.9464285714285714, - 0.8035714285714286, 0.9464285714285714, - 0.8392857142857143, 0.9464285714285714, - 0.8392857142857143, 0.9464285714285714, - 0.875, 0.9464285714285714, - 0.875, 0.9464285714285714, - 0.9107142857142857, 0.9464285714285714, - 0.9107142857142857, 0.9464285714285714, - 0.9464285714285714, 0.9464285714285714, - 0.9464285714285714, 0.9464285714285714, - 0.9821428571428571, 0.9464285714285714, - 0.9821428571428571, 0.9464285714285714, - 0.017857142857142856, 0.9821428571428571, - 0.017857142857142856, 0.9821428571428571, - 0.05357142857142857, 0.9821428571428571, - 0.05357142857142857, 0.9821428571428571, - 0.08928571428571429, 0.9821428571428571, - 0.08928571428571429, 0.9821428571428571, - 0.125, 0.9821428571428571, - 0.125, 0.9821428571428571, - 0.16071428571428573, 0.9821428571428571, - 0.16071428571428573, 0.9821428571428571, - 0.19642857142857142, 0.9821428571428571, - 0.19642857142857142, 0.9821428571428571, - 0.23214285714285715, 0.9821428571428571, - 0.23214285714285715, 0.9821428571428571, - 0.26785714285714285, 0.9821428571428571, - 0.26785714285714285, 0.9821428571428571, - 0.30357142857142855, 0.9821428571428571, - 0.30357142857142855, 0.9821428571428571, - 0.3392857142857143, 0.9821428571428571, - 0.3392857142857143, 0.9821428571428571, - 0.375, 0.9821428571428571, - 0.375, 0.9821428571428571, - 0.4107142857142857, 0.9821428571428571, - 0.4107142857142857, 0.9821428571428571, - 0.44642857142857145, 0.9821428571428571, - 0.44642857142857145, 
0.9821428571428571, - 0.48214285714285715, 0.9821428571428571, - 0.48214285714285715, 0.9821428571428571, - 0.5178571428571429, 0.9821428571428571, - 0.5178571428571429, 0.9821428571428571, - 0.5535714285714286, 0.9821428571428571, - 0.5535714285714286, 0.9821428571428571, - 0.5892857142857143, 0.9821428571428571, - 0.5892857142857143, 0.9821428571428571, - 0.625, 0.9821428571428571, - 0.625, 0.9821428571428571, - 0.6607142857142857, 0.9821428571428571, - 0.6607142857142857, 0.9821428571428571, - 0.6964285714285714, 0.9821428571428571, - 0.6964285714285714, 0.9821428571428571, - 0.7321428571428571, 0.9821428571428571, - 0.7321428571428571, 0.9821428571428571, - 0.7678571428571429, 0.9821428571428571, - 0.7678571428571429, 0.9821428571428571, - 0.8035714285714286, 0.9821428571428571, - 0.8035714285714286, 0.9821428571428571, - 0.8392857142857143, 0.9821428571428571, - 0.8392857142857143, 0.9821428571428571, - 0.875, 0.9821428571428571, - 0.875, 0.9821428571428571, - 0.9107142857142857, 0.9821428571428571, - 0.9107142857142857, 0.9821428571428571, - 0.9464285714285714, 0.9821428571428571, - 0.9464285714285714, 0.9821428571428571, - 0.9821428571428571, 0.9821428571428571, - 0.9821428571428571, 0.9821428571428571, - 0.03571428571428571, 0.03571428571428571, - 0.03571428571428571, 0.03571428571428571, - 0.10714285714285714, 0.03571428571428571, - 0.10714285714285714, 0.03571428571428571, - 0.17857142857142858, 0.03571428571428571, - 0.17857142857142858, 0.03571428571428571, - 0.25, 0.03571428571428571, - 0.25, 0.03571428571428571, - 0.32142857142857145, 0.03571428571428571, - 0.32142857142857145, 0.03571428571428571, - 0.39285714285714285, 0.03571428571428571, - 0.39285714285714285, 0.03571428571428571, - 0.4642857142857143, 0.03571428571428571, - 0.4642857142857143, 0.03571428571428571, - 0.5357142857142857, 0.03571428571428571, - 0.5357142857142857, 0.03571428571428571, - 0.6071428571428571, 0.03571428571428571, - 0.6071428571428571, 0.03571428571428571, - 
0.6785714285714286, 0.03571428571428571, - 0.6785714285714286, 0.03571428571428571, - 0.75, 0.03571428571428571, - 0.75, 0.03571428571428571, - 0.8214285714285714, 0.03571428571428571, - 0.8214285714285714, 0.03571428571428571, - 0.8928571428571429, 0.03571428571428571, - 0.8928571428571429, 0.03571428571428571, - 0.9642857142857143, 0.03571428571428571, - 0.9642857142857143, 0.03571428571428571, - 0.03571428571428571, 0.10714285714285714, - 0.03571428571428571, 0.10714285714285714, - 0.10714285714285714, 0.10714285714285714, - 0.10714285714285714, 0.10714285714285714, - 0.17857142857142858, 0.10714285714285714, - 0.17857142857142858, 0.10714285714285714, - 0.25, 0.10714285714285714, - 0.25, 0.10714285714285714, - 0.32142857142857145, 0.10714285714285714, - 0.32142857142857145, 0.10714285714285714, - 0.39285714285714285, 0.10714285714285714, - 0.39285714285714285, 0.10714285714285714, - 0.4642857142857143, 0.10714285714285714, - 0.4642857142857143, 0.10714285714285714, - 0.5357142857142857, 0.10714285714285714, - 0.5357142857142857, 0.10714285714285714, - 0.6071428571428571, 0.10714285714285714, - 0.6071428571428571, 0.10714285714285714, - 0.6785714285714286, 0.10714285714285714, - 0.6785714285714286, 0.10714285714285714, - 0.75, 0.10714285714285714, - 0.75, 0.10714285714285714, - 0.8214285714285714, 0.10714285714285714, - 0.8214285714285714, 0.10714285714285714, - 0.8928571428571429, 0.10714285714285714, - 0.8928571428571429, 0.10714285714285714, - 0.9642857142857143, 0.10714285714285714, - 0.9642857142857143, 0.10714285714285714, - 0.03571428571428571, 0.17857142857142858, - 0.03571428571428571, 0.17857142857142858, - 0.10714285714285714, 0.17857142857142858, - 0.10714285714285714, 0.17857142857142858, - 0.17857142857142858, 0.17857142857142858, - 0.17857142857142858, 0.17857142857142858, - 0.25, 0.17857142857142858, - 0.25, 0.17857142857142858, - 0.32142857142857145, 0.17857142857142858, - 0.32142857142857145, 0.17857142857142858, - 0.39285714285714285, 
0.17857142857142858, - 0.39285714285714285, 0.17857142857142858, - 0.4642857142857143, 0.17857142857142858, - 0.4642857142857143, 0.17857142857142858, - 0.5357142857142857, 0.17857142857142858, - 0.5357142857142857, 0.17857142857142858, - 0.6071428571428571, 0.17857142857142858, - 0.6071428571428571, 0.17857142857142858, - 0.6785714285714286, 0.17857142857142858, - 0.6785714285714286, 0.17857142857142858, - 0.75, 0.17857142857142858, - 0.75, 0.17857142857142858, - 0.8214285714285714, 0.17857142857142858, - 0.8214285714285714, 0.17857142857142858, - 0.8928571428571429, 0.17857142857142858, - 0.8928571428571429, 0.17857142857142858, - 0.9642857142857143, 0.17857142857142858, - 0.9642857142857143, 0.17857142857142858, - 0.03571428571428571, 0.25, - 0.03571428571428571, 0.25, - 0.10714285714285714, 0.25, - 0.10714285714285714, 0.25, - 0.17857142857142858, 0.25, - 0.17857142857142858, 0.25, - 0.25, 0.25, - 0.25, 0.25, - 0.32142857142857145, 0.25, - 0.32142857142857145, 0.25, - 0.39285714285714285, 0.25, - 0.39285714285714285, 0.25, - 0.4642857142857143, 0.25, - 0.4642857142857143, 0.25, - 0.5357142857142857, 0.25, - 0.5357142857142857, 0.25, - 0.6071428571428571, 0.25, - 0.6071428571428571, 0.25, - 0.6785714285714286, 0.25, - 0.6785714285714286, 0.25, - 0.75, 0.25, - 0.75, 0.25, - 0.8214285714285714, 0.25, - 0.8214285714285714, 0.25, - 0.8928571428571429, 0.25, - 0.8928571428571429, 0.25, - 0.9642857142857143, 0.25, - 0.9642857142857143, 0.25, - 0.03571428571428571, 0.32142857142857145, - 0.03571428571428571, 0.32142857142857145, - 0.10714285714285714, 0.32142857142857145, - 0.10714285714285714, 0.32142857142857145, - 0.17857142857142858, 0.32142857142857145, - 0.17857142857142858, 0.32142857142857145, - 0.25, 0.32142857142857145, - 0.25, 0.32142857142857145, - 0.32142857142857145, 0.32142857142857145, - 0.32142857142857145, 0.32142857142857145, - 0.39285714285714285, 0.32142857142857145, - 0.39285714285714285, 0.32142857142857145, - 0.4642857142857143, 
0.32142857142857145, - 0.4642857142857143, 0.32142857142857145, - 0.5357142857142857, 0.32142857142857145, - 0.5357142857142857, 0.32142857142857145, - 0.6071428571428571, 0.32142857142857145, - 0.6071428571428571, 0.32142857142857145, - 0.6785714285714286, 0.32142857142857145, - 0.6785714285714286, 0.32142857142857145, - 0.75, 0.32142857142857145, - 0.75, 0.32142857142857145, - 0.8214285714285714, 0.32142857142857145, - 0.8214285714285714, 0.32142857142857145, - 0.8928571428571429, 0.32142857142857145, - 0.8928571428571429, 0.32142857142857145, - 0.9642857142857143, 0.32142857142857145, - 0.9642857142857143, 0.32142857142857145, - 0.03571428571428571, 0.39285714285714285, - 0.03571428571428571, 0.39285714285714285, - 0.10714285714285714, 0.39285714285714285, - 0.10714285714285714, 0.39285714285714285, - 0.17857142857142858, 0.39285714285714285, - 0.17857142857142858, 0.39285714285714285, - 0.25, 0.39285714285714285, - 0.25, 0.39285714285714285, - 0.32142857142857145, 0.39285714285714285, - 0.32142857142857145, 0.39285714285714285, - 0.39285714285714285, 0.39285714285714285, - 0.39285714285714285, 0.39285714285714285, - 0.4642857142857143, 0.39285714285714285, - 0.4642857142857143, 0.39285714285714285, - 0.5357142857142857, 0.39285714285714285, - 0.5357142857142857, 0.39285714285714285, - 0.6071428571428571, 0.39285714285714285, - 0.6071428571428571, 0.39285714285714285, - 0.6785714285714286, 0.39285714285714285, - 0.6785714285714286, 0.39285714285714285, - 0.75, 0.39285714285714285, - 0.75, 0.39285714285714285, - 0.8214285714285714, 0.39285714285714285, - 0.8214285714285714, 0.39285714285714285, - 0.8928571428571429, 0.39285714285714285, - 0.8928571428571429, 0.39285714285714285, - 0.9642857142857143, 0.39285714285714285, - 0.9642857142857143, 0.39285714285714285, - 0.03571428571428571, 0.4642857142857143, - 0.03571428571428571, 0.4642857142857143, - 0.10714285714285714, 0.4642857142857143, - 0.10714285714285714, 0.4642857142857143, - 0.17857142857142858, 
0.4642857142857143, - 0.17857142857142858, 0.4642857142857143, - 0.25, 0.4642857142857143, - 0.25, 0.4642857142857143, - 0.32142857142857145, 0.4642857142857143, - 0.32142857142857145, 0.4642857142857143, - 0.39285714285714285, 0.4642857142857143, - 0.39285714285714285, 0.4642857142857143, - 0.4642857142857143, 0.4642857142857143, - 0.4642857142857143, 0.4642857142857143, - 0.5357142857142857, 0.4642857142857143, - 0.5357142857142857, 0.4642857142857143, - 0.6071428571428571, 0.4642857142857143, - 0.6071428571428571, 0.4642857142857143, - 0.6785714285714286, 0.4642857142857143, - 0.6785714285714286, 0.4642857142857143, - 0.75, 0.4642857142857143, - 0.75, 0.4642857142857143, - 0.8214285714285714, 0.4642857142857143, - 0.8214285714285714, 0.4642857142857143, - 0.8928571428571429, 0.4642857142857143, - 0.8928571428571429, 0.4642857142857143, - 0.9642857142857143, 0.4642857142857143, - 0.9642857142857143, 0.4642857142857143, - 0.03571428571428571, 0.5357142857142857, - 0.03571428571428571, 0.5357142857142857, - 0.10714285714285714, 0.5357142857142857, - 0.10714285714285714, 0.5357142857142857, - 0.17857142857142858, 0.5357142857142857, - 0.17857142857142858, 0.5357142857142857, - 0.25, 0.5357142857142857, - 0.25, 0.5357142857142857, - 0.32142857142857145, 0.5357142857142857, - 0.32142857142857145, 0.5357142857142857, - 0.39285714285714285, 0.5357142857142857, - 0.39285714285714285, 0.5357142857142857, - 0.4642857142857143, 0.5357142857142857, - 0.4642857142857143, 0.5357142857142857, - 0.5357142857142857, 0.5357142857142857, - 0.5357142857142857, 0.5357142857142857, - 0.6071428571428571, 0.5357142857142857, - 0.6071428571428571, 0.5357142857142857, - 0.6785714285714286, 0.5357142857142857, - 0.6785714285714286, 0.5357142857142857, - 0.75, 0.5357142857142857, - 0.75, 0.5357142857142857, - 0.8214285714285714, 0.5357142857142857, - 0.8214285714285714, 0.5357142857142857, - 0.8928571428571429, 0.5357142857142857, - 0.8928571428571429, 0.5357142857142857, - 
0.9642857142857143, 0.5357142857142857, - 0.9642857142857143, 0.5357142857142857, - 0.03571428571428571, 0.6071428571428571, - 0.03571428571428571, 0.6071428571428571, - 0.10714285714285714, 0.6071428571428571, - 0.10714285714285714, 0.6071428571428571, - 0.17857142857142858, 0.6071428571428571, - 0.17857142857142858, 0.6071428571428571, - 0.25, 0.6071428571428571, - 0.25, 0.6071428571428571, - 0.32142857142857145, 0.6071428571428571, - 0.32142857142857145, 0.6071428571428571, - 0.39285714285714285, 0.6071428571428571, - 0.39285714285714285, 0.6071428571428571, - 0.4642857142857143, 0.6071428571428571, - 0.4642857142857143, 0.6071428571428571, - 0.5357142857142857, 0.6071428571428571, - 0.5357142857142857, 0.6071428571428571, - 0.6071428571428571, 0.6071428571428571, - 0.6071428571428571, 0.6071428571428571, - 0.6785714285714286, 0.6071428571428571, - 0.6785714285714286, 0.6071428571428571, - 0.75, 0.6071428571428571, - 0.75, 0.6071428571428571, - 0.8214285714285714, 0.6071428571428571, - 0.8214285714285714, 0.6071428571428571, - 0.8928571428571429, 0.6071428571428571, - 0.8928571428571429, 0.6071428571428571, - 0.9642857142857143, 0.6071428571428571, - 0.9642857142857143, 0.6071428571428571, - 0.03571428571428571, 0.6785714285714286, - 0.03571428571428571, 0.6785714285714286, - 0.10714285714285714, 0.6785714285714286, - 0.10714285714285714, 0.6785714285714286, - 0.17857142857142858, 0.6785714285714286, - 0.17857142857142858, 0.6785714285714286, - 0.25, 0.6785714285714286, - 0.25, 0.6785714285714286, - 0.32142857142857145, 0.6785714285714286, - 0.32142857142857145, 0.6785714285714286, - 0.39285714285714285, 0.6785714285714286, - 0.39285714285714285, 0.6785714285714286, - 0.4642857142857143, 0.6785714285714286, - 0.4642857142857143, 0.6785714285714286, - 0.5357142857142857, 0.6785714285714286, - 0.5357142857142857, 0.6785714285714286, - 0.6071428571428571, 0.6785714285714286, - 0.6071428571428571, 0.6785714285714286, - 0.6785714285714286, 0.6785714285714286, - 
0.6785714285714286, 0.6785714285714286, - 0.75, 0.6785714285714286, - 0.75, 0.6785714285714286, - 0.8214285714285714, 0.6785714285714286, - 0.8214285714285714, 0.6785714285714286, - 0.8928571428571429, 0.6785714285714286, - 0.8928571428571429, 0.6785714285714286, - 0.9642857142857143, 0.6785714285714286, - 0.9642857142857143, 0.6785714285714286, - 0.03571428571428571, 0.75, - 0.03571428571428571, 0.75, - 0.10714285714285714, 0.75, - 0.10714285714285714, 0.75, - 0.17857142857142858, 0.75, - 0.17857142857142858, 0.75, - 0.25, 0.75, - 0.25, 0.75, - 0.32142857142857145, 0.75, - 0.32142857142857145, 0.75, - 0.39285714285714285, 0.75, - 0.39285714285714285, 0.75, - 0.4642857142857143, 0.75, - 0.4642857142857143, 0.75, - 0.5357142857142857, 0.75, - 0.5357142857142857, 0.75, - 0.6071428571428571, 0.75, - 0.6071428571428571, 0.75, - 0.6785714285714286, 0.75, - 0.6785714285714286, 0.75, - 0.75, 0.75, - 0.75, 0.75, - 0.8214285714285714, 0.75, - 0.8214285714285714, 0.75, - 0.8928571428571429, 0.75, - 0.8928571428571429, 0.75, - 0.9642857142857143, 0.75, - 0.9642857142857143, 0.75, - 0.03571428571428571, 0.8214285714285714, - 0.03571428571428571, 0.8214285714285714, - 0.10714285714285714, 0.8214285714285714, - 0.10714285714285714, 0.8214285714285714, - 0.17857142857142858, 0.8214285714285714, - 0.17857142857142858, 0.8214285714285714, - 0.25, 0.8214285714285714, - 0.25, 0.8214285714285714, - 0.32142857142857145, 0.8214285714285714, - 0.32142857142857145, 0.8214285714285714, - 0.39285714285714285, 0.8214285714285714, - 0.39285714285714285, 0.8214285714285714, - 0.4642857142857143, 0.8214285714285714, - 0.4642857142857143, 0.8214285714285714, - 0.5357142857142857, 0.8214285714285714, - 0.5357142857142857, 0.8214285714285714, - 0.6071428571428571, 0.8214285714285714, - 0.6071428571428571, 0.8214285714285714, - 0.6785714285714286, 0.8214285714285714, - 0.6785714285714286, 0.8214285714285714, - 0.75, 0.8214285714285714, - 0.75, 0.8214285714285714, - 0.8214285714285714, 
0.8214285714285714, - 0.8214285714285714, 0.8214285714285714, - 0.8928571428571429, 0.8214285714285714, - 0.8928571428571429, 0.8214285714285714, - 0.9642857142857143, 0.8214285714285714, - 0.9642857142857143, 0.8214285714285714, - 0.03571428571428571, 0.8928571428571429, - 0.03571428571428571, 0.8928571428571429, - 0.10714285714285714, 0.8928571428571429, - 0.10714285714285714, 0.8928571428571429, - 0.17857142857142858, 0.8928571428571429, - 0.17857142857142858, 0.8928571428571429, - 0.25, 0.8928571428571429, - 0.25, 0.8928571428571429, - 0.32142857142857145, 0.8928571428571429, - 0.32142857142857145, 0.8928571428571429, - 0.39285714285714285, 0.8928571428571429, - 0.39285714285714285, 0.8928571428571429, - 0.4642857142857143, 0.8928571428571429, - 0.4642857142857143, 0.8928571428571429, - 0.5357142857142857, 0.8928571428571429, - 0.5357142857142857, 0.8928571428571429, - 0.6071428571428571, 0.8928571428571429, - 0.6071428571428571, 0.8928571428571429, - 0.6785714285714286, 0.8928571428571429, - 0.6785714285714286, 0.8928571428571429, - 0.75, 0.8928571428571429, - 0.75, 0.8928571428571429, - 0.8214285714285714, 0.8928571428571429, - 0.8214285714285714, 0.8928571428571429, - 0.8928571428571429, 0.8928571428571429, - 0.8928571428571429, 0.8928571428571429, - 0.9642857142857143, 0.8928571428571429, - 0.9642857142857143, 0.8928571428571429, - 0.03571428571428571, 0.9642857142857143, - 0.03571428571428571, 0.9642857142857143, - 0.10714285714285714, 0.9642857142857143, - 0.10714285714285714, 0.9642857142857143, - 0.17857142857142858, 0.9642857142857143, - 0.17857142857142858, 0.9642857142857143, - 0.25, 0.9642857142857143, - 0.25, 0.9642857142857143, - 0.32142857142857145, 0.9642857142857143, - 0.32142857142857145, 0.9642857142857143, - 0.39285714285714285, 0.9642857142857143, - 0.39285714285714285, 0.9642857142857143, - 0.4642857142857143, 0.9642857142857143, - 0.4642857142857143, 0.9642857142857143, - 0.5357142857142857, 0.9642857142857143, - 0.5357142857142857, 
0.9642857142857143, - 0.6071428571428571, 0.9642857142857143, - 0.6071428571428571, 0.9642857142857143, - 0.6785714285714286, 0.9642857142857143, - 0.6785714285714286, 0.9642857142857143, - 0.75, 0.9642857142857143, - 0.75, 0.9642857142857143, - 0.8214285714285714, 0.9642857142857143, - 0.8214285714285714, 0.9642857142857143, - 0.8928571428571429, 0.9642857142857143, - 0.8928571428571429, 0.9642857142857143, - 0.9642857142857143, 0.9642857142857143, - 0.9642857142857143, 0.9642857142857143, - 0.07142857142857142, 0.07142857142857142, - 0.07142857142857142, 0.07142857142857142, - 0.07142857142857142, 0.07142857142857142, - 0.07142857142857142, 0.07142857142857142, - 0.07142857142857142, 0.07142857142857142, - 0.07142857142857142, 0.07142857142857142, - 0.21428571428571427, 0.07142857142857142, - 0.21428571428571427, 0.07142857142857142, - 0.21428571428571427, 0.07142857142857142, - 0.21428571428571427, 0.07142857142857142, - 0.21428571428571427, 0.07142857142857142, - 0.21428571428571427, 0.07142857142857142, - 0.35714285714285715, 0.07142857142857142, - 0.35714285714285715, 0.07142857142857142, - 0.35714285714285715, 0.07142857142857142, - 0.35714285714285715, 0.07142857142857142, - 0.35714285714285715, 0.07142857142857142, - 0.35714285714285715, 0.07142857142857142, - 0.5, 0.07142857142857142, - 0.5, 0.07142857142857142, - 0.5, 0.07142857142857142, - 0.5, 0.07142857142857142, - 0.5, 0.07142857142857142, - 0.5, 0.07142857142857142, - 0.6428571428571429, 0.07142857142857142, - 0.6428571428571429, 0.07142857142857142, - 0.6428571428571429, 0.07142857142857142, - 0.6428571428571429, 0.07142857142857142, - 0.6428571428571429, 0.07142857142857142, - 0.6428571428571429, 0.07142857142857142, - 0.7857142857142857, 0.07142857142857142, - 0.7857142857142857, 0.07142857142857142, - 0.7857142857142857, 0.07142857142857142, - 0.7857142857142857, 0.07142857142857142, - 0.7857142857142857, 0.07142857142857142, - 0.7857142857142857, 0.07142857142857142, - 0.9285714285714286, 
0.07142857142857142, - 0.9285714285714286, 0.07142857142857142, - 0.9285714285714286, 0.07142857142857142, - 0.9285714285714286, 0.07142857142857142, - 0.9285714285714286, 0.07142857142857142, - 0.9285714285714286, 0.07142857142857142, - 0.07142857142857142, 0.21428571428571427, - 0.07142857142857142, 0.21428571428571427, - 0.07142857142857142, 0.21428571428571427, - 0.07142857142857142, 0.21428571428571427, - 0.07142857142857142, 0.21428571428571427, - 0.07142857142857142, 0.21428571428571427, - 0.21428571428571427, 0.21428571428571427, - 0.21428571428571427, 0.21428571428571427, - 0.21428571428571427, 0.21428571428571427, - 0.21428571428571427, 0.21428571428571427, - 0.21428571428571427, 0.21428571428571427, - 0.21428571428571427, 0.21428571428571427, - 0.35714285714285715, 0.21428571428571427, - 0.35714285714285715, 0.21428571428571427, - 0.35714285714285715, 0.21428571428571427, - 0.35714285714285715, 0.21428571428571427, - 0.35714285714285715, 0.21428571428571427, - 0.35714285714285715, 0.21428571428571427, - 0.5, 0.21428571428571427, - 0.5, 0.21428571428571427, - 0.5, 0.21428571428571427, - 0.5, 0.21428571428571427, - 0.5, 0.21428571428571427, - 0.5, 0.21428571428571427, - 0.6428571428571429, 0.21428571428571427, - 0.6428571428571429, 0.21428571428571427, - 0.6428571428571429, 0.21428571428571427, - 0.6428571428571429, 0.21428571428571427, - 0.6428571428571429, 0.21428571428571427, - 0.6428571428571429, 0.21428571428571427, - 0.7857142857142857, 0.21428571428571427, - 0.7857142857142857, 0.21428571428571427, - 0.7857142857142857, 0.21428571428571427, - 0.7857142857142857, 0.21428571428571427, - 0.7857142857142857, 0.21428571428571427, - 0.7857142857142857, 0.21428571428571427, - 0.9285714285714286, 0.21428571428571427, - 0.9285714285714286, 0.21428571428571427, - 0.9285714285714286, 0.21428571428571427, - 0.9285714285714286, 0.21428571428571427, - 0.9285714285714286, 0.21428571428571427, - 0.9285714285714286, 0.21428571428571427, - 0.07142857142857142, 
0.35714285714285715, - 0.07142857142857142, 0.35714285714285715, - 0.07142857142857142, 0.35714285714285715, - 0.07142857142857142, 0.35714285714285715, - 0.07142857142857142, 0.35714285714285715, - 0.07142857142857142, 0.35714285714285715, - 0.21428571428571427, 0.35714285714285715, - 0.21428571428571427, 0.35714285714285715, - 0.21428571428571427, 0.35714285714285715, - 0.21428571428571427, 0.35714285714285715, - 0.21428571428571427, 0.35714285714285715, - 0.21428571428571427, 0.35714285714285715, - 0.35714285714285715, 0.35714285714285715, - 0.35714285714285715, 0.35714285714285715, - 0.35714285714285715, 0.35714285714285715, - 0.35714285714285715, 0.35714285714285715, - 0.35714285714285715, 0.35714285714285715, - 0.35714285714285715, 0.35714285714285715, - 0.5, 0.35714285714285715, - 0.5, 0.35714285714285715, - 0.5, 0.35714285714285715, - 0.5, 0.35714285714285715, - 0.5, 0.35714285714285715, - 0.5, 0.35714285714285715, - 0.6428571428571429, 0.35714285714285715, - 0.6428571428571429, 0.35714285714285715, - 0.6428571428571429, 0.35714285714285715, - 0.6428571428571429, 0.35714285714285715, - 0.6428571428571429, 0.35714285714285715, - 0.6428571428571429, 0.35714285714285715, - 0.7857142857142857, 0.35714285714285715, - 0.7857142857142857, 0.35714285714285715, - 0.7857142857142857, 0.35714285714285715, - 0.7857142857142857, 0.35714285714285715, - 0.7857142857142857, 0.35714285714285715, - 0.7857142857142857, 0.35714285714285715, - 0.9285714285714286, 0.35714285714285715, - 0.9285714285714286, 0.35714285714285715, - 0.9285714285714286, 0.35714285714285715, - 0.9285714285714286, 0.35714285714285715, - 0.9285714285714286, 0.35714285714285715, - 0.9285714285714286, 0.35714285714285715, - 0.07142857142857142, 0.5, - 0.07142857142857142, 0.5, - 0.07142857142857142, 0.5, - 0.07142857142857142, 0.5, - 0.07142857142857142, 0.5, - 0.07142857142857142, 0.5, - 0.21428571428571427, 0.5, - 0.21428571428571427, 0.5, - 0.21428571428571427, 0.5, - 0.21428571428571427, 0.5, - 
0.21428571428571427, 0.5, - 0.21428571428571427, 0.5, - 0.35714285714285715, 0.5, - 0.35714285714285715, 0.5, - 0.35714285714285715, 0.5, - 0.35714285714285715, 0.5, - 0.35714285714285715, 0.5, - 0.35714285714285715, 0.5, - 0.5, 0.5, - 0.5, 0.5, - 0.5, 0.5, - 0.5, 0.5, - 0.5, 0.5, - 0.5, 0.5, - 0.6428571428571429, 0.5, - 0.6428571428571429, 0.5, - 0.6428571428571429, 0.5, - 0.6428571428571429, 0.5, - 0.6428571428571429, 0.5, - 0.6428571428571429, 0.5, - 0.7857142857142857, 0.5, - 0.7857142857142857, 0.5, - 0.7857142857142857, 0.5, - 0.7857142857142857, 0.5, - 0.7857142857142857, 0.5, - 0.7857142857142857, 0.5, - 0.9285714285714286, 0.5, - 0.9285714285714286, 0.5, - 0.9285714285714286, 0.5, - 0.9285714285714286, 0.5, - 0.9285714285714286, 0.5, - 0.9285714285714286, 0.5, - 0.07142857142857142, 0.6428571428571429, - 0.07142857142857142, 0.6428571428571429, - 0.07142857142857142, 0.6428571428571429, - 0.07142857142857142, 0.6428571428571429, - 0.07142857142857142, 0.6428571428571429, - 0.07142857142857142, 0.6428571428571429, - 0.21428571428571427, 0.6428571428571429, - 0.21428571428571427, 0.6428571428571429, - 0.21428571428571427, 0.6428571428571429, - 0.21428571428571427, 0.6428571428571429, - 0.21428571428571427, 0.6428571428571429, - 0.21428571428571427, 0.6428571428571429, - 0.35714285714285715, 0.6428571428571429, - 0.35714285714285715, 0.6428571428571429, - 0.35714285714285715, 0.6428571428571429, - 0.35714285714285715, 0.6428571428571429, - 0.35714285714285715, 0.6428571428571429, - 0.35714285714285715, 0.6428571428571429, - 0.5, 0.6428571428571429, - 0.5, 0.6428571428571429, - 0.5, 0.6428571428571429, - 0.5, 0.6428571428571429, - 0.5, 0.6428571428571429, - 0.5, 0.6428571428571429, - 0.6428571428571429, 0.6428571428571429, - 0.6428571428571429, 0.6428571428571429, - 0.6428571428571429, 0.6428571428571429, - 0.6428571428571429, 0.6428571428571429, - 0.6428571428571429, 0.6428571428571429, - 0.6428571428571429, 0.6428571428571429, - 0.7857142857142857, 
0.6428571428571429, - 0.7857142857142857, 0.6428571428571429, - 0.7857142857142857, 0.6428571428571429, - 0.7857142857142857, 0.6428571428571429, - 0.7857142857142857, 0.6428571428571429, - 0.7857142857142857, 0.6428571428571429, - 0.9285714285714286, 0.6428571428571429, - 0.9285714285714286, 0.6428571428571429, - 0.9285714285714286, 0.6428571428571429, - 0.9285714285714286, 0.6428571428571429, - 0.9285714285714286, 0.6428571428571429, - 0.9285714285714286, 0.6428571428571429, - 0.07142857142857142, 0.7857142857142857, - 0.07142857142857142, 0.7857142857142857, - 0.07142857142857142, 0.7857142857142857, - 0.07142857142857142, 0.7857142857142857, - 0.07142857142857142, 0.7857142857142857, - 0.07142857142857142, 0.7857142857142857, - 0.21428571428571427, 0.7857142857142857, - 0.21428571428571427, 0.7857142857142857, - 0.21428571428571427, 0.7857142857142857, - 0.21428571428571427, 0.7857142857142857, - 0.21428571428571427, 0.7857142857142857, - 0.21428571428571427, 0.7857142857142857, - 0.35714285714285715, 0.7857142857142857, - 0.35714285714285715, 0.7857142857142857, - 0.35714285714285715, 0.7857142857142857, - 0.35714285714285715, 0.7857142857142857, - 0.35714285714285715, 0.7857142857142857, - 0.35714285714285715, 0.7857142857142857, - 0.5, 0.7857142857142857, - 0.5, 0.7857142857142857, - 0.5, 0.7857142857142857, - 0.5, 0.7857142857142857, - 0.5, 0.7857142857142857, - 0.5, 0.7857142857142857, - 0.6428571428571429, 0.7857142857142857, - 0.6428571428571429, 0.7857142857142857, - 0.6428571428571429, 0.7857142857142857, - 0.6428571428571429, 0.7857142857142857, - 0.6428571428571429, 0.7857142857142857, - 0.6428571428571429, 0.7857142857142857, - 0.7857142857142857, 0.7857142857142857, - 0.7857142857142857, 0.7857142857142857, - 0.7857142857142857, 0.7857142857142857, - 0.7857142857142857, 0.7857142857142857, - 0.7857142857142857, 0.7857142857142857, - 0.7857142857142857, 0.7857142857142857, - 0.9285714285714286, 0.7857142857142857, - 0.9285714285714286, 
0.7857142857142857, - 0.9285714285714286, 0.7857142857142857, - 0.9285714285714286, 0.7857142857142857, - 0.9285714285714286, 0.7857142857142857, - 0.9285714285714286, 0.7857142857142857, - 0.07142857142857142, 0.9285714285714286, - 0.07142857142857142, 0.9285714285714286, - 0.07142857142857142, 0.9285714285714286, - 0.07142857142857142, 0.9285714285714286, - 0.07142857142857142, 0.9285714285714286, - 0.07142857142857142, 0.9285714285714286, - 0.21428571428571427, 0.9285714285714286, - 0.21428571428571427, 0.9285714285714286, - 0.21428571428571427, 0.9285714285714286, - 0.21428571428571427, 0.9285714285714286, - 0.21428571428571427, 0.9285714285714286, - 0.21428571428571427, 0.9285714285714286, - 0.35714285714285715, 0.9285714285714286, - 0.35714285714285715, 0.9285714285714286, - 0.35714285714285715, 0.9285714285714286, - 0.35714285714285715, 0.9285714285714286, - 0.35714285714285715, 0.9285714285714286, - 0.35714285714285715, 0.9285714285714286, - 0.5, 0.9285714285714286, - 0.5, 0.9285714285714286, - 0.5, 0.9285714285714286, - 0.5, 0.9285714285714286, - 0.5, 0.9285714285714286, - 0.5, 0.9285714285714286, - 0.6428571428571429, 0.9285714285714286, - 0.6428571428571429, 0.9285714285714286, - 0.6428571428571429, 0.9285714285714286, - 0.6428571428571429, 0.9285714285714286, - 0.6428571428571429, 0.9285714285714286, - 0.6428571428571429, 0.9285714285714286, - 0.7857142857142857, 0.9285714285714286, - 0.7857142857142857, 0.9285714285714286, - 0.7857142857142857, 0.9285714285714286, - 0.7857142857142857, 0.9285714285714286, - 0.7857142857142857, 0.9285714285714286, - 0.7857142857142857, 0.9285714285714286, - 0.9285714285714286, 0.9285714285714286, - 0.9285714285714286, 0.9285714285714286, - 0.9285714285714286, 0.9285714285714286, - 0.9285714285714286, 0.9285714285714286, - 0.9285714285714286, 0.9285714285714286, - 0.9285714285714286, 0.9285714285714286); - return anchor; -} diff --git a/models/person_detection_mediapipe/demo.py b/models/person_detection_mediapipe/demo.py 
deleted file mode 100644 index 43bb5855..00000000 --- a/models/person_detection_mediapipe/demo.py +++ /dev/null @@ -1,140 +0,0 @@ -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from mp_persondet import MPPersonDet - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser(description='Person Detector from MediaPipe') -parser.add_argument('--input', '-i', type=str, - help='Usage: Set path to the input image. Omit for using default camera.') -parser.add_argument('--model', '-m', type=str, default='./person_detection_mediapipe_2023mar.onnx', - help='Usage: Set model path, defaults to person_detection_mediapipe_2023mar.onnx') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--score_threshold', type=float, default=0.5, - help='Usage: Set the minimum needed confidence for the model to identify a person, defaults to 0.5. Smaller values may result in faster detection, but will limit accuracy. 
Filter out persons of confidence < conf_threshold.') -parser.add_argument('--nms_threshold', type=float, default=0.3, - help='Usage: Suppress bounding boxes of iou >= nms_threshold. Default = 0.3.') -parser.add_argument('--top_k', type=int, default=5000, - help='Usage: Keep top_k bounding boxes before NMS.') -parser.add_argument('--save', '-s', action='store_true', - help='Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.') -parser.add_argument('--vis', '-v', action='store_true', - help='Usage: Specify to open a new window to show results. Invalid in case of camera input.') -args = parser.parse_args() - -def visualize(image, results, fps=None): - output = image.copy() - - if fps is not None: - cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) - - for idx, person in enumerate(results): - score = person[-1] - person_landmarks = person[4:-1].reshape(4, 2).astype(np.int32) - - hip_point = person_landmarks[0] - full_body = person_landmarks[1] - shoulder_point = person_landmarks[2] - upper_body = person_landmarks[3] - - # draw circle for full body - radius = np.linalg.norm(hip_point - full_body).astype(np.int32) - cv.circle(output, hip_point, radius, (255, 0, 0), 2) - - # draw circle for upper body - radius = np.linalg.norm(shoulder_point - upper_body).astype(np.int32) - cv.circle(output, shoulder_point, radius, (0, 255, 255), 2) - - # draw points for each keypoint - for p in person_landmarks: - cv.circle(output, p, 2, (0, 0, 255), 2) - - # put score - cv.putText(output, 'Score: {:.4f}'.format(score), (0, output.shape[0] - 48), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 255, 0)) - - cv.putText(output, 'Yellow: upper body circle', (0, output.shape[0] - 36), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 255, 255)) - cv.putText(output, 'Blue: full body circle', (0, output.shape[0] - 24), cv.FONT_HERSHEY_DUPLEX, 0.5, (255, 0, 0)) - cv.putText(output, 'Red: keypoint', (0, output.shape[0] - 
12), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255)) - - return output - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - - # Instantiate MPPersonDet - model = MPPersonDet(modelPath=args.model, - nmsThreshold=args.nms_threshold, - scoreThreshold=args.score_threshold, - topK=args.top_k, - backendId=backend_id, - targetId=target_id) - - # If input is an image - if args.input is not None: - image = cv.imread(args.input) - - # Inference - results = model.infer(image) - if len(results) == 0: - print('Person not detected') - - # Draw results on the input image - image = visualize(image, results) - - # Save results if save is true - if args.save: - print('Resutls saved to result.jpg\n') - cv.imwrite('result.jpg', image) - - # Visualize results in a new window - if args.vis: - cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) - cv.imshow(args.input, image) - cv.waitKey(0) - else: # Omit input to call default camera - deviceId = 0 - cap = cv.VideoCapture(deviceId) - - tm = cv.TickMeter() - while cv.waitKey(1) < 0: - hasFrame, frame = cap.read() - if not hasFrame: - print('No frames grabbed!') - break - - # Inference - tm.start() - results = model.infer(frame) - tm.stop() - - # Draw results on the input image - frame = visualize(frame, results, fps=tm.getFPS()) - - # Visualize results in a new Window - cv.imshow('MPPersonDet Demo', frame) - - tm.reset() - diff --git a/models/person_detection_mediapipe/example_outputs/mppersondet_demo.webp b/models/person_detection_mediapipe/example_outputs/mppersondet_demo.webp deleted file mode 100644 index 7cc4ec96..00000000 --- a/models/person_detection_mediapipe/example_outputs/mppersondet_demo.webp +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5c2aeb6b5f2afa91063c737f983cf7e46e8096decd8476cc7817c0f8523d22e1 -size 708710 diff --git a/models/person_detection_mediapipe/mp_persondet.py 
b/models/person_detection_mediapipe/mp_persondet.py deleted file mode 100644 index 39d73720..00000000 --- a/models/person_detection_mediapipe/mp_persondet.py +++ /dev/null @@ -1,2366 +0,0 @@ -import numpy as np -import cv2 as cv - -class MPPersonDet: - def __init__(self, modelPath, nmsThreshold=0.3, scoreThreshold=0.5, topK=5000, backendId=0, targetId=0): - self.model_path = modelPath - self.nms_threshold = nmsThreshold - self.score_threshold = scoreThreshold - self.topK = topK - self.backend_id = backendId - self.target_id = targetId - - self.input_size = np.array([224, 224]) # wh - - self.model = cv.dnn.readNet(self.model_path) - self.model.setPreferableBackend(self.backend_id) - self.model.setPreferableTarget(self.target_id) - - self.anchors = self._load_anchors() - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self.backend_id = backendId - self.target_id = targetId - self.model.setPreferableBackend(self.backend_id) - self.model.setPreferableTarget(self.target_id) - - def _preprocess(self, image): - pad_bias = np.array([0., 0.]) # left, top - image = cv.cvtColor(image, cv.COLOR_BGR2RGB) - image = image.astype(np.float32) / 255.0 # norm - image = (image - 0.5) * 2 # [0, 1] -> [-1, 1] - ratio = min(self.input_size / image.shape[:2]) - if image.shape[0] != self.input_size[0] or image.shape[1] != self.input_size[1]: - # keep aspect ratio when resize - ratio_size = (np.array(image.shape[:2]) * ratio).astype(np.int32) - image = cv.resize(image, (ratio_size[1], ratio_size[0])) - pad_h = self.input_size[0] - ratio_size[0] - pad_w = self.input_size[1] - ratio_size[1] - pad_bias[0] = left = pad_w // 2 - pad_bias[1] = top = pad_h // 2 - right = pad_w - left - bottom = pad_h - top - image = cv.copyMakeBorder(image, top, bottom, left, right, cv.BORDER_CONSTANT, None, (0, 0, 0)) - - blob = np.transpose(image, [2, 0, 1]) - pad_bias = (pad_bias / ratio).astype(np.int32) - return blob[np.newaxis, :, :, 
:], pad_bias # chw -> nchw - - def infer(self, image): - h, w, _ = image.shape - - # Preprocess - input_blob, pad_bias = self._preprocess(image) - - # Forward - self.model.setInput(input_blob) - output_blob = self.model.forward(self.model.getUnconnectedOutLayersNames()) - - # Postprocess - results = self._postprocess(output_blob, np.array([w, h]), pad_bias) - - return results - - def _postprocess(self, output_blob, original_shape, pad_bias): - score = output_blob[1][0, :, 0] - box_delta = output_blob[0][0, :, 0:4] - landmark_delta = output_blob[0][0, :, 4:] - scale = max(original_shape) - - # get scores - score = score.astype(np.float64) - score = np.clip(score, -100, 100) - score = 1 / (1 + np.exp(-score)) - - # get boxes - cxy_delta = box_delta[:, :2] / self.input_size - wh_delta = box_delta[:, 2:] / self.input_size - xy1 = (cxy_delta - wh_delta / 2 + self.anchors) * scale - xy2 = (cxy_delta + wh_delta / 2 + self.anchors) * scale - boxes = np.concatenate([xy1, xy2], axis=1) - boxes -= [pad_bias[0], pad_bias[1], pad_bias[0], pad_bias[1]] - # NMS - keep_idx = cv.dnn.NMSBoxes(boxes, score, self.score_threshold, self.nms_threshold, top_k=self.topK) - if len(keep_idx) == 0: - return np.empty(shape=(0, 13)) - selected_score = score[keep_idx] - selected_box = boxes[keep_idx] - - # get landmarks - selected_landmarks = landmark_delta[keep_idx].reshape(-1, 4, 2) - selected_landmarks = selected_landmarks / self.input_size - selected_anchors = self.anchors[keep_idx] - for idx, landmark in enumerate(selected_landmarks): - landmark += selected_anchors[idx] - selected_landmarks *= scale - selected_landmarks -= pad_bias - - # TODO: still don't know the meaning of face bbox - # each landmark: hip center point; full body point; shoulder center point; upper body point; - # - # [ - # [face_bbox, landmarks, score] - # ... 
- # [face_bbox, landmarks, score] - # ] - return np.c_[selected_box.reshape(-1, 4), selected_landmarks.reshape(-1, 8), selected_score.reshape(-1, 1)] - - def _load_anchors(self): - return np.array([[0.017857142857142856, 0.017857142857142856], - [0.017857142857142856, 0.017857142857142856], - [0.05357142857142857, 0.017857142857142856], - [0.05357142857142857, 0.017857142857142856], - [0.08928571428571429, 0.017857142857142856], - [0.08928571428571429, 0.017857142857142856], - [0.125, 0.017857142857142856], - [0.125, 0.017857142857142856], - [0.16071428571428573, 0.017857142857142856], - [0.16071428571428573, 0.017857142857142856], - [0.19642857142857142, 0.017857142857142856], - [0.19642857142857142, 0.017857142857142856], - [0.23214285714285715, 0.017857142857142856], - [0.23214285714285715, 0.017857142857142856], - [0.26785714285714285, 0.017857142857142856], - [0.26785714285714285, 0.017857142857142856], - [0.30357142857142855, 0.017857142857142856], - [0.30357142857142855, 0.017857142857142856], - [0.3392857142857143, 0.017857142857142856], - [0.3392857142857143, 0.017857142857142856], - [0.375, 0.017857142857142856], - [0.375, 0.017857142857142856], - [0.4107142857142857, 0.017857142857142856], - [0.4107142857142857, 0.017857142857142856], - [0.44642857142857145, 0.017857142857142856], - [0.44642857142857145, 0.017857142857142856], - [0.48214285714285715, 0.017857142857142856], - [0.48214285714285715, 0.017857142857142856], - [0.5178571428571429, 0.017857142857142856], - [0.5178571428571429, 0.017857142857142856], - [0.5535714285714286, 0.017857142857142856], - [0.5535714285714286, 0.017857142857142856], - [0.5892857142857143, 0.017857142857142856], - [0.5892857142857143, 0.017857142857142856], - [0.625, 0.017857142857142856], - [0.625, 0.017857142857142856], - [0.6607142857142857, 0.017857142857142856], - [0.6607142857142857, 0.017857142857142856], - [0.6964285714285714, 0.017857142857142856], - [0.6964285714285714, 0.017857142857142856], - 
[0.7321428571428571, 0.017857142857142856], - [0.7321428571428571, 0.017857142857142856], - [0.7678571428571429, 0.017857142857142856], - [0.7678571428571429, 0.017857142857142856], - [0.8035714285714286, 0.017857142857142856], - [0.8035714285714286, 0.017857142857142856], - [0.8392857142857143, 0.017857142857142856], - [0.8392857142857143, 0.017857142857142856], - [0.875, 0.017857142857142856], - [0.875, 0.017857142857142856], - [0.9107142857142857, 0.017857142857142856], - [0.9107142857142857, 0.017857142857142856], - [0.9464285714285714, 0.017857142857142856], - [0.9464285714285714, 0.017857142857142856], - [0.9821428571428571, 0.017857142857142856], - [0.9821428571428571, 0.017857142857142856], - [0.017857142857142856, 0.05357142857142857], - [0.017857142857142856, 0.05357142857142857], - [0.05357142857142857, 0.05357142857142857], - [0.05357142857142857, 0.05357142857142857], - [0.08928571428571429, 0.05357142857142857], - [0.08928571428571429, 0.05357142857142857], - [0.125, 0.05357142857142857], - [0.125, 0.05357142857142857], - [0.16071428571428573, 0.05357142857142857], - [0.16071428571428573, 0.05357142857142857], - [0.19642857142857142, 0.05357142857142857], - [0.19642857142857142, 0.05357142857142857], - [0.23214285714285715, 0.05357142857142857], - [0.23214285714285715, 0.05357142857142857], - [0.26785714285714285, 0.05357142857142857], - [0.26785714285714285, 0.05357142857142857], - [0.30357142857142855, 0.05357142857142857], - [0.30357142857142855, 0.05357142857142857], - [0.3392857142857143, 0.05357142857142857], - [0.3392857142857143, 0.05357142857142857], - [0.375, 0.05357142857142857], - [0.375, 0.05357142857142857], - [0.4107142857142857, 0.05357142857142857], - [0.4107142857142857, 0.05357142857142857], - [0.44642857142857145, 0.05357142857142857], - [0.44642857142857145, 0.05357142857142857], - [0.48214285714285715, 0.05357142857142857], - [0.48214285714285715, 0.05357142857142857], - [0.5178571428571429, 0.05357142857142857], - 
[0.5178571428571429, 0.05357142857142857], - [0.5535714285714286, 0.05357142857142857], - [0.5535714285714286, 0.05357142857142857], - [0.5892857142857143, 0.05357142857142857], - [0.5892857142857143, 0.05357142857142857], - [0.625, 0.05357142857142857], - [0.625, 0.05357142857142857], - [0.6607142857142857, 0.05357142857142857], - [0.6607142857142857, 0.05357142857142857], - [0.6964285714285714, 0.05357142857142857], - [0.6964285714285714, 0.05357142857142857], - [0.7321428571428571, 0.05357142857142857], - [0.7321428571428571, 0.05357142857142857], - [0.7678571428571429, 0.05357142857142857], - [0.7678571428571429, 0.05357142857142857], - [0.8035714285714286, 0.05357142857142857], - [0.8035714285714286, 0.05357142857142857], - [0.8392857142857143, 0.05357142857142857], - [0.8392857142857143, 0.05357142857142857], - [0.875, 0.05357142857142857], - [0.875, 0.05357142857142857], - [0.9107142857142857, 0.05357142857142857], - [0.9107142857142857, 0.05357142857142857], - [0.9464285714285714, 0.05357142857142857], - [0.9464285714285714, 0.05357142857142857], - [0.9821428571428571, 0.05357142857142857], - [0.9821428571428571, 0.05357142857142857], - [0.017857142857142856, 0.08928571428571429], - [0.017857142857142856, 0.08928571428571429], - [0.05357142857142857, 0.08928571428571429], - [0.05357142857142857, 0.08928571428571429], - [0.08928571428571429, 0.08928571428571429], - [0.08928571428571429, 0.08928571428571429], - [0.125, 0.08928571428571429], - [0.125, 0.08928571428571429], - [0.16071428571428573, 0.08928571428571429], - [0.16071428571428573, 0.08928571428571429], - [0.19642857142857142, 0.08928571428571429], - [0.19642857142857142, 0.08928571428571429], - [0.23214285714285715, 0.08928571428571429], - [0.23214285714285715, 0.08928571428571429], - [0.26785714285714285, 0.08928571428571429], - [0.26785714285714285, 0.08928571428571429], - [0.30357142857142855, 0.08928571428571429], - [0.30357142857142855, 0.08928571428571429], - [0.3392857142857143, 
0.08928571428571429], - [0.3392857142857143, 0.08928571428571429], - [0.375, 0.08928571428571429], - [0.375, 0.08928571428571429], - [0.4107142857142857, 0.08928571428571429], - [0.4107142857142857, 0.08928571428571429], - [0.44642857142857145, 0.08928571428571429], - [0.44642857142857145, 0.08928571428571429], - [0.48214285714285715, 0.08928571428571429], - [0.48214285714285715, 0.08928571428571429], - [0.5178571428571429, 0.08928571428571429], - [0.5178571428571429, 0.08928571428571429], - [0.5535714285714286, 0.08928571428571429], - [0.5535714285714286, 0.08928571428571429], - [0.5892857142857143, 0.08928571428571429], - [0.5892857142857143, 0.08928571428571429], - [0.625, 0.08928571428571429], - [0.625, 0.08928571428571429], - [0.6607142857142857, 0.08928571428571429], - [0.6607142857142857, 0.08928571428571429], - [0.6964285714285714, 0.08928571428571429], - [0.6964285714285714, 0.08928571428571429], - [0.7321428571428571, 0.08928571428571429], - [0.7321428571428571, 0.08928571428571429], - [0.7678571428571429, 0.08928571428571429], - [0.7678571428571429, 0.08928571428571429], - [0.8035714285714286, 0.08928571428571429], - [0.8035714285714286, 0.08928571428571429], - [0.8392857142857143, 0.08928571428571429], - [0.8392857142857143, 0.08928571428571429], - [0.875, 0.08928571428571429], - [0.875, 0.08928571428571429], - [0.9107142857142857, 0.08928571428571429], - [0.9107142857142857, 0.08928571428571429], - [0.9464285714285714, 0.08928571428571429], - [0.9464285714285714, 0.08928571428571429], - [0.9821428571428571, 0.08928571428571429], - [0.9821428571428571, 0.08928571428571429], - [0.017857142857142856, 0.125], - [0.017857142857142856, 0.125], - [0.05357142857142857, 0.125], - [0.05357142857142857, 0.125], - [0.08928571428571429, 0.125], - [0.08928571428571429, 0.125], - [0.125, 0.125], - [0.125, 0.125], - [0.16071428571428573, 0.125], - [0.16071428571428573, 0.125], - [0.19642857142857142, 0.125], - [0.19642857142857142, 0.125], - [0.23214285714285715, 
0.125], - [0.23214285714285715, 0.125], - [0.26785714285714285, 0.125], - [0.26785714285714285, 0.125], - [0.30357142857142855, 0.125], - [0.30357142857142855, 0.125], - [0.3392857142857143, 0.125], - [0.3392857142857143, 0.125], - [0.375, 0.125], - [0.375, 0.125], - [0.4107142857142857, 0.125], - [0.4107142857142857, 0.125], - [0.44642857142857145, 0.125], - [0.44642857142857145, 0.125], - [0.48214285714285715, 0.125], - [0.48214285714285715, 0.125], - [0.5178571428571429, 0.125], - [0.5178571428571429, 0.125], - [0.5535714285714286, 0.125], - [0.5535714285714286, 0.125], - [0.5892857142857143, 0.125], - [0.5892857142857143, 0.125], - [0.625, 0.125], - [0.625, 0.125], - [0.6607142857142857, 0.125], - [0.6607142857142857, 0.125], - [0.6964285714285714, 0.125], - [0.6964285714285714, 0.125], - [0.7321428571428571, 0.125], - [0.7321428571428571, 0.125], - [0.7678571428571429, 0.125], - [0.7678571428571429, 0.125], - [0.8035714285714286, 0.125], - [0.8035714285714286, 0.125], - [0.8392857142857143, 0.125], - [0.8392857142857143, 0.125], - [0.875, 0.125], - [0.875, 0.125], - [0.9107142857142857, 0.125], - [0.9107142857142857, 0.125], - [0.9464285714285714, 0.125], - [0.9464285714285714, 0.125], - [0.9821428571428571, 0.125], - [0.9821428571428571, 0.125], - [0.017857142857142856, 0.16071428571428573], - [0.017857142857142856, 0.16071428571428573], - [0.05357142857142857, 0.16071428571428573], - [0.05357142857142857, 0.16071428571428573], - [0.08928571428571429, 0.16071428571428573], - [0.08928571428571429, 0.16071428571428573], - [0.125, 0.16071428571428573], - [0.125, 0.16071428571428573], - [0.16071428571428573, 0.16071428571428573], - [0.16071428571428573, 0.16071428571428573], - [0.19642857142857142, 0.16071428571428573], - [0.19642857142857142, 0.16071428571428573], - [0.23214285714285715, 0.16071428571428573], - [0.23214285714285715, 0.16071428571428573], - [0.26785714285714285, 0.16071428571428573], - [0.26785714285714285, 0.16071428571428573], - 
[0.30357142857142855, 0.16071428571428573], - [0.30357142857142855, 0.16071428571428573], - [0.3392857142857143, 0.16071428571428573], - [0.3392857142857143, 0.16071428571428573], - [0.375, 0.16071428571428573], - [0.375, 0.16071428571428573], - [0.4107142857142857, 0.16071428571428573], - [0.4107142857142857, 0.16071428571428573], - [0.44642857142857145, 0.16071428571428573], - [0.44642857142857145, 0.16071428571428573], - [0.48214285714285715, 0.16071428571428573], - [0.48214285714285715, 0.16071428571428573], - [0.5178571428571429, 0.16071428571428573], - [0.5178571428571429, 0.16071428571428573], - [0.5535714285714286, 0.16071428571428573], - [0.5535714285714286, 0.16071428571428573], - [0.5892857142857143, 0.16071428571428573], - [0.5892857142857143, 0.16071428571428573], - [0.625, 0.16071428571428573], - [0.625, 0.16071428571428573], - [0.6607142857142857, 0.16071428571428573], - [0.6607142857142857, 0.16071428571428573], - [0.6964285714285714, 0.16071428571428573], - [0.6964285714285714, 0.16071428571428573], - [0.7321428571428571, 0.16071428571428573], - [0.7321428571428571, 0.16071428571428573], - [0.7678571428571429, 0.16071428571428573], - [0.7678571428571429, 0.16071428571428573], - [0.8035714285714286, 0.16071428571428573], - [0.8035714285714286, 0.16071428571428573], - [0.8392857142857143, 0.16071428571428573], - [0.8392857142857143, 0.16071428571428573], - [0.875, 0.16071428571428573], - [0.875, 0.16071428571428573], - [0.9107142857142857, 0.16071428571428573], - [0.9107142857142857, 0.16071428571428573], - [0.9464285714285714, 0.16071428571428573], - [0.9464285714285714, 0.16071428571428573], - [0.9821428571428571, 0.16071428571428573], - [0.9821428571428571, 0.16071428571428573], - [0.017857142857142856, 0.19642857142857142], - [0.017857142857142856, 0.19642857142857142], - [0.05357142857142857, 0.19642857142857142], - [0.05357142857142857, 0.19642857142857142], - [0.08928571428571429, 0.19642857142857142], - [0.08928571428571429, 
0.19642857142857142], - [0.125, 0.19642857142857142], - [0.125, 0.19642857142857142], - [0.16071428571428573, 0.19642857142857142], - [0.16071428571428573, 0.19642857142857142], - [0.19642857142857142, 0.19642857142857142], - [0.19642857142857142, 0.19642857142857142], - [0.23214285714285715, 0.19642857142857142], - [0.23214285714285715, 0.19642857142857142], - [0.26785714285714285, 0.19642857142857142], - [0.26785714285714285, 0.19642857142857142], - [0.30357142857142855, 0.19642857142857142], - [0.30357142857142855, 0.19642857142857142], - [0.3392857142857143, 0.19642857142857142], - [0.3392857142857143, 0.19642857142857142], - [0.375, 0.19642857142857142], - [0.375, 0.19642857142857142], - [0.4107142857142857, 0.19642857142857142], - [0.4107142857142857, 0.19642857142857142], - [0.44642857142857145, 0.19642857142857142], - [0.44642857142857145, 0.19642857142857142], - [0.48214285714285715, 0.19642857142857142], - [0.48214285714285715, 0.19642857142857142], - [0.5178571428571429, 0.19642857142857142], - [0.5178571428571429, 0.19642857142857142], - [0.5535714285714286, 0.19642857142857142], - [0.5535714285714286, 0.19642857142857142], - [0.5892857142857143, 0.19642857142857142], - [0.5892857142857143, 0.19642857142857142], - [0.625, 0.19642857142857142], - [0.625, 0.19642857142857142], - [0.6607142857142857, 0.19642857142857142], - [0.6607142857142857, 0.19642857142857142], - [0.6964285714285714, 0.19642857142857142], - [0.6964285714285714, 0.19642857142857142], - [0.7321428571428571, 0.19642857142857142], - [0.7321428571428571, 0.19642857142857142], - [0.7678571428571429, 0.19642857142857142], - [0.7678571428571429, 0.19642857142857142], - [0.8035714285714286, 0.19642857142857142], - [0.8035714285714286, 0.19642857142857142], - [0.8392857142857143, 0.19642857142857142], - [0.8392857142857143, 0.19642857142857142], - [0.875, 0.19642857142857142], - [0.875, 0.19642857142857142], - [0.9107142857142857, 0.19642857142857142], - [0.9107142857142857, 
0.19642857142857142], - [0.9464285714285714, 0.19642857142857142], - [0.9464285714285714, 0.19642857142857142], - [0.9821428571428571, 0.19642857142857142], - [0.9821428571428571, 0.19642857142857142], - [0.017857142857142856, 0.23214285714285715], - [0.017857142857142856, 0.23214285714285715], - [0.05357142857142857, 0.23214285714285715], - [0.05357142857142857, 0.23214285714285715], - [0.08928571428571429, 0.23214285714285715], - [0.08928571428571429, 0.23214285714285715], - [0.125, 0.23214285714285715], - [0.125, 0.23214285714285715], - [0.16071428571428573, 0.23214285714285715], - [0.16071428571428573, 0.23214285714285715], - [0.19642857142857142, 0.23214285714285715], - [0.19642857142857142, 0.23214285714285715], - [0.23214285714285715, 0.23214285714285715], - [0.23214285714285715, 0.23214285714285715], - [0.26785714285714285, 0.23214285714285715], - [0.26785714285714285, 0.23214285714285715], - [0.30357142857142855, 0.23214285714285715], - [0.30357142857142855, 0.23214285714285715], - [0.3392857142857143, 0.23214285714285715], - [0.3392857142857143, 0.23214285714285715], - [0.375, 0.23214285714285715], - [0.375, 0.23214285714285715], - [0.4107142857142857, 0.23214285714285715], - [0.4107142857142857, 0.23214285714285715], - [0.44642857142857145, 0.23214285714285715], - [0.44642857142857145, 0.23214285714285715], - [0.48214285714285715, 0.23214285714285715], - [0.48214285714285715, 0.23214285714285715], - [0.5178571428571429, 0.23214285714285715], - [0.5178571428571429, 0.23214285714285715], - [0.5535714285714286, 0.23214285714285715], - [0.5535714285714286, 0.23214285714285715], - [0.5892857142857143, 0.23214285714285715], - [0.5892857142857143, 0.23214285714285715], - [0.625, 0.23214285714285715], - [0.625, 0.23214285714285715], - [0.6607142857142857, 0.23214285714285715], - [0.6607142857142857, 0.23214285714285715], - [0.6964285714285714, 0.23214285714285715], - [0.6964285714285714, 0.23214285714285715], - [0.7321428571428571, 0.23214285714285715], - 
[0.7321428571428571, 0.23214285714285715], - [0.7678571428571429, 0.23214285714285715], - [0.7678571428571429, 0.23214285714285715], - [0.8035714285714286, 0.23214285714285715], - [0.8035714285714286, 0.23214285714285715], - [0.8392857142857143, 0.23214285714285715], - [0.8392857142857143, 0.23214285714285715], - [0.875, 0.23214285714285715], - [0.875, 0.23214285714285715], - [0.9107142857142857, 0.23214285714285715], - [0.9107142857142857, 0.23214285714285715], - [0.9464285714285714, 0.23214285714285715], - [0.9464285714285714, 0.23214285714285715], - [0.9821428571428571, 0.23214285714285715], - [0.9821428571428571, 0.23214285714285715], - [0.017857142857142856, 0.26785714285714285], - [0.017857142857142856, 0.26785714285714285], - [0.05357142857142857, 0.26785714285714285], - [0.05357142857142857, 0.26785714285714285], - [0.08928571428571429, 0.26785714285714285], - [0.08928571428571429, 0.26785714285714285], - [0.125, 0.26785714285714285], - [0.125, 0.26785714285714285], - [0.16071428571428573, 0.26785714285714285], - [0.16071428571428573, 0.26785714285714285], - [0.19642857142857142, 0.26785714285714285], - [0.19642857142857142, 0.26785714285714285], - [0.23214285714285715, 0.26785714285714285], - [0.23214285714285715, 0.26785714285714285], - [0.26785714285714285, 0.26785714285714285], - [0.26785714285714285, 0.26785714285714285], - [0.30357142857142855, 0.26785714285714285], - [0.30357142857142855, 0.26785714285714285], - [0.3392857142857143, 0.26785714285714285], - [0.3392857142857143, 0.26785714285714285], - [0.375, 0.26785714285714285], - [0.375, 0.26785714285714285], - [0.4107142857142857, 0.26785714285714285], - [0.4107142857142857, 0.26785714285714285], - [0.44642857142857145, 0.26785714285714285], - [0.44642857142857145, 0.26785714285714285], - [0.48214285714285715, 0.26785714285714285], - [0.48214285714285715, 0.26785714285714285], - [0.5178571428571429, 0.26785714285714285], - [0.5178571428571429, 0.26785714285714285], - [0.5535714285714286, 
0.26785714285714285], - [0.5535714285714286, 0.26785714285714285], - [0.5892857142857143, 0.26785714285714285], - [0.5892857142857143, 0.26785714285714285], - [0.625, 0.26785714285714285], - [0.625, 0.26785714285714285], - [0.6607142857142857, 0.26785714285714285], - [0.6607142857142857, 0.26785714285714285], - [0.6964285714285714, 0.26785714285714285], - [0.6964285714285714, 0.26785714285714285], - [0.7321428571428571, 0.26785714285714285], - [0.7321428571428571, 0.26785714285714285], - [0.7678571428571429, 0.26785714285714285], - [0.7678571428571429, 0.26785714285714285], - [0.8035714285714286, 0.26785714285714285], - [0.8035714285714286, 0.26785714285714285], - [0.8392857142857143, 0.26785714285714285], - [0.8392857142857143, 0.26785714285714285], - [0.875, 0.26785714285714285], - [0.875, 0.26785714285714285], - [0.9107142857142857, 0.26785714285714285], - [0.9107142857142857, 0.26785714285714285], - [0.9464285714285714, 0.26785714285714285], - [0.9464285714285714, 0.26785714285714285], - [0.9821428571428571, 0.26785714285714285], - [0.9821428571428571, 0.26785714285714285], - [0.017857142857142856, 0.30357142857142855], - [0.017857142857142856, 0.30357142857142855], - [0.05357142857142857, 0.30357142857142855], - [0.05357142857142857, 0.30357142857142855], - [0.08928571428571429, 0.30357142857142855], - [0.08928571428571429, 0.30357142857142855], - [0.125, 0.30357142857142855], - [0.125, 0.30357142857142855], - [0.16071428571428573, 0.30357142857142855], - [0.16071428571428573, 0.30357142857142855], - [0.19642857142857142, 0.30357142857142855], - [0.19642857142857142, 0.30357142857142855], - [0.23214285714285715, 0.30357142857142855], - [0.23214285714285715, 0.30357142857142855], - [0.26785714285714285, 0.30357142857142855], - [0.26785714285714285, 0.30357142857142855], - [0.30357142857142855, 0.30357142857142855], - [0.30357142857142855, 0.30357142857142855], - [0.3392857142857143, 0.30357142857142855], - [0.3392857142857143, 0.30357142857142855], - [0.375, 
0.30357142857142855], - [0.375, 0.30357142857142855], - [0.4107142857142857, 0.30357142857142855], - [0.4107142857142857, 0.30357142857142855], - [0.44642857142857145, 0.30357142857142855], - [0.44642857142857145, 0.30357142857142855], - [0.48214285714285715, 0.30357142857142855], - [0.48214285714285715, 0.30357142857142855], - [0.5178571428571429, 0.30357142857142855], - [0.5178571428571429, 0.30357142857142855], - [0.5535714285714286, 0.30357142857142855], - [0.5535714285714286, 0.30357142857142855], - [0.5892857142857143, 0.30357142857142855], - [0.5892857142857143, 0.30357142857142855], - [0.625, 0.30357142857142855], - [0.625, 0.30357142857142855], - [0.6607142857142857, 0.30357142857142855], - [0.6607142857142857, 0.30357142857142855], - [0.6964285714285714, 0.30357142857142855], - [0.6964285714285714, 0.30357142857142855], - [0.7321428571428571, 0.30357142857142855], - [0.7321428571428571, 0.30357142857142855], - [0.7678571428571429, 0.30357142857142855], - [0.7678571428571429, 0.30357142857142855], - [0.8035714285714286, 0.30357142857142855], - [0.8035714285714286, 0.30357142857142855], - [0.8392857142857143, 0.30357142857142855], - [0.8392857142857143, 0.30357142857142855], - [0.875, 0.30357142857142855], - [0.875, 0.30357142857142855], - [0.9107142857142857, 0.30357142857142855], - [0.9107142857142857, 0.30357142857142855], - [0.9464285714285714, 0.30357142857142855], - [0.9464285714285714, 0.30357142857142855], - [0.9821428571428571, 0.30357142857142855], - [0.9821428571428571, 0.30357142857142855], - [0.017857142857142856, 0.3392857142857143], - [0.017857142857142856, 0.3392857142857143], - [0.05357142857142857, 0.3392857142857143], - [0.05357142857142857, 0.3392857142857143], - [0.08928571428571429, 0.3392857142857143], - [0.08928571428571429, 0.3392857142857143], - [0.125, 0.3392857142857143], - [0.125, 0.3392857142857143], - [0.16071428571428573, 0.3392857142857143], - [0.16071428571428573, 0.3392857142857143], - [0.19642857142857142, 
0.3392857142857143], - [0.19642857142857142, 0.3392857142857143], - [0.23214285714285715, 0.3392857142857143], - [0.23214285714285715, 0.3392857142857143], - [0.26785714285714285, 0.3392857142857143], - [0.26785714285714285, 0.3392857142857143], - [0.30357142857142855, 0.3392857142857143], - [0.30357142857142855, 0.3392857142857143], - [0.3392857142857143, 0.3392857142857143], - [0.3392857142857143, 0.3392857142857143], - [0.375, 0.3392857142857143], - [0.375, 0.3392857142857143], - [0.4107142857142857, 0.3392857142857143], - [0.4107142857142857, 0.3392857142857143], - [0.44642857142857145, 0.3392857142857143], - [0.44642857142857145, 0.3392857142857143], - [0.48214285714285715, 0.3392857142857143], - [0.48214285714285715, 0.3392857142857143], - [0.5178571428571429, 0.3392857142857143], - [0.5178571428571429, 0.3392857142857143], - [0.5535714285714286, 0.3392857142857143], - [0.5535714285714286, 0.3392857142857143], - [0.5892857142857143, 0.3392857142857143], - [0.5892857142857143, 0.3392857142857143], - [0.625, 0.3392857142857143], - [0.625, 0.3392857142857143], - [0.6607142857142857, 0.3392857142857143], - [0.6607142857142857, 0.3392857142857143], - [0.6964285714285714, 0.3392857142857143], - [0.6964285714285714, 0.3392857142857143], - [0.7321428571428571, 0.3392857142857143], - [0.7321428571428571, 0.3392857142857143], - [0.7678571428571429, 0.3392857142857143], - [0.7678571428571429, 0.3392857142857143], - [0.8035714285714286, 0.3392857142857143], - [0.8035714285714286, 0.3392857142857143], - [0.8392857142857143, 0.3392857142857143], - [0.8392857142857143, 0.3392857142857143], - [0.875, 0.3392857142857143], - [0.875, 0.3392857142857143], - [0.9107142857142857, 0.3392857142857143], - [0.9107142857142857, 0.3392857142857143], - [0.9464285714285714, 0.3392857142857143], - [0.9464285714285714, 0.3392857142857143], - [0.9821428571428571, 0.3392857142857143], - [0.9821428571428571, 0.3392857142857143], - [0.017857142857142856, 0.375], - [0.017857142857142856, 0.375], 
- [0.05357142857142857, 0.375], - [0.05357142857142857, 0.375], - [0.08928571428571429, 0.375], - [0.08928571428571429, 0.375], - [0.125, 0.375], - [0.125, 0.375], - [0.16071428571428573, 0.375], - [0.16071428571428573, 0.375], - [0.19642857142857142, 0.375], - [0.19642857142857142, 0.375], - [0.23214285714285715, 0.375], - [0.23214285714285715, 0.375], - [0.26785714285714285, 0.375], - [0.26785714285714285, 0.375], - [0.30357142857142855, 0.375], - [0.30357142857142855, 0.375], - [0.3392857142857143, 0.375], - [0.3392857142857143, 0.375], - [0.375, 0.375], - [0.375, 0.375], - [0.4107142857142857, 0.375], - [0.4107142857142857, 0.375], - [0.44642857142857145, 0.375], - [0.44642857142857145, 0.375], - [0.48214285714285715, 0.375], - [0.48214285714285715, 0.375], - [0.5178571428571429, 0.375], - [0.5178571428571429, 0.375], - [0.5535714285714286, 0.375], - [0.5535714285714286, 0.375], - [0.5892857142857143, 0.375], - [0.5892857142857143, 0.375], - [0.625, 0.375], - [0.625, 0.375], - [0.6607142857142857, 0.375], - [0.6607142857142857, 0.375], - [0.6964285714285714, 0.375], - [0.6964285714285714, 0.375], - [0.7321428571428571, 0.375], - [0.7321428571428571, 0.375], - [0.7678571428571429, 0.375], - [0.7678571428571429, 0.375], - [0.8035714285714286, 0.375], - [0.8035714285714286, 0.375], - [0.8392857142857143, 0.375], - [0.8392857142857143, 0.375], - [0.875, 0.375], - [0.875, 0.375], - [0.9107142857142857, 0.375], - [0.9107142857142857, 0.375], - [0.9464285714285714, 0.375], - [0.9464285714285714, 0.375], - [0.9821428571428571, 0.375], - [0.9821428571428571, 0.375], - [0.017857142857142856, 0.4107142857142857], - [0.017857142857142856, 0.4107142857142857], - [0.05357142857142857, 0.4107142857142857], - [0.05357142857142857, 0.4107142857142857], - [0.08928571428571429, 0.4107142857142857], - [0.08928571428571429, 0.4107142857142857], - [0.125, 0.4107142857142857], - [0.125, 0.4107142857142857], - [0.16071428571428573, 0.4107142857142857], - [0.16071428571428573, 
0.4107142857142857], - [0.19642857142857142, 0.4107142857142857], - [0.19642857142857142, 0.4107142857142857], - [0.23214285714285715, 0.4107142857142857], - [0.23214285714285715, 0.4107142857142857], - [0.26785714285714285, 0.4107142857142857], - [0.26785714285714285, 0.4107142857142857], - [0.30357142857142855, 0.4107142857142857], - [0.30357142857142855, 0.4107142857142857], - [0.3392857142857143, 0.4107142857142857], - [0.3392857142857143, 0.4107142857142857], - [0.375, 0.4107142857142857], - [0.375, 0.4107142857142857], - [0.4107142857142857, 0.4107142857142857], - [0.4107142857142857, 0.4107142857142857], - [0.44642857142857145, 0.4107142857142857], - [0.44642857142857145, 0.4107142857142857], - [0.48214285714285715, 0.4107142857142857], - [0.48214285714285715, 0.4107142857142857], - [0.5178571428571429, 0.4107142857142857], - [0.5178571428571429, 0.4107142857142857], - [0.5535714285714286, 0.4107142857142857], - [0.5535714285714286, 0.4107142857142857], - [0.5892857142857143, 0.4107142857142857], - [0.5892857142857143, 0.4107142857142857], - [0.625, 0.4107142857142857], - [0.625, 0.4107142857142857], - [0.6607142857142857, 0.4107142857142857], - [0.6607142857142857, 0.4107142857142857], - [0.6964285714285714, 0.4107142857142857], - [0.6964285714285714, 0.4107142857142857], - [0.7321428571428571, 0.4107142857142857], - [0.7321428571428571, 0.4107142857142857], - [0.7678571428571429, 0.4107142857142857], - [0.7678571428571429, 0.4107142857142857], - [0.8035714285714286, 0.4107142857142857], - [0.8035714285714286, 0.4107142857142857], - [0.8392857142857143, 0.4107142857142857], - [0.8392857142857143, 0.4107142857142857], - [0.875, 0.4107142857142857], - [0.875, 0.4107142857142857], - [0.9107142857142857, 0.4107142857142857], - [0.9107142857142857, 0.4107142857142857], - [0.9464285714285714, 0.4107142857142857], - [0.9464285714285714, 0.4107142857142857], - [0.9821428571428571, 0.4107142857142857], - [0.9821428571428571, 0.4107142857142857], - 
[0.017857142857142856, 0.44642857142857145], - [0.017857142857142856, 0.44642857142857145], - [0.05357142857142857, 0.44642857142857145], - [0.05357142857142857, 0.44642857142857145], - [0.08928571428571429, 0.44642857142857145], - [0.08928571428571429, 0.44642857142857145], - [0.125, 0.44642857142857145], - [0.125, 0.44642857142857145], - [0.16071428571428573, 0.44642857142857145], - [0.16071428571428573, 0.44642857142857145], - [0.19642857142857142, 0.44642857142857145], - [0.19642857142857142, 0.44642857142857145], - [0.23214285714285715, 0.44642857142857145], - [0.23214285714285715, 0.44642857142857145], - [0.26785714285714285, 0.44642857142857145], - [0.26785714285714285, 0.44642857142857145], - [0.30357142857142855, 0.44642857142857145], - [0.30357142857142855, 0.44642857142857145], - [0.3392857142857143, 0.44642857142857145], - [0.3392857142857143, 0.44642857142857145], - [0.375, 0.44642857142857145], - [0.375, 0.44642857142857145], - [0.4107142857142857, 0.44642857142857145], - [0.4107142857142857, 0.44642857142857145], - [0.44642857142857145, 0.44642857142857145], - [0.44642857142857145, 0.44642857142857145], - [0.48214285714285715, 0.44642857142857145], - [0.48214285714285715, 0.44642857142857145], - [0.5178571428571429, 0.44642857142857145], - [0.5178571428571429, 0.44642857142857145], - [0.5535714285714286, 0.44642857142857145], - [0.5535714285714286, 0.44642857142857145], - [0.5892857142857143, 0.44642857142857145], - [0.5892857142857143, 0.44642857142857145], - [0.625, 0.44642857142857145], - [0.625, 0.44642857142857145], - [0.6607142857142857, 0.44642857142857145], - [0.6607142857142857, 0.44642857142857145], - [0.6964285714285714, 0.44642857142857145], - [0.6964285714285714, 0.44642857142857145], - [0.7321428571428571, 0.44642857142857145], - [0.7321428571428571, 0.44642857142857145], - [0.7678571428571429, 0.44642857142857145], - [0.7678571428571429, 0.44642857142857145], - [0.8035714285714286, 0.44642857142857145], - [0.8035714285714286, 
0.44642857142857145], - [0.8392857142857143, 0.44642857142857145], - [0.8392857142857143, 0.44642857142857145], - [0.875, 0.44642857142857145], - [0.875, 0.44642857142857145], - [0.9107142857142857, 0.44642857142857145], - [0.9107142857142857, 0.44642857142857145], - [0.9464285714285714, 0.44642857142857145], - [0.9464285714285714, 0.44642857142857145], - [0.9821428571428571, 0.44642857142857145], - [0.9821428571428571, 0.44642857142857145], - [0.017857142857142856, 0.48214285714285715], - [0.017857142857142856, 0.48214285714285715], - [0.05357142857142857, 0.48214285714285715], - [0.05357142857142857, 0.48214285714285715], - [0.08928571428571429, 0.48214285714285715], - [0.08928571428571429, 0.48214285714285715], - [0.125, 0.48214285714285715], - [0.125, 0.48214285714285715], - [0.16071428571428573, 0.48214285714285715], - [0.16071428571428573, 0.48214285714285715], - [0.19642857142857142, 0.48214285714285715], - [0.19642857142857142, 0.48214285714285715], - [0.23214285714285715, 0.48214285714285715], - [0.23214285714285715, 0.48214285714285715], - [0.26785714285714285, 0.48214285714285715], - [0.26785714285714285, 0.48214285714285715], - [0.30357142857142855, 0.48214285714285715], - [0.30357142857142855, 0.48214285714285715], - [0.3392857142857143, 0.48214285714285715], - [0.3392857142857143, 0.48214285714285715], - [0.375, 0.48214285714285715], - [0.375, 0.48214285714285715], - [0.4107142857142857, 0.48214285714285715], - [0.4107142857142857, 0.48214285714285715], - [0.44642857142857145, 0.48214285714285715], - [0.44642857142857145, 0.48214285714285715], - [0.48214285714285715, 0.48214285714285715], - [0.48214285714285715, 0.48214285714285715], - [0.5178571428571429, 0.48214285714285715], - [0.5178571428571429, 0.48214285714285715], - [0.5535714285714286, 0.48214285714285715], - [0.5535714285714286, 0.48214285714285715], - [0.5892857142857143, 0.48214285714285715], - [0.5892857142857143, 0.48214285714285715], - [0.625, 0.48214285714285715], - [0.625, 
0.48214285714285715], - [0.6607142857142857, 0.48214285714285715], - [0.6607142857142857, 0.48214285714285715], - [0.6964285714285714, 0.48214285714285715], - [0.6964285714285714, 0.48214285714285715], - [0.7321428571428571, 0.48214285714285715], - [0.7321428571428571, 0.48214285714285715], - [0.7678571428571429, 0.48214285714285715], - [0.7678571428571429, 0.48214285714285715], - [0.8035714285714286, 0.48214285714285715], - [0.8035714285714286, 0.48214285714285715], - [0.8392857142857143, 0.48214285714285715], - [0.8392857142857143, 0.48214285714285715], - [0.875, 0.48214285714285715], - [0.875, 0.48214285714285715], - [0.9107142857142857, 0.48214285714285715], - [0.9107142857142857, 0.48214285714285715], - [0.9464285714285714, 0.48214285714285715], - [0.9464285714285714, 0.48214285714285715], - [0.9821428571428571, 0.48214285714285715], - [0.9821428571428571, 0.48214285714285715], - [0.017857142857142856, 0.5178571428571429], - [0.017857142857142856, 0.5178571428571429], - [0.05357142857142857, 0.5178571428571429], - [0.05357142857142857, 0.5178571428571429], - [0.08928571428571429, 0.5178571428571429], - [0.08928571428571429, 0.5178571428571429], - [0.125, 0.5178571428571429], - [0.125, 0.5178571428571429], - [0.16071428571428573, 0.5178571428571429], - [0.16071428571428573, 0.5178571428571429], - [0.19642857142857142, 0.5178571428571429], - [0.19642857142857142, 0.5178571428571429], - [0.23214285714285715, 0.5178571428571429], - [0.23214285714285715, 0.5178571428571429], - [0.26785714285714285, 0.5178571428571429], - [0.26785714285714285, 0.5178571428571429], - [0.30357142857142855, 0.5178571428571429], - [0.30357142857142855, 0.5178571428571429], - [0.3392857142857143, 0.5178571428571429], - [0.3392857142857143, 0.5178571428571429], - [0.375, 0.5178571428571429], - [0.375, 0.5178571428571429], - [0.4107142857142857, 0.5178571428571429], - [0.4107142857142857, 0.5178571428571429], - [0.44642857142857145, 0.5178571428571429], - [0.44642857142857145, 
0.5178571428571429], - [0.48214285714285715, 0.5178571428571429], - [0.48214285714285715, 0.5178571428571429], - [0.5178571428571429, 0.5178571428571429], - [0.5178571428571429, 0.5178571428571429], - [0.5535714285714286, 0.5178571428571429], - [0.5535714285714286, 0.5178571428571429], - [0.5892857142857143, 0.5178571428571429], - [0.5892857142857143, 0.5178571428571429], - [0.625, 0.5178571428571429], - [0.625, 0.5178571428571429], - [0.6607142857142857, 0.5178571428571429], - [0.6607142857142857, 0.5178571428571429], - [0.6964285714285714, 0.5178571428571429], - [0.6964285714285714, 0.5178571428571429], - [0.7321428571428571, 0.5178571428571429], - [0.7321428571428571, 0.5178571428571429], - [0.7678571428571429, 0.5178571428571429], - [0.7678571428571429, 0.5178571428571429], - [0.8035714285714286, 0.5178571428571429], - [0.8035714285714286, 0.5178571428571429], - [0.8392857142857143, 0.5178571428571429], - [0.8392857142857143, 0.5178571428571429], - [0.875, 0.5178571428571429], - [0.875, 0.5178571428571429], - [0.9107142857142857, 0.5178571428571429], - [0.9107142857142857, 0.5178571428571429], - [0.9464285714285714, 0.5178571428571429], - [0.9464285714285714, 0.5178571428571429], - [0.9821428571428571, 0.5178571428571429], - [0.9821428571428571, 0.5178571428571429], - [0.017857142857142856, 0.5535714285714286], - [0.017857142857142856, 0.5535714285714286], - [0.05357142857142857, 0.5535714285714286], - [0.05357142857142857, 0.5535714285714286], - [0.08928571428571429, 0.5535714285714286], - [0.08928571428571429, 0.5535714285714286], - [0.125, 0.5535714285714286], - [0.125, 0.5535714285714286], - [0.16071428571428573, 0.5535714285714286], - [0.16071428571428573, 0.5535714285714286], - [0.19642857142857142, 0.5535714285714286], - [0.19642857142857142, 0.5535714285714286], - [0.23214285714285715, 0.5535714285714286], - [0.23214285714285715, 0.5535714285714286], - [0.26785714285714285, 0.5535714285714286], - [0.26785714285714285, 0.5535714285714286], - 
[0.30357142857142855, 0.5535714285714286], - [0.30357142857142855, 0.5535714285714286], - [0.3392857142857143, 0.5535714285714286], - [0.3392857142857143, 0.5535714285714286], - [0.375, 0.5535714285714286], - [0.375, 0.5535714285714286], - [0.4107142857142857, 0.5535714285714286], - [0.4107142857142857, 0.5535714285714286], - [0.44642857142857145, 0.5535714285714286], - [0.44642857142857145, 0.5535714285714286], - [0.48214285714285715, 0.5535714285714286], - [0.48214285714285715, 0.5535714285714286], - [0.5178571428571429, 0.5535714285714286], - [0.5178571428571429, 0.5535714285714286], - [0.5535714285714286, 0.5535714285714286], - [0.5535714285714286, 0.5535714285714286], - [0.5892857142857143, 0.5535714285714286], - [0.5892857142857143, 0.5535714285714286], - [0.625, 0.5535714285714286], - [0.625, 0.5535714285714286], - [0.6607142857142857, 0.5535714285714286], - [0.6607142857142857, 0.5535714285714286], - [0.6964285714285714, 0.5535714285714286], - [0.6964285714285714, 0.5535714285714286], - [0.7321428571428571, 0.5535714285714286], - [0.7321428571428571, 0.5535714285714286], - [0.7678571428571429, 0.5535714285714286], - [0.7678571428571429, 0.5535714285714286], - [0.8035714285714286, 0.5535714285714286], - [0.8035714285714286, 0.5535714285714286], - [0.8392857142857143, 0.5535714285714286], - [0.8392857142857143, 0.5535714285714286], - [0.875, 0.5535714285714286], - [0.875, 0.5535714285714286], - [0.9107142857142857, 0.5535714285714286], - [0.9107142857142857, 0.5535714285714286], - [0.9464285714285714, 0.5535714285714286], - [0.9464285714285714, 0.5535714285714286], - [0.9821428571428571, 0.5535714285714286], - [0.9821428571428571, 0.5535714285714286], - [0.017857142857142856, 0.5892857142857143], - [0.017857142857142856, 0.5892857142857143], - [0.05357142857142857, 0.5892857142857143], - [0.05357142857142857, 0.5892857142857143], - [0.08928571428571429, 0.5892857142857143], - [0.08928571428571429, 0.5892857142857143], - [0.125, 0.5892857142857143], - [0.125, 
0.5892857142857143], - [0.16071428571428573, 0.5892857142857143], - [0.16071428571428573, 0.5892857142857143], - [0.19642857142857142, 0.5892857142857143], - [0.19642857142857142, 0.5892857142857143], - [0.23214285714285715, 0.5892857142857143], - [0.23214285714285715, 0.5892857142857143], - [0.26785714285714285, 0.5892857142857143], - [0.26785714285714285, 0.5892857142857143], - [0.30357142857142855, 0.5892857142857143], - [0.30357142857142855, 0.5892857142857143], - [0.3392857142857143, 0.5892857142857143], - [0.3392857142857143, 0.5892857142857143], - [0.375, 0.5892857142857143], - [0.375, 0.5892857142857143], - [0.4107142857142857, 0.5892857142857143], - [0.4107142857142857, 0.5892857142857143], - [0.44642857142857145, 0.5892857142857143], - [0.44642857142857145, 0.5892857142857143], - [0.48214285714285715, 0.5892857142857143], - [0.48214285714285715, 0.5892857142857143], - [0.5178571428571429, 0.5892857142857143], - [0.5178571428571429, 0.5892857142857143], - [0.5535714285714286, 0.5892857142857143], - [0.5535714285714286, 0.5892857142857143], - [0.5892857142857143, 0.5892857142857143], - [0.5892857142857143, 0.5892857142857143], - [0.625, 0.5892857142857143], - [0.625, 0.5892857142857143], - [0.6607142857142857, 0.5892857142857143], - [0.6607142857142857, 0.5892857142857143], - [0.6964285714285714, 0.5892857142857143], - [0.6964285714285714, 0.5892857142857143], - [0.7321428571428571, 0.5892857142857143], - [0.7321428571428571, 0.5892857142857143], - [0.7678571428571429, 0.5892857142857143], - [0.7678571428571429, 0.5892857142857143], - [0.8035714285714286, 0.5892857142857143], - [0.8035714285714286, 0.5892857142857143], - [0.8392857142857143, 0.5892857142857143], - [0.8392857142857143, 0.5892857142857143], - [0.875, 0.5892857142857143], - [0.875, 0.5892857142857143], - [0.9107142857142857, 0.5892857142857143], - [0.9107142857142857, 0.5892857142857143], - [0.9464285714285714, 0.5892857142857143], - [0.9464285714285714, 0.5892857142857143], - 
[0.9821428571428571, 0.5892857142857143], - [0.9821428571428571, 0.5892857142857143], - [0.017857142857142856, 0.625], - [0.017857142857142856, 0.625], - [0.05357142857142857, 0.625], - [0.05357142857142857, 0.625], - [0.08928571428571429, 0.625], - [0.08928571428571429, 0.625], - [0.125, 0.625], - [0.125, 0.625], - [0.16071428571428573, 0.625], - [0.16071428571428573, 0.625], - [0.19642857142857142, 0.625], - [0.19642857142857142, 0.625], - [0.23214285714285715, 0.625], - [0.23214285714285715, 0.625], - [0.26785714285714285, 0.625], - [0.26785714285714285, 0.625], - [0.30357142857142855, 0.625], - [0.30357142857142855, 0.625], - [0.3392857142857143, 0.625], - [0.3392857142857143, 0.625], - [0.375, 0.625], - [0.375, 0.625], - [0.4107142857142857, 0.625], - [0.4107142857142857, 0.625], - [0.44642857142857145, 0.625], - [0.44642857142857145, 0.625], - [0.48214285714285715, 0.625], - [0.48214285714285715, 0.625], - [0.5178571428571429, 0.625], - [0.5178571428571429, 0.625], - [0.5535714285714286, 0.625], - [0.5535714285714286, 0.625], - [0.5892857142857143, 0.625], - [0.5892857142857143, 0.625], - [0.625, 0.625], - [0.625, 0.625], - [0.6607142857142857, 0.625], - [0.6607142857142857, 0.625], - [0.6964285714285714, 0.625], - [0.6964285714285714, 0.625], - [0.7321428571428571, 0.625], - [0.7321428571428571, 0.625], - [0.7678571428571429, 0.625], - [0.7678571428571429, 0.625], - [0.8035714285714286, 0.625], - [0.8035714285714286, 0.625], - [0.8392857142857143, 0.625], - [0.8392857142857143, 0.625], - [0.875, 0.625], - [0.875, 0.625], - [0.9107142857142857, 0.625], - [0.9107142857142857, 0.625], - [0.9464285714285714, 0.625], - [0.9464285714285714, 0.625], - [0.9821428571428571, 0.625], - [0.9821428571428571, 0.625], - [0.017857142857142856, 0.6607142857142857], - [0.017857142857142856, 0.6607142857142857], - [0.05357142857142857, 0.6607142857142857], - [0.05357142857142857, 0.6607142857142857], - [0.08928571428571429, 0.6607142857142857], - [0.08928571428571429, 
0.6607142857142857], - [0.125, 0.6607142857142857], - [0.125, 0.6607142857142857], - [0.16071428571428573, 0.6607142857142857], - [0.16071428571428573, 0.6607142857142857], - [0.19642857142857142, 0.6607142857142857], - [0.19642857142857142, 0.6607142857142857], - [0.23214285714285715, 0.6607142857142857], - [0.23214285714285715, 0.6607142857142857], - [0.26785714285714285, 0.6607142857142857], - [0.26785714285714285, 0.6607142857142857], - [0.30357142857142855, 0.6607142857142857], - [0.30357142857142855, 0.6607142857142857], - [0.3392857142857143, 0.6607142857142857], - [0.3392857142857143, 0.6607142857142857], - [0.375, 0.6607142857142857], - [0.375, 0.6607142857142857], - [0.4107142857142857, 0.6607142857142857], - [0.4107142857142857, 0.6607142857142857], - [0.44642857142857145, 0.6607142857142857], - [0.44642857142857145, 0.6607142857142857], - [0.48214285714285715, 0.6607142857142857], - [0.48214285714285715, 0.6607142857142857], - [0.5178571428571429, 0.6607142857142857], - [0.5178571428571429, 0.6607142857142857], - [0.5535714285714286, 0.6607142857142857], - [0.5535714285714286, 0.6607142857142857], - [0.5892857142857143, 0.6607142857142857], - [0.5892857142857143, 0.6607142857142857], - [0.625, 0.6607142857142857], - [0.625, 0.6607142857142857], - [0.6607142857142857, 0.6607142857142857], - [0.6607142857142857, 0.6607142857142857], - [0.6964285714285714, 0.6607142857142857], - [0.6964285714285714, 0.6607142857142857], - [0.7321428571428571, 0.6607142857142857], - [0.7321428571428571, 0.6607142857142857], - [0.7678571428571429, 0.6607142857142857], - [0.7678571428571429, 0.6607142857142857], - [0.8035714285714286, 0.6607142857142857], - [0.8035714285714286, 0.6607142857142857], - [0.8392857142857143, 0.6607142857142857], - [0.8392857142857143, 0.6607142857142857], - [0.875, 0.6607142857142857], - [0.875, 0.6607142857142857], - [0.9107142857142857, 0.6607142857142857], - [0.9107142857142857, 0.6607142857142857], - [0.9464285714285714, 0.6607142857142857], 
- [0.9464285714285714, 0.6607142857142857], - [0.9821428571428571, 0.6607142857142857], - [0.9821428571428571, 0.6607142857142857], - [0.017857142857142856, 0.6964285714285714], - [0.017857142857142856, 0.6964285714285714], - [0.05357142857142857, 0.6964285714285714], - [0.05357142857142857, 0.6964285714285714], - [0.08928571428571429, 0.6964285714285714], - [0.08928571428571429, 0.6964285714285714], - [0.125, 0.6964285714285714], - [0.125, 0.6964285714285714], - [0.16071428571428573, 0.6964285714285714], - [0.16071428571428573, 0.6964285714285714], - [0.19642857142857142, 0.6964285714285714], - [0.19642857142857142, 0.6964285714285714], - [0.23214285714285715, 0.6964285714285714], - [0.23214285714285715, 0.6964285714285714], - [0.26785714285714285, 0.6964285714285714], - [0.26785714285714285, 0.6964285714285714], - [0.30357142857142855, 0.6964285714285714], - [0.30357142857142855, 0.6964285714285714], - [0.3392857142857143, 0.6964285714285714], - [0.3392857142857143, 0.6964285714285714], - [0.375, 0.6964285714285714], - [0.375, 0.6964285714285714], - [0.4107142857142857, 0.6964285714285714], - [0.4107142857142857, 0.6964285714285714], - [0.44642857142857145, 0.6964285714285714], - [0.44642857142857145, 0.6964285714285714], - [0.48214285714285715, 0.6964285714285714], - [0.48214285714285715, 0.6964285714285714], - [0.5178571428571429, 0.6964285714285714], - [0.5178571428571429, 0.6964285714285714], - [0.5535714285714286, 0.6964285714285714], - [0.5535714285714286, 0.6964285714285714], - [0.5892857142857143, 0.6964285714285714], - [0.5892857142857143, 0.6964285714285714], - [0.625, 0.6964285714285714], - [0.625, 0.6964285714285714], - [0.6607142857142857, 0.6964285714285714], - [0.6607142857142857, 0.6964285714285714], - [0.6964285714285714, 0.6964285714285714], - [0.6964285714285714, 0.6964285714285714], - [0.7321428571428571, 0.6964285714285714], - [0.7321428571428571, 0.6964285714285714], - [0.7678571428571429, 0.6964285714285714], - [0.7678571428571429, 
0.6964285714285714], - [0.8035714285714286, 0.6964285714285714], - [0.8035714285714286, 0.6964285714285714], - [0.8392857142857143, 0.6964285714285714], - [0.8392857142857143, 0.6964285714285714], - [0.875, 0.6964285714285714], - [0.875, 0.6964285714285714], - [0.9107142857142857, 0.6964285714285714], - [0.9107142857142857, 0.6964285714285714], - [0.9464285714285714, 0.6964285714285714], - [0.9464285714285714, 0.6964285714285714], - [0.9821428571428571, 0.6964285714285714], - [0.9821428571428571, 0.6964285714285714], - [0.017857142857142856, 0.7321428571428571], - [0.017857142857142856, 0.7321428571428571], - [0.05357142857142857, 0.7321428571428571], - [0.05357142857142857, 0.7321428571428571], - [0.08928571428571429, 0.7321428571428571], - [0.08928571428571429, 0.7321428571428571], - [0.125, 0.7321428571428571], - [0.125, 0.7321428571428571], - [0.16071428571428573, 0.7321428571428571], - [0.16071428571428573, 0.7321428571428571], - [0.19642857142857142, 0.7321428571428571], - [0.19642857142857142, 0.7321428571428571], - [0.23214285714285715, 0.7321428571428571], - [0.23214285714285715, 0.7321428571428571], - [0.26785714285714285, 0.7321428571428571], - [0.26785714285714285, 0.7321428571428571], - [0.30357142857142855, 0.7321428571428571], - [0.30357142857142855, 0.7321428571428571], - [0.3392857142857143, 0.7321428571428571], - [0.3392857142857143, 0.7321428571428571], - [0.375, 0.7321428571428571], - [0.375, 0.7321428571428571], - [0.4107142857142857, 0.7321428571428571], - [0.4107142857142857, 0.7321428571428571], - [0.44642857142857145, 0.7321428571428571], - [0.44642857142857145, 0.7321428571428571], - [0.48214285714285715, 0.7321428571428571], - [0.48214285714285715, 0.7321428571428571], - [0.5178571428571429, 0.7321428571428571], - [0.5178571428571429, 0.7321428571428571], - [0.5535714285714286, 0.7321428571428571], - [0.5535714285714286, 0.7321428571428571], - [0.5892857142857143, 0.7321428571428571], - [0.5892857142857143, 0.7321428571428571], - [0.625, 
0.7321428571428571], - [0.625, 0.7321428571428571], - [0.6607142857142857, 0.7321428571428571], - [0.6607142857142857, 0.7321428571428571], - [0.6964285714285714, 0.7321428571428571], - [0.6964285714285714, 0.7321428571428571], - [0.7321428571428571, 0.7321428571428571], - [0.7321428571428571, 0.7321428571428571], - [0.7678571428571429, 0.7321428571428571], - [0.7678571428571429, 0.7321428571428571], - [0.8035714285714286, 0.7321428571428571], - [0.8035714285714286, 0.7321428571428571], - [0.8392857142857143, 0.7321428571428571], - [0.8392857142857143, 0.7321428571428571], - [0.875, 0.7321428571428571], - [0.875, 0.7321428571428571], - [0.9107142857142857, 0.7321428571428571], - [0.9107142857142857, 0.7321428571428571], - [0.9464285714285714, 0.7321428571428571], - [0.9464285714285714, 0.7321428571428571], - [0.9821428571428571, 0.7321428571428571], - [0.9821428571428571, 0.7321428571428571], - [0.017857142857142856, 0.7678571428571429], - [0.017857142857142856, 0.7678571428571429], - [0.05357142857142857, 0.7678571428571429], - [0.05357142857142857, 0.7678571428571429], - [0.08928571428571429, 0.7678571428571429], - [0.08928571428571429, 0.7678571428571429], - [0.125, 0.7678571428571429], - [0.125, 0.7678571428571429], - [0.16071428571428573, 0.7678571428571429], - [0.16071428571428573, 0.7678571428571429], - [0.19642857142857142, 0.7678571428571429], - [0.19642857142857142, 0.7678571428571429], - [0.23214285714285715, 0.7678571428571429], - [0.23214285714285715, 0.7678571428571429], - [0.26785714285714285, 0.7678571428571429], - [0.26785714285714285, 0.7678571428571429], - [0.30357142857142855, 0.7678571428571429], - [0.30357142857142855, 0.7678571428571429], - [0.3392857142857143, 0.7678571428571429], - [0.3392857142857143, 0.7678571428571429], - [0.375, 0.7678571428571429], - [0.375, 0.7678571428571429], - [0.4107142857142857, 0.7678571428571429], - [0.4107142857142857, 0.7678571428571429], - [0.44642857142857145, 0.7678571428571429], - [0.44642857142857145, 
0.7678571428571429], - [0.48214285714285715, 0.7678571428571429], - [0.48214285714285715, 0.7678571428571429], - [0.5178571428571429, 0.7678571428571429], - [0.5178571428571429, 0.7678571428571429], - [0.5535714285714286, 0.7678571428571429], - [0.5535714285714286, 0.7678571428571429], - [0.5892857142857143, 0.7678571428571429], - [0.5892857142857143, 0.7678571428571429], - [0.625, 0.7678571428571429], - [0.625, 0.7678571428571429], - [0.6607142857142857, 0.7678571428571429], - [0.6607142857142857, 0.7678571428571429], - [0.6964285714285714, 0.7678571428571429], - [0.6964285714285714, 0.7678571428571429], - [0.7321428571428571, 0.7678571428571429], - [0.7321428571428571, 0.7678571428571429], - [0.7678571428571429, 0.7678571428571429], - [0.7678571428571429, 0.7678571428571429], - [0.8035714285714286, 0.7678571428571429], - [0.8035714285714286, 0.7678571428571429], - [0.8392857142857143, 0.7678571428571429], - [0.8392857142857143, 0.7678571428571429], - [0.875, 0.7678571428571429], - [0.875, 0.7678571428571429], - [0.9107142857142857, 0.7678571428571429], - [0.9107142857142857, 0.7678571428571429], - [0.9464285714285714, 0.7678571428571429], - [0.9464285714285714, 0.7678571428571429], - [0.9821428571428571, 0.7678571428571429], - [0.9821428571428571, 0.7678571428571429], - [0.017857142857142856, 0.8035714285714286], - [0.017857142857142856, 0.8035714285714286], - [0.05357142857142857, 0.8035714285714286], - [0.05357142857142857, 0.8035714285714286], - [0.08928571428571429, 0.8035714285714286], - [0.08928571428571429, 0.8035714285714286], - [0.125, 0.8035714285714286], - [0.125, 0.8035714285714286], - [0.16071428571428573, 0.8035714285714286], - [0.16071428571428573, 0.8035714285714286], - [0.19642857142857142, 0.8035714285714286], - [0.19642857142857142, 0.8035714285714286], - [0.23214285714285715, 0.8035714285714286], - [0.23214285714285715, 0.8035714285714286], - [0.26785714285714285, 0.8035714285714286], - [0.26785714285714285, 0.8035714285714286], - 
[0.30357142857142855, 0.8035714285714286], - [0.30357142857142855, 0.8035714285714286], - [0.3392857142857143, 0.8035714285714286], - [0.3392857142857143, 0.8035714285714286], - [0.375, 0.8035714285714286], - [0.375, 0.8035714285714286], - [0.4107142857142857, 0.8035714285714286], - [0.4107142857142857, 0.8035714285714286], - [0.44642857142857145, 0.8035714285714286], - [0.44642857142857145, 0.8035714285714286], - [0.48214285714285715, 0.8035714285714286], - [0.48214285714285715, 0.8035714285714286], - [0.5178571428571429, 0.8035714285714286], - [0.5178571428571429, 0.8035714285714286], - [0.5535714285714286, 0.8035714285714286], - [0.5535714285714286, 0.8035714285714286], - [0.5892857142857143, 0.8035714285714286], - [0.5892857142857143, 0.8035714285714286], - [0.625, 0.8035714285714286], - [0.625, 0.8035714285714286], - [0.6607142857142857, 0.8035714285714286], - [0.6607142857142857, 0.8035714285714286], - [0.6964285714285714, 0.8035714285714286], - [0.6964285714285714, 0.8035714285714286], - [0.7321428571428571, 0.8035714285714286], - [0.7321428571428571, 0.8035714285714286], - [0.7678571428571429, 0.8035714285714286], - [0.7678571428571429, 0.8035714285714286], - [0.8035714285714286, 0.8035714285714286], - [0.8035714285714286, 0.8035714285714286], - [0.8392857142857143, 0.8035714285714286], - [0.8392857142857143, 0.8035714285714286], - [0.875, 0.8035714285714286], - [0.875, 0.8035714285714286], - [0.9107142857142857, 0.8035714285714286], - [0.9107142857142857, 0.8035714285714286], - [0.9464285714285714, 0.8035714285714286], - [0.9464285714285714, 0.8035714285714286], - [0.9821428571428571, 0.8035714285714286], - [0.9821428571428571, 0.8035714285714286], - [0.017857142857142856, 0.8392857142857143], - [0.017857142857142856, 0.8392857142857143], - [0.05357142857142857, 0.8392857142857143], - [0.05357142857142857, 0.8392857142857143], - [0.08928571428571429, 0.8392857142857143], - [0.08928571428571429, 0.8392857142857143], - [0.125, 0.8392857142857143], - [0.125, 
0.8392857142857143], - [0.16071428571428573, 0.8392857142857143], - [0.16071428571428573, 0.8392857142857143], - [0.19642857142857142, 0.8392857142857143], - [0.19642857142857142, 0.8392857142857143], - [0.23214285714285715, 0.8392857142857143], - [0.23214285714285715, 0.8392857142857143], - [0.26785714285714285, 0.8392857142857143], - [0.26785714285714285, 0.8392857142857143], - [0.30357142857142855, 0.8392857142857143], - [0.30357142857142855, 0.8392857142857143], - [0.3392857142857143, 0.8392857142857143], - [0.3392857142857143, 0.8392857142857143], - [0.375, 0.8392857142857143], - [0.375, 0.8392857142857143], - [0.4107142857142857, 0.8392857142857143], - [0.4107142857142857, 0.8392857142857143], - [0.44642857142857145, 0.8392857142857143], - [0.44642857142857145, 0.8392857142857143], - [0.48214285714285715, 0.8392857142857143], - [0.48214285714285715, 0.8392857142857143], - [0.5178571428571429, 0.8392857142857143], - [0.5178571428571429, 0.8392857142857143], - [0.5535714285714286, 0.8392857142857143], - [0.5535714285714286, 0.8392857142857143], - [0.5892857142857143, 0.8392857142857143], - [0.5892857142857143, 0.8392857142857143], - [0.625, 0.8392857142857143], - [0.625, 0.8392857142857143], - [0.6607142857142857, 0.8392857142857143], - [0.6607142857142857, 0.8392857142857143], - [0.6964285714285714, 0.8392857142857143], - [0.6964285714285714, 0.8392857142857143], - [0.7321428571428571, 0.8392857142857143], - [0.7321428571428571, 0.8392857142857143], - [0.7678571428571429, 0.8392857142857143], - [0.7678571428571429, 0.8392857142857143], - [0.8035714285714286, 0.8392857142857143], - [0.8035714285714286, 0.8392857142857143], - [0.8392857142857143, 0.8392857142857143], - [0.8392857142857143, 0.8392857142857143], - [0.875, 0.8392857142857143], - [0.875, 0.8392857142857143], - [0.9107142857142857, 0.8392857142857143], - [0.9107142857142857, 0.8392857142857143], - [0.9464285714285714, 0.8392857142857143], - [0.9464285714285714, 0.8392857142857143], - 
[0.9821428571428571, 0.8392857142857143], - [0.9821428571428571, 0.8392857142857143], - [0.017857142857142856, 0.875], - [0.017857142857142856, 0.875], - [0.05357142857142857, 0.875], - [0.05357142857142857, 0.875], - [0.08928571428571429, 0.875], - [0.08928571428571429, 0.875], - [0.125, 0.875], - [0.125, 0.875], - [0.16071428571428573, 0.875], - [0.16071428571428573, 0.875], - [0.19642857142857142, 0.875], - [0.19642857142857142, 0.875], - [0.23214285714285715, 0.875], - [0.23214285714285715, 0.875], - [0.26785714285714285, 0.875], - [0.26785714285714285, 0.875], - [0.30357142857142855, 0.875], - [0.30357142857142855, 0.875], - [0.3392857142857143, 0.875], - [0.3392857142857143, 0.875], - [0.375, 0.875], - [0.375, 0.875], - [0.4107142857142857, 0.875], - [0.4107142857142857, 0.875], - [0.44642857142857145, 0.875], - [0.44642857142857145, 0.875], - [0.48214285714285715, 0.875], - [0.48214285714285715, 0.875], - [0.5178571428571429, 0.875], - [0.5178571428571429, 0.875], - [0.5535714285714286, 0.875], - [0.5535714285714286, 0.875], - [0.5892857142857143, 0.875], - [0.5892857142857143, 0.875], - [0.625, 0.875], - [0.625, 0.875], - [0.6607142857142857, 0.875], - [0.6607142857142857, 0.875], - [0.6964285714285714, 0.875], - [0.6964285714285714, 0.875], - [0.7321428571428571, 0.875], - [0.7321428571428571, 0.875], - [0.7678571428571429, 0.875], - [0.7678571428571429, 0.875], - [0.8035714285714286, 0.875], - [0.8035714285714286, 0.875], - [0.8392857142857143, 0.875], - [0.8392857142857143, 0.875], - [0.875, 0.875], - [0.875, 0.875], - [0.9107142857142857, 0.875], - [0.9107142857142857, 0.875], - [0.9464285714285714, 0.875], - [0.9464285714285714, 0.875], - [0.9821428571428571, 0.875], - [0.9821428571428571, 0.875], - [0.017857142857142856, 0.9107142857142857], - [0.017857142857142856, 0.9107142857142857], - [0.05357142857142857, 0.9107142857142857], - [0.05357142857142857, 0.9107142857142857], - [0.08928571428571429, 0.9107142857142857], - [0.08928571428571429, 
0.9107142857142857], - [0.125, 0.9107142857142857], - [0.125, 0.9107142857142857], - [0.16071428571428573, 0.9107142857142857], - [0.16071428571428573, 0.9107142857142857], - [0.19642857142857142, 0.9107142857142857], - [0.19642857142857142, 0.9107142857142857], - [0.23214285714285715, 0.9107142857142857], - [0.23214285714285715, 0.9107142857142857], - [0.26785714285714285, 0.9107142857142857], - [0.26785714285714285, 0.9107142857142857], - [0.30357142857142855, 0.9107142857142857], - [0.30357142857142855, 0.9107142857142857], - [0.3392857142857143, 0.9107142857142857], - [0.3392857142857143, 0.9107142857142857], - [0.375, 0.9107142857142857], - [0.375, 0.9107142857142857], - [0.4107142857142857, 0.9107142857142857], - [0.4107142857142857, 0.9107142857142857], - [0.44642857142857145, 0.9107142857142857], - [0.44642857142857145, 0.9107142857142857], - [0.48214285714285715, 0.9107142857142857], - [0.48214285714285715, 0.9107142857142857], - [0.5178571428571429, 0.9107142857142857], - [0.5178571428571429, 0.9107142857142857], - [0.5535714285714286, 0.9107142857142857], - [0.5535714285714286, 0.9107142857142857], - [0.5892857142857143, 0.9107142857142857], - [0.5892857142857143, 0.9107142857142857], - [0.625, 0.9107142857142857], - [0.625, 0.9107142857142857], - [0.6607142857142857, 0.9107142857142857], - [0.6607142857142857, 0.9107142857142857], - [0.6964285714285714, 0.9107142857142857], - [0.6964285714285714, 0.9107142857142857], - [0.7321428571428571, 0.9107142857142857], - [0.7321428571428571, 0.9107142857142857], - [0.7678571428571429, 0.9107142857142857], - [0.7678571428571429, 0.9107142857142857], - [0.8035714285714286, 0.9107142857142857], - [0.8035714285714286, 0.9107142857142857], - [0.8392857142857143, 0.9107142857142857], - [0.8392857142857143, 0.9107142857142857], - [0.875, 0.9107142857142857], - [0.875, 0.9107142857142857], - [0.9107142857142857, 0.9107142857142857], - [0.9107142857142857, 0.9107142857142857], - [0.9464285714285714, 0.9107142857142857], 
- [0.9464285714285714, 0.9107142857142857], - [0.9821428571428571, 0.9107142857142857], - [0.9821428571428571, 0.9107142857142857], - [0.017857142857142856, 0.9464285714285714], - [0.017857142857142856, 0.9464285714285714], - [0.05357142857142857, 0.9464285714285714], - [0.05357142857142857, 0.9464285714285714], - [0.08928571428571429, 0.9464285714285714], - [0.08928571428571429, 0.9464285714285714], - [0.125, 0.9464285714285714], - [0.125, 0.9464285714285714], - [0.16071428571428573, 0.9464285714285714], - [0.16071428571428573, 0.9464285714285714], - [0.19642857142857142, 0.9464285714285714], - [0.19642857142857142, 0.9464285714285714], - [0.23214285714285715, 0.9464285714285714], - [0.23214285714285715, 0.9464285714285714], - [0.26785714285714285, 0.9464285714285714], - [0.26785714285714285, 0.9464285714285714], - [0.30357142857142855, 0.9464285714285714], - [0.30357142857142855, 0.9464285714285714], - [0.3392857142857143, 0.9464285714285714], - [0.3392857142857143, 0.9464285714285714], - [0.375, 0.9464285714285714], - [0.375, 0.9464285714285714], - [0.4107142857142857, 0.9464285714285714], - [0.4107142857142857, 0.9464285714285714], - [0.44642857142857145, 0.9464285714285714], - [0.44642857142857145, 0.9464285714285714], - [0.48214285714285715, 0.9464285714285714], - [0.48214285714285715, 0.9464285714285714], - [0.5178571428571429, 0.9464285714285714], - [0.5178571428571429, 0.9464285714285714], - [0.5535714285714286, 0.9464285714285714], - [0.5535714285714286, 0.9464285714285714], - [0.5892857142857143, 0.9464285714285714], - [0.5892857142857143, 0.9464285714285714], - [0.625, 0.9464285714285714], - [0.625, 0.9464285714285714], - [0.6607142857142857, 0.9464285714285714], - [0.6607142857142857, 0.9464285714285714], - [0.6964285714285714, 0.9464285714285714], - [0.6964285714285714, 0.9464285714285714], - [0.7321428571428571, 0.9464285714285714], - [0.7321428571428571, 0.9464285714285714], - [0.7678571428571429, 0.9464285714285714], - [0.7678571428571429, 
0.9464285714285714], - [0.8035714285714286, 0.9464285714285714], - [0.8035714285714286, 0.9464285714285714], - [0.8392857142857143, 0.9464285714285714], - [0.8392857142857143, 0.9464285714285714], - [0.875, 0.9464285714285714], - [0.875, 0.9464285714285714], - [0.9107142857142857, 0.9464285714285714], - [0.9107142857142857, 0.9464285714285714], - [0.9464285714285714, 0.9464285714285714], - [0.9464285714285714, 0.9464285714285714], - [0.9821428571428571, 0.9464285714285714], - [0.9821428571428571, 0.9464285714285714], - [0.017857142857142856, 0.9821428571428571], - [0.017857142857142856, 0.9821428571428571], - [0.05357142857142857, 0.9821428571428571], - [0.05357142857142857, 0.9821428571428571], - [0.08928571428571429, 0.9821428571428571], - [0.08928571428571429, 0.9821428571428571], - [0.125, 0.9821428571428571], - [0.125, 0.9821428571428571], - [0.16071428571428573, 0.9821428571428571], - [0.16071428571428573, 0.9821428571428571], - [0.19642857142857142, 0.9821428571428571], - [0.19642857142857142, 0.9821428571428571], - [0.23214285714285715, 0.9821428571428571], - [0.23214285714285715, 0.9821428571428571], - [0.26785714285714285, 0.9821428571428571], - [0.26785714285714285, 0.9821428571428571], - [0.30357142857142855, 0.9821428571428571], - [0.30357142857142855, 0.9821428571428571], - [0.3392857142857143, 0.9821428571428571], - [0.3392857142857143, 0.9821428571428571], - [0.375, 0.9821428571428571], - [0.375, 0.9821428571428571], - [0.4107142857142857, 0.9821428571428571], - [0.4107142857142857, 0.9821428571428571], - [0.44642857142857145, 0.9821428571428571], - [0.44642857142857145, 0.9821428571428571], - [0.48214285714285715, 0.9821428571428571], - [0.48214285714285715, 0.9821428571428571], - [0.5178571428571429, 0.9821428571428571], - [0.5178571428571429, 0.9821428571428571], - [0.5535714285714286, 0.9821428571428571], - [0.5535714285714286, 0.9821428571428571], - [0.5892857142857143, 0.9821428571428571], - [0.5892857142857143, 0.9821428571428571], - [0.625, 
0.9821428571428571], - [0.625, 0.9821428571428571], - [0.6607142857142857, 0.9821428571428571], - [0.6607142857142857, 0.9821428571428571], - [0.6964285714285714, 0.9821428571428571], - [0.6964285714285714, 0.9821428571428571], - [0.7321428571428571, 0.9821428571428571], - [0.7321428571428571, 0.9821428571428571], - [0.7678571428571429, 0.9821428571428571], - [0.7678571428571429, 0.9821428571428571], - [0.8035714285714286, 0.9821428571428571], - [0.8035714285714286, 0.9821428571428571], - [0.8392857142857143, 0.9821428571428571], - [0.8392857142857143, 0.9821428571428571], - [0.875, 0.9821428571428571], - [0.875, 0.9821428571428571], - [0.9107142857142857, 0.9821428571428571], - [0.9107142857142857, 0.9821428571428571], - [0.9464285714285714, 0.9821428571428571], - [0.9464285714285714, 0.9821428571428571], - [0.9821428571428571, 0.9821428571428571], - [0.9821428571428571, 0.9821428571428571], - [0.03571428571428571, 0.03571428571428571], - [0.03571428571428571, 0.03571428571428571], - [0.10714285714285714, 0.03571428571428571], - [0.10714285714285714, 0.03571428571428571], - [0.17857142857142858, 0.03571428571428571], - [0.17857142857142858, 0.03571428571428571], - [0.25, 0.03571428571428571], - [0.25, 0.03571428571428571], - [0.32142857142857145, 0.03571428571428571], - [0.32142857142857145, 0.03571428571428571], - [0.39285714285714285, 0.03571428571428571], - [0.39285714285714285, 0.03571428571428571], - [0.4642857142857143, 0.03571428571428571], - [0.4642857142857143, 0.03571428571428571], - [0.5357142857142857, 0.03571428571428571], - [0.5357142857142857, 0.03571428571428571], - [0.6071428571428571, 0.03571428571428571], - [0.6071428571428571, 0.03571428571428571], - [0.6785714285714286, 0.03571428571428571], - [0.6785714285714286, 0.03571428571428571], - [0.75, 0.03571428571428571], - [0.75, 0.03571428571428571], - [0.8214285714285714, 0.03571428571428571], - [0.8214285714285714, 0.03571428571428571], - [0.8928571428571429, 0.03571428571428571], - 
[0.8928571428571429, 0.03571428571428571], - [0.9642857142857143, 0.03571428571428571], - [0.9642857142857143, 0.03571428571428571], - [0.03571428571428571, 0.10714285714285714], - [0.03571428571428571, 0.10714285714285714], - [0.10714285714285714, 0.10714285714285714], - [0.10714285714285714, 0.10714285714285714], - [0.17857142857142858, 0.10714285714285714], - [0.17857142857142858, 0.10714285714285714], - [0.25, 0.10714285714285714], - [0.25, 0.10714285714285714], - [0.32142857142857145, 0.10714285714285714], - [0.32142857142857145, 0.10714285714285714], - [0.39285714285714285, 0.10714285714285714], - [0.39285714285714285, 0.10714285714285714], - [0.4642857142857143, 0.10714285714285714], - [0.4642857142857143, 0.10714285714285714], - [0.5357142857142857, 0.10714285714285714], - [0.5357142857142857, 0.10714285714285714], - [0.6071428571428571, 0.10714285714285714], - [0.6071428571428571, 0.10714285714285714], - [0.6785714285714286, 0.10714285714285714], - [0.6785714285714286, 0.10714285714285714], - [0.75, 0.10714285714285714], - [0.75, 0.10714285714285714], - [0.8214285714285714, 0.10714285714285714], - [0.8214285714285714, 0.10714285714285714], - [0.8928571428571429, 0.10714285714285714], - [0.8928571428571429, 0.10714285714285714], - [0.9642857142857143, 0.10714285714285714], - [0.9642857142857143, 0.10714285714285714], - [0.03571428571428571, 0.17857142857142858], - [0.03571428571428571, 0.17857142857142858], - [0.10714285714285714, 0.17857142857142858], - [0.10714285714285714, 0.17857142857142858], - [0.17857142857142858, 0.17857142857142858], - [0.17857142857142858, 0.17857142857142858], - [0.25, 0.17857142857142858], - [0.25, 0.17857142857142858], - [0.32142857142857145, 0.17857142857142858], - [0.32142857142857145, 0.17857142857142858], - [0.39285714285714285, 0.17857142857142858], - [0.39285714285714285, 0.17857142857142858], - [0.4642857142857143, 0.17857142857142858], - [0.4642857142857143, 0.17857142857142858], - [0.5357142857142857, 
0.17857142857142858], - [0.5357142857142857, 0.17857142857142858], - [0.6071428571428571, 0.17857142857142858], - [0.6071428571428571, 0.17857142857142858], - [0.6785714285714286, 0.17857142857142858], - [0.6785714285714286, 0.17857142857142858], - [0.75, 0.17857142857142858], - [0.75, 0.17857142857142858], - [0.8214285714285714, 0.17857142857142858], - [0.8214285714285714, 0.17857142857142858], - [0.8928571428571429, 0.17857142857142858], - [0.8928571428571429, 0.17857142857142858], - [0.9642857142857143, 0.17857142857142858], - [0.9642857142857143, 0.17857142857142858], - [0.03571428571428571, 0.25], - [0.03571428571428571, 0.25], - [0.10714285714285714, 0.25], - [0.10714285714285714, 0.25], - [0.17857142857142858, 0.25], - [0.17857142857142858, 0.25], - [0.25, 0.25], - [0.25, 0.25], - [0.32142857142857145, 0.25], - [0.32142857142857145, 0.25], - [0.39285714285714285, 0.25], - [0.39285714285714285, 0.25], - [0.4642857142857143, 0.25], - [0.4642857142857143, 0.25], - [0.5357142857142857, 0.25], - [0.5357142857142857, 0.25], - [0.6071428571428571, 0.25], - [0.6071428571428571, 0.25], - [0.6785714285714286, 0.25], - [0.6785714285714286, 0.25], - [0.75, 0.25], - [0.75, 0.25], - [0.8214285714285714, 0.25], - [0.8214285714285714, 0.25], - [0.8928571428571429, 0.25], - [0.8928571428571429, 0.25], - [0.9642857142857143, 0.25], - [0.9642857142857143, 0.25], - [0.03571428571428571, 0.32142857142857145], - [0.03571428571428571, 0.32142857142857145], - [0.10714285714285714, 0.32142857142857145], - [0.10714285714285714, 0.32142857142857145], - [0.17857142857142858, 0.32142857142857145], - [0.17857142857142858, 0.32142857142857145], - [0.25, 0.32142857142857145], - [0.25, 0.32142857142857145], - [0.32142857142857145, 0.32142857142857145], - [0.32142857142857145, 0.32142857142857145], - [0.39285714285714285, 0.32142857142857145], - [0.39285714285714285, 0.32142857142857145], - [0.4642857142857143, 0.32142857142857145], - [0.4642857142857143, 0.32142857142857145], - 
[0.5357142857142857, 0.32142857142857145], - [0.5357142857142857, 0.32142857142857145], - [0.6071428571428571, 0.32142857142857145], - [0.6071428571428571, 0.32142857142857145], - [0.6785714285714286, 0.32142857142857145], - [0.6785714285714286, 0.32142857142857145], - [0.75, 0.32142857142857145], - [0.75, 0.32142857142857145], - [0.8214285714285714, 0.32142857142857145], - [0.8214285714285714, 0.32142857142857145], - [0.8928571428571429, 0.32142857142857145], - [0.8928571428571429, 0.32142857142857145], - [0.9642857142857143, 0.32142857142857145], - [0.9642857142857143, 0.32142857142857145], - [0.03571428571428571, 0.39285714285714285], - [0.03571428571428571, 0.39285714285714285], - [0.10714285714285714, 0.39285714285714285], - [0.10714285714285714, 0.39285714285714285], - [0.17857142857142858, 0.39285714285714285], - [0.17857142857142858, 0.39285714285714285], - [0.25, 0.39285714285714285], - [0.25, 0.39285714285714285], - [0.32142857142857145, 0.39285714285714285], - [0.32142857142857145, 0.39285714285714285], - [0.39285714285714285, 0.39285714285714285], - [0.39285714285714285, 0.39285714285714285], - [0.4642857142857143, 0.39285714285714285], - [0.4642857142857143, 0.39285714285714285], - [0.5357142857142857, 0.39285714285714285], - [0.5357142857142857, 0.39285714285714285], - [0.6071428571428571, 0.39285714285714285], - [0.6071428571428571, 0.39285714285714285], - [0.6785714285714286, 0.39285714285714285], - [0.6785714285714286, 0.39285714285714285], - [0.75, 0.39285714285714285], - [0.75, 0.39285714285714285], - [0.8214285714285714, 0.39285714285714285], - [0.8214285714285714, 0.39285714285714285], - [0.8928571428571429, 0.39285714285714285], - [0.8928571428571429, 0.39285714285714285], - [0.9642857142857143, 0.39285714285714285], - [0.9642857142857143, 0.39285714285714285], - [0.03571428571428571, 0.4642857142857143], - [0.03571428571428571, 0.4642857142857143], - [0.10714285714285714, 0.4642857142857143], - [0.10714285714285714, 0.4642857142857143], - 
[0.17857142857142858, 0.4642857142857143], - [0.17857142857142858, 0.4642857142857143], - [0.25, 0.4642857142857143], - [0.25, 0.4642857142857143], - [0.32142857142857145, 0.4642857142857143], - [0.32142857142857145, 0.4642857142857143], - [0.39285714285714285, 0.4642857142857143], - [0.39285714285714285, 0.4642857142857143], - [0.4642857142857143, 0.4642857142857143], - [0.4642857142857143, 0.4642857142857143], - [0.5357142857142857, 0.4642857142857143], - [0.5357142857142857, 0.4642857142857143], - [0.6071428571428571, 0.4642857142857143], - [0.6071428571428571, 0.4642857142857143], - [0.6785714285714286, 0.4642857142857143], - [0.6785714285714286, 0.4642857142857143], - [0.75, 0.4642857142857143], - [0.75, 0.4642857142857143], - [0.8214285714285714, 0.4642857142857143], - [0.8214285714285714, 0.4642857142857143], - [0.8928571428571429, 0.4642857142857143], - [0.8928571428571429, 0.4642857142857143], - [0.9642857142857143, 0.4642857142857143], - [0.9642857142857143, 0.4642857142857143], - [0.03571428571428571, 0.5357142857142857], - [0.03571428571428571, 0.5357142857142857], - [0.10714285714285714, 0.5357142857142857], - [0.10714285714285714, 0.5357142857142857], - [0.17857142857142858, 0.5357142857142857], - [0.17857142857142858, 0.5357142857142857], - [0.25, 0.5357142857142857], - [0.25, 0.5357142857142857], - [0.32142857142857145, 0.5357142857142857], - [0.32142857142857145, 0.5357142857142857], - [0.39285714285714285, 0.5357142857142857], - [0.39285714285714285, 0.5357142857142857], - [0.4642857142857143, 0.5357142857142857], - [0.4642857142857143, 0.5357142857142857], - [0.5357142857142857, 0.5357142857142857], - [0.5357142857142857, 0.5357142857142857], - [0.6071428571428571, 0.5357142857142857], - [0.6071428571428571, 0.5357142857142857], - [0.6785714285714286, 0.5357142857142857], - [0.6785714285714286, 0.5357142857142857], - [0.75, 0.5357142857142857], - [0.75, 0.5357142857142857], - [0.8214285714285714, 0.5357142857142857], - [0.8214285714285714, 
0.5357142857142857], - [0.8928571428571429, 0.5357142857142857], - [0.8928571428571429, 0.5357142857142857], - [0.9642857142857143, 0.5357142857142857], - [0.9642857142857143, 0.5357142857142857], - [0.03571428571428571, 0.6071428571428571], - [0.03571428571428571, 0.6071428571428571], - [0.10714285714285714, 0.6071428571428571], - [0.10714285714285714, 0.6071428571428571], - [0.17857142857142858, 0.6071428571428571], - [0.17857142857142858, 0.6071428571428571], - [0.25, 0.6071428571428571], - [0.25, 0.6071428571428571], - [0.32142857142857145, 0.6071428571428571], - [0.32142857142857145, 0.6071428571428571], - [0.39285714285714285, 0.6071428571428571], - [0.39285714285714285, 0.6071428571428571], - [0.4642857142857143, 0.6071428571428571], - [0.4642857142857143, 0.6071428571428571], - [0.5357142857142857, 0.6071428571428571], - [0.5357142857142857, 0.6071428571428571], - [0.6071428571428571, 0.6071428571428571], - [0.6071428571428571, 0.6071428571428571], - [0.6785714285714286, 0.6071428571428571], - [0.6785714285714286, 0.6071428571428571], - [0.75, 0.6071428571428571], - [0.75, 0.6071428571428571], - [0.8214285714285714, 0.6071428571428571], - [0.8214285714285714, 0.6071428571428571], - [0.8928571428571429, 0.6071428571428571], - [0.8928571428571429, 0.6071428571428571], - [0.9642857142857143, 0.6071428571428571], - [0.9642857142857143, 0.6071428571428571], - [0.03571428571428571, 0.6785714285714286], - [0.03571428571428571, 0.6785714285714286], - [0.10714285714285714, 0.6785714285714286], - [0.10714285714285714, 0.6785714285714286], - [0.17857142857142858, 0.6785714285714286], - [0.17857142857142858, 0.6785714285714286], - [0.25, 0.6785714285714286], - [0.25, 0.6785714285714286], - [0.32142857142857145, 0.6785714285714286], - [0.32142857142857145, 0.6785714285714286], - [0.39285714285714285, 0.6785714285714286], - [0.39285714285714285, 0.6785714285714286], - [0.4642857142857143, 0.6785714285714286], - [0.4642857142857143, 0.6785714285714286], - 
[0.5357142857142857, 0.6785714285714286], - [0.5357142857142857, 0.6785714285714286], - [0.6071428571428571, 0.6785714285714286], - [0.6071428571428571, 0.6785714285714286], - [0.6785714285714286, 0.6785714285714286], - [0.6785714285714286, 0.6785714285714286], - [0.75, 0.6785714285714286], - [0.75, 0.6785714285714286], - [0.8214285714285714, 0.6785714285714286], - [0.8214285714285714, 0.6785714285714286], - [0.8928571428571429, 0.6785714285714286], - [0.8928571428571429, 0.6785714285714286], - [0.9642857142857143, 0.6785714285714286], - [0.9642857142857143, 0.6785714285714286], - [0.03571428571428571, 0.75], - [0.03571428571428571, 0.75], - [0.10714285714285714, 0.75], - [0.10714285714285714, 0.75], - [0.17857142857142858, 0.75], - [0.17857142857142858, 0.75], - [0.25, 0.75], - [0.25, 0.75], - [0.32142857142857145, 0.75], - [0.32142857142857145, 0.75], - [0.39285714285714285, 0.75], - [0.39285714285714285, 0.75], - [0.4642857142857143, 0.75], - [0.4642857142857143, 0.75], - [0.5357142857142857, 0.75], - [0.5357142857142857, 0.75], - [0.6071428571428571, 0.75], - [0.6071428571428571, 0.75], - [0.6785714285714286, 0.75], - [0.6785714285714286, 0.75], - [0.75, 0.75], - [0.75, 0.75], - [0.8214285714285714, 0.75], - [0.8214285714285714, 0.75], - [0.8928571428571429, 0.75], - [0.8928571428571429, 0.75], - [0.9642857142857143, 0.75], - [0.9642857142857143, 0.75], - [0.03571428571428571, 0.8214285714285714], - [0.03571428571428571, 0.8214285714285714], - [0.10714285714285714, 0.8214285714285714], - [0.10714285714285714, 0.8214285714285714], - [0.17857142857142858, 0.8214285714285714], - [0.17857142857142858, 0.8214285714285714], - [0.25, 0.8214285714285714], - [0.25, 0.8214285714285714], - [0.32142857142857145, 0.8214285714285714], - [0.32142857142857145, 0.8214285714285714], - [0.39285714285714285, 0.8214285714285714], - [0.39285714285714285, 0.8214285714285714], - [0.4642857142857143, 0.8214285714285714], - [0.4642857142857143, 0.8214285714285714], - 
[0.5357142857142857, 0.8214285714285714], - [0.5357142857142857, 0.8214285714285714], - [0.6071428571428571, 0.8214285714285714], - [0.6071428571428571, 0.8214285714285714], - [0.6785714285714286, 0.8214285714285714], - [0.6785714285714286, 0.8214285714285714], - [0.75, 0.8214285714285714], - [0.75, 0.8214285714285714], - [0.8214285714285714, 0.8214285714285714], - [0.8214285714285714, 0.8214285714285714], - [0.8928571428571429, 0.8214285714285714], - [0.8928571428571429, 0.8214285714285714], - [0.9642857142857143, 0.8214285714285714], - [0.9642857142857143, 0.8214285714285714], - [0.03571428571428571, 0.8928571428571429], - [0.03571428571428571, 0.8928571428571429], - [0.10714285714285714, 0.8928571428571429], - [0.10714285714285714, 0.8928571428571429], - [0.17857142857142858, 0.8928571428571429], - [0.17857142857142858, 0.8928571428571429], - [0.25, 0.8928571428571429], - [0.25, 0.8928571428571429], - [0.32142857142857145, 0.8928571428571429], - [0.32142857142857145, 0.8928571428571429], - [0.39285714285714285, 0.8928571428571429], - [0.39285714285714285, 0.8928571428571429], - [0.4642857142857143, 0.8928571428571429], - [0.4642857142857143, 0.8928571428571429], - [0.5357142857142857, 0.8928571428571429], - [0.5357142857142857, 0.8928571428571429], - [0.6071428571428571, 0.8928571428571429], - [0.6071428571428571, 0.8928571428571429], - [0.6785714285714286, 0.8928571428571429], - [0.6785714285714286, 0.8928571428571429], - [0.75, 0.8928571428571429], - [0.75, 0.8928571428571429], - [0.8214285714285714, 0.8928571428571429], - [0.8214285714285714, 0.8928571428571429], - [0.8928571428571429, 0.8928571428571429], - [0.8928571428571429, 0.8928571428571429], - [0.9642857142857143, 0.8928571428571429], - [0.9642857142857143, 0.8928571428571429], - [0.03571428571428571, 0.9642857142857143], - [0.03571428571428571, 0.9642857142857143], - [0.10714285714285714, 0.9642857142857143], - [0.10714285714285714, 0.9642857142857143], - [0.17857142857142858, 0.9642857142857143], - 
[0.17857142857142858, 0.9642857142857143], - [0.25, 0.9642857142857143], - [0.25, 0.9642857142857143], - [0.32142857142857145, 0.9642857142857143], - [0.32142857142857145, 0.9642857142857143], - [0.39285714285714285, 0.9642857142857143], - [0.39285714285714285, 0.9642857142857143], - [0.4642857142857143, 0.9642857142857143], - [0.4642857142857143, 0.9642857142857143], - [0.5357142857142857, 0.9642857142857143], - [0.5357142857142857, 0.9642857142857143], - [0.6071428571428571, 0.9642857142857143], - [0.6071428571428571, 0.9642857142857143], - [0.6785714285714286, 0.9642857142857143], - [0.6785714285714286, 0.9642857142857143], - [0.75, 0.9642857142857143], - [0.75, 0.9642857142857143], - [0.8214285714285714, 0.9642857142857143], - [0.8214285714285714, 0.9642857142857143], - [0.8928571428571429, 0.9642857142857143], - [0.8928571428571429, 0.9642857142857143], - [0.9642857142857143, 0.9642857142857143], - [0.9642857142857143, 0.9642857142857143], - [0.07142857142857142, 0.07142857142857142], - [0.07142857142857142, 0.07142857142857142], - [0.07142857142857142, 0.07142857142857142], - [0.07142857142857142, 0.07142857142857142], - [0.07142857142857142, 0.07142857142857142], - [0.07142857142857142, 0.07142857142857142], - [0.21428571428571427, 0.07142857142857142], - [0.21428571428571427, 0.07142857142857142], - [0.21428571428571427, 0.07142857142857142], - [0.21428571428571427, 0.07142857142857142], - [0.21428571428571427, 0.07142857142857142], - [0.21428571428571427, 0.07142857142857142], - [0.35714285714285715, 0.07142857142857142], - [0.35714285714285715, 0.07142857142857142], - [0.35714285714285715, 0.07142857142857142], - [0.35714285714285715, 0.07142857142857142], - [0.35714285714285715, 0.07142857142857142], - [0.35714285714285715, 0.07142857142857142], - [0.5, 0.07142857142857142], - [0.5, 0.07142857142857142], - [0.5, 0.07142857142857142], - [0.5, 0.07142857142857142], - [0.5, 0.07142857142857142], - [0.5, 0.07142857142857142], - [0.6428571428571429, 
0.07142857142857142], - [0.6428571428571429, 0.07142857142857142], - [0.6428571428571429, 0.07142857142857142], - [0.6428571428571429, 0.07142857142857142], - [0.6428571428571429, 0.07142857142857142], - [0.6428571428571429, 0.07142857142857142], - [0.7857142857142857, 0.07142857142857142], - [0.7857142857142857, 0.07142857142857142], - [0.7857142857142857, 0.07142857142857142], - [0.7857142857142857, 0.07142857142857142], - [0.7857142857142857, 0.07142857142857142], - [0.7857142857142857, 0.07142857142857142], - [0.9285714285714286, 0.07142857142857142], - [0.9285714285714286, 0.07142857142857142], - [0.9285714285714286, 0.07142857142857142], - [0.9285714285714286, 0.07142857142857142], - [0.9285714285714286, 0.07142857142857142], - [0.9285714285714286, 0.07142857142857142], - [0.07142857142857142, 0.21428571428571427], - [0.07142857142857142, 0.21428571428571427], - [0.07142857142857142, 0.21428571428571427], - [0.07142857142857142, 0.21428571428571427], - [0.07142857142857142, 0.21428571428571427], - [0.07142857142857142, 0.21428571428571427], - [0.21428571428571427, 0.21428571428571427], - [0.21428571428571427, 0.21428571428571427], - [0.21428571428571427, 0.21428571428571427], - [0.21428571428571427, 0.21428571428571427], - [0.21428571428571427, 0.21428571428571427], - [0.21428571428571427, 0.21428571428571427], - [0.35714285714285715, 0.21428571428571427], - [0.35714285714285715, 0.21428571428571427], - [0.35714285714285715, 0.21428571428571427], - [0.35714285714285715, 0.21428571428571427], - [0.35714285714285715, 0.21428571428571427], - [0.35714285714285715, 0.21428571428571427], - [0.5, 0.21428571428571427], - [0.5, 0.21428571428571427], - [0.5, 0.21428571428571427], - [0.5, 0.21428571428571427], - [0.5, 0.21428571428571427], - [0.5, 0.21428571428571427], - [0.6428571428571429, 0.21428571428571427], - [0.6428571428571429, 0.21428571428571427], - [0.6428571428571429, 0.21428571428571427], - [0.6428571428571429, 0.21428571428571427], - [0.6428571428571429, 
0.21428571428571427], - [0.6428571428571429, 0.21428571428571427], - [0.7857142857142857, 0.21428571428571427], - [0.7857142857142857, 0.21428571428571427], - [0.7857142857142857, 0.21428571428571427], - [0.7857142857142857, 0.21428571428571427], - [0.7857142857142857, 0.21428571428571427], - [0.7857142857142857, 0.21428571428571427], - [0.9285714285714286, 0.21428571428571427], - [0.9285714285714286, 0.21428571428571427], - [0.9285714285714286, 0.21428571428571427], - [0.9285714285714286, 0.21428571428571427], - [0.9285714285714286, 0.21428571428571427], - [0.9285714285714286, 0.21428571428571427], - [0.07142857142857142, 0.35714285714285715], - [0.07142857142857142, 0.35714285714285715], - [0.07142857142857142, 0.35714285714285715], - [0.07142857142857142, 0.35714285714285715], - [0.07142857142857142, 0.35714285714285715], - [0.07142857142857142, 0.35714285714285715], - [0.21428571428571427, 0.35714285714285715], - [0.21428571428571427, 0.35714285714285715], - [0.21428571428571427, 0.35714285714285715], - [0.21428571428571427, 0.35714285714285715], - [0.21428571428571427, 0.35714285714285715], - [0.21428571428571427, 0.35714285714285715], - [0.35714285714285715, 0.35714285714285715], - [0.35714285714285715, 0.35714285714285715], - [0.35714285714285715, 0.35714285714285715], - [0.35714285714285715, 0.35714285714285715], - [0.35714285714285715, 0.35714285714285715], - [0.35714285714285715, 0.35714285714285715], - [0.5, 0.35714285714285715], - [0.5, 0.35714285714285715], - [0.5, 0.35714285714285715], - [0.5, 0.35714285714285715], - [0.5, 0.35714285714285715], - [0.5, 0.35714285714285715], - [0.6428571428571429, 0.35714285714285715], - [0.6428571428571429, 0.35714285714285715], - [0.6428571428571429, 0.35714285714285715], - [0.6428571428571429, 0.35714285714285715], - [0.6428571428571429, 0.35714285714285715], - [0.6428571428571429, 0.35714285714285715], - [0.7857142857142857, 0.35714285714285715], - [0.7857142857142857, 0.35714285714285715], - [0.7857142857142857, 
0.35714285714285715], - [0.7857142857142857, 0.35714285714285715], - [0.7857142857142857, 0.35714285714285715], - [0.7857142857142857, 0.35714285714285715], - [0.9285714285714286, 0.35714285714285715], - [0.9285714285714286, 0.35714285714285715], - [0.9285714285714286, 0.35714285714285715], - [0.9285714285714286, 0.35714285714285715], - [0.9285714285714286, 0.35714285714285715], - [0.9285714285714286, 0.35714285714285715], - [0.07142857142857142, 0.5], - [0.07142857142857142, 0.5], - [0.07142857142857142, 0.5], - [0.07142857142857142, 0.5], - [0.07142857142857142, 0.5], - [0.07142857142857142, 0.5], - [0.21428571428571427, 0.5], - [0.21428571428571427, 0.5], - [0.21428571428571427, 0.5], - [0.21428571428571427, 0.5], - [0.21428571428571427, 0.5], - [0.21428571428571427, 0.5], - [0.35714285714285715, 0.5], - [0.35714285714285715, 0.5], - [0.35714285714285715, 0.5], - [0.35714285714285715, 0.5], - [0.35714285714285715, 0.5], - [0.35714285714285715, 0.5], - [0.5, 0.5], - [0.5, 0.5], - [0.5, 0.5], - [0.5, 0.5], - [0.5, 0.5], - [0.5, 0.5], - [0.6428571428571429, 0.5], - [0.6428571428571429, 0.5], - [0.6428571428571429, 0.5], - [0.6428571428571429, 0.5], - [0.6428571428571429, 0.5], - [0.6428571428571429, 0.5], - [0.7857142857142857, 0.5], - [0.7857142857142857, 0.5], - [0.7857142857142857, 0.5], - [0.7857142857142857, 0.5], - [0.7857142857142857, 0.5], - [0.7857142857142857, 0.5], - [0.9285714285714286, 0.5], - [0.9285714285714286, 0.5], - [0.9285714285714286, 0.5], - [0.9285714285714286, 0.5], - [0.9285714285714286, 0.5], - [0.9285714285714286, 0.5], - [0.07142857142857142, 0.6428571428571429], - [0.07142857142857142, 0.6428571428571429], - [0.07142857142857142, 0.6428571428571429], - [0.07142857142857142, 0.6428571428571429], - [0.07142857142857142, 0.6428571428571429], - [0.07142857142857142, 0.6428571428571429], - [0.21428571428571427, 0.6428571428571429], - [0.21428571428571427, 0.6428571428571429], - [0.21428571428571427, 0.6428571428571429], - 
[0.21428571428571427, 0.6428571428571429], - [0.21428571428571427, 0.6428571428571429], - [0.21428571428571427, 0.6428571428571429], - [0.35714285714285715, 0.6428571428571429], - [0.35714285714285715, 0.6428571428571429], - [0.35714285714285715, 0.6428571428571429], - [0.35714285714285715, 0.6428571428571429], - [0.35714285714285715, 0.6428571428571429], - [0.35714285714285715, 0.6428571428571429], - [0.5, 0.6428571428571429], - [0.5, 0.6428571428571429], - [0.5, 0.6428571428571429], - [0.5, 0.6428571428571429], - [0.5, 0.6428571428571429], - [0.5, 0.6428571428571429], - [0.6428571428571429, 0.6428571428571429], - [0.6428571428571429, 0.6428571428571429], - [0.6428571428571429, 0.6428571428571429], - [0.6428571428571429, 0.6428571428571429], - [0.6428571428571429, 0.6428571428571429], - [0.6428571428571429, 0.6428571428571429], - [0.7857142857142857, 0.6428571428571429], - [0.7857142857142857, 0.6428571428571429], - [0.7857142857142857, 0.6428571428571429], - [0.7857142857142857, 0.6428571428571429], - [0.7857142857142857, 0.6428571428571429], - [0.7857142857142857, 0.6428571428571429], - [0.9285714285714286, 0.6428571428571429], - [0.9285714285714286, 0.6428571428571429], - [0.9285714285714286, 0.6428571428571429], - [0.9285714285714286, 0.6428571428571429], - [0.9285714285714286, 0.6428571428571429], - [0.9285714285714286, 0.6428571428571429], - [0.07142857142857142, 0.7857142857142857], - [0.07142857142857142, 0.7857142857142857], - [0.07142857142857142, 0.7857142857142857], - [0.07142857142857142, 0.7857142857142857], - [0.07142857142857142, 0.7857142857142857], - [0.07142857142857142, 0.7857142857142857], - [0.21428571428571427, 0.7857142857142857], - [0.21428571428571427, 0.7857142857142857], - [0.21428571428571427, 0.7857142857142857], - [0.21428571428571427, 0.7857142857142857], - [0.21428571428571427, 0.7857142857142857], - [0.21428571428571427, 0.7857142857142857], - [0.35714285714285715, 0.7857142857142857], - [0.35714285714285715, 0.7857142857142857], 
- [0.35714285714285715, 0.7857142857142857], - [0.35714285714285715, 0.7857142857142857], - [0.35714285714285715, 0.7857142857142857], - [0.35714285714285715, 0.7857142857142857], - [0.5, 0.7857142857142857], - [0.5, 0.7857142857142857], - [0.5, 0.7857142857142857], - [0.5, 0.7857142857142857], - [0.5, 0.7857142857142857], - [0.5, 0.7857142857142857], - [0.6428571428571429, 0.7857142857142857], - [0.6428571428571429, 0.7857142857142857], - [0.6428571428571429, 0.7857142857142857], - [0.6428571428571429, 0.7857142857142857], - [0.6428571428571429, 0.7857142857142857], - [0.6428571428571429, 0.7857142857142857], - [0.7857142857142857, 0.7857142857142857], - [0.7857142857142857, 0.7857142857142857], - [0.7857142857142857, 0.7857142857142857], - [0.7857142857142857, 0.7857142857142857], - [0.7857142857142857, 0.7857142857142857], - [0.7857142857142857, 0.7857142857142857], - [0.9285714285714286, 0.7857142857142857], - [0.9285714285714286, 0.7857142857142857], - [0.9285714285714286, 0.7857142857142857], - [0.9285714285714286, 0.7857142857142857], - [0.9285714285714286, 0.7857142857142857], - [0.9285714285714286, 0.7857142857142857], - [0.07142857142857142, 0.9285714285714286], - [0.07142857142857142, 0.9285714285714286], - [0.07142857142857142, 0.9285714285714286], - [0.07142857142857142, 0.9285714285714286], - [0.07142857142857142, 0.9285714285714286], - [0.07142857142857142, 0.9285714285714286], - [0.21428571428571427, 0.9285714285714286], - [0.21428571428571427, 0.9285714285714286], - [0.21428571428571427, 0.9285714285714286], - [0.21428571428571427, 0.9285714285714286], - [0.21428571428571427, 0.9285714285714286], - [0.21428571428571427, 0.9285714285714286], - [0.35714285714285715, 0.9285714285714286], - [0.35714285714285715, 0.9285714285714286], - [0.35714285714285715, 0.9285714285714286], - [0.35714285714285715, 0.9285714285714286], - [0.35714285714285715, 0.9285714285714286], - [0.35714285714285715, 0.9285714285714286], - [0.5, 0.9285714285714286], - [0.5, 
0.9285714285714286], - [0.5, 0.9285714285714286], - [0.5, 0.9285714285714286], - [0.5, 0.9285714285714286], - [0.5, 0.9285714285714286], - [0.6428571428571429, 0.9285714285714286], - [0.6428571428571429, 0.9285714285714286], - [0.6428571428571429, 0.9285714285714286], - [0.6428571428571429, 0.9285714285714286], - [0.6428571428571429, 0.9285714285714286], - [0.6428571428571429, 0.9285714285714286], - [0.7857142857142857, 0.9285714285714286], - [0.7857142857142857, 0.9285714285714286], - [0.7857142857142857, 0.9285714285714286], - [0.7857142857142857, 0.9285714285714286], - [0.7857142857142857, 0.9285714285714286], - [0.7857142857142857, 0.9285714285714286], - [0.9285714285714286, 0.9285714285714286], - [0.9285714285714286, 0.9285714285714286], - [0.9285714285714286, 0.9285714285714286], - [0.9285714285714286, 0.9285714285714286], - [0.9285714285714286, 0.9285714285714286], - [0.9285714285714286, 0.9285714285714286]], dtype=np.float32) diff --git a/models/person_detection_mediapipe/person_detection_mediapipe_2023mar.onnx b/models/person_detection_mediapipe/person_detection_mediapipe_2023mar.onnx deleted file mode 100644 index 1780b51b..00000000 --- a/models/person_detection_mediapipe/person_detection_mediapipe_2023mar.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:47fd5599d6fa17608f03e0eb0ae230baa6e597d7e8a2c8199fe00abea55a701f -size 11990159 diff --git a/models/person_detection_mediapipe/person_detection_mediapipe_2023mar_int8bq.onnx b/models/person_detection_mediapipe/person_detection_mediapipe_2023mar_int8bq.onnx deleted file mode 100644 index 39078315..00000000 --- a/models/person_detection_mediapipe/person_detection_mediapipe_2023mar_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c5ed8c00c028b98e5d2c55b920a6e975af6c4cd538cfeea7c054f4fbbd8b9075 -size 3482053 diff --git a/models/person_reid_youtureid/CMakeLists.txt b/models/person_reid_youtureid/CMakeLists.txt deleted 
file mode 100644 index b8745c63..00000000 --- a/models/person_reid_youtureid/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ -cmake_minimum_required(VERSION 3.24.0) -project(opencv_zoo_person_reid_youtureid) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") - -# Find OpenCV -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) - -add_executable(demo demo.cpp) -target_link_libraries(demo ${OpenCV_LIBS}) \ No newline at end of file diff --git a/models/person_reid_youtureid/LICENSE b/models/person_reid_youtureid/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/models/person_reid_youtureid/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/models/person_reid_youtureid/README.md b/models/person_reid_youtureid/README.md deleted file mode 100644 index 650b1ab3..00000000 --- a/models/person_reid_youtureid/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Youtu ReID Baseline - -This model is provided by Tencent Youtu Lab [[Credits]](https://github.com/opencv/opencv/blob/394e640909d5d8edf9c1f578f8216d513373698c/samples/dnn/person_reid.py#L6-L11). - -**Note**: -- Model source: https://github.com/ReID-Team/ReID_extra_testdata -- `person_reid_youtu_2021nov_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. - -## Demo - -Run the following command to try the demo: - -### Python -```shell -python demo.py --query_dir /path/to/query --gallery_dir /path/to/gallery -v - -# get help regarding various parameters -python demo.py --help -``` - -### C++ -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -./build/demo --query_dir=/path/to/query --gallery_dir=/path/to/gallery -v - -# get help regarding various parameters -./build/demo --help -``` - -### License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). 
- -## Reference: - -- OpenCV DNN Sample: https://github.com/opencv/opencv/blob/4.x/samples/dnn/person_reid.py -- Model source: https://github.com/ReID-Team/ReID_extra_testdata diff --git a/models/person_reid_youtureid/demo.cpp b/models/person_reid_youtureid/demo.cpp deleted file mode 100644 index bac7cc1f..00000000 --- a/models/person_reid_youtureid/demo.cpp +++ /dev/null @@ -1,308 +0,0 @@ -#include -#include "opencv2/dnn.hpp" -#include -#include -#include -#include -#include - - -// YoutuReID class for person re-identification -class YoutuReID { -public: - YoutuReID(const std::string& model_path, - const cv::Size& input_size = cv::Size(128, 256), - int output_dim = 768, - const cv::Scalar& mean = cv::Scalar(0.485, 0.456, 0.406), - const cv::Scalar& std = cv::Scalar(0.229, 0.224, 0.225), - int backend_id = 0, - int target_id = 0) - : model_path_(model_path), input_size_(input_size), - output_dim_(output_dim), mean_(mean), std_(std), - backend_id_(backend_id), target_id_(target_id) - { - - model_ = cv::dnn::readNet(model_path_); - model_.setPreferableBackend(backend_id_); - model_.setPreferableTarget(target_id_); - } - - void setBackendAndTarget(int backend_id, int target_id) { - backend_id_ = backend_id; - target_id_ = target_id; - model_.setPreferableBackend(backend_id_); - model_.setPreferableTarget(target_id_); - } - - void setInputSize(const cv::Size& input_size) { - input_size_ = input_size; - } - - // Preprocess image by resizing, normalizing, and creating a blob - cv::Mat preprocess(const cv::Mat& image) { - cv::Mat img; - cv::cvtColor(image, img, cv::COLOR_BGR2RGB); - img.convertTo(img, CV_32F, 1.0 / 255.0); - - // Normalize each channel separately - std::vector channels(3); - cv::split(img, channels); - channels[0] = (channels[0] - mean_[0]) / std_[0]; - channels[1] = (channels[1] - mean_[1]) / std_[1]; - channels[2] = (channels[2] - mean_[2]) / std_[2]; - cv::merge(channels, img); - - return cv::dnn::blobFromImage(img); - } - - // Run inference to extract 
feature vector - cv::Mat infer(const cv::Mat& image) { - cv::Mat input_blob = preprocess(image); - model_.setInput(input_blob); - cv::Mat features = model_.forward(); - - if (features.dims == 4 && features.size[2] == 1 && features.size[3] == 1) { - features = features.reshape(1, {1, features.size[1]}); - } - - return features; - } - - // Perform query, comparing each query image to each gallery image - std::vector> query(const std::vector& query_img_list, - const std::vector& gallery_img_list, - int topK = 5) { - std::vector query_features_list, gallery_features_list; - cv::Mat query_features, gallery_features; - - for (size_t i = 0; i < query_img_list.size(); ++i) { - cv::Mat feature = infer(query_img_list[i]); - query_features_list.push_back(feature.clone()); - } - cv::vconcat(query_features_list, query_features); - normalizeFeatures(query_features); - - for (size_t i = 0; i < gallery_img_list.size(); ++i) { - cv::Mat feature = infer(gallery_img_list[i]); - gallery_features_list.push_back(feature.clone()); - } - cv::vconcat(gallery_features_list, gallery_features); - normalizeFeatures(gallery_features); - - cv::Mat dist = query_features * gallery_features.t(); - return getTopK(dist, topK); - } - -private: - // Normalize feature vectors row-wise to unit length - void normalizeFeatures(cv::Mat& features) { - const float epsilon = 1e-6; - for (int i = 0; i < features.rows; ++i) { - cv::Mat featureRow = features.row(i); - float norm = cv::norm(featureRow, cv::NORM_L2); - if (norm < epsilon) { - norm = epsilon; - } - featureRow /= norm; - } - } - - // Retrieve Top-K indices from similarity matrix - std::vector> getTopK(const cv::Mat& dist, int topK) { - std::vector> indices(dist.rows); - - for (int i = 0; i < dist.rows; ++i) { - std::vector> sim_index_pairs; - for (int j = 0; j < dist.cols; ++j) { - sim_index_pairs.emplace_back(dist.at(i, j), j); - } - std::sort(sim_index_pairs.begin(), sim_index_pairs.end(), - [](const std::pair& a, const std::pair& b) { - return 
a.first > b.first; - }); - - for (int k = 0; k < topK && k < sim_index_pairs.size(); ++k) { - indices[i].push_back(sim_index_pairs[k].second); - } - } - return indices; - } - - std::string model_path_; - cv::Size input_size_; - int output_dim_; - cv::Scalar mean_, std_; - int backend_id_; - int target_id_; - cv::dnn::Net model_; -}; - -// Read images from directory and return a pair of image list and file list -std::pair, std::vector> readImagesFromDirectory(const std::string& img_dir, int w = 128, int h = 256) { - std::vector img_list; - std::vector file_list; - - std::vector file_names; - cv::glob(img_dir + "/*", file_names, false); - - for (size_t i = 0; i < file_names.size(); ++i) { - std::string file_name = file_names[i].substr(file_names[i].find_last_of("/\\") + 1); - cv::Mat img = cv::imread(file_names[i]); - if (!img.empty()) { - cv::resize(img, img, cv::Size(w, h)); - img_list.push_back(img); - file_list.push_back(file_name); - } - } - return std::make_pair(img_list, file_list); -} - -// Visualize query and gallery results by creating concatenated images -std::map visualize( - const std::map>& results, - const std::string& query_dir, - const std::string& gallery_dir, - const cv::Size& output_size = cv::Size(128, 384)) { - - std::map results_vis; - - for (std::map>::const_iterator it = results.begin(); it != results.end(); ++it) { - const std::string& query_file = it->first; - const std::vector& top_matches = it->second; - - cv::Mat query_img = cv::imread(query_dir + "/" + query_file); - if (query_img.empty()) continue; - - cv::resize(query_img, query_img, output_size); - cv::copyMakeBorder(query_img, query_img, 5, 5, 5, 5, - cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0)); - cv::putText(query_img, "Query", cv::Point(10, 30), - cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 255, 0), 2); - - cv::Mat concat_img = query_img; - - for (size_t i = 0; i < top_matches.size(); ++i) { - cv::Mat gallery_img = cv::imread(gallery_dir + "/" + top_matches[i]); - if 
(gallery_img.empty()) continue; - - cv::resize(gallery_img, gallery_img, output_size); - cv::copyMakeBorder(gallery_img, gallery_img, 5, 5, 5, 5, - cv::BORDER_CONSTANT, cv::Scalar(255, 255, 255)); - cv::putText(gallery_img, "G" + std::to_string(i), cv::Point(10, 30), - cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 255, 0), 2); - - cv::hconcat(concat_img, gallery_img, concat_img); - } - results_vis[query_file] = concat_img; - } - return results_vis; -} - -void printHelpMessage() { - std::cout << "usage: demo.cpp [-h] [--query_dir QUERY_DIR] [--gallery_dir GALLERY_DIR] " - << "[--backend_target BACKEND_TARGET] [--topk TOPK] [--model MODEL] [--save] [--vis]\n\n" - << "ReID baseline models from Tencent Youtu Lab\n\n" - << "optional arguments:\n" - << " -h, --help show this help message and exit\n" - << " --query_dir QUERY_DIR, -q QUERY_DIR\n" - << " Query directory.\n" - << " --gallery_dir GALLERY_DIR, -g GALLERY_DIR\n" - << " Gallery directory.\n" - << " --backend_target BACKEND_TARGET, -bt BACKEND_TARGET\n" - << " Choose one of the backend-target pair to run this demo: 0: (default) OpenCV implementation + " - "CPU, 1: CUDA + GPU (CUDA), 2: CUDA + GPU (CUDA FP16), 3: TIM-VX + NPU, 4: CANN + NPU\n" - << " --topk TOPK Top-K closest from gallery for each query.\n" - << " --model MODEL, -m MODEL\n" - << " Path to the model.\n" - << " --save, -s Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in " - "case of camera input.\n" - << " --vis, -v Usage: Specify to open a new window to show results. 
Invalid in case of camera input.\n"; -} - -int main(int argc, char** argv) { - // CommandLineParser setup - cv::CommandLineParser parser(argc, argv, - "{help h | | Show help message.}" - "{query_dir q | | Query directory.}" - "{gallery_dir g | | Gallery directory.}" - "{backend_target bt | 0 | Choose one of the backend-target pair to run this demo: 0: (default) OpenCV implementation + CPU, " - "1: CUDA + GPU (CUDA), 2: CUDA + GPU (CUDA FP16), 3: TIM-VX + NPU, 4: CANN + NPU}" - "{topk k | 10 | Top-K closest from gallery for each query.}" - "{model m | person_reid_youtu_2021nov.onnx | Path to the model.}" - "{save s | false | Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.}" - "{vis v | false | Usage: Specify to open a new window to show results. Invalid in case of camera input.}"); - - if (parser.has("help")) { - printHelpMessage(); - return 0; - } - - std::string query_dir = parser.get("query_dir"); - std::string gallery_dir = parser.get("gallery_dir"); - int backend_target = parser.get("backend_target"); - int topK = parser.get("topk"); - std::string model_path = parser.get("model"); - bool save_flag = parser.get("save"); - bool vis_flag = parser.get("vis"); - - if (!parser.check()) { - parser.printErrors(); - return 1; - } - - const std::vector> backend_target_pairs = { - {cv::dnn::DNN_BACKEND_OPENCV, cv::dnn::DNN_TARGET_CPU}, - {cv::dnn::DNN_BACKEND_CUDA, cv::dnn::DNN_TARGET_CUDA}, - {cv::dnn::DNN_BACKEND_CUDA, cv::dnn::DNN_TARGET_CUDA_FP16}, - {cv::dnn::DNN_BACKEND_TIMVX, cv::dnn::DNN_TARGET_NPU}, - {cv::dnn::DNN_BACKEND_CANN, cv::dnn::DNN_TARGET_NPU} - }; - - int backend_id = backend_target_pairs[backend_target].first; - int target_id = backend_target_pairs[backend_target].second; - - YoutuReID reid(model_path, cv::Size(128, 256), 768, - cv::Scalar(0.485, 0.456, 0.406), - cv::Scalar(0.229, 0.224, 0.225), - backend_id, target_id); - - std::pair, std::vector> query_data = 
readImagesFromDirectory(query_dir); - std::pair, std::vector> gallery_data = readImagesFromDirectory(gallery_dir); - - std::vector> indices = reid.query(query_data.first, gallery_data.first, topK); - - std::map> results; - for (size_t i = 0; i < query_data.second.size(); ++i) { - std::vector top_matches; - for (int idx : indices[i]) { - top_matches.push_back(gallery_data.second[idx]); - } - results[query_data.second[i]] = top_matches; - std::cout << "Query: " << query_data.second[i] << "\n"; - std::cout << "\tTop-" << topK << " from gallery: "; - for (size_t j = 0; j < top_matches.size(); ++j) { - std::cout << top_matches[j] << " "; - } - std::cout << std::endl; - } - - std::map results_vis = visualize(results, query_dir, gallery_dir); - - if (save_flag) { - for (std::map::iterator it = results_vis.begin(); it != results_vis.end(); ++it) { - std::string save_path = "result-" + it->first; - cv::imwrite(save_path, it->second); - } - } - - if (vis_flag) { - for (std::map::iterator it = results_vis.begin(); it != results_vis.end(); ++it) { - cv::namedWindow("result-" + it->first, cv::WINDOW_AUTOSIZE); - cv::imshow("result-" + it->first, it->second); - cv::waitKey(0); - cv::destroyAllWindows(); - } - } - - return 0; -} diff --git a/models/person_reid_youtureid/demo.py b/models/person_reid_youtureid/demo.py deleted file mode 100644 index 5160b918..00000000 --- a/models/person_reid_youtureid/demo.py +++ /dev/null @@ -1,124 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. 
- -import os -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from youtureid import YoutuReID - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser( - description="ReID baseline models from Tencent Youtu Lab") -parser.add_argument('--query_dir', '-q', type=str, - help='Query directory.') -parser.add_argument('--gallery_dir', '-g', type=str, - help='Gallery directory.') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--topk', type=int, default=10, - help='Top-K closest from gallery for each query.') -parser.add_argument('--model', '-m', type=str, default='person_reid_youtu_2021nov.onnx', - help='Path to the model.') -parser.add_argument('--save', '-s', action='store_true', - help='Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.') -parser.add_argument('--vis', '-v', action='store_true', - help='Usage: Specify to open a new window to show results. 
Invalid in case of camera input.') -args = parser.parse_args() - -def readImageFromDirectory(img_dir, w=128, h=256): - img_list = [] - file_list = os.listdir(img_dir) - for f in file_list: - img = cv.imread(os.path.join(img_dir, f)) - img = cv.resize(img, (w, h)) - img_list.append(img) - return img_list, file_list - -def visualize(results, query_dir, gallery_dir, output_size=(128, 384)): - def addBorder(img, color, borderSize=5): - border = cv.copyMakeBorder(img, top=borderSize, bottom=borderSize, left=borderSize, right=borderSize, borderType=cv.BORDER_CONSTANT, value=color) - return border - - results_vis = dict.fromkeys(results.keys(), None) - for f, topk_f in results.items(): - query_img = cv.imread(os.path.join(query_dir, f)) - query_img = cv.resize(query_img, output_size) - query_img = addBorder(query_img, [0, 0, 0]) - cv.putText(query_img, 'Query', (10, 30), cv.FONT_HERSHEY_COMPLEX, 1., (0, 255, 0), 2) - - gallery_img_list = [] - for idx, gallery_f in enumerate(topk_f): - gallery_img = cv.imread(os.path.join(gallery_dir, gallery_f)) - gallery_img = cv.resize(gallery_img, output_size) - gallery_img = addBorder(gallery_img, [255, 255, 255]) - cv.putText(gallery_img, 'G{:02d}'.format(idx), (10, 30), cv.FONT_HERSHEY_COMPLEX, 1., (0, 255, 0), 2) - gallery_img_list.append(gallery_img) - - results_vis[f] = np.concatenate([query_img] + gallery_img_list, axis=1) - - return results_vis - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - - # Instantiate YoutuReID for person ReID - net = YoutuReID(modelPath=args.model, backendId=backend_id, targetId=target_id) - - # Read images from dir - query_img_list, query_file_list = readImageFromDirectory(args.query_dir) - gallery_img_list, gallery_file_list = readImageFromDirectory(args.gallery_dir) - - # Query - topk_indices = net.query(query_img_list, gallery_img_list, args.topk) - - # Index to filename - results = 
dict.fromkeys(query_file_list, None) - for f, indices in zip(query_file_list, topk_indices): - topk_matches = [] - for idx in indices: - topk_matches.append(gallery_file_list[idx]) - results[f] = topk_matches - # Print - print('Query: {}'.format(f)) - print('\tTop-{} from gallery: {}'.format(args.topk, str(topk_matches))) - - # Visualize - results_vis = visualize(results, args.query_dir, args.gallery_dir) - - if args.save: - for f, img in results_vis.items(): - cv.imwrite('result-{}'.format(f), img) - - if args.vis: - for f, img in results_vis.items(): - cv.namedWindow('result-{}'.format(f), cv.WINDOW_AUTOSIZE) - cv.imshow('result-{}'.format(f), img) - cv.waitKey(0) - cv.destroyAllWindows() - diff --git a/models/person_reid_youtureid/person_reid_youtu_2021nov.onnx b/models/person_reid_youtureid/person_reid_youtu_2021nov.onnx deleted file mode 100644 index 1e642cac..00000000 --- a/models/person_reid_youtureid/person_reid_youtu_2021nov.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0579683334d4b9440221606dcb461656dd0dc64143b18f48faedaced9b4f580d -size 106878407 diff --git a/models/person_reid_youtureid/person_reid_youtu_2021nov_int8.onnx b/models/person_reid_youtureid/person_reid_youtu_2021nov_int8.onnx deleted file mode 100644 index f405ab6a..00000000 --- a/models/person_reid_youtureid/person_reid_youtu_2021nov_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4757c4cb759b79030a9870abf29c064c2ee51e079a05700690800c81b16cf245 -size 26763574 diff --git a/models/person_reid_youtureid/person_reid_youtu_2021nov_int8bq.onnx b/models/person_reid_youtureid/person_reid_youtu_2021nov_int8bq.onnx deleted file mode 100644 index 95aa550a..00000000 --- a/models/person_reid_youtureid/person_reid_youtu_2021nov_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2b88597426335e6cd625119bdda090f9d3497bc80ba5b8a8910f65b8ccc09471 -size 29203236 diff 
--git a/models/person_reid_youtureid/youtureid.py b/models/person_reid_youtureid/youtureid.py deleted file mode 100644 index b2fafe16..00000000 --- a/models/person_reid_youtureid/youtureid.py +++ /dev/null @@ -1,67 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. - -import numpy as np -import cv2 as cv - -class YoutuReID: - def __init__(self, modelPath, backendId=0, targetId=0): - self._modelPath = modelPath - self._backendId = backendId - self._targetId = targetId - - self._model = cv.dnn.readNet(modelPath) - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - self._input_size = (128, 256) # fixed - self._output_dim = 768 - self._mean = (0.485, 0.456, 0.406) - self._std = (0.229, 0.224, 0.225) - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - def _preprocess(self, image): - image = image[:, :, ::-1] - image = (image / 255.0 - self._mean) / self._std - return cv.dnn.blobFromImage(image.astype(np.float32)) - # return cv.dnn.blobFromImage(image, scalefactor=(1.0/255.0), size=self._input_size, mean=self._mean) / self._std - - def infer(self, image): - # Preprocess - inputBlob = self._preprocess(image) - - # Forward - self._model.setInput(inputBlob) - features = self._model.forward() - return np.reshape(features, (features.shape[0], features.shape[1])) - - def query(self, query_img_list, gallery_img_list, topK=5): - query_features_list = [] - for q in query_img_list: - query_features_list.append(self.infer(q)) - 
query_features = np.concatenate(query_features_list, axis=0) - query_norm = np.linalg.norm(query_features, ord=2, axis=1, keepdims=True) - query_arr = query_features / (query_norm + np.finfo(np.float32).eps) - - gallery_features_list = [] - for g in gallery_img_list: - gallery_features_list.append(self.infer(g)) - gallery_features = np.concatenate(gallery_features_list, axis=0) - gallery_norm = np.linalg.norm(gallery_features, ord=2, axis=1, keepdims=True) - gallery_arr = gallery_features / (gallery_norm + np.finfo(np.float32).eps) - - dist = np.matmul(query_arr, gallery_arr.T) - idx = np.argsort(-dist, axis=1) - return [i[0:topK] for i in idx] diff --git a/models/pose_estimation_mediapipe/CMakeLists.txt b/models/pose_estimation_mediapipe/CMakeLists.txt deleted file mode 100644 index e49645ac..00000000 --- a/models/pose_estimation_mediapipe/CMakeLists.txt +++ /dev/null @@ -1,29 +0,0 @@ -cmake_minimum_required(VERSION 3.24) -set(project_name "opencv_zoo_pose_estimation_mediapipe") - -PROJECT (${project_name}) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) -# Find OpenCV, you may need to set OpenCV_DIR variable -# to the absolute path to the directory containing OpenCVConfig.cmake file -# via the command line or GUI - -file(GLOB SourceFile - "demo.cpp") -# If the package has been found, several variables will -# be set, you can find the full list with descriptions -# in the OpenCVConfig.cmake file. 
-# Print some message showing some of them -message(STATUS "OpenCV library status:") -message(STATUS " config: ${OpenCV_DIR}") -message(STATUS " version: ${OpenCV_VERSION}") -message(STATUS " libraries: ${OpenCV_LIBS}") -message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}") - -# Declare the executable target built from your sources -add_executable(${project_name} ${SourceFile}) - -# Link your application with OpenCV libraries -target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS}) diff --git a/models/pose_estimation_mediapipe/LICENSE b/models/pose_estimation_mediapipe/LICENSE deleted file mode 100644 index 7a4a3ea2..00000000 --- a/models/pose_estimation_mediapipe/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/models/pose_estimation_mediapipe/README.md b/models/pose_estimation_mediapipe/README.md deleted file mode 100644 index 30d92ac9..00000000 --- a/models/pose_estimation_mediapipe/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# Pose estimation from MediaPipe Pose - -This model estimates 33 pose keypoints and person segmentation mask per detected person from [person detector](../person_detection_mediapipe). (The image below is referenced from [MediaPipe Pose Keypoints](https://github.com/tensorflow/tfjs-models/tree/master/pose-detection#blazepose-keypoints-used-in-mediapipe-blazepose)) - -![MediaPipe Pose Landmark](examples/pose_landmarks.png) - -This model is converted from TFlite to ONNX using following tools: -- TFLite model to ONNX: https://github.com/onnx/tensorflow-onnx -- simplified by [onnx-simplifier](https://github.com/daquexian/onnx-simplifier) - -**Note**: -- Visit https://github.com/google/mediapipe/blob/master/docs/solutions/models.md#pose for models of larger scale. -- `pose_estimation_mediapipe_2023mar_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. 
- -## Demo - -### python - -Run the following commands to try the demo: -```bash -# detect on camera input -python demo.py -# detect on an image -python demo.py -i /path/to/image -v -``` -### C++ - -Install latest OpenCV and CMake >= 3.24.0 to get started with: - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# detect on camera input -./build/opencv_zoo_pose_estimation_mediapipe -# detect on an image -./build/opencv_zoo_pose_estimation_mediapipe -m=/path/to/model -i=/path/to/image -v -# get help messages -./build/opencv_zoo_pose_estimation_mediapipe -h -``` - -### Example outputs - -![webcam demo](./example_outputs/mpposeest_demo.webp) - -## License - -All files in this directory are licensed under [Apache 2.0 License](LICENSE). - -## Reference -- MediaPipe Pose: https://developers.google.com/mediapipe/solutions/vision/pose_landmarker -- MediaPipe pose model and model card: https://github.com/google/mediapipe/blob/master/docs/solutions/models.md#pose -- BlazePose TFJS: https://github.com/tensorflow/tfjs-models/tree/master/pose-detection/src/blazepose_tfjs diff --git a/models/pose_estimation_mediapipe/demo.cpp b/models/pose_estimation_mediapipe/demo.cpp deleted file mode 100644 index f5b97deb..00000000 --- a/models/pose_estimation_mediapipe/demo.cpp +++ /dev/null @@ -1,2850 +0,0 @@ -#include -#include -#include -#include - -#include - -const long double _M_PI = 3.141592653589793238L; -using namespace std; -using namespace cv; -using namespace dnn; - -vector< pair > backendTargetPairs = { - std::make_pair(dnn::DNN_BACKEND_OPENCV, dnn::DNN_TARGET_CPU), - std::make_pair(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA), - std::make_pair(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA_FP16), - std::make_pair(dnn::DNN_BACKEND_TIMVX, dnn::DNN_TARGET_NPU), - std::make_pair(dnn::DNN_BACKEND_CANN, dnn::DNN_TARGET_NPU) }; - - -Mat getMediapipeAnchor(); - 
-class MPPersonDet { -private: - Net net; - string modelPath; - Size inputSize; - float scoreThreshold; - float nmsThreshold; - dnn::Backend backendId; - dnn::Target targetId; - int topK; - Mat anchors; - -public: - MPPersonDet(string modPath, float nmsThresh = 0.3, float scoreThresh = 0.5, int tok=5000 , dnn::Backend bId = DNN_BACKEND_DEFAULT, dnn::Target tId = DNN_TARGET_CPU) : - modelPath(modPath), nmsThreshold(nmsThresh), - scoreThreshold(scoreThresh), topK(tok), - backendId(bId), targetId(tId) - { - this->inputSize = Size(224, 224); - this->net = readNet(this->modelPath); - this->net.setPreferableBackend(this->backendId); - this->net.setPreferableTarget(this->targetId); - this->anchors = getMediapipeAnchor(); - } - - pair preprocess(Mat img) - { - Mat blob; - Image2BlobParams paramMediapipe; - paramMediapipe.datalayout = DNN_LAYOUT_NCHW; - paramMediapipe.ddepth = CV_32F; - paramMediapipe.mean = Scalar::all(127.5); - paramMediapipe.scalefactor = Scalar::all(1/127.5); - paramMediapipe.size = this->inputSize; - paramMediapipe.swapRB = true; - paramMediapipe.paddingmode = DNN_PMODE_LETTERBOX; - - double ratio = min(this->inputSize.height / double(img.rows), this->inputSize.width / double(img.cols)); - Size padBias(0, 0); - if (img.rows != this->inputSize.height || img.cols != this->inputSize.width) - { - // keep aspect ratio when resize - Size ratioSize(int(img.cols * ratio), int(img.rows* ratio)); - int padH = this->inputSize.height - ratioSize.height; - int padW = this->inputSize.width - ratioSize.width; - padBias.width = padW / 2; - padBias.height = padH / 2; - } - blob = blobFromImageWithParams(img, paramMediapipe); - padBias = Size(int(padBias.width / ratio), int(padBias.height / ratio)); - return pair(blob, padBias); - } - - Mat infer(Mat srcimg) - { - pair w = this->preprocess(srcimg); - Mat inputBlob = get<0>(w); - Size padBias = get<1>(w); - this->net.setInput(inputBlob); - vector outs; - this->net.forward(outs, this->net.getUnconnectedOutLayersNames()); 
- Mat predictions = this->postprocess(outs, Size(srcimg.cols, srcimg.rows), padBias); - return predictions; - } - - Mat postprocess(vector outputs, Size orgSize, Size padBias) - { - Mat score = outputs[1].reshape(0, outputs[1].size[0]); - Mat boxLandDelta = outputs[0].reshape(outputs[0].size[0], outputs[0].size[1]); - Mat boxDelta = boxLandDelta.colRange(0, 4); - Mat landmarkDelta = boxLandDelta.colRange(4, boxLandDelta.cols); - float scale = float(max(orgSize.height, orgSize.width)); - Mat mask = score < -100; - score.setTo(-100, mask); - mask = score > 100; - score.setTo(100, mask); - Mat deno; - exp(-score, deno); - divide(1.0, 1+deno, score); - boxDelta.colRange(0, 1) = boxDelta.colRange(0, 1) / this->inputSize.width; - boxDelta.colRange(1, 2) = boxDelta.colRange(1, 2) / this->inputSize.height; - boxDelta.colRange(2, 3) = boxDelta.colRange(2, 3) / this->inputSize.width; - boxDelta.colRange(3, 4) = boxDelta.colRange(3, 4) / this->inputSize.height; - Mat xy1 = (boxDelta.colRange(0, 2) - boxDelta.colRange(2, 4) / 2 + this->anchors) * scale; - Mat xy2 = (boxDelta.colRange(0, 2) + boxDelta.colRange(2, 4) / 2 + this->anchors) * scale; - Mat boxes; - hconcat(xy1, xy2, boxes); - vector< Rect2d > rotBoxes(boxes.rows); - boxes.colRange(0, 1) = boxes.colRange(0, 1) - padBias.width; - boxes.colRange(1, 2) = boxes.colRange(1, 2) - padBias.height; - boxes.colRange(2, 3) = boxes.colRange(2, 3) - padBias.width; - boxes.colRange(3, 4) = boxes.colRange(3, 4) - padBias.height; - for (int i = 0; i < boxes.rows; i++) - { - rotBoxes[i] = Rect2d(Point2d(boxes.at(i, 0), boxes.at(i, 1)), Point2d(boxes.at(i, 2), boxes.at(i, 3))); - } - vector keep; - NMSBoxes(rotBoxes, score, this->scoreThreshold, this->nmsThreshold, keep, 1.0f, this->topK); - if (keep.size() == 0) - return Mat(); - int nbCols = landmarkDelta.cols + boxes.cols + 1; - Mat candidates(int(keep.size()), nbCols, CV_32FC1); - int row = 0; - for (auto idx : keep) - { - candidates.at(row, nbCols - 1) = score.at(idx); - 
boxes.row(idx).copyTo(candidates.row(row).colRange(0, 4)); - candidates.at(row, 4) = (landmarkDelta.at(idx, 0) / this->inputSize.width + this->anchors.at(idx,0)) * scale - padBias.width; - candidates.at(row, 5) = (landmarkDelta.at(idx, 1) / this->inputSize.height + this->anchors.at(idx, 1))* scale - padBias.height; - candidates.at(row, 6) = (landmarkDelta.at(idx, 2) / this->inputSize.width + this->anchors.at(idx, 0))* scale - padBias.width; - candidates.at(row, 7) = (landmarkDelta.at(idx, 3) / this->inputSize.height + this->anchors.at(idx, 1))* scale - padBias.height; - candidates.at(row, 8) = (landmarkDelta.at(idx, 4) / this->inputSize.width + this->anchors.at(idx, 0))* scale - padBias.width; - candidates.at(row, 9) = (landmarkDelta.at(idx, 5) / this->inputSize.height + this->anchors.at(idx, 1))* scale - padBias.height; - candidates.at(row, 10) = (landmarkDelta.at(idx, 6) / this->inputSize.width + this->anchors.at(idx, 0))* scale - padBias.width; - candidates.at(row, 11) = (landmarkDelta.at(idx, 7) / this->inputSize.height + this->anchors.at(idx, 1))* scale - padBias.height; - row++; - } - return candidates; - - } - - -}; - -class MPPose { -private: - Net net; - string modelPath; - Size inputSize; - float confThreshold; - dnn::Backend backendId; - dnn::Target targetId; - float personBoxPreEnlargeFactor; - float personBoxEnlargeFactor; - Mat anchors; - -public: - MPPose(string modPath, float confThresh = 0.5, dnn::Backend bId = DNN_BACKEND_DEFAULT, dnn::Target tId = DNN_TARGET_CPU) : - modelPath(modPath), confThreshold(confThresh), - backendId(bId), targetId(tId) - { - this->inputSize = Size(256, 256); - this->net = readNet(this->modelPath); - this->net.setPreferableBackend(this->backendId); - this->net.setPreferableTarget(this->targetId); - this->anchors = getMediapipeAnchor(); - // RoI will be larger so the performance will be better, but preprocess will be slower.Default to 1. 
- this->personBoxPreEnlargeFactor = 1; - this->personBoxEnlargeFactor = 1.25; - } - - tuple preprocess(Mat image, Mat person) - { - /*** - Rotate input for inference. - Parameters: - image - input image of BGR channel order - face_bbox - human face bounding box found in image of format [[x1, y1], [x2, y2]] (top-left and bottom-right points) - person_landmarks - 4 landmarks (2 full body points, 2 upper body points) of shape [4, 2] - Returns: - rotated_person - rotated person image for inference - rotate_person_bbox - person box of interest range - angle - rotate angle for person - rotation_matrix - matrix for rotation and de-rotation - pad_bias - pad pixels of interest range - */ - // crop and pad image to interest range - Size padBias(0, 0); // left, top - Mat personKeypoints = person.colRange(4, 12).reshape(0, 4); - Point2f midHipPoint = Point2f(personKeypoints.row(0)); - Point2f fullBodyPoint = Point2f(personKeypoints.row(1)); - // # get RoI - double fullDist = norm(midHipPoint - fullBodyPoint); - Mat fullBoxf,fullBox; - Mat v1 = Mat(midHipPoint) - fullDist, v2 = Mat(midHipPoint); - vector vmat = { Mat(midHipPoint) - fullDist, Mat(midHipPoint) + fullDist }; - hconcat(vmat, fullBoxf); - // enlarge to make sure full body can be cover - Mat cBox, centerBox, whBox; - reduce(fullBoxf, centerBox, 1, REDUCE_AVG, CV_32F); - whBox = fullBoxf.col(1) - fullBoxf.col(0); - Mat newHalfSize = whBox * this->personBoxPreEnlargeFactor / 2; - vmat[0] = centerBox - newHalfSize; - vmat[1] = centerBox + newHalfSize; - hconcat(vmat, fullBox); - Mat personBox; - fullBox.convertTo(personBox, CV_32S); - // refine person bbox - Mat idx = personBox.row(0) < 0; - personBox.row(0).setTo(0, idx); - idx = personBox.row(0) >= image.cols; - personBox.row(0).setTo(image.cols , idx); - idx = personBox.row(1) < 0; - personBox.row(1).setTo(0, idx); - idx = personBox.row(1) >= image.rows; - personBox.row(1).setTo(image.rows, idx); // crop to the size of interest - - image = image(Rect(personBox.at(0, 
0), personBox.at(1, 0), personBox.at(0, 1) - personBox.at(0, 0), personBox.at(1, 1) - personBox.at(1, 0))); - // pad to square - int top = int(personBox.at(1, 0) - fullBox.at(1, 0)); - int left = int(personBox.at(0, 0) - fullBox.at(0, 0)); - int bottom = int(fullBox.at(1, 1) - personBox.at(1, 1)); - int right = int(fullBox.at(0, 1) - personBox.at(0, 1)); - copyMakeBorder(image, image, top, bottom, left, right, BORDER_CONSTANT, Scalar(0, 0, 0)); - padBias = Point(padBias) + Point(personBox.col(0)) - Point(left, top); - // compute rotation - midHipPoint -= Point2f(padBias); - fullBodyPoint -= Point2f(padBias); - float radians = float(_M_PI / 2 - atan2(-(fullBodyPoint.y - midHipPoint.y), fullBodyPoint.x - midHipPoint.x)); - radians = radians - 2 * float(_M_PI) * int((radians + _M_PI) / (2 * _M_PI)); - float angle = (radians * 180 / float(_M_PI)); - // get rotation matrix* - Mat rotationMatrix = getRotationMatrix2D(midHipPoint, angle, 1.0); - // get rotated image - Mat rotatedImage; - warpAffine(image, rotatedImage, rotationMatrix, Size(image.cols, image.rows)); - // get landmark bounding box - Mat blob; - Image2BlobParams paramPoseMediapipe; - paramPoseMediapipe.datalayout = DNN_LAYOUT_NHWC; - paramPoseMediapipe.ddepth = CV_32F; - paramPoseMediapipe.mean = Scalar::all(0); - paramPoseMediapipe.scalefactor = Scalar::all(1 / 255.); - paramPoseMediapipe.size = this->inputSize; - paramPoseMediapipe.swapRB = true; - paramPoseMediapipe.paddingmode = DNN_PMODE_NULL; - blob = blobFromImageWithParams(rotatedImage, paramPoseMediapipe); // resize INTER_AREA becomes INTER_LINEAR in blobFromImage - Mat rotatedPersonBox = (Mat_(2, 2) << 0, 0, image.cols, image.rows); - - return tuple(blob, rotatedPersonBox, angle, rotationMatrix, padBias); - } - - tuple infer(Mat image, Mat person) - { - int h = image.rows; - int w = image.cols; - // Preprocess - tuple tw; - tw = this->preprocess(image, person); - Mat inputBlob = get<0>(tw); - Mat rotatedPersonBbox = get<1>(tw); - float angle = 
get<2>(tw); - Mat rotationMatrix = get<3>(tw); - Size padBias = get<4>(tw); - - // Forward - this->net.setInput(inputBlob); - vector outputBlob; - this->net.forward(outputBlob, this->net.getUnconnectedOutLayersNames()); - - // Postprocess - tuple results; - results = this->postprocess(outputBlob, rotatedPersonBbox, angle, rotationMatrix, padBias, Size(w, h)); - return results;// # [bbox_coords, landmarks_coords, conf] - } - - tuple postprocess(vector blob, Mat rotatedPersonBox, float angle, Mat rotationMatrix, Size padBias, Size imgSize) - { - float valConf = blob[1].at(0); - if (valConf < this->confThreshold) - return tuple(Mat(), Mat(), Mat(), Mat(), Mat(), valConf); - Mat landmarks = blob[0].reshape(0, 39); - Mat mask = blob[2]; - Mat heatmap = blob[3]; - Mat landmarksWorld = blob[4].reshape(0, 39); - - Mat deno; - // recover sigmoid score - exp(-landmarks.colRange(3, landmarks.cols), deno); - divide(1.0, 1 + deno, landmarks.colRange(3, landmarks.cols)); - // TODO: refine landmarks with heatmap. 
reference: https://github.com/tensorflow/tfjs-models/blob/master/pose-detection/src/blazepose_tfjs/detector.ts#L577-L582 - heatmap = heatmap.reshape(0, heatmap.size[0]); - // transform coords back to the input coords - Mat whRotatedPersonPbox = rotatedPersonBox.row(1) - rotatedPersonBox.row(0); - Mat scaleFactor = whRotatedPersonPbox.clone(); - scaleFactor.col(0) /= this->inputSize.width; - scaleFactor.col(1) /= this->inputSize.height; - landmarks.col(0) = (landmarks.col(0) - this->inputSize.width / 2) * scaleFactor.at(0); - landmarks.col(1) = (landmarks.col(1) - this->inputSize.height / 2) * scaleFactor.at(1); - landmarks.col(2) = landmarks.col(2) * max(scaleFactor.at(1), scaleFactor.at(0)); - Mat coordsRotationMatrix; - getRotationMatrix2D(Point(0, 0), angle, 1.0).convertTo(coordsRotationMatrix, CV_32F); - Mat rotatedLandmarks = landmarks.colRange(0, 2) * coordsRotationMatrix.colRange(0, 2); - hconcat(rotatedLandmarks, landmarks.colRange(2, landmarks.cols), rotatedLandmarks); - Mat rotatedLandmarksWorld = landmarksWorld.colRange(0, 2) * coordsRotationMatrix.colRange(0, 2); - hconcat(rotatedLandmarksWorld, landmarksWorld.col(2), rotatedLandmarksWorld); - // invert rotation - Mat rotationComponent = (Mat_(2, 2) <(0,0), rotationMatrix.at(1, 0), rotationMatrix.at(0, 1), rotationMatrix.at(1, 1)); - Mat translationComponent = rotationMatrix(Rect(2, 0, 1, 2)).clone(); - Mat invertedTranslation = -rotationComponent * translationComponent; - Mat inverseRotationMatrix; - hconcat(rotationComponent, invertedTranslation, inverseRotationMatrix); - Mat center, rc; - reduce(rotatedPersonBox, rc, 0, REDUCE_AVG, CV_64F); - hconcat(rc, Mat(1, 1, CV_64FC1, 1) , center); - // get box center - Mat originalCenter(2, 1, CV_64FC1); - originalCenter.at(0) = center.dot(inverseRotationMatrix.row(0)); - originalCenter.at(1) = center.dot(inverseRotationMatrix.row(1)); - for (int idxRow = 0; idxRow < rotatedLandmarks.rows; idxRow++) - { - landmarks.at(idxRow, 0) = 
float(rotatedLandmarks.at(idxRow, 0) + originalCenter.at(0) + padBias.width); // - landmarks.at(idxRow, 1) = float(rotatedLandmarks.at(idxRow, 1) + originalCenter.at(1) + padBias.height); // - } - // get bounding box from rotated_landmarks - double vmin0, vmin1, vmax0, vmax1; - minMaxLoc(landmarks.col(0), &vmin0, &vmax0); - minMaxLoc(landmarks.col(1), &vmin1, &vmax1); - Mat bbox = (Mat_(2, 2) << vmin0, vmin1, vmax0, vmax1); - Mat centerBox; - reduce(bbox, centerBox, 0, REDUCE_AVG, CV_32F); - Mat whBox = bbox.row(1) - bbox.row(0); - Mat newHalfSize = whBox * this->personBoxEnlargeFactor / 2; - vector vmat(2); - vmat[0] = centerBox - newHalfSize; - vmat[1] = centerBox + newHalfSize; - vconcat(vmat, bbox); - // invert rotation for mask - mask = mask.reshape(1, 256); - Mat invertRotationMatrix = getRotationMatrix2D(Point(mask.cols / 2, mask.rows / 2), -angle, 1.0); - Mat invertRotationMask; - warpAffine(mask, invertRotationMask, invertRotationMatrix, Size(mask.cols, mask.rows)); - // enlarge mask - resize(invertRotationMask, invertRotationMask, Size(int(whRotatedPersonPbox.at(0)), int(whRotatedPersonPbox.at(1)))); - // crop and pad mask - int minW = -min(padBias.width, 0); - int minH= -min(padBias.height, 0); - int left = max(padBias.width, 0); - int top = max(padBias.height, 0); - Size padOver = imgSize - Size(invertRotationMask.cols, invertRotationMask.rows) - padBias; - int maxW = min(padOver.width, 0) + invertRotationMask.cols; - int maxH = min(padOver.height, 0) + invertRotationMask.rows; - int right = max(padOver.width, 0); - int bottom = max(padOver.height, 0); - invertRotationMask = invertRotationMask(Rect(minW, minH, maxW - minW, maxH - minH)).clone(); - copyMakeBorder(invertRotationMask, invertRotationMask, top, bottom, left, right, BORDER_CONSTANT, Scalar::all(0)); - // binarize mask - threshold(invertRotationMask, invertRotationMask, 1, 255, THRESH_BINARY); - - /* 2*2 person bbox: [[x1, y1], [x2, y2]] - # 39*5 screen landmarks: 33 keypoints and 6 auxiliary 
points with [x, y, z, visibility, presence], z value is relative to HIP - # Visibility is probability that a keypoint is located within the frame and not occluded by another bigger body part or another object - # Presence is probability that a keypoint is located within the frame - # 39*3 world landmarks: 33 keypoints and 6 auxiliary points with [x, y, z] 3D metric x, y, z coordinate - # img_height*img_width mask: gray mask, where 255 indicates the full body of a person and 0 means background - # 64*64*39 heatmap: currently only used for refining landmarks, requires sigmod processing before use - # conf: confidence of prediction*/ - return tuple(bbox, landmarks, rotatedLandmarksWorld, invertRotationMask, heatmap, valConf); - } -}; - -std::string keys = -"{ help h | | Print help message. }" -"{ model m | pose_estimation_mediapipe_2023mar.onnx | Usage: Path to the model, defaults to person_detection_mediapipe_2023mar.onnx }" -"{ input i | | Path to input image or video file. Skip this argument to capture frames from a camera.}" -"{ conf_threshold | 0.5 | Usage: Filter out hands of confidence < conf_threshold. }" -"{ top_k | 1 | Usage: Keep top_k bounding boxes before NMS. }" -"{ save s | true | Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input. }" -"{ vis v | true | Usage: Specify to open a new window to show results. Invalid in case of camera input. 
}" -"{ backend bt | 0 | Choose one of computation backends: " -"0: (default) OpenCV implementation + CPU, " -"1: CUDA + GPU (CUDA), " -"2: CUDA + GPU (CUDA FP16), " -"3: TIM-VX + NPU, " -"4: CANN + NPU}"; - - -void drawLines(Mat image, Mat landmarks, Mat keeplandmarks, bool isDrawPoint = true, int thickness = 2) -{ - - vector> segment = { - make_pair(0, 1), make_pair(1, 2), make_pair(2, 3), make_pair(3, 7), - make_pair(0, 4), make_pair(4, 5), make_pair(5, 6), make_pair(6, 8), - make_pair(9, 10), - make_pair(12, 14), make_pair(14, 16), make_pair(16, 22), make_pair(16, 18), make_pair(16, 20), make_pair(18, 20), - make_pair(11, 13), make_pair(13, 15), make_pair(15, 21), make_pair(15, 19), make_pair(15, 17), make_pair(17, 19), - make_pair(11, 12), make_pair(11, 23), make_pair(23, 24), make_pair(24, 12), - make_pair(24, 26), make_pair(26, 28), make_pair(28, 30), make_pair(28, 32), make_pair(30, 32), - make_pair(23, 25), make_pair(25, 27),make_pair(27, 31), make_pair(27, 29), make_pair(29, 31) }; - for (auto p : segment) - if (keeplandmarks.at(p.first) && keeplandmarks.at(p.second)) - line(image, Point(landmarks.row(p.first)), Point(landmarks.row(p.second)), Scalar(255, 255, 255), thickness); - if (isDrawPoint) - for (int idxRow = 0; idxRow < landmarks.rows; idxRow++) - if (keeplandmarks.at(idxRow)) - circle(image, Point(landmarks.row(idxRow)), thickness, Scalar(0, 0, 255), -1); -} - - -pair visualize(Mat image, vector> poses, float fps=-1) -{ - Mat displayScreen = image.clone(); - Mat display3d(400, 400, CV_8UC3, Scalar::all(0)); - line(display3d, Point(200, 0), Point(200, 400), Scalar(255, 255, 255), 2); - line(display3d, Point(0, 200), Point(400, 200), Scalar(255, 255, 255), 2); - putText(display3d, "Main View", Point(0, 12), FONT_HERSHEY_DUPLEX, 0.5, Scalar(0, 0, 255)); - putText(display3d, "Top View", Point(200, 12), FONT_HERSHEY_DUPLEX, 0.5, Scalar(0, 0, 255)); - putText(display3d, "Left View", Point(0, 212), FONT_HERSHEY_DUPLEX, 0.5, Scalar(0, 0, 255)); - 
putText(display3d, "Right View", Point(200, 212), FONT_HERSHEY_DUPLEX, 0.5, Scalar(0, 0, 255)); - bool isDraw = false; // ensure only one person is drawn - - for (auto pose : poses) - { - Mat bbox = get<0>(pose); - if (!bbox.empty()) - { - Mat landmarksScreen = get<1>(pose); - Mat landmarksWord = get<2>(pose); - Mat mask; - get<3>(pose).convertTo(mask, CV_8U); - Mat heatmap = get<4>(pose); - float conf = get<5>(pose); - Mat edges; - Canny(mask, edges, 100, 200); - Mat kernel(2, 2, CV_8UC1, Scalar::all(1)); // expansion edge to 2 pixels - dilate(edges, edges, kernel); - Mat edgesBGR; - cvtColor(edges, edgesBGR, COLOR_GRAY2BGR); - Mat idxSelec = edges == 255; - edgesBGR.setTo(Scalar(0, 255, 0), idxSelec); - - add(edgesBGR, displayScreen, displayScreen); - // draw box - Mat box; - bbox.convertTo(box, CV_32S); - - rectangle(displayScreen, Point(box.row(0)), Point(box.row(1)), Scalar(0, 255, 0), 2); - putText(displayScreen, format("Conf = %4f", conf), Point(0, 35), FONT_HERSHEY_DUPLEX, 0.7,Scalar(0, 0, 255), 2); - if (fps > 0) - putText(displayScreen, format("FPS = %.2f", fps), Point(0, 55), FONT_HERSHEY_SIMPLEX, 0.7, Scalar(0, 0, 255), 2); - // Draw line between each key points - landmarksScreen = landmarksScreen.rowRange(0, landmarksScreen.rows - 6); - landmarksWord = landmarksWord.rowRange(0, landmarksWord.rows - 6); - - Mat keepLandmarks = landmarksScreen.col(4) > 0.8; // only show visible keypoints which presence bigger than 0.8 - - Mat landmarksXY; - landmarksScreen.colRange(0, 2).convertTo(landmarksXY, CV_32S); - drawLines(displayScreen, landmarksXY, keepLandmarks, false); - - // z value is relative to HIP, but we use constant to instead - for (int idxRow = 0; idxRow < landmarksScreen.rows; idxRow++) - { - Mat landmark;// p in enumerate(landmarks_screen[:, 0 : 3].astype(np.int32)) - landmarksScreen.row(idxRow).convertTo(landmark, CV_32S); - if (keepLandmarks.at(idxRow)) - circle(displayScreen, Point(landmark.at(0), landmark.at(1)), 2, Scalar(0, 0, 255), -1); - } 
- - if (!isDraw) - { - isDraw = true; - // Main view - Mat landmarksXY = landmarksWord.colRange(0, 2).clone(); - Mat x = landmarksXY * 100 + 100; - x.convertTo(landmarksXY, CV_32S); - drawLines(display3d, landmarksXY, keepLandmarks, true, 2); - - // Top view - Mat landmarksXZ; - hconcat(landmarksWord.col(0), landmarksWord.col(2), landmarksXZ); - landmarksXZ.col(1) = -landmarksXZ.col(1); - x = landmarksXZ * 100; - x.col(0) += 300; - x.col(1) += 100; - x.convertTo(landmarksXZ, CV_32S); - drawLines(display3d, landmarksXZ, keepLandmarks, true, 2); - - // Left view - Mat landmarksYZ; - hconcat(landmarksWord.col(2), landmarksWord.col(1), landmarksYZ); - landmarksYZ.col(0) = -landmarksYZ.col(0); - x = landmarksYZ * 100; - x.col(0) += 100; - x.col(1) += 300; - x.convertTo(landmarksYZ, CV_32S); - drawLines(display3d, landmarksYZ, keepLandmarks, true, 2); - - // Right view - Mat landmarksZY; - hconcat(landmarksWord.col(2), landmarksWord.col(1), landmarksZY); - x = landmarksZY * 100; - x.col(0) += 300; - x.col(1) += 300; - x.convertTo(landmarksZY, CV_32S); - drawLines(display3d, landmarksZY, keepLandmarks, true, 2); - } - } - } - return pair(displayScreen, display3d); -} - - - -int main(int argc, char** argv) -{ - CommandLineParser parser(argc, argv, keys); - - parser.about("Person Detector from MediaPipe"); - if (parser.has("help")) - { - parser.printMessage(); - return 0; - } - - string model = parser.get("model"); - float confThreshold = parser.get("conf_threshold"); - float scoreThreshold = 0.5f; - float nmsThreshold = 0.3f; - int topK = 5000; - bool vis = parser.get("vis"); - bool save = parser.get("save"); - int backendTargetid = parser.get("backend"); - - if (model.empty()) - { - CV_Error(Error::StsError, "Model file " + model + " not found"); - } - VideoCapture cap; - if (parser.has("input")) - cap.open(samples::findFile(parser.get("input"))); - else - cap.open(0); - Mat frame; - // person detector - MPPersonDet 
modelNet("../person_detection_mediapipe/person_detection_mediapipe_2023mar.onnx", nmsThreshold, scoreThreshold, topK, - backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second); - // pose estimator - MPPose poseEstimator(model, confThreshold, backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second); - //! [Open a video file or an image file or a camera stream] - if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot open video or file"); - - static const std::string kWinName = "MPPose Demo"; - while (waitKey(1) < 0) - { - cap >> frame; - if (frame.empty()) - { - if (parser.has("input")) - { - cout << "Frame is empty" << endl; - break; - } - else - continue; - } - TickMeter tm; - tm.start(); - Mat person = modelNet.infer(frame); - tm.stop(); - vector> pose; - for (int idxRow = 0; idxRow < person.rows; idxRow++) - { - tuple re = poseEstimator.infer(frame, person.row(idxRow)); - if (!get<0>(re).empty()) - pose.push_back(re); - } - cout << "Inference time: " << tm.getTimeMilli() << " ms\n"; - pair duoimg = visualize(frame, pose, tm.getFPS()); - if (vis) - { - imshow(kWinName, get<0>(duoimg)); - imshow("3d", get<1>(duoimg)); - } - } - return 0; -} - - -Mat getMediapipeAnchor() -{ - Mat anchor= (Mat_(2254,2) << 0.017857142857142856, 0.017857142857142856, - 0.017857142857142856, 0.017857142857142856, - 0.05357142857142857, 0.017857142857142856, - 0.05357142857142857, 0.017857142857142856, - 0.08928571428571429, 0.017857142857142856, - 0.08928571428571429, 0.017857142857142856, - 0.125, 0.017857142857142856, - 0.125, 0.017857142857142856, - 0.16071428571428573, 0.017857142857142856, - 0.16071428571428573, 0.017857142857142856, - 0.19642857142857142, 0.017857142857142856, - 0.19642857142857142, 0.017857142857142856, - 0.23214285714285715, 0.017857142857142856, - 0.23214285714285715, 0.017857142857142856, - 0.26785714285714285, 0.017857142857142856, - 0.26785714285714285, 0.017857142857142856, - 
0.30357142857142855, 0.017857142857142856, - 0.30357142857142855, 0.017857142857142856, - 0.3392857142857143, 0.017857142857142856, - 0.3392857142857143, 0.017857142857142856, - 0.375, 0.017857142857142856, - 0.375, 0.017857142857142856, - 0.4107142857142857, 0.017857142857142856, - 0.4107142857142857, 0.017857142857142856, - 0.44642857142857145, 0.017857142857142856, - 0.44642857142857145, 0.017857142857142856, - 0.48214285714285715, 0.017857142857142856, - 0.48214285714285715, 0.017857142857142856, - 0.5178571428571429, 0.017857142857142856, - 0.5178571428571429, 0.017857142857142856, - 0.5535714285714286, 0.017857142857142856, - 0.5535714285714286, 0.017857142857142856, - 0.5892857142857143, 0.017857142857142856, - 0.5892857142857143, 0.017857142857142856, - 0.625, 0.017857142857142856, - 0.625, 0.017857142857142856, - 0.6607142857142857, 0.017857142857142856, - 0.6607142857142857, 0.017857142857142856, - 0.6964285714285714, 0.017857142857142856, - 0.6964285714285714, 0.017857142857142856, - 0.7321428571428571, 0.017857142857142856, - 0.7321428571428571, 0.017857142857142856, - 0.7678571428571429, 0.017857142857142856, - 0.7678571428571429, 0.017857142857142856, - 0.8035714285714286, 0.017857142857142856, - 0.8035714285714286, 0.017857142857142856, - 0.8392857142857143, 0.017857142857142856, - 0.8392857142857143, 0.017857142857142856, - 0.875, 0.017857142857142856, - 0.875, 0.017857142857142856, - 0.9107142857142857, 0.017857142857142856, - 0.9107142857142857, 0.017857142857142856, - 0.9464285714285714, 0.017857142857142856, - 0.9464285714285714, 0.017857142857142856, - 0.9821428571428571, 0.017857142857142856, - 0.9821428571428571, 0.017857142857142856, - 0.017857142857142856, 0.05357142857142857, - 0.017857142857142856, 0.05357142857142857, - 0.05357142857142857, 0.05357142857142857, - 0.05357142857142857, 0.05357142857142857, - 0.08928571428571429, 0.05357142857142857, - 0.08928571428571429, 0.05357142857142857, - 0.125, 0.05357142857142857, - 0.125, 
0.05357142857142857, - 0.16071428571428573, 0.05357142857142857, - 0.16071428571428573, 0.05357142857142857, - 0.19642857142857142, 0.05357142857142857, - 0.19642857142857142, 0.05357142857142857, - 0.23214285714285715, 0.05357142857142857, - 0.23214285714285715, 0.05357142857142857, - 0.26785714285714285, 0.05357142857142857, - 0.26785714285714285, 0.05357142857142857, - 0.30357142857142855, 0.05357142857142857, - 0.30357142857142855, 0.05357142857142857, - 0.3392857142857143, 0.05357142857142857, - 0.3392857142857143, 0.05357142857142857, - 0.375, 0.05357142857142857, - 0.375, 0.05357142857142857, - 0.4107142857142857, 0.05357142857142857, - 0.4107142857142857, 0.05357142857142857, - 0.44642857142857145, 0.05357142857142857, - 0.44642857142857145, 0.05357142857142857, - 0.48214285714285715, 0.05357142857142857, - 0.48214285714285715, 0.05357142857142857, - 0.5178571428571429, 0.05357142857142857, - 0.5178571428571429, 0.05357142857142857, - 0.5535714285714286, 0.05357142857142857, - 0.5535714285714286, 0.05357142857142857, - 0.5892857142857143, 0.05357142857142857, - 0.5892857142857143, 0.05357142857142857, - 0.625, 0.05357142857142857, - 0.625, 0.05357142857142857, - 0.6607142857142857, 0.05357142857142857, - 0.6607142857142857, 0.05357142857142857, - 0.6964285714285714, 0.05357142857142857, - 0.6964285714285714, 0.05357142857142857, - 0.7321428571428571, 0.05357142857142857, - 0.7321428571428571, 0.05357142857142857, - 0.7678571428571429, 0.05357142857142857, - 0.7678571428571429, 0.05357142857142857, - 0.8035714285714286, 0.05357142857142857, - 0.8035714285714286, 0.05357142857142857, - 0.8392857142857143, 0.05357142857142857, - 0.8392857142857143, 0.05357142857142857, - 0.875, 0.05357142857142857, - 0.875, 0.05357142857142857, - 0.9107142857142857, 0.05357142857142857, - 0.9107142857142857, 0.05357142857142857, - 0.9464285714285714, 0.05357142857142857, - 0.9464285714285714, 0.05357142857142857, - 0.9821428571428571, 0.05357142857142857, - 0.9821428571428571, 
0.05357142857142857, - 0.017857142857142856, 0.08928571428571429, - 0.017857142857142856, 0.08928571428571429, - 0.05357142857142857, 0.08928571428571429, - 0.05357142857142857, 0.08928571428571429, - 0.08928571428571429, 0.08928571428571429, - 0.08928571428571429, 0.08928571428571429, - 0.125, 0.08928571428571429, - 0.125, 0.08928571428571429, - 0.16071428571428573, 0.08928571428571429, - 0.16071428571428573, 0.08928571428571429, - 0.19642857142857142, 0.08928571428571429, - 0.19642857142857142, 0.08928571428571429, - 0.23214285714285715, 0.08928571428571429, - 0.23214285714285715, 0.08928571428571429, - 0.26785714285714285, 0.08928571428571429, - 0.26785714285714285, 0.08928571428571429, - 0.30357142857142855, 0.08928571428571429, - 0.30357142857142855, 0.08928571428571429, - 0.3392857142857143, 0.08928571428571429, - 0.3392857142857143, 0.08928571428571429, - 0.375, 0.08928571428571429, - 0.375, 0.08928571428571429, - 0.4107142857142857, 0.08928571428571429, - 0.4107142857142857, 0.08928571428571429, - 0.44642857142857145, 0.08928571428571429, - 0.44642857142857145, 0.08928571428571429, - 0.48214285714285715, 0.08928571428571429, - 0.48214285714285715, 0.08928571428571429, - 0.5178571428571429, 0.08928571428571429, - 0.5178571428571429, 0.08928571428571429, - 0.5535714285714286, 0.08928571428571429, - 0.5535714285714286, 0.08928571428571429, - 0.5892857142857143, 0.08928571428571429, - 0.5892857142857143, 0.08928571428571429, - 0.625, 0.08928571428571429, - 0.625, 0.08928571428571429, - 0.6607142857142857, 0.08928571428571429, - 0.6607142857142857, 0.08928571428571429, - 0.6964285714285714, 0.08928571428571429, - 0.6964285714285714, 0.08928571428571429, - 0.7321428571428571, 0.08928571428571429, - 0.7321428571428571, 0.08928571428571429, - 0.7678571428571429, 0.08928571428571429, - 0.7678571428571429, 0.08928571428571429, - 0.8035714285714286, 0.08928571428571429, - 0.8035714285714286, 0.08928571428571429, - 0.8392857142857143, 0.08928571428571429, - 
0.8392857142857143, 0.08928571428571429, - 0.875, 0.08928571428571429, - 0.875, 0.08928571428571429, - 0.9107142857142857, 0.08928571428571429, - 0.9107142857142857, 0.08928571428571429, - 0.9464285714285714, 0.08928571428571429, - 0.9464285714285714, 0.08928571428571429, - 0.9821428571428571, 0.08928571428571429, - 0.9821428571428571, 0.08928571428571429, - 0.017857142857142856, 0.125, - 0.017857142857142856, 0.125, - 0.05357142857142857, 0.125, - 0.05357142857142857, 0.125, - 0.08928571428571429, 0.125, - 0.08928571428571429, 0.125, - 0.125, 0.125, - 0.125, 0.125, - 0.16071428571428573, 0.125, - 0.16071428571428573, 0.125, - 0.19642857142857142, 0.125, - 0.19642857142857142, 0.125, - 0.23214285714285715, 0.125, - 0.23214285714285715, 0.125, - 0.26785714285714285, 0.125, - 0.26785714285714285, 0.125, - 0.30357142857142855, 0.125, - 0.30357142857142855, 0.125, - 0.3392857142857143, 0.125, - 0.3392857142857143, 0.125, - 0.375, 0.125, - 0.375, 0.125, - 0.4107142857142857, 0.125, - 0.4107142857142857, 0.125, - 0.44642857142857145, 0.125, - 0.44642857142857145, 0.125, - 0.48214285714285715, 0.125, - 0.48214285714285715, 0.125, - 0.5178571428571429, 0.125, - 0.5178571428571429, 0.125, - 0.5535714285714286, 0.125, - 0.5535714285714286, 0.125, - 0.5892857142857143, 0.125, - 0.5892857142857143, 0.125, - 0.625, 0.125, - 0.625, 0.125, - 0.6607142857142857, 0.125, - 0.6607142857142857, 0.125, - 0.6964285714285714, 0.125, - 0.6964285714285714, 0.125, - 0.7321428571428571, 0.125, - 0.7321428571428571, 0.125, - 0.7678571428571429, 0.125, - 0.7678571428571429, 0.125, - 0.8035714285714286, 0.125, - 0.8035714285714286, 0.125, - 0.8392857142857143, 0.125, - 0.8392857142857143, 0.125, - 0.875, 0.125, - 0.875, 0.125, - 0.9107142857142857, 0.125, - 0.9107142857142857, 0.125, - 0.9464285714285714, 0.125, - 0.9464285714285714, 0.125, - 0.9821428571428571, 0.125, - 0.9821428571428571, 0.125, - 0.017857142857142856, 0.16071428571428573, - 0.017857142857142856, 0.16071428571428573, - 
0.05357142857142857, 0.16071428571428573, - 0.05357142857142857, 0.16071428571428573, - 0.08928571428571429, 0.16071428571428573, - 0.08928571428571429, 0.16071428571428573, - 0.125, 0.16071428571428573, - 0.125, 0.16071428571428573, - 0.16071428571428573, 0.16071428571428573, - 0.16071428571428573, 0.16071428571428573, - 0.19642857142857142, 0.16071428571428573, - 0.19642857142857142, 0.16071428571428573, - 0.23214285714285715, 0.16071428571428573, - 0.23214285714285715, 0.16071428571428573, - 0.26785714285714285, 0.16071428571428573, - 0.26785714285714285, 0.16071428571428573, - 0.30357142857142855, 0.16071428571428573, - 0.30357142857142855, 0.16071428571428573, - 0.3392857142857143, 0.16071428571428573, - 0.3392857142857143, 0.16071428571428573, - 0.375, 0.16071428571428573, - 0.375, 0.16071428571428573, - 0.4107142857142857, 0.16071428571428573, - 0.4107142857142857, 0.16071428571428573, - 0.44642857142857145, 0.16071428571428573, - 0.44642857142857145, 0.16071428571428573, - 0.48214285714285715, 0.16071428571428573, - 0.48214285714285715, 0.16071428571428573, - 0.5178571428571429, 0.16071428571428573, - 0.5178571428571429, 0.16071428571428573, - 0.5535714285714286, 0.16071428571428573, - 0.5535714285714286, 0.16071428571428573, - 0.5892857142857143, 0.16071428571428573, - 0.5892857142857143, 0.16071428571428573, - 0.625, 0.16071428571428573, - 0.625, 0.16071428571428573, - 0.6607142857142857, 0.16071428571428573, - 0.6607142857142857, 0.16071428571428573, - 0.6964285714285714, 0.16071428571428573, - 0.6964285714285714, 0.16071428571428573, - 0.7321428571428571, 0.16071428571428573, - 0.7321428571428571, 0.16071428571428573, - 0.7678571428571429, 0.16071428571428573, - 0.7678571428571429, 0.16071428571428573, - 0.8035714285714286, 0.16071428571428573, - 0.8035714285714286, 0.16071428571428573, - 0.8392857142857143, 0.16071428571428573, - 0.8392857142857143, 0.16071428571428573, - 0.875, 0.16071428571428573, - 0.875, 0.16071428571428573, - 0.9107142857142857, 
0.16071428571428573, - 0.9107142857142857, 0.16071428571428573, - 0.9464285714285714, 0.16071428571428573, - 0.9464285714285714, 0.16071428571428573, - 0.9821428571428571, 0.16071428571428573, - 0.9821428571428571, 0.16071428571428573, - 0.017857142857142856, 0.19642857142857142, - 0.017857142857142856, 0.19642857142857142, - 0.05357142857142857, 0.19642857142857142, - 0.05357142857142857, 0.19642857142857142, - 0.08928571428571429, 0.19642857142857142, - 0.08928571428571429, 0.19642857142857142, - 0.125, 0.19642857142857142, - 0.125, 0.19642857142857142, - 0.16071428571428573, 0.19642857142857142, - 0.16071428571428573, 0.19642857142857142, - 0.19642857142857142, 0.19642857142857142, - 0.19642857142857142, 0.19642857142857142, - 0.23214285714285715, 0.19642857142857142, - 0.23214285714285715, 0.19642857142857142, - 0.26785714285714285, 0.19642857142857142, - 0.26785714285714285, 0.19642857142857142, - 0.30357142857142855, 0.19642857142857142, - 0.30357142857142855, 0.19642857142857142, - 0.3392857142857143, 0.19642857142857142, - 0.3392857142857143, 0.19642857142857142, - 0.375, 0.19642857142857142, - 0.375, 0.19642857142857142, - 0.4107142857142857, 0.19642857142857142, - 0.4107142857142857, 0.19642857142857142, - 0.44642857142857145, 0.19642857142857142, - 0.44642857142857145, 0.19642857142857142, - 0.48214285714285715, 0.19642857142857142, - 0.48214285714285715, 0.19642857142857142, - 0.5178571428571429, 0.19642857142857142, - 0.5178571428571429, 0.19642857142857142, - 0.5535714285714286, 0.19642857142857142, - 0.5535714285714286, 0.19642857142857142, - 0.5892857142857143, 0.19642857142857142, - 0.5892857142857143, 0.19642857142857142, - 0.625, 0.19642857142857142, - 0.625, 0.19642857142857142, - 0.6607142857142857, 0.19642857142857142, - 0.6607142857142857, 0.19642857142857142, - 0.6964285714285714, 0.19642857142857142, - 0.6964285714285714, 0.19642857142857142, - 0.7321428571428571, 0.19642857142857142, - 0.7321428571428571, 0.19642857142857142, - 
0.7678571428571429, 0.19642857142857142, - 0.7678571428571429, 0.19642857142857142, - 0.8035714285714286, 0.19642857142857142, - 0.8035714285714286, 0.19642857142857142, - 0.8392857142857143, 0.19642857142857142, - 0.8392857142857143, 0.19642857142857142, - 0.875, 0.19642857142857142, - 0.875, 0.19642857142857142, - 0.9107142857142857, 0.19642857142857142, - 0.9107142857142857, 0.19642857142857142, - 0.9464285714285714, 0.19642857142857142, - 0.9464285714285714, 0.19642857142857142, - 0.9821428571428571, 0.19642857142857142, - 0.9821428571428571, 0.19642857142857142, - 0.017857142857142856, 0.23214285714285715, - 0.017857142857142856, 0.23214285714285715, - 0.05357142857142857, 0.23214285714285715, - 0.05357142857142857, 0.23214285714285715, - 0.08928571428571429, 0.23214285714285715, - 0.08928571428571429, 0.23214285714285715, - 0.125, 0.23214285714285715, - 0.125, 0.23214285714285715, - 0.16071428571428573, 0.23214285714285715, - 0.16071428571428573, 0.23214285714285715, - 0.19642857142857142, 0.23214285714285715, - 0.19642857142857142, 0.23214285714285715, - 0.23214285714285715, 0.23214285714285715, - 0.23214285714285715, 0.23214285714285715, - 0.26785714285714285, 0.23214285714285715, - 0.26785714285714285, 0.23214285714285715, - 0.30357142857142855, 0.23214285714285715, - 0.30357142857142855, 0.23214285714285715, - 0.3392857142857143, 0.23214285714285715, - 0.3392857142857143, 0.23214285714285715, - 0.375, 0.23214285714285715, - 0.375, 0.23214285714285715, - 0.4107142857142857, 0.23214285714285715, - 0.4107142857142857, 0.23214285714285715, - 0.44642857142857145, 0.23214285714285715, - 0.44642857142857145, 0.23214285714285715, - 0.48214285714285715, 0.23214285714285715, - 0.48214285714285715, 0.23214285714285715, - 0.5178571428571429, 0.23214285714285715, - 0.5178571428571429, 0.23214285714285715, - 0.5535714285714286, 0.23214285714285715, - 0.5535714285714286, 0.23214285714285715, - 0.5892857142857143, 0.23214285714285715, - 0.5892857142857143, 
0.23214285714285715, - 0.625, 0.23214285714285715, - 0.625, 0.23214285714285715, - 0.6607142857142857, 0.23214285714285715, - 0.6607142857142857, 0.23214285714285715, - 0.6964285714285714, 0.23214285714285715, - 0.6964285714285714, 0.23214285714285715, - 0.7321428571428571, 0.23214285714285715, - 0.7321428571428571, 0.23214285714285715, - 0.7678571428571429, 0.23214285714285715, - 0.7678571428571429, 0.23214285714285715, - 0.8035714285714286, 0.23214285714285715, - 0.8035714285714286, 0.23214285714285715, - 0.8392857142857143, 0.23214285714285715, - 0.8392857142857143, 0.23214285714285715, - 0.875, 0.23214285714285715, - 0.875, 0.23214285714285715, - 0.9107142857142857, 0.23214285714285715, - 0.9107142857142857, 0.23214285714285715, - 0.9464285714285714, 0.23214285714285715, - 0.9464285714285714, 0.23214285714285715, - 0.9821428571428571, 0.23214285714285715, - 0.9821428571428571, 0.23214285714285715, - 0.017857142857142856, 0.26785714285714285, - 0.017857142857142856, 0.26785714285714285, - 0.05357142857142857, 0.26785714285714285, - 0.05357142857142857, 0.26785714285714285, - 0.08928571428571429, 0.26785714285714285, - 0.08928571428571429, 0.26785714285714285, - 0.125, 0.26785714285714285, - 0.125, 0.26785714285714285, - 0.16071428571428573, 0.26785714285714285, - 0.16071428571428573, 0.26785714285714285, - 0.19642857142857142, 0.26785714285714285, - 0.19642857142857142, 0.26785714285714285, - 0.23214285714285715, 0.26785714285714285, - 0.23214285714285715, 0.26785714285714285, - 0.26785714285714285, 0.26785714285714285, - 0.26785714285714285, 0.26785714285714285, - 0.30357142857142855, 0.26785714285714285, - 0.30357142857142855, 0.26785714285714285, - 0.3392857142857143, 0.26785714285714285, - 0.3392857142857143, 0.26785714285714285, - 0.375, 0.26785714285714285, - 0.375, 0.26785714285714285, - 0.4107142857142857, 0.26785714285714285, - 0.4107142857142857, 0.26785714285714285, - 0.44642857142857145, 0.26785714285714285, - 0.44642857142857145, 
0.26785714285714285, - 0.48214285714285715, 0.26785714285714285, - 0.48214285714285715, 0.26785714285714285, - 0.5178571428571429, 0.26785714285714285, - 0.5178571428571429, 0.26785714285714285, - 0.5535714285714286, 0.26785714285714285, - 0.5535714285714286, 0.26785714285714285, - 0.5892857142857143, 0.26785714285714285, - 0.5892857142857143, 0.26785714285714285, - 0.625, 0.26785714285714285, - 0.625, 0.26785714285714285, - 0.6607142857142857, 0.26785714285714285, - 0.6607142857142857, 0.26785714285714285, - 0.6964285714285714, 0.26785714285714285, - 0.6964285714285714, 0.26785714285714285, - 0.7321428571428571, 0.26785714285714285, - 0.7321428571428571, 0.26785714285714285, - 0.7678571428571429, 0.26785714285714285, - 0.7678571428571429, 0.26785714285714285, - 0.8035714285714286, 0.26785714285714285, - 0.8035714285714286, 0.26785714285714285, - 0.8392857142857143, 0.26785714285714285, - 0.8392857142857143, 0.26785714285714285, - 0.875, 0.26785714285714285, - 0.875, 0.26785714285714285, - 0.9107142857142857, 0.26785714285714285, - 0.9107142857142857, 0.26785714285714285, - 0.9464285714285714, 0.26785714285714285, - 0.9464285714285714, 0.26785714285714285, - 0.9821428571428571, 0.26785714285714285, - 0.9821428571428571, 0.26785714285714285, - 0.017857142857142856, 0.30357142857142855, - 0.017857142857142856, 0.30357142857142855, - 0.05357142857142857, 0.30357142857142855, - 0.05357142857142857, 0.30357142857142855, - 0.08928571428571429, 0.30357142857142855, - 0.08928571428571429, 0.30357142857142855, - 0.125, 0.30357142857142855, - 0.125, 0.30357142857142855, - 0.16071428571428573, 0.30357142857142855, - 0.16071428571428573, 0.30357142857142855, - 0.19642857142857142, 0.30357142857142855, - 0.19642857142857142, 0.30357142857142855, - 0.23214285714285715, 0.30357142857142855, - 0.23214285714285715, 0.30357142857142855, - 0.26785714285714285, 0.30357142857142855, - 0.26785714285714285, 0.30357142857142855, - 0.30357142857142855, 0.30357142857142855, - 
0.30357142857142855, 0.30357142857142855, - 0.3392857142857143, 0.30357142857142855, - 0.3392857142857143, 0.30357142857142855, - 0.375, 0.30357142857142855, - 0.375, 0.30357142857142855, - 0.4107142857142857, 0.30357142857142855, - 0.4107142857142857, 0.30357142857142855, - 0.44642857142857145, 0.30357142857142855, - 0.44642857142857145, 0.30357142857142855, - 0.48214285714285715, 0.30357142857142855, - 0.48214285714285715, 0.30357142857142855, - 0.5178571428571429, 0.30357142857142855, - 0.5178571428571429, 0.30357142857142855, - 0.5535714285714286, 0.30357142857142855, - 0.5535714285714286, 0.30357142857142855, - 0.5892857142857143, 0.30357142857142855, - 0.5892857142857143, 0.30357142857142855, - 0.625, 0.30357142857142855, - 0.625, 0.30357142857142855, - 0.6607142857142857, 0.30357142857142855, - 0.6607142857142857, 0.30357142857142855, - 0.6964285714285714, 0.30357142857142855, - 0.6964285714285714, 0.30357142857142855, - 0.7321428571428571, 0.30357142857142855, - 0.7321428571428571, 0.30357142857142855, - 0.7678571428571429, 0.30357142857142855, - 0.7678571428571429, 0.30357142857142855, - 0.8035714285714286, 0.30357142857142855, - 0.8035714285714286, 0.30357142857142855, - 0.8392857142857143, 0.30357142857142855, - 0.8392857142857143, 0.30357142857142855, - 0.875, 0.30357142857142855, - 0.875, 0.30357142857142855, - 0.9107142857142857, 0.30357142857142855, - 0.9107142857142857, 0.30357142857142855, - 0.9464285714285714, 0.30357142857142855, - 0.9464285714285714, 0.30357142857142855, - 0.9821428571428571, 0.30357142857142855, - 0.9821428571428571, 0.30357142857142855, - 0.017857142857142856, 0.3392857142857143, - 0.017857142857142856, 0.3392857142857143, - 0.05357142857142857, 0.3392857142857143, - 0.05357142857142857, 0.3392857142857143, - 0.08928571428571429, 0.3392857142857143, - 0.08928571428571429, 0.3392857142857143, - 0.125, 0.3392857142857143, - 0.125, 0.3392857142857143, - 0.16071428571428573, 0.3392857142857143, - 0.16071428571428573, 
0.3392857142857143, - 0.19642857142857142, 0.3392857142857143, - 0.19642857142857142, 0.3392857142857143, - 0.23214285714285715, 0.3392857142857143, - 0.23214285714285715, 0.3392857142857143, - 0.26785714285714285, 0.3392857142857143, - 0.26785714285714285, 0.3392857142857143, - 0.30357142857142855, 0.3392857142857143, - 0.30357142857142855, 0.3392857142857143, - 0.3392857142857143, 0.3392857142857143, - 0.3392857142857143, 0.3392857142857143, - 0.375, 0.3392857142857143, - 0.375, 0.3392857142857143, - 0.4107142857142857, 0.3392857142857143, - 0.4107142857142857, 0.3392857142857143, - 0.44642857142857145, 0.3392857142857143, - 0.44642857142857145, 0.3392857142857143, - 0.48214285714285715, 0.3392857142857143, - 0.48214285714285715, 0.3392857142857143, - 0.5178571428571429, 0.3392857142857143, - 0.5178571428571429, 0.3392857142857143, - 0.5535714285714286, 0.3392857142857143, - 0.5535714285714286, 0.3392857142857143, - 0.5892857142857143, 0.3392857142857143, - 0.5892857142857143, 0.3392857142857143, - 0.625, 0.3392857142857143, - 0.625, 0.3392857142857143, - 0.6607142857142857, 0.3392857142857143, - 0.6607142857142857, 0.3392857142857143, - 0.6964285714285714, 0.3392857142857143, - 0.6964285714285714, 0.3392857142857143, - 0.7321428571428571, 0.3392857142857143, - 0.7321428571428571, 0.3392857142857143, - 0.7678571428571429, 0.3392857142857143, - 0.7678571428571429, 0.3392857142857143, - 0.8035714285714286, 0.3392857142857143, - 0.8035714285714286, 0.3392857142857143, - 0.8392857142857143, 0.3392857142857143, - 0.8392857142857143, 0.3392857142857143, - 0.875, 0.3392857142857143, - 0.875, 0.3392857142857143, - 0.9107142857142857, 0.3392857142857143, - 0.9107142857142857, 0.3392857142857143, - 0.9464285714285714, 0.3392857142857143, - 0.9464285714285714, 0.3392857142857143, - 0.9821428571428571, 0.3392857142857143, - 0.9821428571428571, 0.3392857142857143, - 0.017857142857142856, 0.375, - 0.017857142857142856, 0.375, - 0.05357142857142857, 0.375, - 
0.05357142857142857, 0.375, - 0.08928571428571429, 0.375, - 0.08928571428571429, 0.375, - 0.125, 0.375, - 0.125, 0.375, - 0.16071428571428573, 0.375, - 0.16071428571428573, 0.375, - 0.19642857142857142, 0.375, - 0.19642857142857142, 0.375, - 0.23214285714285715, 0.375, - 0.23214285714285715, 0.375, - 0.26785714285714285, 0.375, - 0.26785714285714285, 0.375, - 0.30357142857142855, 0.375, - 0.30357142857142855, 0.375, - 0.3392857142857143, 0.375, - 0.3392857142857143, 0.375, - 0.375, 0.375, - 0.375, 0.375, - 0.4107142857142857, 0.375, - 0.4107142857142857, 0.375, - 0.44642857142857145, 0.375, - 0.44642857142857145, 0.375, - 0.48214285714285715, 0.375, - 0.48214285714285715, 0.375, - 0.5178571428571429, 0.375, - 0.5178571428571429, 0.375, - 0.5535714285714286, 0.375, - 0.5535714285714286, 0.375, - 0.5892857142857143, 0.375, - 0.5892857142857143, 0.375, - 0.625, 0.375, - 0.625, 0.375, - 0.6607142857142857, 0.375, - 0.6607142857142857, 0.375, - 0.6964285714285714, 0.375, - 0.6964285714285714, 0.375, - 0.7321428571428571, 0.375, - 0.7321428571428571, 0.375, - 0.7678571428571429, 0.375, - 0.7678571428571429, 0.375, - 0.8035714285714286, 0.375, - 0.8035714285714286, 0.375, - 0.8392857142857143, 0.375, - 0.8392857142857143, 0.375, - 0.875, 0.375, - 0.875, 0.375, - 0.9107142857142857, 0.375, - 0.9107142857142857, 0.375, - 0.9464285714285714, 0.375, - 0.9464285714285714, 0.375, - 0.9821428571428571, 0.375, - 0.9821428571428571, 0.375, - 0.017857142857142856, 0.4107142857142857, - 0.017857142857142856, 0.4107142857142857, - 0.05357142857142857, 0.4107142857142857, - 0.05357142857142857, 0.4107142857142857, - 0.08928571428571429, 0.4107142857142857, - 0.08928571428571429, 0.4107142857142857, - 0.125, 0.4107142857142857, - 0.125, 0.4107142857142857, - 0.16071428571428573, 0.4107142857142857, - 0.16071428571428573, 0.4107142857142857, - 0.19642857142857142, 0.4107142857142857, - 0.19642857142857142, 0.4107142857142857, - 0.23214285714285715, 0.4107142857142857, - 
0.23214285714285715, 0.4107142857142857, - 0.26785714285714285, 0.4107142857142857, - 0.26785714285714285, 0.4107142857142857, - 0.30357142857142855, 0.4107142857142857, - 0.30357142857142855, 0.4107142857142857, - 0.3392857142857143, 0.4107142857142857, - 0.3392857142857143, 0.4107142857142857, - 0.375, 0.4107142857142857, - 0.375, 0.4107142857142857, - 0.4107142857142857, 0.4107142857142857, - 0.4107142857142857, 0.4107142857142857, - 0.44642857142857145, 0.4107142857142857, - 0.44642857142857145, 0.4107142857142857, - 0.48214285714285715, 0.4107142857142857, - 0.48214285714285715, 0.4107142857142857, - 0.5178571428571429, 0.4107142857142857, - 0.5178571428571429, 0.4107142857142857, - 0.5535714285714286, 0.4107142857142857, - 0.5535714285714286, 0.4107142857142857, - 0.5892857142857143, 0.4107142857142857, - 0.5892857142857143, 0.4107142857142857, - 0.625, 0.4107142857142857, - 0.625, 0.4107142857142857, - 0.6607142857142857, 0.4107142857142857, - 0.6607142857142857, 0.4107142857142857, - 0.6964285714285714, 0.4107142857142857, - 0.6964285714285714, 0.4107142857142857, - 0.7321428571428571, 0.4107142857142857, - 0.7321428571428571, 0.4107142857142857, - 0.7678571428571429, 0.4107142857142857, - 0.7678571428571429, 0.4107142857142857, - 0.8035714285714286, 0.4107142857142857, - 0.8035714285714286, 0.4107142857142857, - 0.8392857142857143, 0.4107142857142857, - 0.8392857142857143, 0.4107142857142857, - 0.875, 0.4107142857142857, - 0.875, 0.4107142857142857, - 0.9107142857142857, 0.4107142857142857, - 0.9107142857142857, 0.4107142857142857, - 0.9464285714285714, 0.4107142857142857, - 0.9464285714285714, 0.4107142857142857, - 0.9821428571428571, 0.4107142857142857, - 0.9821428571428571, 0.4107142857142857, - 0.017857142857142856, 0.44642857142857145, - 0.017857142857142856, 0.44642857142857145, - 0.05357142857142857, 0.44642857142857145, - 0.05357142857142857, 0.44642857142857145, - 0.08928571428571429, 0.44642857142857145, - 0.08928571428571429, 
0.44642857142857145, - 0.125, 0.44642857142857145, - 0.125, 0.44642857142857145, - 0.16071428571428573, 0.44642857142857145, - 0.16071428571428573, 0.44642857142857145, - 0.19642857142857142, 0.44642857142857145, - 0.19642857142857142, 0.44642857142857145, - 0.23214285714285715, 0.44642857142857145, - 0.23214285714285715, 0.44642857142857145, - 0.26785714285714285, 0.44642857142857145, - 0.26785714285714285, 0.44642857142857145, - 0.30357142857142855, 0.44642857142857145, - 0.30357142857142855, 0.44642857142857145, - 0.3392857142857143, 0.44642857142857145, - 0.3392857142857143, 0.44642857142857145, - 0.375, 0.44642857142857145, - 0.375, 0.44642857142857145, - 0.4107142857142857, 0.44642857142857145, - 0.4107142857142857, 0.44642857142857145, - 0.44642857142857145, 0.44642857142857145, - 0.44642857142857145, 0.44642857142857145, - 0.48214285714285715, 0.44642857142857145, - 0.48214285714285715, 0.44642857142857145, - 0.5178571428571429, 0.44642857142857145, - 0.5178571428571429, 0.44642857142857145, - 0.5535714285714286, 0.44642857142857145, - 0.5535714285714286, 0.44642857142857145, - 0.5892857142857143, 0.44642857142857145, - 0.5892857142857143, 0.44642857142857145, - 0.625, 0.44642857142857145, - 0.625, 0.44642857142857145, - 0.6607142857142857, 0.44642857142857145, - 0.6607142857142857, 0.44642857142857145, - 0.6964285714285714, 0.44642857142857145, - 0.6964285714285714, 0.44642857142857145, - 0.7321428571428571, 0.44642857142857145, - 0.7321428571428571, 0.44642857142857145, - 0.7678571428571429, 0.44642857142857145, - 0.7678571428571429, 0.44642857142857145, - 0.8035714285714286, 0.44642857142857145, - 0.8035714285714286, 0.44642857142857145, - 0.8392857142857143, 0.44642857142857145, - 0.8392857142857143, 0.44642857142857145, - 0.875, 0.44642857142857145, - 0.875, 0.44642857142857145, - 0.9107142857142857, 0.44642857142857145, - 0.9107142857142857, 0.44642857142857145, - 0.9464285714285714, 0.44642857142857145, - 0.9464285714285714, 0.44642857142857145, - 
0.9821428571428571, 0.44642857142857145, - 0.9821428571428571, 0.44642857142857145, - 0.017857142857142856, 0.48214285714285715, - 0.017857142857142856, 0.48214285714285715, - 0.05357142857142857, 0.48214285714285715, - 0.05357142857142857, 0.48214285714285715, - 0.08928571428571429, 0.48214285714285715, - 0.08928571428571429, 0.48214285714285715, - 0.125, 0.48214285714285715, - 0.125, 0.48214285714285715, - 0.16071428571428573, 0.48214285714285715, - 0.16071428571428573, 0.48214285714285715, - 0.19642857142857142, 0.48214285714285715, - 0.19642857142857142, 0.48214285714285715, - 0.23214285714285715, 0.48214285714285715, - 0.23214285714285715, 0.48214285714285715, - 0.26785714285714285, 0.48214285714285715, - 0.26785714285714285, 0.48214285714285715, - 0.30357142857142855, 0.48214285714285715, - 0.30357142857142855, 0.48214285714285715, - 0.3392857142857143, 0.48214285714285715, - 0.3392857142857143, 0.48214285714285715, - 0.375, 0.48214285714285715, - 0.375, 0.48214285714285715, - 0.4107142857142857, 0.48214285714285715, - 0.4107142857142857, 0.48214285714285715, - 0.44642857142857145, 0.48214285714285715, - 0.44642857142857145, 0.48214285714285715, - 0.48214285714285715, 0.48214285714285715, - 0.48214285714285715, 0.48214285714285715, - 0.5178571428571429, 0.48214285714285715, - 0.5178571428571429, 0.48214285714285715, - 0.5535714285714286, 0.48214285714285715, - 0.5535714285714286, 0.48214285714285715, - 0.5892857142857143, 0.48214285714285715, - 0.5892857142857143, 0.48214285714285715, - 0.625, 0.48214285714285715, - 0.625, 0.48214285714285715, - 0.6607142857142857, 0.48214285714285715, - 0.6607142857142857, 0.48214285714285715, - 0.6964285714285714, 0.48214285714285715, - 0.6964285714285714, 0.48214285714285715, - 0.7321428571428571, 0.48214285714285715, - 0.7321428571428571, 0.48214285714285715, - 0.7678571428571429, 0.48214285714285715, - 0.7678571428571429, 0.48214285714285715, - 0.8035714285714286, 0.48214285714285715, - 0.8035714285714286, 
0.48214285714285715, - 0.8392857142857143, 0.48214285714285715, - 0.8392857142857143, 0.48214285714285715, - 0.875, 0.48214285714285715, - 0.875, 0.48214285714285715, - 0.9107142857142857, 0.48214285714285715, - 0.9107142857142857, 0.48214285714285715, - 0.9464285714285714, 0.48214285714285715, - 0.9464285714285714, 0.48214285714285715, - 0.9821428571428571, 0.48214285714285715, - 0.9821428571428571, 0.48214285714285715, - 0.017857142857142856, 0.5178571428571429, - 0.017857142857142856, 0.5178571428571429, - 0.05357142857142857, 0.5178571428571429, - 0.05357142857142857, 0.5178571428571429, - 0.08928571428571429, 0.5178571428571429, - 0.08928571428571429, 0.5178571428571429, - 0.125, 0.5178571428571429, - 0.125, 0.5178571428571429, - 0.16071428571428573, 0.5178571428571429, - 0.16071428571428573, 0.5178571428571429, - 0.19642857142857142, 0.5178571428571429, - 0.19642857142857142, 0.5178571428571429, - 0.23214285714285715, 0.5178571428571429, - 0.23214285714285715, 0.5178571428571429, - 0.26785714285714285, 0.5178571428571429, - 0.26785714285714285, 0.5178571428571429, - 0.30357142857142855, 0.5178571428571429, - 0.30357142857142855, 0.5178571428571429, - 0.3392857142857143, 0.5178571428571429, - 0.3392857142857143, 0.5178571428571429, - 0.375, 0.5178571428571429, - 0.375, 0.5178571428571429, - 0.4107142857142857, 0.5178571428571429, - 0.4107142857142857, 0.5178571428571429, - 0.44642857142857145, 0.5178571428571429, - 0.44642857142857145, 0.5178571428571429, - 0.48214285714285715, 0.5178571428571429, - 0.48214285714285715, 0.5178571428571429, - 0.5178571428571429, 0.5178571428571429, - 0.5178571428571429, 0.5178571428571429, - 0.5535714285714286, 0.5178571428571429, - 0.5535714285714286, 0.5178571428571429, - 0.5892857142857143, 0.5178571428571429, - 0.5892857142857143, 0.5178571428571429, - 0.625, 0.5178571428571429, - 0.625, 0.5178571428571429, - 0.6607142857142857, 0.5178571428571429, - 0.6607142857142857, 0.5178571428571429, - 0.6964285714285714, 
0.5178571428571429, - 0.6964285714285714, 0.5178571428571429, - 0.7321428571428571, 0.5178571428571429, - 0.7321428571428571, 0.5178571428571429, - 0.7678571428571429, 0.5178571428571429, - 0.7678571428571429, 0.5178571428571429, - 0.8035714285714286, 0.5178571428571429, - 0.8035714285714286, 0.5178571428571429, - 0.8392857142857143, 0.5178571428571429, - 0.8392857142857143, 0.5178571428571429, - 0.875, 0.5178571428571429, - 0.875, 0.5178571428571429, - 0.9107142857142857, 0.5178571428571429, - 0.9107142857142857, 0.5178571428571429, - 0.9464285714285714, 0.5178571428571429, - 0.9464285714285714, 0.5178571428571429, - 0.9821428571428571, 0.5178571428571429, - 0.9821428571428571, 0.5178571428571429, - 0.017857142857142856, 0.5535714285714286, - 0.017857142857142856, 0.5535714285714286, - 0.05357142857142857, 0.5535714285714286, - 0.05357142857142857, 0.5535714285714286, - 0.08928571428571429, 0.5535714285714286, - 0.08928571428571429, 0.5535714285714286, - 0.125, 0.5535714285714286, - 0.125, 0.5535714285714286, - 0.16071428571428573, 0.5535714285714286, - 0.16071428571428573, 0.5535714285714286, - 0.19642857142857142, 0.5535714285714286, - 0.19642857142857142, 0.5535714285714286, - 0.23214285714285715, 0.5535714285714286, - 0.23214285714285715, 0.5535714285714286, - 0.26785714285714285, 0.5535714285714286, - 0.26785714285714285, 0.5535714285714286, - 0.30357142857142855, 0.5535714285714286, - 0.30357142857142855, 0.5535714285714286, - 0.3392857142857143, 0.5535714285714286, - 0.3392857142857143, 0.5535714285714286, - 0.375, 0.5535714285714286, - 0.375, 0.5535714285714286, - 0.4107142857142857, 0.5535714285714286, - 0.4107142857142857, 0.5535714285714286, - 0.44642857142857145, 0.5535714285714286, - 0.44642857142857145, 0.5535714285714286, - 0.48214285714285715, 0.5535714285714286, - 0.48214285714285715, 0.5535714285714286, - 0.5178571428571429, 0.5535714285714286, - 0.5178571428571429, 0.5535714285714286, - 0.5535714285714286, 0.5535714285714286, - 
0.5535714285714286, 0.5535714285714286, - 0.5892857142857143, 0.5535714285714286, - 0.5892857142857143, 0.5535714285714286, - 0.625, 0.5535714285714286, - 0.625, 0.5535714285714286, - 0.6607142857142857, 0.5535714285714286, - 0.6607142857142857, 0.5535714285714286, - 0.6964285714285714, 0.5535714285714286, - 0.6964285714285714, 0.5535714285714286, - 0.7321428571428571, 0.5535714285714286, - 0.7321428571428571, 0.5535714285714286, - 0.7678571428571429, 0.5535714285714286, - 0.7678571428571429, 0.5535714285714286, - 0.8035714285714286, 0.5535714285714286, - 0.8035714285714286, 0.5535714285714286, - 0.8392857142857143, 0.5535714285714286, - 0.8392857142857143, 0.5535714285714286, - 0.875, 0.5535714285714286, - 0.875, 0.5535714285714286, - 0.9107142857142857, 0.5535714285714286, - 0.9107142857142857, 0.5535714285714286, - 0.9464285714285714, 0.5535714285714286, - 0.9464285714285714, 0.5535714285714286, - 0.9821428571428571, 0.5535714285714286, - 0.9821428571428571, 0.5535714285714286, - 0.017857142857142856, 0.5892857142857143, - 0.017857142857142856, 0.5892857142857143, - 0.05357142857142857, 0.5892857142857143, - 0.05357142857142857, 0.5892857142857143, - 0.08928571428571429, 0.5892857142857143, - 0.08928571428571429, 0.5892857142857143, - 0.125, 0.5892857142857143, - 0.125, 0.5892857142857143, - 0.16071428571428573, 0.5892857142857143, - 0.16071428571428573, 0.5892857142857143, - 0.19642857142857142, 0.5892857142857143, - 0.19642857142857142, 0.5892857142857143, - 0.23214285714285715, 0.5892857142857143, - 0.23214285714285715, 0.5892857142857143, - 0.26785714285714285, 0.5892857142857143, - 0.26785714285714285, 0.5892857142857143, - 0.30357142857142855, 0.5892857142857143, - 0.30357142857142855, 0.5892857142857143, - 0.3392857142857143, 0.5892857142857143, - 0.3392857142857143, 0.5892857142857143, - 0.375, 0.5892857142857143, - 0.375, 0.5892857142857143, - 0.4107142857142857, 0.5892857142857143, - 0.4107142857142857, 0.5892857142857143, - 0.44642857142857145, 
0.5892857142857143, - 0.44642857142857145, 0.5892857142857143, - 0.48214285714285715, 0.5892857142857143, - 0.48214285714285715, 0.5892857142857143, - 0.5178571428571429, 0.5892857142857143, - 0.5178571428571429, 0.5892857142857143, - 0.5535714285714286, 0.5892857142857143, - 0.5535714285714286, 0.5892857142857143, - 0.5892857142857143, 0.5892857142857143, - 0.5892857142857143, 0.5892857142857143, - 0.625, 0.5892857142857143, - 0.625, 0.5892857142857143, - 0.6607142857142857, 0.5892857142857143, - 0.6607142857142857, 0.5892857142857143, - 0.6964285714285714, 0.5892857142857143, - 0.6964285714285714, 0.5892857142857143, - 0.7321428571428571, 0.5892857142857143, - 0.7321428571428571, 0.5892857142857143, - 0.7678571428571429, 0.5892857142857143, - 0.7678571428571429, 0.5892857142857143, - 0.8035714285714286, 0.5892857142857143, - 0.8035714285714286, 0.5892857142857143, - 0.8392857142857143, 0.5892857142857143, - 0.8392857142857143, 0.5892857142857143, - 0.875, 0.5892857142857143, - 0.875, 0.5892857142857143, - 0.9107142857142857, 0.5892857142857143, - 0.9107142857142857, 0.5892857142857143, - 0.9464285714285714, 0.5892857142857143, - 0.9464285714285714, 0.5892857142857143, - 0.9821428571428571, 0.5892857142857143, - 0.9821428571428571, 0.5892857142857143, - 0.017857142857142856, 0.625, - 0.017857142857142856, 0.625, - 0.05357142857142857, 0.625, - 0.05357142857142857, 0.625, - 0.08928571428571429, 0.625, - 0.08928571428571429, 0.625, - 0.125, 0.625, - 0.125, 0.625, - 0.16071428571428573, 0.625, - 0.16071428571428573, 0.625, - 0.19642857142857142, 0.625, - 0.19642857142857142, 0.625, - 0.23214285714285715, 0.625, - 0.23214285714285715, 0.625, - 0.26785714285714285, 0.625, - 0.26785714285714285, 0.625, - 0.30357142857142855, 0.625, - 0.30357142857142855, 0.625, - 0.3392857142857143, 0.625, - 0.3392857142857143, 0.625, - 0.375, 0.625, - 0.375, 0.625, - 0.4107142857142857, 0.625, - 0.4107142857142857, 0.625, - 0.44642857142857145, 0.625, - 0.44642857142857145, 0.625, - 
0.48214285714285715, 0.625, - 0.48214285714285715, 0.625, - 0.5178571428571429, 0.625, - 0.5178571428571429, 0.625, - 0.5535714285714286, 0.625, - 0.5535714285714286, 0.625, - 0.5892857142857143, 0.625, - 0.5892857142857143, 0.625, - 0.625, 0.625, - 0.625, 0.625, - 0.6607142857142857, 0.625, - 0.6607142857142857, 0.625, - 0.6964285714285714, 0.625, - 0.6964285714285714, 0.625, - 0.7321428571428571, 0.625, - 0.7321428571428571, 0.625, - 0.7678571428571429, 0.625, - 0.7678571428571429, 0.625, - 0.8035714285714286, 0.625, - 0.8035714285714286, 0.625, - 0.8392857142857143, 0.625, - 0.8392857142857143, 0.625, - 0.875, 0.625, - 0.875, 0.625, - 0.9107142857142857, 0.625, - 0.9107142857142857, 0.625, - 0.9464285714285714, 0.625, - 0.9464285714285714, 0.625, - 0.9821428571428571, 0.625, - 0.9821428571428571, 0.625, - 0.017857142857142856, 0.6607142857142857, - 0.017857142857142856, 0.6607142857142857, - 0.05357142857142857, 0.6607142857142857, - 0.05357142857142857, 0.6607142857142857, - 0.08928571428571429, 0.6607142857142857, - 0.08928571428571429, 0.6607142857142857, - 0.125, 0.6607142857142857, - 0.125, 0.6607142857142857, - 0.16071428571428573, 0.6607142857142857, - 0.16071428571428573, 0.6607142857142857, - 0.19642857142857142, 0.6607142857142857, - 0.19642857142857142, 0.6607142857142857, - 0.23214285714285715, 0.6607142857142857, - 0.23214285714285715, 0.6607142857142857, - 0.26785714285714285, 0.6607142857142857, - 0.26785714285714285, 0.6607142857142857, - 0.30357142857142855, 0.6607142857142857, - 0.30357142857142855, 0.6607142857142857, - 0.3392857142857143, 0.6607142857142857, - 0.3392857142857143, 0.6607142857142857, - 0.375, 0.6607142857142857, - 0.375, 0.6607142857142857, - 0.4107142857142857, 0.6607142857142857, - 0.4107142857142857, 0.6607142857142857, - 0.44642857142857145, 0.6607142857142857, - 0.44642857142857145, 0.6607142857142857, - 0.48214285714285715, 0.6607142857142857, - 0.48214285714285715, 0.6607142857142857, - 0.5178571428571429, 
0.6607142857142857, - 0.5178571428571429, 0.6607142857142857, - 0.5535714285714286, 0.6607142857142857, - 0.5535714285714286, 0.6607142857142857, - 0.5892857142857143, 0.6607142857142857, - 0.5892857142857143, 0.6607142857142857, - 0.625, 0.6607142857142857, - 0.625, 0.6607142857142857, - 0.6607142857142857, 0.6607142857142857, - 0.6607142857142857, 0.6607142857142857, - 0.6964285714285714, 0.6607142857142857, - 0.6964285714285714, 0.6607142857142857, - 0.7321428571428571, 0.6607142857142857, - 0.7321428571428571, 0.6607142857142857, - 0.7678571428571429, 0.6607142857142857, - 0.7678571428571429, 0.6607142857142857, - 0.8035714285714286, 0.6607142857142857, - 0.8035714285714286, 0.6607142857142857, - 0.8392857142857143, 0.6607142857142857, - 0.8392857142857143, 0.6607142857142857, - 0.875, 0.6607142857142857, - 0.875, 0.6607142857142857, - 0.9107142857142857, 0.6607142857142857, - 0.9107142857142857, 0.6607142857142857, - 0.9464285714285714, 0.6607142857142857, - 0.9464285714285714, 0.6607142857142857, - 0.9821428571428571, 0.6607142857142857, - 0.9821428571428571, 0.6607142857142857, - 0.017857142857142856, 0.6964285714285714, - 0.017857142857142856, 0.6964285714285714, - 0.05357142857142857, 0.6964285714285714, - 0.05357142857142857, 0.6964285714285714, - 0.08928571428571429, 0.6964285714285714, - 0.08928571428571429, 0.6964285714285714, - 0.125, 0.6964285714285714, - 0.125, 0.6964285714285714, - 0.16071428571428573, 0.6964285714285714, - 0.16071428571428573, 0.6964285714285714, - 0.19642857142857142, 0.6964285714285714, - 0.19642857142857142, 0.6964285714285714, - 0.23214285714285715, 0.6964285714285714, - 0.23214285714285715, 0.6964285714285714, - 0.26785714285714285, 0.6964285714285714, - 0.26785714285714285, 0.6964285714285714, - 0.30357142857142855, 0.6964285714285714, - 0.30357142857142855, 0.6964285714285714, - 0.3392857142857143, 0.6964285714285714, - 0.3392857142857143, 0.6964285714285714, - 0.375, 0.6964285714285714, - 0.375, 0.6964285714285714, - 
0.4107142857142857, 0.6964285714285714, - 0.4107142857142857, 0.6964285714285714, - 0.44642857142857145, 0.6964285714285714, - 0.44642857142857145, 0.6964285714285714, - 0.48214285714285715, 0.6964285714285714, - 0.48214285714285715, 0.6964285714285714, - 0.5178571428571429, 0.6964285714285714, - 0.5178571428571429, 0.6964285714285714, - 0.5535714285714286, 0.6964285714285714, - 0.5535714285714286, 0.6964285714285714, - 0.5892857142857143, 0.6964285714285714, - 0.5892857142857143, 0.6964285714285714, - 0.625, 0.6964285714285714, - 0.625, 0.6964285714285714, - 0.6607142857142857, 0.6964285714285714, - 0.6607142857142857, 0.6964285714285714, - 0.6964285714285714, 0.6964285714285714, - 0.6964285714285714, 0.6964285714285714, - 0.7321428571428571, 0.6964285714285714, - 0.7321428571428571, 0.6964285714285714, - 0.7678571428571429, 0.6964285714285714, - 0.7678571428571429, 0.6964285714285714, - 0.8035714285714286, 0.6964285714285714, - 0.8035714285714286, 0.6964285714285714, - 0.8392857142857143, 0.6964285714285714, - 0.8392857142857143, 0.6964285714285714, - 0.875, 0.6964285714285714, - 0.875, 0.6964285714285714, - 0.9107142857142857, 0.6964285714285714, - 0.9107142857142857, 0.6964285714285714, - 0.9464285714285714, 0.6964285714285714, - 0.9464285714285714, 0.6964285714285714, - 0.9821428571428571, 0.6964285714285714, - 0.9821428571428571, 0.6964285714285714, - 0.017857142857142856, 0.7321428571428571, - 0.017857142857142856, 0.7321428571428571, - 0.05357142857142857, 0.7321428571428571, - 0.05357142857142857, 0.7321428571428571, - 0.08928571428571429, 0.7321428571428571, - 0.08928571428571429, 0.7321428571428571, - 0.125, 0.7321428571428571, - 0.125, 0.7321428571428571, - 0.16071428571428573, 0.7321428571428571, - 0.16071428571428573, 0.7321428571428571, - 0.19642857142857142, 0.7321428571428571, - 0.19642857142857142, 0.7321428571428571, - 0.23214285714285715, 0.7321428571428571, - 0.23214285714285715, 0.7321428571428571, - 0.26785714285714285, 0.7321428571428571, - 
0.26785714285714285, 0.7321428571428571, - 0.30357142857142855, 0.7321428571428571, - 0.30357142857142855, 0.7321428571428571, - 0.3392857142857143, 0.7321428571428571, - 0.3392857142857143, 0.7321428571428571, - 0.375, 0.7321428571428571, - 0.375, 0.7321428571428571, - 0.4107142857142857, 0.7321428571428571, - 0.4107142857142857, 0.7321428571428571, - 0.44642857142857145, 0.7321428571428571, - 0.44642857142857145, 0.7321428571428571, - 0.48214285714285715, 0.7321428571428571, - 0.48214285714285715, 0.7321428571428571, - 0.5178571428571429, 0.7321428571428571, - 0.5178571428571429, 0.7321428571428571, - 0.5535714285714286, 0.7321428571428571, - 0.5535714285714286, 0.7321428571428571, - 0.5892857142857143, 0.7321428571428571, - 0.5892857142857143, 0.7321428571428571, - 0.625, 0.7321428571428571, - 0.625, 0.7321428571428571, - 0.6607142857142857, 0.7321428571428571, - 0.6607142857142857, 0.7321428571428571, - 0.6964285714285714, 0.7321428571428571, - 0.6964285714285714, 0.7321428571428571, - 0.7321428571428571, 0.7321428571428571, - 0.7321428571428571, 0.7321428571428571, - 0.7678571428571429, 0.7321428571428571, - 0.7678571428571429, 0.7321428571428571, - 0.8035714285714286, 0.7321428571428571, - 0.8035714285714286, 0.7321428571428571, - 0.8392857142857143, 0.7321428571428571, - 0.8392857142857143, 0.7321428571428571, - 0.875, 0.7321428571428571, - 0.875, 0.7321428571428571, - 0.9107142857142857, 0.7321428571428571, - 0.9107142857142857, 0.7321428571428571, - 0.9464285714285714, 0.7321428571428571, - 0.9464285714285714, 0.7321428571428571, - 0.9821428571428571, 0.7321428571428571, - 0.9821428571428571, 0.7321428571428571, - 0.017857142857142856, 0.7678571428571429, - 0.017857142857142856, 0.7678571428571429, - 0.05357142857142857, 0.7678571428571429, - 0.05357142857142857, 0.7678571428571429, - 0.08928571428571429, 0.7678571428571429, - 0.08928571428571429, 0.7678571428571429, - 0.125, 0.7678571428571429, - 0.125, 0.7678571428571429, - 0.16071428571428573, 
0.7678571428571429, - 0.16071428571428573, 0.7678571428571429, - 0.19642857142857142, 0.7678571428571429, - 0.19642857142857142, 0.7678571428571429, - 0.23214285714285715, 0.7678571428571429, - 0.23214285714285715, 0.7678571428571429, - 0.26785714285714285, 0.7678571428571429, - 0.26785714285714285, 0.7678571428571429, - 0.30357142857142855, 0.7678571428571429, - 0.30357142857142855, 0.7678571428571429, - 0.3392857142857143, 0.7678571428571429, - 0.3392857142857143, 0.7678571428571429, - 0.375, 0.7678571428571429, - 0.375, 0.7678571428571429, - 0.4107142857142857, 0.7678571428571429, - 0.4107142857142857, 0.7678571428571429, - 0.44642857142857145, 0.7678571428571429, - 0.44642857142857145, 0.7678571428571429, - 0.48214285714285715, 0.7678571428571429, - 0.48214285714285715, 0.7678571428571429, - 0.5178571428571429, 0.7678571428571429, - 0.5178571428571429, 0.7678571428571429, - 0.5535714285714286, 0.7678571428571429, - 0.5535714285714286, 0.7678571428571429, - 0.5892857142857143, 0.7678571428571429, - 0.5892857142857143, 0.7678571428571429, - 0.625, 0.7678571428571429, - 0.625, 0.7678571428571429, - 0.6607142857142857, 0.7678571428571429, - 0.6607142857142857, 0.7678571428571429, - 0.6964285714285714, 0.7678571428571429, - 0.6964285714285714, 0.7678571428571429, - 0.7321428571428571, 0.7678571428571429, - 0.7321428571428571, 0.7678571428571429, - 0.7678571428571429, 0.7678571428571429, - 0.7678571428571429, 0.7678571428571429, - 0.8035714285714286, 0.7678571428571429, - 0.8035714285714286, 0.7678571428571429, - 0.8392857142857143, 0.7678571428571429, - 0.8392857142857143, 0.7678571428571429, - 0.875, 0.7678571428571429, - 0.875, 0.7678571428571429, - 0.9107142857142857, 0.7678571428571429, - 0.9107142857142857, 0.7678571428571429, - 0.9464285714285714, 0.7678571428571429, - 0.9464285714285714, 0.7678571428571429, - 0.9821428571428571, 0.7678571428571429, - 0.9821428571428571, 0.7678571428571429, - 0.017857142857142856, 0.8035714285714286, - 0.017857142857142856, 
0.8035714285714286, - 0.05357142857142857, 0.8035714285714286, - 0.05357142857142857, 0.8035714285714286, - 0.08928571428571429, 0.8035714285714286, - 0.08928571428571429, 0.8035714285714286, - 0.125, 0.8035714285714286, - 0.125, 0.8035714285714286, - 0.16071428571428573, 0.8035714285714286, - 0.16071428571428573, 0.8035714285714286, - 0.19642857142857142, 0.8035714285714286, - 0.19642857142857142, 0.8035714285714286, - 0.23214285714285715, 0.8035714285714286, - 0.23214285714285715, 0.8035714285714286, - 0.26785714285714285, 0.8035714285714286, - 0.26785714285714285, 0.8035714285714286, - 0.30357142857142855, 0.8035714285714286, - 0.30357142857142855, 0.8035714285714286, - 0.3392857142857143, 0.8035714285714286, - 0.3392857142857143, 0.8035714285714286, - 0.375, 0.8035714285714286, - 0.375, 0.8035714285714286, - 0.4107142857142857, 0.8035714285714286, - 0.4107142857142857, 0.8035714285714286, - 0.44642857142857145, 0.8035714285714286, - 0.44642857142857145, 0.8035714285714286, - 0.48214285714285715, 0.8035714285714286, - 0.48214285714285715, 0.8035714285714286, - 0.5178571428571429, 0.8035714285714286, - 0.5178571428571429, 0.8035714285714286, - 0.5535714285714286, 0.8035714285714286, - 0.5535714285714286, 0.8035714285714286, - 0.5892857142857143, 0.8035714285714286, - 0.5892857142857143, 0.8035714285714286, - 0.625, 0.8035714285714286, - 0.625, 0.8035714285714286, - 0.6607142857142857, 0.8035714285714286, - 0.6607142857142857, 0.8035714285714286, - 0.6964285714285714, 0.8035714285714286, - 0.6964285714285714, 0.8035714285714286, - 0.7321428571428571, 0.8035714285714286, - 0.7321428571428571, 0.8035714285714286, - 0.7678571428571429, 0.8035714285714286, - 0.7678571428571429, 0.8035714285714286, - 0.8035714285714286, 0.8035714285714286, - 0.8035714285714286, 0.8035714285714286, - 0.8392857142857143, 0.8035714285714286, - 0.8392857142857143, 0.8035714285714286, - 0.875, 0.8035714285714286, - 0.875, 0.8035714285714286, - 0.9107142857142857, 0.8035714285714286, - 
0.9107142857142857, 0.8035714285714286, - 0.9464285714285714, 0.8035714285714286, - 0.9464285714285714, 0.8035714285714286, - 0.9821428571428571, 0.8035714285714286, - 0.9821428571428571, 0.8035714285714286, - 0.017857142857142856, 0.8392857142857143, - 0.017857142857142856, 0.8392857142857143, - 0.05357142857142857, 0.8392857142857143, - 0.05357142857142857, 0.8392857142857143, - 0.08928571428571429, 0.8392857142857143, - 0.08928571428571429, 0.8392857142857143, - 0.125, 0.8392857142857143, - 0.125, 0.8392857142857143, - 0.16071428571428573, 0.8392857142857143, - 0.16071428571428573, 0.8392857142857143, - 0.19642857142857142, 0.8392857142857143, - 0.19642857142857142, 0.8392857142857143, - 0.23214285714285715, 0.8392857142857143, - 0.23214285714285715, 0.8392857142857143, - 0.26785714285714285, 0.8392857142857143, - 0.26785714285714285, 0.8392857142857143, - 0.30357142857142855, 0.8392857142857143, - 0.30357142857142855, 0.8392857142857143, - 0.3392857142857143, 0.8392857142857143, - 0.3392857142857143, 0.8392857142857143, - 0.375, 0.8392857142857143, - 0.375, 0.8392857142857143, - 0.4107142857142857, 0.8392857142857143, - 0.4107142857142857, 0.8392857142857143, - 0.44642857142857145, 0.8392857142857143, - 0.44642857142857145, 0.8392857142857143, - 0.48214285714285715, 0.8392857142857143, - 0.48214285714285715, 0.8392857142857143, - 0.5178571428571429, 0.8392857142857143, - 0.5178571428571429, 0.8392857142857143, - 0.5535714285714286, 0.8392857142857143, - 0.5535714285714286, 0.8392857142857143, - 0.5892857142857143, 0.8392857142857143, - 0.5892857142857143, 0.8392857142857143, - 0.625, 0.8392857142857143, - 0.625, 0.8392857142857143, - 0.6607142857142857, 0.8392857142857143, - 0.6607142857142857, 0.8392857142857143, - 0.6964285714285714, 0.8392857142857143, - 0.6964285714285714, 0.8392857142857143, - 0.7321428571428571, 0.8392857142857143, - 0.7321428571428571, 0.8392857142857143, - 0.7678571428571429, 0.8392857142857143, - 0.7678571428571429, 0.8392857142857143, 
- 0.8035714285714286, 0.8392857142857143, - 0.8035714285714286, 0.8392857142857143, - 0.8392857142857143, 0.8392857142857143, - 0.8392857142857143, 0.8392857142857143, - 0.875, 0.8392857142857143, - 0.875, 0.8392857142857143, - 0.9107142857142857, 0.8392857142857143, - 0.9107142857142857, 0.8392857142857143, - 0.9464285714285714, 0.8392857142857143, - 0.9464285714285714, 0.8392857142857143, - 0.9821428571428571, 0.8392857142857143, - 0.9821428571428571, 0.8392857142857143, - 0.017857142857142856, 0.875, - 0.017857142857142856, 0.875, - 0.05357142857142857, 0.875, - 0.05357142857142857, 0.875, - 0.08928571428571429, 0.875, - 0.08928571428571429, 0.875, - 0.125, 0.875, - 0.125, 0.875, - 0.16071428571428573, 0.875, - 0.16071428571428573, 0.875, - 0.19642857142857142, 0.875, - 0.19642857142857142, 0.875, - 0.23214285714285715, 0.875, - 0.23214285714285715, 0.875, - 0.26785714285714285, 0.875, - 0.26785714285714285, 0.875, - 0.30357142857142855, 0.875, - 0.30357142857142855, 0.875, - 0.3392857142857143, 0.875, - 0.3392857142857143, 0.875, - 0.375, 0.875, - 0.375, 0.875, - 0.4107142857142857, 0.875, - 0.4107142857142857, 0.875, - 0.44642857142857145, 0.875, - 0.44642857142857145, 0.875, - 0.48214285714285715, 0.875, - 0.48214285714285715, 0.875, - 0.5178571428571429, 0.875, - 0.5178571428571429, 0.875, - 0.5535714285714286, 0.875, - 0.5535714285714286, 0.875, - 0.5892857142857143, 0.875, - 0.5892857142857143, 0.875, - 0.625, 0.875, - 0.625, 0.875, - 0.6607142857142857, 0.875, - 0.6607142857142857, 0.875, - 0.6964285714285714, 0.875, - 0.6964285714285714, 0.875, - 0.7321428571428571, 0.875, - 0.7321428571428571, 0.875, - 0.7678571428571429, 0.875, - 0.7678571428571429, 0.875, - 0.8035714285714286, 0.875, - 0.8035714285714286, 0.875, - 0.8392857142857143, 0.875, - 0.8392857142857143, 0.875, - 0.875, 0.875, - 0.875, 0.875, - 0.9107142857142857, 0.875, - 0.9107142857142857, 0.875, - 0.9464285714285714, 0.875, - 0.9464285714285714, 0.875, - 0.9821428571428571, 0.875, - 
0.9821428571428571, 0.875, - 0.017857142857142856, 0.9107142857142857, - 0.017857142857142856, 0.9107142857142857, - 0.05357142857142857, 0.9107142857142857, - 0.05357142857142857, 0.9107142857142857, - 0.08928571428571429, 0.9107142857142857, - 0.08928571428571429, 0.9107142857142857, - 0.125, 0.9107142857142857, - 0.125, 0.9107142857142857, - 0.16071428571428573, 0.9107142857142857, - 0.16071428571428573, 0.9107142857142857, - 0.19642857142857142, 0.9107142857142857, - 0.19642857142857142, 0.9107142857142857, - 0.23214285714285715, 0.9107142857142857, - 0.23214285714285715, 0.9107142857142857, - 0.26785714285714285, 0.9107142857142857, - 0.26785714285714285, 0.9107142857142857, - 0.30357142857142855, 0.9107142857142857, - 0.30357142857142855, 0.9107142857142857, - 0.3392857142857143, 0.9107142857142857, - 0.3392857142857143, 0.9107142857142857, - 0.375, 0.9107142857142857, - 0.375, 0.9107142857142857, - 0.4107142857142857, 0.9107142857142857, - 0.4107142857142857, 0.9107142857142857, - 0.44642857142857145, 0.9107142857142857, - 0.44642857142857145, 0.9107142857142857, - 0.48214285714285715, 0.9107142857142857, - 0.48214285714285715, 0.9107142857142857, - 0.5178571428571429, 0.9107142857142857, - 0.5178571428571429, 0.9107142857142857, - 0.5535714285714286, 0.9107142857142857, - 0.5535714285714286, 0.9107142857142857, - 0.5892857142857143, 0.9107142857142857, - 0.5892857142857143, 0.9107142857142857, - 0.625, 0.9107142857142857, - 0.625, 0.9107142857142857, - 0.6607142857142857, 0.9107142857142857, - 0.6607142857142857, 0.9107142857142857, - 0.6964285714285714, 0.9107142857142857, - 0.6964285714285714, 0.9107142857142857, - 0.7321428571428571, 0.9107142857142857, - 0.7321428571428571, 0.9107142857142857, - 0.7678571428571429, 0.9107142857142857, - 0.7678571428571429, 0.9107142857142857, - 0.8035714285714286, 0.9107142857142857, - 0.8035714285714286, 0.9107142857142857, - 0.8392857142857143, 0.9107142857142857, - 0.8392857142857143, 0.9107142857142857, - 0.875, 
0.9107142857142857, - 0.875, 0.9107142857142857, - 0.9107142857142857, 0.9107142857142857, - 0.9107142857142857, 0.9107142857142857, - 0.9464285714285714, 0.9107142857142857, - 0.9464285714285714, 0.9107142857142857, - 0.9821428571428571, 0.9107142857142857, - 0.9821428571428571, 0.9107142857142857, - 0.017857142857142856, 0.9464285714285714, - 0.017857142857142856, 0.9464285714285714, - 0.05357142857142857, 0.9464285714285714, - 0.05357142857142857, 0.9464285714285714, - 0.08928571428571429, 0.9464285714285714, - 0.08928571428571429, 0.9464285714285714, - 0.125, 0.9464285714285714, - 0.125, 0.9464285714285714, - 0.16071428571428573, 0.9464285714285714, - 0.16071428571428573, 0.9464285714285714, - 0.19642857142857142, 0.9464285714285714, - 0.19642857142857142, 0.9464285714285714, - 0.23214285714285715, 0.9464285714285714, - 0.23214285714285715, 0.9464285714285714, - 0.26785714285714285, 0.9464285714285714, - 0.26785714285714285, 0.9464285714285714, - 0.30357142857142855, 0.9464285714285714, - 0.30357142857142855, 0.9464285714285714, - 0.3392857142857143, 0.9464285714285714, - 0.3392857142857143, 0.9464285714285714, - 0.375, 0.9464285714285714, - 0.375, 0.9464285714285714, - 0.4107142857142857, 0.9464285714285714, - 0.4107142857142857, 0.9464285714285714, - 0.44642857142857145, 0.9464285714285714, - 0.44642857142857145, 0.9464285714285714, - 0.48214285714285715, 0.9464285714285714, - 0.48214285714285715, 0.9464285714285714, - 0.5178571428571429, 0.9464285714285714, - 0.5178571428571429, 0.9464285714285714, - 0.5535714285714286, 0.9464285714285714, - 0.5535714285714286, 0.9464285714285714, - 0.5892857142857143, 0.9464285714285714, - 0.5892857142857143, 0.9464285714285714, - 0.625, 0.9464285714285714, - 0.625, 0.9464285714285714, - 0.6607142857142857, 0.9464285714285714, - 0.6607142857142857, 0.9464285714285714, - 0.6964285714285714, 0.9464285714285714, - 0.6964285714285714, 0.9464285714285714, - 0.7321428571428571, 0.9464285714285714, - 0.7321428571428571, 
0.9464285714285714, - 0.7678571428571429, 0.9464285714285714, - 0.7678571428571429, 0.9464285714285714, - 0.8035714285714286, 0.9464285714285714, - 0.8035714285714286, 0.9464285714285714, - 0.8392857142857143, 0.9464285714285714, - 0.8392857142857143, 0.9464285714285714, - 0.875, 0.9464285714285714, - 0.875, 0.9464285714285714, - 0.9107142857142857, 0.9464285714285714, - 0.9107142857142857, 0.9464285714285714, - 0.9464285714285714, 0.9464285714285714, - 0.9464285714285714, 0.9464285714285714, - 0.9821428571428571, 0.9464285714285714, - 0.9821428571428571, 0.9464285714285714, - 0.017857142857142856, 0.9821428571428571, - 0.017857142857142856, 0.9821428571428571, - 0.05357142857142857, 0.9821428571428571, - 0.05357142857142857, 0.9821428571428571, - 0.08928571428571429, 0.9821428571428571, - 0.08928571428571429, 0.9821428571428571, - 0.125, 0.9821428571428571, - 0.125, 0.9821428571428571, - 0.16071428571428573, 0.9821428571428571, - 0.16071428571428573, 0.9821428571428571, - 0.19642857142857142, 0.9821428571428571, - 0.19642857142857142, 0.9821428571428571, - 0.23214285714285715, 0.9821428571428571, - 0.23214285714285715, 0.9821428571428571, - 0.26785714285714285, 0.9821428571428571, - 0.26785714285714285, 0.9821428571428571, - 0.30357142857142855, 0.9821428571428571, - 0.30357142857142855, 0.9821428571428571, - 0.3392857142857143, 0.9821428571428571, - 0.3392857142857143, 0.9821428571428571, - 0.375, 0.9821428571428571, - 0.375, 0.9821428571428571, - 0.4107142857142857, 0.9821428571428571, - 0.4107142857142857, 0.9821428571428571, - 0.44642857142857145, 0.9821428571428571, - 0.44642857142857145, 0.9821428571428571, - 0.48214285714285715, 0.9821428571428571, - 0.48214285714285715, 0.9821428571428571, - 0.5178571428571429, 0.9821428571428571, - 0.5178571428571429, 0.9821428571428571, - 0.5535714285714286, 0.9821428571428571, - 0.5535714285714286, 0.9821428571428571, - 0.5892857142857143, 0.9821428571428571, - 0.5892857142857143, 0.9821428571428571, - 0.625, 
0.9821428571428571, - 0.625, 0.9821428571428571, - 0.6607142857142857, 0.9821428571428571, - 0.6607142857142857, 0.9821428571428571, - 0.6964285714285714, 0.9821428571428571, - 0.6964285714285714, 0.9821428571428571, - 0.7321428571428571, 0.9821428571428571, - 0.7321428571428571, 0.9821428571428571, - 0.7678571428571429, 0.9821428571428571, - 0.7678571428571429, 0.9821428571428571, - 0.8035714285714286, 0.9821428571428571, - 0.8035714285714286, 0.9821428571428571, - 0.8392857142857143, 0.9821428571428571, - 0.8392857142857143, 0.9821428571428571, - 0.875, 0.9821428571428571, - 0.875, 0.9821428571428571, - 0.9107142857142857, 0.9821428571428571, - 0.9107142857142857, 0.9821428571428571, - 0.9464285714285714, 0.9821428571428571, - 0.9464285714285714, 0.9821428571428571, - 0.9821428571428571, 0.9821428571428571, - 0.9821428571428571, 0.9821428571428571, - 0.03571428571428571, 0.03571428571428571, - 0.03571428571428571, 0.03571428571428571, - 0.10714285714285714, 0.03571428571428571, - 0.10714285714285714, 0.03571428571428571, - 0.17857142857142858, 0.03571428571428571, - 0.17857142857142858, 0.03571428571428571, - 0.25, 0.03571428571428571, - 0.25, 0.03571428571428571, - 0.32142857142857145, 0.03571428571428571, - 0.32142857142857145, 0.03571428571428571, - 0.39285714285714285, 0.03571428571428571, - 0.39285714285714285, 0.03571428571428571, - 0.4642857142857143, 0.03571428571428571, - 0.4642857142857143, 0.03571428571428571, - 0.5357142857142857, 0.03571428571428571, - 0.5357142857142857, 0.03571428571428571, - 0.6071428571428571, 0.03571428571428571, - 0.6071428571428571, 0.03571428571428571, - 0.6785714285714286, 0.03571428571428571, - 0.6785714285714286, 0.03571428571428571, - 0.75, 0.03571428571428571, - 0.75, 0.03571428571428571, - 0.8214285714285714, 0.03571428571428571, - 0.8214285714285714, 0.03571428571428571, - 0.8928571428571429, 0.03571428571428571, - 0.8928571428571429, 0.03571428571428571, - 0.9642857142857143, 0.03571428571428571, - 0.9642857142857143, 
0.03571428571428571, - 0.03571428571428571, 0.10714285714285714, - 0.03571428571428571, 0.10714285714285714, - 0.10714285714285714, 0.10714285714285714, - 0.10714285714285714, 0.10714285714285714, - 0.17857142857142858, 0.10714285714285714, - 0.17857142857142858, 0.10714285714285714, - 0.25, 0.10714285714285714, - 0.25, 0.10714285714285714, - 0.32142857142857145, 0.10714285714285714, - 0.32142857142857145, 0.10714285714285714, - 0.39285714285714285, 0.10714285714285714, - 0.39285714285714285, 0.10714285714285714, - 0.4642857142857143, 0.10714285714285714, - 0.4642857142857143, 0.10714285714285714, - 0.5357142857142857, 0.10714285714285714, - 0.5357142857142857, 0.10714285714285714, - 0.6071428571428571, 0.10714285714285714, - 0.6071428571428571, 0.10714285714285714, - 0.6785714285714286, 0.10714285714285714, - 0.6785714285714286, 0.10714285714285714, - 0.75, 0.10714285714285714, - 0.75, 0.10714285714285714, - 0.8214285714285714, 0.10714285714285714, - 0.8214285714285714, 0.10714285714285714, - 0.8928571428571429, 0.10714285714285714, - 0.8928571428571429, 0.10714285714285714, - 0.9642857142857143, 0.10714285714285714, - 0.9642857142857143, 0.10714285714285714, - 0.03571428571428571, 0.17857142857142858, - 0.03571428571428571, 0.17857142857142858, - 0.10714285714285714, 0.17857142857142858, - 0.10714285714285714, 0.17857142857142858, - 0.17857142857142858, 0.17857142857142858, - 0.17857142857142858, 0.17857142857142858, - 0.25, 0.17857142857142858, - 0.25, 0.17857142857142858, - 0.32142857142857145, 0.17857142857142858, - 0.32142857142857145, 0.17857142857142858, - 0.39285714285714285, 0.17857142857142858, - 0.39285714285714285, 0.17857142857142858, - 0.4642857142857143, 0.17857142857142858, - 0.4642857142857143, 0.17857142857142858, - 0.5357142857142857, 0.17857142857142858, - 0.5357142857142857, 0.17857142857142858, - 0.6071428571428571, 0.17857142857142858, - 0.6071428571428571, 0.17857142857142858, - 0.6785714285714286, 0.17857142857142858, - 0.6785714285714286, 
0.17857142857142858, - 0.75, 0.17857142857142858, - 0.75, 0.17857142857142858, - 0.8214285714285714, 0.17857142857142858, - 0.8214285714285714, 0.17857142857142858, - 0.8928571428571429, 0.17857142857142858, - 0.8928571428571429, 0.17857142857142858, - 0.9642857142857143, 0.17857142857142858, - 0.9642857142857143, 0.17857142857142858, - 0.03571428571428571, 0.25, - 0.03571428571428571, 0.25, - 0.10714285714285714, 0.25, - 0.10714285714285714, 0.25, - 0.17857142857142858, 0.25, - 0.17857142857142858, 0.25, - 0.25, 0.25, - 0.25, 0.25, - 0.32142857142857145, 0.25, - 0.32142857142857145, 0.25, - 0.39285714285714285, 0.25, - 0.39285714285714285, 0.25, - 0.4642857142857143, 0.25, - 0.4642857142857143, 0.25, - 0.5357142857142857, 0.25, - 0.5357142857142857, 0.25, - 0.6071428571428571, 0.25, - 0.6071428571428571, 0.25, - 0.6785714285714286, 0.25, - 0.6785714285714286, 0.25, - 0.75, 0.25, - 0.75, 0.25, - 0.8214285714285714, 0.25, - 0.8214285714285714, 0.25, - 0.8928571428571429, 0.25, - 0.8928571428571429, 0.25, - 0.9642857142857143, 0.25, - 0.9642857142857143, 0.25, - 0.03571428571428571, 0.32142857142857145, - 0.03571428571428571, 0.32142857142857145, - 0.10714285714285714, 0.32142857142857145, - 0.10714285714285714, 0.32142857142857145, - 0.17857142857142858, 0.32142857142857145, - 0.17857142857142858, 0.32142857142857145, - 0.25, 0.32142857142857145, - 0.25, 0.32142857142857145, - 0.32142857142857145, 0.32142857142857145, - 0.32142857142857145, 0.32142857142857145, - 0.39285714285714285, 0.32142857142857145, - 0.39285714285714285, 0.32142857142857145, - 0.4642857142857143, 0.32142857142857145, - 0.4642857142857143, 0.32142857142857145, - 0.5357142857142857, 0.32142857142857145, - 0.5357142857142857, 0.32142857142857145, - 0.6071428571428571, 0.32142857142857145, - 0.6071428571428571, 0.32142857142857145, - 0.6785714285714286, 0.32142857142857145, - 0.6785714285714286, 0.32142857142857145, - 0.75, 0.32142857142857145, - 0.75, 0.32142857142857145, - 0.8214285714285714, 
0.32142857142857145, - 0.8214285714285714, 0.32142857142857145, - 0.8928571428571429, 0.32142857142857145, - 0.8928571428571429, 0.32142857142857145, - 0.9642857142857143, 0.32142857142857145, - 0.9642857142857143, 0.32142857142857145, - 0.03571428571428571, 0.39285714285714285, - 0.03571428571428571, 0.39285714285714285, - 0.10714285714285714, 0.39285714285714285, - 0.10714285714285714, 0.39285714285714285, - 0.17857142857142858, 0.39285714285714285, - 0.17857142857142858, 0.39285714285714285, - 0.25, 0.39285714285714285, - 0.25, 0.39285714285714285, - 0.32142857142857145, 0.39285714285714285, - 0.32142857142857145, 0.39285714285714285, - 0.39285714285714285, 0.39285714285714285, - 0.39285714285714285, 0.39285714285714285, - 0.4642857142857143, 0.39285714285714285, - 0.4642857142857143, 0.39285714285714285, - 0.5357142857142857, 0.39285714285714285, - 0.5357142857142857, 0.39285714285714285, - 0.6071428571428571, 0.39285714285714285, - 0.6071428571428571, 0.39285714285714285, - 0.6785714285714286, 0.39285714285714285, - 0.6785714285714286, 0.39285714285714285, - 0.75, 0.39285714285714285, - 0.75, 0.39285714285714285, - 0.8214285714285714, 0.39285714285714285, - 0.8214285714285714, 0.39285714285714285, - 0.8928571428571429, 0.39285714285714285, - 0.8928571428571429, 0.39285714285714285, - 0.9642857142857143, 0.39285714285714285, - 0.9642857142857143, 0.39285714285714285, - 0.03571428571428571, 0.4642857142857143, - 0.03571428571428571, 0.4642857142857143, - 0.10714285714285714, 0.4642857142857143, - 0.10714285714285714, 0.4642857142857143, - 0.17857142857142858, 0.4642857142857143, - 0.17857142857142858, 0.4642857142857143, - 0.25, 0.4642857142857143, - 0.25, 0.4642857142857143, - 0.32142857142857145, 0.4642857142857143, - 0.32142857142857145, 0.4642857142857143, - 0.39285714285714285, 0.4642857142857143, - 0.39285714285714285, 0.4642857142857143, - 0.4642857142857143, 0.4642857142857143, - 0.4642857142857143, 0.4642857142857143, - 0.5357142857142857, 
0.4642857142857143, - 0.5357142857142857, 0.4642857142857143, - 0.6071428571428571, 0.4642857142857143, - 0.6071428571428571, 0.4642857142857143, - 0.6785714285714286, 0.4642857142857143, - 0.6785714285714286, 0.4642857142857143, - 0.75, 0.4642857142857143, - 0.75, 0.4642857142857143, - 0.8214285714285714, 0.4642857142857143, - 0.8214285714285714, 0.4642857142857143, - 0.8928571428571429, 0.4642857142857143, - 0.8928571428571429, 0.4642857142857143, - 0.9642857142857143, 0.4642857142857143, - 0.9642857142857143, 0.4642857142857143, - 0.03571428571428571, 0.5357142857142857, - 0.03571428571428571, 0.5357142857142857, - 0.10714285714285714, 0.5357142857142857, - 0.10714285714285714, 0.5357142857142857, - 0.17857142857142858, 0.5357142857142857, - 0.17857142857142858, 0.5357142857142857, - 0.25, 0.5357142857142857, - 0.25, 0.5357142857142857, - 0.32142857142857145, 0.5357142857142857, - 0.32142857142857145, 0.5357142857142857, - 0.39285714285714285, 0.5357142857142857, - 0.39285714285714285, 0.5357142857142857, - 0.4642857142857143, 0.5357142857142857, - 0.4642857142857143, 0.5357142857142857, - 0.5357142857142857, 0.5357142857142857, - 0.5357142857142857, 0.5357142857142857, - 0.6071428571428571, 0.5357142857142857, - 0.6071428571428571, 0.5357142857142857, - 0.6785714285714286, 0.5357142857142857, - 0.6785714285714286, 0.5357142857142857, - 0.75, 0.5357142857142857, - 0.75, 0.5357142857142857, - 0.8214285714285714, 0.5357142857142857, - 0.8214285714285714, 0.5357142857142857, - 0.8928571428571429, 0.5357142857142857, - 0.8928571428571429, 0.5357142857142857, - 0.9642857142857143, 0.5357142857142857, - 0.9642857142857143, 0.5357142857142857, - 0.03571428571428571, 0.6071428571428571, - 0.03571428571428571, 0.6071428571428571, - 0.10714285714285714, 0.6071428571428571, - 0.10714285714285714, 0.6071428571428571, - 0.17857142857142858, 0.6071428571428571, - 0.17857142857142858, 0.6071428571428571, - 0.25, 0.6071428571428571, - 0.25, 0.6071428571428571, - 
0.32142857142857145, 0.6071428571428571, - 0.32142857142857145, 0.6071428571428571, - 0.39285714285714285, 0.6071428571428571, - 0.39285714285714285, 0.6071428571428571, - 0.4642857142857143, 0.6071428571428571, - 0.4642857142857143, 0.6071428571428571, - 0.5357142857142857, 0.6071428571428571, - 0.5357142857142857, 0.6071428571428571, - 0.6071428571428571, 0.6071428571428571, - 0.6071428571428571, 0.6071428571428571, - 0.6785714285714286, 0.6071428571428571, - 0.6785714285714286, 0.6071428571428571, - 0.75, 0.6071428571428571, - 0.75, 0.6071428571428571, - 0.8214285714285714, 0.6071428571428571, - 0.8214285714285714, 0.6071428571428571, - 0.8928571428571429, 0.6071428571428571, - 0.8928571428571429, 0.6071428571428571, - 0.9642857142857143, 0.6071428571428571, - 0.9642857142857143, 0.6071428571428571, - 0.03571428571428571, 0.6785714285714286, - 0.03571428571428571, 0.6785714285714286, - 0.10714285714285714, 0.6785714285714286, - 0.10714285714285714, 0.6785714285714286, - 0.17857142857142858, 0.6785714285714286, - 0.17857142857142858, 0.6785714285714286, - 0.25, 0.6785714285714286, - 0.25, 0.6785714285714286, - 0.32142857142857145, 0.6785714285714286, - 0.32142857142857145, 0.6785714285714286, - 0.39285714285714285, 0.6785714285714286, - 0.39285714285714285, 0.6785714285714286, - 0.4642857142857143, 0.6785714285714286, - 0.4642857142857143, 0.6785714285714286, - 0.5357142857142857, 0.6785714285714286, - 0.5357142857142857, 0.6785714285714286, - 0.6071428571428571, 0.6785714285714286, - 0.6071428571428571, 0.6785714285714286, - 0.6785714285714286, 0.6785714285714286, - 0.6785714285714286, 0.6785714285714286, - 0.75, 0.6785714285714286, - 0.75, 0.6785714285714286, - 0.8214285714285714, 0.6785714285714286, - 0.8214285714285714, 0.6785714285714286, - 0.8928571428571429, 0.6785714285714286, - 0.8928571428571429, 0.6785714285714286, - 0.9642857142857143, 0.6785714285714286, - 0.9642857142857143, 0.6785714285714286, - 0.03571428571428571, 0.75, - 0.03571428571428571, 
0.75, - 0.10714285714285714, 0.75, - 0.10714285714285714, 0.75, - 0.17857142857142858, 0.75, - 0.17857142857142858, 0.75, - 0.25, 0.75, - 0.25, 0.75, - 0.32142857142857145, 0.75, - 0.32142857142857145, 0.75, - 0.39285714285714285, 0.75, - 0.39285714285714285, 0.75, - 0.4642857142857143, 0.75, - 0.4642857142857143, 0.75, - 0.5357142857142857, 0.75, - 0.5357142857142857, 0.75, - 0.6071428571428571, 0.75, - 0.6071428571428571, 0.75, - 0.6785714285714286, 0.75, - 0.6785714285714286, 0.75, - 0.75, 0.75, - 0.75, 0.75, - 0.8214285714285714, 0.75, - 0.8214285714285714, 0.75, - 0.8928571428571429, 0.75, - 0.8928571428571429, 0.75, - 0.9642857142857143, 0.75, - 0.9642857142857143, 0.75, - 0.03571428571428571, 0.8214285714285714, - 0.03571428571428571, 0.8214285714285714, - 0.10714285714285714, 0.8214285714285714, - 0.10714285714285714, 0.8214285714285714, - 0.17857142857142858, 0.8214285714285714, - 0.17857142857142858, 0.8214285714285714, - 0.25, 0.8214285714285714, - 0.25, 0.8214285714285714, - 0.32142857142857145, 0.8214285714285714, - 0.32142857142857145, 0.8214285714285714, - 0.39285714285714285, 0.8214285714285714, - 0.39285714285714285, 0.8214285714285714, - 0.4642857142857143, 0.8214285714285714, - 0.4642857142857143, 0.8214285714285714, - 0.5357142857142857, 0.8214285714285714, - 0.5357142857142857, 0.8214285714285714, - 0.6071428571428571, 0.8214285714285714, - 0.6071428571428571, 0.8214285714285714, - 0.6785714285714286, 0.8214285714285714, - 0.6785714285714286, 0.8214285714285714, - 0.75, 0.8214285714285714, - 0.75, 0.8214285714285714, - 0.8214285714285714, 0.8214285714285714, - 0.8214285714285714, 0.8214285714285714, - 0.8928571428571429, 0.8214285714285714, - 0.8928571428571429, 0.8214285714285714, - 0.9642857142857143, 0.8214285714285714, - 0.9642857142857143, 0.8214285714285714, - 0.03571428571428571, 0.8928571428571429, - 0.03571428571428571, 0.8928571428571429, - 0.10714285714285714, 0.8928571428571429, - 0.10714285714285714, 0.8928571428571429, - 
0.17857142857142858, 0.8928571428571429, - 0.17857142857142858, 0.8928571428571429, - 0.25, 0.8928571428571429, - 0.25, 0.8928571428571429, - 0.32142857142857145, 0.8928571428571429, - 0.32142857142857145, 0.8928571428571429, - 0.39285714285714285, 0.8928571428571429, - 0.39285714285714285, 0.8928571428571429, - 0.4642857142857143, 0.8928571428571429, - 0.4642857142857143, 0.8928571428571429, - 0.5357142857142857, 0.8928571428571429, - 0.5357142857142857, 0.8928571428571429, - 0.6071428571428571, 0.8928571428571429, - 0.6071428571428571, 0.8928571428571429, - 0.6785714285714286, 0.8928571428571429, - 0.6785714285714286, 0.8928571428571429, - 0.75, 0.8928571428571429, - 0.75, 0.8928571428571429, - 0.8214285714285714, 0.8928571428571429, - 0.8214285714285714, 0.8928571428571429, - 0.8928571428571429, 0.8928571428571429, - 0.8928571428571429, 0.8928571428571429, - 0.9642857142857143, 0.8928571428571429, - 0.9642857142857143, 0.8928571428571429, - 0.03571428571428571, 0.9642857142857143, - 0.03571428571428571, 0.9642857142857143, - 0.10714285714285714, 0.9642857142857143, - 0.10714285714285714, 0.9642857142857143, - 0.17857142857142858, 0.9642857142857143, - 0.17857142857142858, 0.9642857142857143, - 0.25, 0.9642857142857143, - 0.25, 0.9642857142857143, - 0.32142857142857145, 0.9642857142857143, - 0.32142857142857145, 0.9642857142857143, - 0.39285714285714285, 0.9642857142857143, - 0.39285714285714285, 0.9642857142857143, - 0.4642857142857143, 0.9642857142857143, - 0.4642857142857143, 0.9642857142857143, - 0.5357142857142857, 0.9642857142857143, - 0.5357142857142857, 0.9642857142857143, - 0.6071428571428571, 0.9642857142857143, - 0.6071428571428571, 0.9642857142857143, - 0.6785714285714286, 0.9642857142857143, - 0.6785714285714286, 0.9642857142857143, - 0.75, 0.9642857142857143, - 0.75, 0.9642857142857143, - 0.8214285714285714, 0.9642857142857143, - 0.8214285714285714, 0.9642857142857143, - 0.8928571428571429, 0.9642857142857143, - 0.8928571428571429, 
0.9642857142857143, - 0.9642857142857143, 0.9642857142857143, - 0.9642857142857143, 0.9642857142857143, - 0.07142857142857142, 0.07142857142857142, - 0.07142857142857142, 0.07142857142857142, - 0.07142857142857142, 0.07142857142857142, - 0.07142857142857142, 0.07142857142857142, - 0.07142857142857142, 0.07142857142857142, - 0.07142857142857142, 0.07142857142857142, - 0.21428571428571427, 0.07142857142857142, - 0.21428571428571427, 0.07142857142857142, - 0.21428571428571427, 0.07142857142857142, - 0.21428571428571427, 0.07142857142857142, - 0.21428571428571427, 0.07142857142857142, - 0.21428571428571427, 0.07142857142857142, - 0.35714285714285715, 0.07142857142857142, - 0.35714285714285715, 0.07142857142857142, - 0.35714285714285715, 0.07142857142857142, - 0.35714285714285715, 0.07142857142857142, - 0.35714285714285715, 0.07142857142857142, - 0.35714285714285715, 0.07142857142857142, - 0.5, 0.07142857142857142, - 0.5, 0.07142857142857142, - 0.5, 0.07142857142857142, - 0.5, 0.07142857142857142, - 0.5, 0.07142857142857142, - 0.5, 0.07142857142857142, - 0.6428571428571429, 0.07142857142857142, - 0.6428571428571429, 0.07142857142857142, - 0.6428571428571429, 0.07142857142857142, - 0.6428571428571429, 0.07142857142857142, - 0.6428571428571429, 0.07142857142857142, - 0.6428571428571429, 0.07142857142857142, - 0.7857142857142857, 0.07142857142857142, - 0.7857142857142857, 0.07142857142857142, - 0.7857142857142857, 0.07142857142857142, - 0.7857142857142857, 0.07142857142857142, - 0.7857142857142857, 0.07142857142857142, - 0.7857142857142857, 0.07142857142857142, - 0.9285714285714286, 0.07142857142857142, - 0.9285714285714286, 0.07142857142857142, - 0.9285714285714286, 0.07142857142857142, - 0.9285714285714286, 0.07142857142857142, - 0.9285714285714286, 0.07142857142857142, - 0.9285714285714286, 0.07142857142857142, - 0.07142857142857142, 0.21428571428571427, - 0.07142857142857142, 0.21428571428571427, - 0.07142857142857142, 0.21428571428571427, - 0.07142857142857142, 
0.21428571428571427, - 0.07142857142857142, 0.21428571428571427, - 0.07142857142857142, 0.21428571428571427, - 0.21428571428571427, 0.21428571428571427, - 0.21428571428571427, 0.21428571428571427, - 0.21428571428571427, 0.21428571428571427, - 0.21428571428571427, 0.21428571428571427, - 0.21428571428571427, 0.21428571428571427, - 0.21428571428571427, 0.21428571428571427, - 0.35714285714285715, 0.21428571428571427, - 0.35714285714285715, 0.21428571428571427, - 0.35714285714285715, 0.21428571428571427, - 0.35714285714285715, 0.21428571428571427, - 0.35714285714285715, 0.21428571428571427, - 0.35714285714285715, 0.21428571428571427, - 0.5, 0.21428571428571427, - 0.5, 0.21428571428571427, - 0.5, 0.21428571428571427, - 0.5, 0.21428571428571427, - 0.5, 0.21428571428571427, - 0.5, 0.21428571428571427, - 0.6428571428571429, 0.21428571428571427, - 0.6428571428571429, 0.21428571428571427, - 0.6428571428571429, 0.21428571428571427, - 0.6428571428571429, 0.21428571428571427, - 0.6428571428571429, 0.21428571428571427, - 0.6428571428571429, 0.21428571428571427, - 0.7857142857142857, 0.21428571428571427, - 0.7857142857142857, 0.21428571428571427, - 0.7857142857142857, 0.21428571428571427, - 0.7857142857142857, 0.21428571428571427, - 0.7857142857142857, 0.21428571428571427, - 0.7857142857142857, 0.21428571428571427, - 0.9285714285714286, 0.21428571428571427, - 0.9285714285714286, 0.21428571428571427, - 0.9285714285714286, 0.21428571428571427, - 0.9285714285714286, 0.21428571428571427, - 0.9285714285714286, 0.21428571428571427, - 0.9285714285714286, 0.21428571428571427, - 0.07142857142857142, 0.35714285714285715, - 0.07142857142857142, 0.35714285714285715, - 0.07142857142857142, 0.35714285714285715, - 0.07142857142857142, 0.35714285714285715, - 0.07142857142857142, 0.35714285714285715, - 0.07142857142857142, 0.35714285714285715, - 0.21428571428571427, 0.35714285714285715, - 0.21428571428571427, 0.35714285714285715, - 0.21428571428571427, 0.35714285714285715, - 0.21428571428571427, 
0.35714285714285715, - 0.21428571428571427, 0.35714285714285715, - 0.21428571428571427, 0.35714285714285715, - 0.35714285714285715, 0.35714285714285715, - 0.35714285714285715, 0.35714285714285715, - 0.35714285714285715, 0.35714285714285715, - 0.35714285714285715, 0.35714285714285715, - 0.35714285714285715, 0.35714285714285715, - 0.35714285714285715, 0.35714285714285715, - 0.5, 0.35714285714285715, - 0.5, 0.35714285714285715, - 0.5, 0.35714285714285715, - 0.5, 0.35714285714285715, - 0.5, 0.35714285714285715, - 0.5, 0.35714285714285715, - 0.6428571428571429, 0.35714285714285715, - 0.6428571428571429, 0.35714285714285715, - 0.6428571428571429, 0.35714285714285715, - 0.6428571428571429, 0.35714285714285715, - 0.6428571428571429, 0.35714285714285715, - 0.6428571428571429, 0.35714285714285715, - 0.7857142857142857, 0.35714285714285715, - 0.7857142857142857, 0.35714285714285715, - 0.7857142857142857, 0.35714285714285715, - 0.7857142857142857, 0.35714285714285715, - 0.7857142857142857, 0.35714285714285715, - 0.7857142857142857, 0.35714285714285715, - 0.9285714285714286, 0.35714285714285715, - 0.9285714285714286, 0.35714285714285715, - 0.9285714285714286, 0.35714285714285715, - 0.9285714285714286, 0.35714285714285715, - 0.9285714285714286, 0.35714285714285715, - 0.9285714285714286, 0.35714285714285715, - 0.07142857142857142, 0.5, - 0.07142857142857142, 0.5, - 0.07142857142857142, 0.5, - 0.07142857142857142, 0.5, - 0.07142857142857142, 0.5, - 0.07142857142857142, 0.5, - 0.21428571428571427, 0.5, - 0.21428571428571427, 0.5, - 0.21428571428571427, 0.5, - 0.21428571428571427, 0.5, - 0.21428571428571427, 0.5, - 0.21428571428571427, 0.5, - 0.35714285714285715, 0.5, - 0.35714285714285715, 0.5, - 0.35714285714285715, 0.5, - 0.35714285714285715, 0.5, - 0.35714285714285715, 0.5, - 0.35714285714285715, 0.5, - 0.5, 0.5, - 0.5, 0.5, - 0.5, 0.5, - 0.5, 0.5, - 0.5, 0.5, - 0.5, 0.5, - 0.6428571428571429, 0.5, - 0.6428571428571429, 0.5, - 0.6428571428571429, 0.5, - 0.6428571428571429, 0.5, 
- 0.6428571428571429, 0.5, - 0.6428571428571429, 0.5, - 0.7857142857142857, 0.5, - 0.7857142857142857, 0.5, - 0.7857142857142857, 0.5, - 0.7857142857142857, 0.5, - 0.7857142857142857, 0.5, - 0.7857142857142857, 0.5, - 0.9285714285714286, 0.5, - 0.9285714285714286, 0.5, - 0.9285714285714286, 0.5, - 0.9285714285714286, 0.5, - 0.9285714285714286, 0.5, - 0.9285714285714286, 0.5, - 0.07142857142857142, 0.6428571428571429, - 0.07142857142857142, 0.6428571428571429, - 0.07142857142857142, 0.6428571428571429, - 0.07142857142857142, 0.6428571428571429, - 0.07142857142857142, 0.6428571428571429, - 0.07142857142857142, 0.6428571428571429, - 0.21428571428571427, 0.6428571428571429, - 0.21428571428571427, 0.6428571428571429, - 0.21428571428571427, 0.6428571428571429, - 0.21428571428571427, 0.6428571428571429, - 0.21428571428571427, 0.6428571428571429, - 0.21428571428571427, 0.6428571428571429, - 0.35714285714285715, 0.6428571428571429, - 0.35714285714285715, 0.6428571428571429, - 0.35714285714285715, 0.6428571428571429, - 0.35714285714285715, 0.6428571428571429, - 0.35714285714285715, 0.6428571428571429, - 0.35714285714285715, 0.6428571428571429, - 0.5, 0.6428571428571429, - 0.5, 0.6428571428571429, - 0.5, 0.6428571428571429, - 0.5, 0.6428571428571429, - 0.5, 0.6428571428571429, - 0.5, 0.6428571428571429, - 0.6428571428571429, 0.6428571428571429, - 0.6428571428571429, 0.6428571428571429, - 0.6428571428571429, 0.6428571428571429, - 0.6428571428571429, 0.6428571428571429, - 0.6428571428571429, 0.6428571428571429, - 0.6428571428571429, 0.6428571428571429, - 0.7857142857142857, 0.6428571428571429, - 0.7857142857142857, 0.6428571428571429, - 0.7857142857142857, 0.6428571428571429, - 0.7857142857142857, 0.6428571428571429, - 0.7857142857142857, 0.6428571428571429, - 0.7857142857142857, 0.6428571428571429, - 0.9285714285714286, 0.6428571428571429, - 0.9285714285714286, 0.6428571428571429, - 0.9285714285714286, 0.6428571428571429, - 0.9285714285714286, 0.6428571428571429, - 
0.9285714285714286, 0.6428571428571429, - 0.9285714285714286, 0.6428571428571429, - 0.07142857142857142, 0.7857142857142857, - 0.07142857142857142, 0.7857142857142857, - 0.07142857142857142, 0.7857142857142857, - 0.07142857142857142, 0.7857142857142857, - 0.07142857142857142, 0.7857142857142857, - 0.07142857142857142, 0.7857142857142857, - 0.21428571428571427, 0.7857142857142857, - 0.21428571428571427, 0.7857142857142857, - 0.21428571428571427, 0.7857142857142857, - 0.21428571428571427, 0.7857142857142857, - 0.21428571428571427, 0.7857142857142857, - 0.21428571428571427, 0.7857142857142857, - 0.35714285714285715, 0.7857142857142857, - 0.35714285714285715, 0.7857142857142857, - 0.35714285714285715, 0.7857142857142857, - 0.35714285714285715, 0.7857142857142857, - 0.35714285714285715, 0.7857142857142857, - 0.35714285714285715, 0.7857142857142857, - 0.5, 0.7857142857142857, - 0.5, 0.7857142857142857, - 0.5, 0.7857142857142857, - 0.5, 0.7857142857142857, - 0.5, 0.7857142857142857, - 0.5, 0.7857142857142857, - 0.6428571428571429, 0.7857142857142857, - 0.6428571428571429, 0.7857142857142857, - 0.6428571428571429, 0.7857142857142857, - 0.6428571428571429, 0.7857142857142857, - 0.6428571428571429, 0.7857142857142857, - 0.6428571428571429, 0.7857142857142857, - 0.7857142857142857, 0.7857142857142857, - 0.7857142857142857, 0.7857142857142857, - 0.7857142857142857, 0.7857142857142857, - 0.7857142857142857, 0.7857142857142857, - 0.7857142857142857, 0.7857142857142857, - 0.7857142857142857, 0.7857142857142857, - 0.9285714285714286, 0.7857142857142857, - 0.9285714285714286, 0.7857142857142857, - 0.9285714285714286, 0.7857142857142857, - 0.9285714285714286, 0.7857142857142857, - 0.9285714285714286, 0.7857142857142857, - 0.9285714285714286, 0.7857142857142857, - 0.07142857142857142, 0.9285714285714286, - 0.07142857142857142, 0.9285714285714286, - 0.07142857142857142, 0.9285714285714286, - 0.07142857142857142, 0.9285714285714286, - 0.07142857142857142, 0.9285714285714286, - 
0.07142857142857142, 0.9285714285714286, - 0.21428571428571427, 0.9285714285714286, - 0.21428571428571427, 0.9285714285714286, - 0.21428571428571427, 0.9285714285714286, - 0.21428571428571427, 0.9285714285714286, - 0.21428571428571427, 0.9285714285714286, - 0.21428571428571427, 0.9285714285714286, - 0.35714285714285715, 0.9285714285714286, - 0.35714285714285715, 0.9285714285714286, - 0.35714285714285715, 0.9285714285714286, - 0.35714285714285715, 0.9285714285714286, - 0.35714285714285715, 0.9285714285714286, - 0.35714285714285715, 0.9285714285714286, - 0.5, 0.9285714285714286, - 0.5, 0.9285714285714286, - 0.5, 0.9285714285714286, - 0.5, 0.9285714285714286, - 0.5, 0.9285714285714286, - 0.5, 0.9285714285714286, - 0.6428571428571429, 0.9285714285714286, - 0.6428571428571429, 0.9285714285714286, - 0.6428571428571429, 0.9285714285714286, - 0.6428571428571429, 0.9285714285714286, - 0.6428571428571429, 0.9285714285714286, - 0.6428571428571429, 0.9285714285714286, - 0.7857142857142857, 0.9285714285714286, - 0.7857142857142857, 0.9285714285714286, - 0.7857142857142857, 0.9285714285714286, - 0.7857142857142857, 0.9285714285714286, - 0.7857142857142857, 0.9285714285714286, - 0.7857142857142857, 0.9285714285714286, - 0.9285714285714286, 0.9285714285714286, - 0.9285714285714286, 0.9285714285714286, - 0.9285714285714286, 0.9285714285714286, - 0.9285714285714286, 0.9285714285714286, - 0.9285714285714286, 0.9285714285714286, - 0.9285714285714286, 0.9285714285714286); - return anchor; -} diff --git a/models/pose_estimation_mediapipe/demo.py b/models/pose_estimation_mediapipe/demo.py deleted file mode 100644 index 4b70a1e0..00000000 --- a/models/pose_estimation_mediapipe/demo.py +++ /dev/null @@ -1,253 +0,0 @@ -import sys -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please 
install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from mp_pose import MPPose - -sys.path.append('../person_detection_mediapipe') -from mp_persondet import MPPersonDet - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser(description='Pose Estimation from MediaPipe') -parser.add_argument('--input', '-i', type=str, - help='Path to the input image. Omit for using default camera.') -parser.add_argument('--model', '-m', type=str, default='./pose_estimation_mediapipe_2023mar.onnx', - help='Path to the model.') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--conf_threshold', type=float, default=0.8, - help='Filter out hands of confidence < conf_threshold.') -parser.add_argument('--save', '-s', action='store_true', - help='Specify to save results. This flag is invalid when using camera.') -parser.add_argument('--vis', '-v', action='store_true', - help='Specify to open a window for result visualization. 
This flag is invalid when using camera.') -args = parser.parse_args() - -def visualize(image, poses): - display_screen = image.copy() - display_3d = np.zeros((400, 400, 3), np.uint8) - cv.line(display_3d, (200, 0), (200, 400), (255, 255, 255), 2) - cv.line(display_3d, (0, 200), (400, 200), (255, 255, 255), 2) - cv.putText(display_3d, 'Main View', (0, 12), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255)) - cv.putText(display_3d, 'Top View', (200, 12), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255)) - cv.putText(display_3d, 'Left View', (0, 212), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255)) - cv.putText(display_3d, 'Right View', (200, 212), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255)) - is_draw = False # ensure only one person is drawn - - def _draw_lines(image, landmarks, keep_landmarks, is_draw_point=True, thickness=2): - - def _draw_by_presence(idx1, idx2): - if keep_landmarks[idx1] and keep_landmarks[idx2]: - cv.line(image, landmarks[idx1], landmarks[idx2], (255, 255, 255), thickness) - - _draw_by_presence(0, 1) - _draw_by_presence(1, 2) - _draw_by_presence(2, 3) - _draw_by_presence(3, 7) - _draw_by_presence(0, 4) - _draw_by_presence(4, 5) - _draw_by_presence(5, 6) - _draw_by_presence(6, 8) - - _draw_by_presence(9, 10) - - _draw_by_presence(12, 14) - _draw_by_presence(14, 16) - _draw_by_presence(16, 22) - _draw_by_presence(16, 18) - _draw_by_presence(16, 20) - _draw_by_presence(18, 20) - - _draw_by_presence(11, 13) - _draw_by_presence(13, 15) - _draw_by_presence(15, 21) - _draw_by_presence(15, 19) - _draw_by_presence(15, 17) - _draw_by_presence(17, 19) - - _draw_by_presence(11, 12) - _draw_by_presence(11, 23) - _draw_by_presence(23, 24) - _draw_by_presence(24, 12) - - _draw_by_presence(24, 26) - _draw_by_presence(26, 28) - _draw_by_presence(28, 30) - _draw_by_presence(28, 32) - _draw_by_presence(30, 32) - - _draw_by_presence(23, 25) - _draw_by_presence(25, 27) - _draw_by_presence(27, 31) - _draw_by_presence(27, 29) - _draw_by_presence(29, 31) - - if is_draw_point: - for i, p in 
enumerate(landmarks): - if keep_landmarks[i]: - cv.circle(image, p, thickness, (0, 0, 255), -1) - - for idx, pose in enumerate(poses): - bbox, landmarks_screen, landmarks_word, mask, heatmap, conf = pose - - edges = cv.Canny(mask, 100, 200) - kernel = np.ones((2, 2), np.uint8) # expansion edge to 2 pixels - edges = cv.dilate(edges, kernel, iterations=1) - edges_bgr = cv.cvtColor(edges, cv.COLOR_GRAY2BGR) - edges_bgr[edges == 255] = [0, 255, 0] - display_screen = cv.add(edges_bgr, display_screen) - - - # draw box - bbox = bbox.astype(np.int32) - cv.rectangle(display_screen, bbox[0], bbox[1], (0, 255, 0), 2) - cv.putText(display_screen, '{:.4f}'.format(conf), (bbox[0][0], bbox[0][1] + 12), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255)) - # Draw line between each key points - landmarks_screen = landmarks_screen[:-6, :] - landmarks_word = landmarks_word[:-6, :] - - keep_landmarks = landmarks_screen[:, 4] > 0.8 # only show visible keypoints which presence bigger than 0.8 - - landmarks_screen = landmarks_screen - landmarks_word = landmarks_word - - landmarks_xy = landmarks_screen[:, 0: 2].astype(np.int32) - _draw_lines(display_screen, landmarks_xy, keep_landmarks, is_draw_point=False) - - # z value is relative to HIP, but we use constant to instead - for i, p in enumerate(landmarks_screen[:, 0: 3].astype(np.int32)): - if keep_landmarks[i]: - cv.circle(display_screen, np.array([p[0], p[1]]), 2, (0, 0, 255), -1) - - if is_draw is False: - is_draw = True - # Main view - landmarks_xy = landmarks_word[:, [0, 1]] - landmarks_xy = (landmarks_xy * 100 + 100).astype(np.int32) - _draw_lines(display_3d, landmarks_xy, keep_landmarks, thickness=2) - - # Top view - landmarks_xz = landmarks_word[:, [0, 2]] - landmarks_xz[:, 1] = -landmarks_xz[:, 1] - landmarks_xz = (landmarks_xz * 100 + np.array([300, 100])).astype(np.int32) - _draw_lines(display_3d, landmarks_xz,keep_landmarks, thickness=2) - - # Left view - landmarks_yz = landmarks_word[:, [2, 1]] - landmarks_yz[:, 0] = -landmarks_yz[:, 
0] - landmarks_yz = (landmarks_yz * 100 + np.array([100, 300])).astype(np.int32) - _draw_lines(display_3d, landmarks_yz, keep_landmarks, thickness=2) - - # Right view - landmarks_zy = landmarks_word[:, [2, 1]] - landmarks_zy = (landmarks_zy * 100 + np.array([300, 300])).astype(np.int32) - _draw_lines(display_3d, landmarks_zy, keep_landmarks, thickness=2) - - return display_screen, display_3d - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - - # person detector - person_detector = MPPersonDet(modelPath='../person_detection_mediapipe/person_detection_mediapipe_2023mar.onnx', - nmsThreshold=0.3, - scoreThreshold=0.5, - topK=5000, # usually only one person has good performance - backendId=backend_id, - targetId=target_id) - # pose estimator - pose_estimator = MPPose(modelPath=args.model, - confThreshold=args.conf_threshold, - backendId=backend_id, - targetId=target_id) - - # If input is an image - if args.input is not None: - image = cv.imread(args.input) - - # person detector inference - persons = person_detector.infer(image) - poses = [] - - # Estimate the pose of each person - for person in persons: - # pose estimator inference - pose = pose_estimator.infer(image, person) - if pose is not None: - poses.append(pose) - # Draw results on the input image - image, view_3d = visualize(image, poses) - - if len(persons) == 0: - print('No person detected!') - else: - print('Person detected!') - - # Save results - if args.save: - cv.imwrite('result.jpg', image) - print('Results saved to result.jpg\n') - - # Visualize results in a new window - if args.vis: - cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) - cv.imshow(args.input, image) - cv.imshow('3D Pose Demo', view_3d) - cv.waitKey(0) - else: # Omit input to call default camera - deviceId = 0 - cap = cv.VideoCapture(deviceId) - - tm = cv.TickMeter() - while cv.waitKey(1) < 0: - hasFrame, frame = cap.read() - if not hasFrame: 
- print('No frames grabbed!') - break - - # person detector inference - persons = person_detector.infer(frame) - poses = [] - - tm.start() - # Estimate the pose of each person - for person in persons: - # pose detector inference - pose = pose_estimator.infer(frame, person) - if pose is not None: - poses.append(pose) - tm.stop() - # Draw results on the input image - frame, view_3d = visualize(frame, poses) - - if len(persons) == 0: - print('No person detected!') - else: - print('Person detected!') - cv.putText(frame, 'FPS: {:.2f}'.format(tm.getFPS()), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) - - cv.imshow('MediaPipe Pose Detection Demo', frame) - cv.imshow('3D Pose Demo', view_3d) - tm.reset() diff --git a/models/pose_estimation_mediapipe/example_outputs/mpposeest_demo.webp b/models/pose_estimation_mediapipe/example_outputs/mpposeest_demo.webp deleted file mode 100644 index 2e43b190..00000000 --- a/models/pose_estimation_mediapipe/example_outputs/mpposeest_demo.webp +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f95c6e80fa90dd22b06a88b95d8dac512e52192d8367ea6b5f576bd667df3d4c -size 1564162 diff --git a/models/pose_estimation_mediapipe/example_outputs/pose_landmarks.png b/models/pose_estimation_mediapipe/example_outputs/pose_landmarks.png deleted file mode 100644 index 50441c72..00000000 --- a/models/pose_estimation_mediapipe/example_outputs/pose_landmarks.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c93063a83adff4db00c02aec8cf04d7444ae9169956c8ec67ee2351adbcd8c0f -size 123013 diff --git a/models/pose_estimation_mediapipe/mp_pose.py b/models/pose_estimation_mediapipe/mp_pose.py deleted file mode 100644 index 86348b6e..00000000 --- a/models/pose_estimation_mediapipe/mp_pose.py +++ /dev/null @@ -1,179 +0,0 @@ -import numpy as np -import cv2 as cv - -class MPPose: - def __init__(self, modelPath, confThreshold=0.5, backendId=0, targetId=0): - self.model_path = modelPath - 
self.conf_threshold = confThreshold - self.backend_id = backendId - self.target_id = targetId - - self.input_size = np.array([256, 256]) # wh - # RoI will be larger so the performance will be better, but preprocess will be slower. Default to 1. - self.PERSON_BOX_PRE_ENLARGE_FACTOR = 1 - self.PERSON_BOX_ENLARGE_FACTOR = 1.25 - - self.model = cv.dnn.readNet(self.model_path) - self.model.setPreferableBackend(self.backend_id) - self.model.setPreferableTarget(self.target_id) - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self.model.setPreferableBackend(self.backend_id) - self.model.setPreferableTarget(self.target_id) - - def _preprocess(self, image, person): - ''' - Rotate input for inference. - Parameters: - image - input image of BGR channel order - face_bbox - human face bounding box found in image of format [[x1, y1], [x2, y2]] (top-left and bottom-right points) - person_landmarks - 4 landmarks (2 full body points, 2 upper body points) of shape [4, 2] - Returns: - rotated_person - rotated person image for inference - rotate_person_bbox - person box of interest range - angle - rotate angle for person - rotation_matrix - matrix for rotation and de-rotation - pad_bias - pad pixels of interest range - ''' - # crop and pad image to interest range - pad_bias = np.array([0, 0], dtype=np.int32) # left, top - person_keypoints = person[4: 12].reshape(-1, 2) - mid_hip_point = person_keypoints[0] - full_body_point = person_keypoints[1] - # get RoI - full_dist = np.linalg.norm(mid_hip_point - full_body_point) - full_bbox = np.array([mid_hip_point - full_dist, mid_hip_point + full_dist], np.int32) - # enlarge to make sure full body can be cover - center_bbox = np.sum(full_bbox, axis=0) / 2 - wh_bbox = full_bbox[1] - full_bbox[0] - new_half_size = wh_bbox * self.PERSON_BOX_PRE_ENLARGE_FACTOR / 2 - full_bbox = np.array([ - center_bbox - 
new_half_size, - center_bbox + new_half_size], np.int32) - - person_bbox = full_bbox.copy() - # refine person bbox - person_bbox[:, 0] = np.clip(person_bbox[:, 0], 0, image.shape[1]) - person_bbox[:, 1] = np.clip(person_bbox[:, 1], 0, image.shape[0]) - # crop to the size of interest - image = image[person_bbox[0][1]:person_bbox[1][1], person_bbox[0][0]:person_bbox[1][0], :] - # pad to square - left, top = person_bbox[0] - full_bbox[0] - right, bottom = full_bbox[1] - person_bbox[1] - image = cv.copyMakeBorder(image, top, bottom, left, right, cv.BORDER_CONSTANT, None, (0, 0, 0)) - pad_bias += person_bbox[0] - [left, top] - # compute rotation - mid_hip_point -= pad_bias - full_body_point -= pad_bias - radians = np.pi / 2 - np.arctan2(-(full_body_point[1] - mid_hip_point[1]), full_body_point[0] - mid_hip_point[0]) - radians = radians - 2 * np.pi * np.floor((radians + np.pi) / (2 * np.pi)) - angle = np.rad2deg(radians) - # get rotation matrix - rotation_matrix = cv.getRotationMatrix2D(mid_hip_point, angle, 1.0) - # get rotated image - rotated_image = cv.warpAffine(image, rotation_matrix, (image.shape[1], image.shape[0])) - # get landmark bounding box - blob = cv.resize(rotated_image, dsize=self.input_size, interpolation=cv.INTER_AREA).astype(np.float32) - rotated_person_bbox = np.array([[0, 0], [image.shape[1], image.shape[0]]], dtype=np.int32) - blob = cv.cvtColor(blob, cv.COLOR_BGR2RGB) - blob = blob / 255. 
# [0, 1] - return blob[np.newaxis, :, :, :], rotated_person_bbox, angle, rotation_matrix, pad_bias - - def infer(self, image, person): - h, w, _ = image.shape - # Preprocess - input_blob, rotated_person_bbox, angle, rotation_matrix, pad_bias = self._preprocess(image, person) - - # Forward - self.model.setInput(input_blob) - output_blob = self.model.forward(self.model.getUnconnectedOutLayersNames()) - - # Postprocess - results = self._postprocess(output_blob, rotated_person_bbox, angle, rotation_matrix, pad_bias, np.array([w, h])) - return results # [bbox_coords, landmarks_coords, conf] - - def _postprocess(self, blob, rotated_person_bbox, angle, rotation_matrix, pad_bias, img_size): - landmarks, conf, mask, heatmap, landmarks_word = blob - - conf = conf[0][0] - if conf < self.conf_threshold: - return None - - landmarks = landmarks[0].reshape(-1, 5) # shape: (1, 195) -> (39, 5) - landmarks_word = landmarks_word[0].reshape(-1, 3) # shape: (1, 117) -> (39, 3) - - # recover sigmoid score - landmarks[:, 3:] = 1 / (1 + np.exp(-landmarks[:, 3:])) - # TODO: refine landmarks with heatmap. 
reference: https://github.com/tensorflow/tfjs-models/blob/master/pose-detection/src/blazepose_tfjs/detector.ts#L577-L582 - heatmap = heatmap[0] - - # transform coords back to the input coords - wh_rotated_person_bbox = rotated_person_bbox[1] - rotated_person_bbox[0] - scale_factor = wh_rotated_person_bbox / self.input_size - landmarks[:, :2] = (landmarks[:, :2] - self.input_size / 2) * scale_factor - landmarks[:, 2] = landmarks[:, 2] * max(scale_factor) # depth scaling - coords_rotation_matrix = cv.getRotationMatrix2D((0, 0), angle, 1.0) - rotated_landmarks = np.dot(landmarks[:, :2], coords_rotation_matrix[:, :2]) - rotated_landmarks = np.c_[rotated_landmarks, landmarks[:, 2:]] - rotated_landmarks_world = np.dot(landmarks_word[:, :2], coords_rotation_matrix[:, :2]) - rotated_landmarks_world = np.c_[rotated_landmarks_world, landmarks_word[:, 2]] - # invert rotation - rotation_component = np.array([ - [rotation_matrix[0][0], rotation_matrix[1][0]], - [rotation_matrix[0][1], rotation_matrix[1][1]]]) - translation_component = np.array([ - rotation_matrix[0][2], rotation_matrix[1][2]]) - inverted_translation = np.array([ - -np.dot(rotation_component[0], translation_component), - -np.dot(rotation_component[1], translation_component)]) - inverse_rotation_matrix = np.c_[rotation_component, inverted_translation] - # get box center - center = np.append(np.sum(rotated_person_bbox, axis=0) / 2, 1) - original_center = np.array([ - np.dot(center, inverse_rotation_matrix[0]), - np.dot(center, inverse_rotation_matrix[1])]) - landmarks[:, :2] = rotated_landmarks[:, :2] + original_center + pad_bias - - # get bounding box from rotated_landmarks - bbox = np.array([ - np.amin(landmarks[:, :2], axis=0), - np.amax(landmarks[:, :2], axis=0)]) # [top-left, bottom-right] - center_bbox = np.sum(bbox, axis=0) / 2 - wh_bbox = bbox[1] - bbox[0] - new_half_size = wh_bbox * self.PERSON_BOX_ENLARGE_FACTOR / 2 - bbox = np.array([ - center_bbox - new_half_size, - center_bbox + new_half_size]) - - # 
invert rotation for mask - mask = mask[0].reshape(256, 256) # shape: (1, 256, 256, 1) -> (256, 256) - invert_rotation_matrix = cv.getRotationMatrix2D((mask.shape[1]/2, mask.shape[0]/2), -angle, 1.0) - invert_rotation_mask = cv.warpAffine(mask, invert_rotation_matrix, (mask.shape[1], mask.shape[0])) - # enlarge mask - invert_rotation_mask = cv.resize(invert_rotation_mask, wh_rotated_person_bbox) - # crop and pad mask - min_w, min_h = -np.minimum(pad_bias, 0) - left, top = np.maximum(pad_bias, 0) - pad_over = img_size - [invert_rotation_mask.shape[1], invert_rotation_mask.shape[0]] - pad_bias - max_w, max_h = np.minimum(pad_over, 0) + [invert_rotation_mask.shape[1], invert_rotation_mask.shape[0]] - right, bottom = np.maximum(pad_over, 0) - invert_rotation_mask = invert_rotation_mask[min_h:max_h, min_w:max_w] - invert_rotation_mask = cv.copyMakeBorder(invert_rotation_mask, top, bottom, left, right, cv.BORDER_CONSTANT, None, 0) - # binarize mask - invert_rotation_mask = np.where(invert_rotation_mask > 0, 255, 0).astype(np.uint8) - - # 2*2 person bbox: [[x1, y1], [x2, y2]] - # 39*5 screen landmarks: 33 keypoints and 6 auxiliary points with [x, y, z, visibility, presence], z value is relative to HIP - # Visibility is probability that a keypoint is located within the frame and not occluded by another bigger body part or another object - # Presence is probability that a keypoint is located within the frame - # 39*3 world landmarks: 33 keypoints and 6 auxiliary points with [x, y, z] 3D metric x, y, z coordinate - # img_height*img_width mask: gray mask, where 255 indicates the full body of a person and 0 means background - # 64*64*39 heatmap: currently only used for refining landmarks, requires sigmod processing before use - # conf: confidence of prediction - return [bbox, landmarks, rotated_landmarks_world, invert_rotation_mask, heatmap, conf] diff --git a/models/pose_estimation_mediapipe/pose_estimation_mediapipe_2023mar.onnx 
b/models/pose_estimation_mediapipe/pose_estimation_mediapipe_2023mar.onnx deleted file mode 100644 index 2544a0a4..00000000 --- a/models/pose_estimation_mediapipe/pose_estimation_mediapipe_2023mar.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9d89c599319a18fb7d2e28451a883476164543182bafca5f09eb2cf767ed2f3f -size 5557238 diff --git a/models/pose_estimation_mediapipe/pose_estimation_mediapipe_2023mar_int8bq.onnx b/models/pose_estimation_mediapipe/pose_estimation_mediapipe_2023mar_int8bq.onnx deleted file mode 100644 index 51df007a..00000000 --- a/models/pose_estimation_mediapipe/pose_estimation_mediapipe_2023mar_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1848ea80f657f4620e0de160594f598ed8107c9e7785d0be74f65cf15aa6deb1 -size 1694896 diff --git a/models/qrcode_wechatqrcode/CMakeLists.txt b/models/qrcode_wechatqrcode/CMakeLists.txt deleted file mode 100644 index 823d7e17..00000000 --- a/models/qrcode_wechatqrcode/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ -cmake_minimum_required(VERSION 3.24.0) -project(opencv_zoo_qrcode_wechatqrcode) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") - -# Find OpenCV -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) - -add_executable(demo demo.cpp) -target_link_libraries(demo ${OpenCV_LIBS}) diff --git a/models/qrcode_wechatqrcode/LICENSE b/models/qrcode_wechatqrcode/LICENSE deleted file mode 100644 index 7a4a3ea2..00000000 --- a/models/qrcode_wechatqrcode/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/models/qrcode_wechatqrcode/README.md b/models/qrcode_wechatqrcode/README.md deleted file mode 100644 index 786490b3..00000000 --- a/models/qrcode_wechatqrcode/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# WeChatQRCode - -WeChatQRCode for detecting and parsing QR Code, contributed by [WeChat Computer Vision Team (WeChatCV)](https://github.com/WeChatCV). Visit [opencv/opencv_contrib/modules/wechat_qrcode](https://github.com/opencv/opencv_contrib/tree/master/modules/wechat_qrcode) for more details. 
- -Notes: - -- Model source: [opencv/opencv_3rdparty:wechat_qrcode_20210119](https://github.com/opencv/opencv_3rdparty/tree/wechat_qrcode_20210119) -- The APIs `cv::wechat_qrcode::WeChatQRCode` (C++) & `cv.wechat_qrcode_WeChatQRCode` (Python) are both designed to run on default backend (OpenCV) and target (CPU) only. Therefore, benchmark results of this model are only available on CPU devices, until the APIs are updated with setting backends and targets. - -## Demo - -### Python - -Run the following command to try the demo: - -```shell -# detect on camera input -python demo.py -# detect on an image -python demo.py --input /path/to/image -v - -# get help regarding various parameters -python demo.py --help -``` - -### C++ - -Install latest OpenCV (with opencv_contrib) and CMake >= 3.24.0 to get started with: - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build - -# detect on camera input -./build/demo -# detect on an image -./build/demo -i=/path/to/image -v -# get help messages -./build/demo -h -``` - -### Example outputs - -![webcam demo](./example_outputs/wechat_qrcode_demo.gif) - -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). 
- -## Reference: - -- https://github.com/opencv/opencv_contrib/tree/master/modules/wechat_qrcode -- https://github.com/opencv/opencv_3rdparty/tree/wechat_qrcode_20210119 diff --git a/models/qrcode_wechatqrcode/demo.cpp b/models/qrcode_wechatqrcode/demo.cpp deleted file mode 100644 index 5f915a83..00000000 --- a/models/qrcode_wechatqrcode/demo.cpp +++ /dev/null @@ -1,192 +0,0 @@ -#include -#include -#include -#include -#include - -class WeChatQRCode { - public: - WeChatQRCode(const std::string& detect_prototxt, - const std::string& detect_model, - const std::string& sr_prototxt, const std::string& sr_model, - int backend_target_index) - : backend_target_index_(backend_target_index) { - - const std::vector> backend_target_pairs = { - {cv::dnn::DNN_BACKEND_OPENCV, cv::dnn::DNN_TARGET_CPU}, - {cv::dnn::DNN_BACKEND_CUDA, cv::dnn::DNN_TARGET_CUDA}, - {cv::dnn::DNN_BACKEND_CUDA, cv::dnn::DNN_TARGET_CUDA_FP16}, - {cv::dnn::DNN_BACKEND_TIMVX, cv::dnn::DNN_TARGET_NPU}, - {cv::dnn::DNN_BACKEND_CANN, cv::dnn::DNN_TARGET_NPU}}; - - if (backend_target_index_ < 0 || - backend_target_index_ >= backend_target_pairs.size()) { - throw std::invalid_argument("Invalid backend-target index"); - } - - // initialize detector - detector_ = cv::makePtr( - detect_prototxt, detect_model, sr_prototxt, sr_model); - } - - std::pair, std::vector> detect( - const cv::Mat& image) { - std::vector results; - std::vector points; - results = detector_->detectAndDecode(image, points); - return {results, points}; - } - - cv::Mat visualize(const cv::Mat& image, - const std::vector& results, - const std::vector& points, - cv::Scalar points_color = cv::Scalar(0, 255, 0), - cv::Scalar text_color = cv::Scalar(0, 255, 0), - double fps = -1) const { - cv::Mat output = image.clone(); - - if (fps >= 0) { - cv::putText(output, "FPS: " + std::to_string(fps), cv::Point(0, 15), - cv::FONT_HERSHEY_SIMPLEX, 0.5, text_color); - } - - double fontScale = 0.5; - int fontSize = 1; - - for (size_t i = 0; i < results.size(); 
++i) { - const auto& p = points[i]; - - for (int r = 0; r < p.rows; ++r) { - cv::Point point(p.at(r, 0), p.at(r, 1)); - cv::circle(output, point, 10, points_color, -1); - } - - int qrcode_center_x = (p.at(0, 0) + p.at(2, 0)) / 2; - int qrcode_center_y = (p.at(0, 1) + p.at(2, 1)) / 2; - - int baseline = 0; - cv::Size text_size = - cv::getTextSize(results[i], cv::FONT_HERSHEY_DUPLEX, fontScale, - fontSize, &baseline); - - cv::Point text_pos(qrcode_center_x - text_size.width / 2, - qrcode_center_y + text_size.height / 2); - - cv::putText(output, results[i], text_pos, cv::FONT_HERSHEY_DUPLEX, - fontScale, text_color, fontSize); - } - - return output; - } - - private: - int backend_target_index_; - cv::Ptr detector_; -}; - -int main(int argc, char** argv) { - - cv::CommandLineParser parser( - argc, argv, - "{help h | | Show this help message.}" - "{input i | | Set path to the input image. Omit for using default camera.}" - "{detect_prototxt_path | detect_2021nov.prototxt | Set path to detect.prototxt.}" - "{detect_model_path | detect_2021nov.caffemodel | Set path to detect.caffemodel.}" - "{sr_prototxt_path | sr_2021nov.prototxt | Set path to sr.prototxt.}" - "{sr_model_path | sr_2021nov.caffemodel | Set path to sr.caffemodel.}" - "{backend_target bt | 0 | Choose one of the backend-target pairs to run this demo.}" - "{save s | false | Specify to save file with results.}" - "{vis v | false | Specify to open a new window to show results.}"); - - if (parser.has("help")) { - parser.printMessage(); - return 0; - } - - // get paths - std::string detect_prototxt = parser.get("detect_prototxt_path"); - std::string detect_model = parser.get("detect_model_path"); - std::string sr_prototxt = parser.get("sr_prototxt_path"); - std::string sr_model = parser.get("sr_model_path"); - int backend_target_index = parser.get("backend_target"); - - // input check - std::string input_path = parser.get("input"); - bool save_result = parser.get("save"); - bool visualize_result = 
parser.get("vis"); - - try { - WeChatQRCode qrDetector(detect_prototxt, detect_model, sr_prototxt, - sr_model, backend_target_index); - - if (!input_path.empty()) { - // process image - cv::Mat image = cv::imread(input_path); - if (image.empty()) { - std::cerr << "Could not read the image" << std::endl; - return -1; - } - - std::pair, std::vector> detectionResult = qrDetector.detect(image); - auto& results = detectionResult.first; - auto& points = detectionResult.second; - - for (const auto& result : results) { - std::cout << result << std::endl; - } - - cv::Mat result_image = qrDetector.visualize(image, results, points); - - if (save_result) { - cv::imwrite("result.jpg", result_image); - std::cout << "Results saved to result.jpg" << std::endl; - } - - if (visualize_result) { - cv::imshow(input_path, result_image); - cv::waitKey(0); - } - } else { - // process camera - cv::VideoCapture cap(0); - if (!cap.isOpened()) { - std::cerr << "Error opening camera" << std::endl; - return -1; - } - - cv::Mat frame; - cv::TickMeter tm; - - while (true) { - cap >> frame; - if (frame.empty()) { - std::cout << "No frames grabbed" << std::endl; - break; - } - - std::pair, std::vector> detectionResult = qrDetector.detect(frame); - auto& results = detectionResult.first; - auto& points = detectionResult.second; - - tm.start(); - double fps = tm.getFPS(); - tm.stop(); - - cv::Mat result_frame = qrDetector.visualize( - frame, results, points, cv::Scalar(0, 255, 0), - cv::Scalar(0, 255, 0), fps); - cv::imshow("WeChatQRCode Demo", result_frame); - - tm.reset(); - - if (cv::waitKey(1) >= 0) break; - } - } - - } catch (const std::exception& ex) { - std::cerr << "Error: " << ex.what() << std::endl; - return -1; - } - - return 0; -} diff --git a/models/qrcode_wechatqrcode/demo.py b/models/qrcode_wechatqrcode/demo.py deleted file mode 100644 index 2cd08b56..00000000 --- a/models/qrcode_wechatqrcode/demo.py +++ /dev/null @@ -1,136 +0,0 @@ -# This file is part of OpenCV Zoo project. 
-# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. - -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from wechatqrcode import WeChatQRCode - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser( - description="WeChat QR code detector for detecting and parsing QR code (https://github.com/opencv/opencv_contrib/tree/master/modules/wechat_qrcode)") -parser.add_argument('--input', '-i', type=str, - help='Usage: Set path to the input image. 
Omit for using default camera.') -parser.add_argument('--detect_prototxt_path', type=str, default='detect_2021nov.prototxt', - help='Usage: Set path to detect.prototxt.') -parser.add_argument('--detect_model_path', type=str, default='detect_2021nov.caffemodel', - help='Usage: Set path to detect.caffemodel.') -parser.add_argument('--sr_prototxt_path', type=str, default='sr_2021nov.prototxt', - help='Usage: Set path to sr.prototxt.') -parser.add_argument('--sr_model_path', type=str, default='sr_2021nov.caffemodel', - help='Usage: Set path to sr.caffemodel.') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--save', '-s', action='store_true', - help='Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.') -parser.add_argument('--vis', '-v', action='store_true', - help='Usage: Specify to open a new window to show results. 
Invalid in case of camera input.') -args = parser.parse_args() - -def visualize(image, res, points, points_color=(0, 255, 0), text_color=(0, 255, 0), fps=None): - output = image.copy() - h, w, _ = output.shape - - if fps is not None: - cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color) - - fontScale = 0.5 - fontSize = 1 - for r, p in zip(res, points): - p = p.astype(np.int32) - for _p in p: - cv.circle(output, _p, 10, points_color, -1) - - qrcode_center_x = int((p[0][0] + p[2][0]) / 2) - qrcode_center_y = int((p[0][1] + p[2][1]) / 2) - - text_size, baseline = cv.getTextSize(r, cv.FONT_HERSHEY_DUPLEX, fontScale, fontSize) - text_x = qrcode_center_x - int(text_size[0] / 2) - text_y = qrcode_center_y - int(text_size[1] / 2) - cv.putText(output, '{}'.format(r), (text_x, text_y), cv.FONT_HERSHEY_DUPLEX, fontScale, text_color, fontSize) - - return output - - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - - # Instantiate WeChatQRCode - model = WeChatQRCode(args.detect_prototxt_path, - args.detect_model_path, - args.sr_prototxt_path, - args.sr_model_path, - backendId=backend_id, - targetId=target_id) - - # If input is an image: - if args.input is not None: - image = cv.imread(args.input) - res, points = model.infer(image) - - # Print results: - print(res) - print(points) - - # Draw results on the input image - image = visualize(image, res, points) - - # Save results if save is true - if args.save: - print('Results saved to result.jpg\n') - cv.imwrite('result.jpg', image) - - # Visualize results in a new window - if args.vis: - cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) - cv.imshow(args.input, image) - cv.waitKey(0) - else: # Omit input to call default camera - deviceId = 0 - cap = cv.VideoCapture(deviceId) - - tm = cv.TickMeter() - while cv.waitKey(1) < 0: - hasFrame, frame = cap.read() - if not hasFrame: - print('No frames 
grabbed!') - break - - # Inference - tm.start() - res, points = model.infer(frame) - tm.stop() - fps = tm.getFPS() - - # Draw results on the input image - frame = visualize(frame, res, points, fps=fps) - - # Visualize results in a new window - cv.imshow('WeChatQRCode Demo', frame) - - tm.reset() diff --git a/models/qrcode_wechatqrcode/detect_2021nov.caffemodel b/models/qrcode_wechatqrcode/detect_2021nov.caffemodel deleted file mode 100644 index 458c760b..00000000 --- a/models/qrcode_wechatqrcode/detect_2021nov.caffemodel +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cc49b8c9babaf45f3037610fe499df38c8819ebda29e90ca9f2e33270f6ef809 -size 965430 diff --git a/models/qrcode_wechatqrcode/detect_2021nov.prototxt b/models/qrcode_wechatqrcode/detect_2021nov.prototxt deleted file mode 100644 index bd2417c9..00000000 --- a/models/qrcode_wechatqrcode/detect_2021nov.prototxt +++ /dev/null @@ -1,2716 +0,0 @@ -layer { - name: "data" - type: "Input" - top: "data" - input_param { - shape { - dim: 1 - dim: 1 - dim: 384 - dim: 384 - } - } -} -layer { - name: "data/bn" - type: "BatchNorm" - bottom: "data" - top: "data" - param { - lr_mult: 0.0 - decay_mult: 0.0 - } - param { - lr_mult: 0.0 - decay_mult: 0.0 - } - param { - lr_mult: 0.0 - decay_mult: 0.0 - } -} -layer { - name: "data/bn/scale" - type: "Scale" - bottom: "data" - top: "data" - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - scale_param { - filler { - type: "constant" - value: 1.0 - } - bias_term: true - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "stage1" - type: "Convolution" - bottom: "data" - top: "stage1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 24 - bias_term: true - pad: 1 - kernel_size: 3 - group: 1 - stride: 2 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage1/bn" - type: 
"BatchNorm" - bottom: "stage1" - top: "stage1" - param { - lr_mult: 0.0 - decay_mult: 0.0 - } - param { - lr_mult: 0.0 - decay_mult: 0.0 - } - param { - lr_mult: 0.0 - decay_mult: 0.0 - } -} -layer { - name: "stage1/bn/scale" - type: "Scale" - bottom: "stage1" - top: "stage1" - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - scale_param { - filler { - type: "constant" - value: 1.0 - } - bias_term: true - bias_filler { - type: "constant" - value: 0.0 - } - } -} -layer { - name: "stage1/relu" - type: "ReLU" - bottom: "stage1" - top: "stage1" -} -layer { - name: "stage2" - type: "Pooling" - bottom: "stage1" - top: "stage2" - pooling_param { - pool: MAX - kernel_size: 3 - stride: 2 - pad: 0 - } -} -layer { - name: "stage3_1/conv1" - type: "Convolution" - bottom: "stage2" - top: "stage3_1/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 16 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage3_1/conv1/relu" - type: "ReLU" - bottom: "stage3_1/conv1" - top: "stage3_1/conv1" -} -layer { - name: "stage3_1/conv2" - type: "Convolution" - bottom: "stage3_1/conv1" - top: "stage3_1/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 16 - pad: 1 - kernel_size: 3 - group: 16 - stride: 2 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage3_1/conv3" - type: "Convolution" - bottom: "stage3_1/conv2" - top: "stage3_1/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 64 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage3_1/relu" - type: "ReLU" - bottom: "stage3_1/conv3" - top: "stage3_1/conv3" -} -layer { - name: "stage3_2/conv1" - type: "Convolution" - bottom: "stage3_1/conv3" - top: "stage3_2/conv1" - param { - lr_mult: 1.0 - decay_mult: 
1.0 - } - convolution_param { - num_output: 16 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage3_2/conv1/relu" - type: "ReLU" - bottom: "stage3_2/conv1" - top: "stage3_2/conv1" -} -layer { - name: "stage3_2/conv2" - type: "Convolution" - bottom: "stage3_2/conv1" - top: "stage3_2/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 16 - pad: 1 - kernel_size: 3 - group: 16 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage3_2/conv3" - type: "Convolution" - bottom: "stage3_2/conv2" - top: "stage3_2/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 64 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage3_2/sum" - type: "Eltwise" - bottom: "stage3_1/conv3" - bottom: "stage3_2/conv3" - top: "stage3_2/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage3_2/relu" - type: "ReLU" - bottom: "stage3_2/sum" - top: "stage3_2/sum" -} -layer { - name: "stage3_3/conv1" - type: "Convolution" - bottom: "stage3_2/sum" - top: "stage3_3/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 16 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage3_3/conv1/relu" - type: "ReLU" - bottom: "stage3_3/conv1" - top: "stage3_3/conv1" -} -layer { - name: "stage3_3/conv2" - type: "Convolution" - bottom: "stage3_3/conv1" - top: "stage3_3/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 16 - pad: 1 - kernel_size: 3 - group: 16 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage3_3/conv3" - type: "Convolution" - bottom: "stage3_3/conv2" - top: "stage3_3/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - 
convolution_param { - num_output: 64 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage3_3/sum" - type: "Eltwise" - bottom: "stage3_2/sum" - bottom: "stage3_3/conv3" - top: "stage3_3/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage3_3/relu" - type: "ReLU" - bottom: "stage3_3/sum" - top: "stage3_3/sum" -} -layer { - name: "stage3_4/conv1" - type: "Convolution" - bottom: "stage3_3/sum" - top: "stage3_4/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 16 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage3_4/conv1/relu" - type: "ReLU" - bottom: "stage3_4/conv1" - top: "stage3_4/conv1" -} -layer { - name: "stage3_4/conv2" - type: "Convolution" - bottom: "stage3_4/conv1" - top: "stage3_4/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 16 - pad: 1 - kernel_size: 3 - group: 16 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage3_4/conv3" - type: "Convolution" - bottom: "stage3_4/conv2" - top: "stage3_4/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 64 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage3_4/sum" - type: "Eltwise" - bottom: "stage3_3/sum" - bottom: "stage3_4/conv3" - top: "stage3_4/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage3_4/relu" - type: "ReLU" - bottom: "stage3_4/sum" - top: "stage3_4/sum" -} -layer { - name: "stage4_1/conv1" - type: "Convolution" - bottom: "stage3_4/sum" - top: "stage4_1/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: 
"stage4_1/conv1/relu" - type: "ReLU" - bottom: "stage4_1/conv1" - top: "stage4_1/conv1" -} -layer { - name: "stage4_1/conv2" - type: "Convolution" - bottom: "stage4_1/conv1" - top: "stage4_1/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 1 - kernel_size: 3 - group: 32 - stride: 2 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_1/conv3" - type: "Convolution" - bottom: "stage4_1/conv2" - top: "stage4_1/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_1/relu" - type: "ReLU" - bottom: "stage4_1/conv3" - top: "stage4_1/conv3" -} -layer { - name: "stage4_2/conv1" - type: "Convolution" - bottom: "stage4_1/conv3" - top: "stage4_2/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_2/conv1/relu" - type: "ReLU" - bottom: "stage4_2/conv1" - top: "stage4_2/conv1" -} -layer { - name: "stage4_2/conv2" - type: "Convolution" - bottom: "stage4_2/conv1" - top: "stage4_2/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 1 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_2/conv3" - type: "Convolution" - bottom: "stage4_2/conv2" - top: "stage4_2/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_2/sum" - type: "Eltwise" - bottom: "stage4_1/conv3" - bottom: "stage4_2/conv3" - top: "stage4_2/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: 
"stage4_2/relu" - type: "ReLU" - bottom: "stage4_2/sum" - top: "stage4_2/sum" -} -layer { - name: "stage4_3/conv1" - type: "Convolution" - bottom: "stage4_2/sum" - top: "stage4_3/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_3/conv1/relu" - type: "ReLU" - bottom: "stage4_3/conv1" - top: "stage4_3/conv1" -} -layer { - name: "stage4_3/conv2" - type: "Convolution" - bottom: "stage4_3/conv1" - top: "stage4_3/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 1 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_3/conv3" - type: "Convolution" - bottom: "stage4_3/conv2" - top: "stage4_3/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_3/sum" - type: "Eltwise" - bottom: "stage4_2/sum" - bottom: "stage4_3/conv3" - top: "stage4_3/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage4_3/relu" - type: "ReLU" - bottom: "stage4_3/sum" - top: "stage4_3/sum" -} -layer { - name: "stage4_4/conv1" - type: "Convolution" - bottom: "stage4_3/sum" - top: "stage4_4/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_4/conv1/relu" - type: "ReLU" - bottom: "stage4_4/conv1" - top: "stage4_4/conv1" -} -layer { - name: "stage4_4/conv2" - type: "Convolution" - bottom: "stage4_4/conv1" - top: "stage4_4/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 1 - kernel_size: 3 - group: 32 - stride: 
1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_4/conv3" - type: "Convolution" - bottom: "stage4_4/conv2" - top: "stage4_4/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_4/sum" - type: "Eltwise" - bottom: "stage4_3/sum" - bottom: "stage4_4/conv3" - top: "stage4_4/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage4_4/relu" - type: "ReLU" - bottom: "stage4_4/sum" - top: "stage4_4/sum" -} -layer { - name: "stage4_5/conv1" - type: "Convolution" - bottom: "stage4_4/sum" - top: "stage4_5/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_5/conv1/relu" - type: "ReLU" - bottom: "stage4_5/conv1" - top: "stage4_5/conv1" -} -layer { - name: "stage4_5/conv2" - type: "Convolution" - bottom: "stage4_5/conv1" - top: "stage4_5/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 1 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_5/conv3" - type: "Convolution" - bottom: "stage4_5/conv2" - top: "stage4_5/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_5/sum" - type: "Eltwise" - bottom: "stage4_4/sum" - bottom: "stage4_5/conv3" - top: "stage4_5/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage4_5/relu" - type: "ReLU" - bottom: "stage4_5/sum" - top: "stage4_5/sum" -} -layer { - name: "stage4_6/conv1" - type: "Convolution" - bottom: "stage4_5/sum" - top: 
"stage4_6/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_6/conv1/relu" - type: "ReLU" - bottom: "stage4_6/conv1" - top: "stage4_6/conv1" -} -layer { - name: "stage4_6/conv2" - type: "Convolution" - bottom: "stage4_6/conv1" - top: "stage4_6/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 1 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_6/conv3" - type: "Convolution" - bottom: "stage4_6/conv2" - top: "stage4_6/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_6/sum" - type: "Eltwise" - bottom: "stage4_5/sum" - bottom: "stage4_6/conv3" - top: "stage4_6/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage4_6/relu" - type: "ReLU" - bottom: "stage4_6/sum" - top: "stage4_6/sum" -} -layer { - name: "stage4_7/conv1" - type: "Convolution" - bottom: "stage4_6/sum" - top: "stage4_7/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_7/conv1/relu" - type: "ReLU" - bottom: "stage4_7/conv1" - top: "stage4_7/conv1" -} -layer { - name: "stage4_7/conv2" - type: "Convolution" - bottom: "stage4_7/conv1" - top: "stage4_7/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 1 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_7/conv3" - type: "Convolution" - bottom: "stage4_7/conv2" - top: "stage4_7/conv3" - 
param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_7/sum" - type: "Eltwise" - bottom: "stage4_6/sum" - bottom: "stage4_7/conv3" - top: "stage4_7/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage4_7/relu" - type: "ReLU" - bottom: "stage4_7/sum" - top: "stage4_7/sum" -} -layer { - name: "stage4_8/conv1" - type: "Convolution" - bottom: "stage4_7/sum" - top: "stage4_8/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_8/conv1/relu" - type: "ReLU" - bottom: "stage4_8/conv1" - top: "stage4_8/conv1" -} -layer { - name: "stage4_8/conv2" - type: "Convolution" - bottom: "stage4_8/conv1" - top: "stage4_8/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 1 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_8/conv3" - type: "Convolution" - bottom: "stage4_8/conv2" - top: "stage4_8/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage4_8/sum" - type: "Eltwise" - bottom: "stage4_7/sum" - bottom: "stage4_8/conv3" - top: "stage4_8/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage4_8/relu" - type: "ReLU" - bottom: "stage4_8/sum" - top: "stage4_8/sum" -} -layer { - name: "stage5_1/conv1" - type: "Convolution" - bottom: "stage4_8/sum" - top: "stage5_1/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: 
"msra" - } - dilation: 1 - } -} -layer { - name: "stage5_1/conv1/relu" - type: "ReLU" - bottom: "stage5_1/conv1" - top: "stage5_1/conv1" -} -layer { - name: "stage5_1/conv2" - type: "Convolution" - bottom: "stage5_1/conv1" - top: "stage5_1/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 2 - kernel_size: 3 - group: 32 - stride: 2 - weight_filler { - type: "msra" - } - dilation: 2 - } -} -layer { - name: "stage5_1/conv3" - type: "Convolution" - bottom: "stage5_1/conv2" - top: "stage5_1/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage5_1/relu" - type: "ReLU" - bottom: "stage5_1/conv3" - top: "stage5_1/conv3" -} -layer { - name: "stage5_2/conv1" - type: "Convolution" - bottom: "stage5_1/conv3" - top: "stage5_2/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage5_2/conv1/relu" - type: "ReLU" - bottom: "stage5_2/conv1" - top: "stage5_2/conv1" -} -layer { - name: "stage5_2/conv2" - type: "Convolution" - bottom: "stage5_2/conv1" - top: "stage5_2/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 2 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 2 - } -} -layer { - name: "stage5_2/conv3" - type: "Convolution" - bottom: "stage5_2/conv2" - top: "stage5_2/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage5_2/sum" - type: "Eltwise" - bottom: "stage5_1/conv3" - bottom: "stage5_2/conv3" - top: "stage5_2/sum" - eltwise_param { - 
operation: SUM - } -} -layer { - name: "stage5_2/relu" - type: "ReLU" - bottom: "stage5_2/sum" - top: "stage5_2/sum" -} -layer { - name: "stage5_3/conv1" - type: "Convolution" - bottom: "stage5_2/sum" - top: "stage5_3/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage5_3/conv1/relu" - type: "ReLU" - bottom: "stage5_3/conv1" - top: "stage5_3/conv1" -} -layer { - name: "stage5_3/conv2" - type: "Convolution" - bottom: "stage5_3/conv1" - top: "stage5_3/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 2 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 2 - } -} -layer { - name: "stage5_3/conv3" - type: "Convolution" - bottom: "stage5_3/conv2" - top: "stage5_3/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage5_3/sum" - type: "Eltwise" - bottom: "stage5_2/sum" - bottom: "stage5_3/conv3" - top: "stage5_3/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage5_3/relu" - type: "ReLU" - bottom: "stage5_3/sum" - top: "stage5_3/sum" -} -layer { - name: "stage5_4/conv1" - type: "Convolution" - bottom: "stage5_3/sum" - top: "stage5_4/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage5_4/conv1/relu" - type: "ReLU" - bottom: "stage5_4/conv1" - top: "stage5_4/conv1" -} -layer { - name: "stage5_4/conv2" - type: "Convolution" - bottom: "stage5_4/conv1" - top: "stage5_4/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 2 
- kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 2 - } -} -layer { - name: "stage5_4/conv3" - type: "Convolution" - bottom: "stage5_4/conv2" - top: "stage5_4/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage5_4/sum" - type: "Eltwise" - bottom: "stage5_3/sum" - bottom: "stage5_4/conv3" - top: "stage5_4/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage5_4/relu" - type: "ReLU" - bottom: "stage5_4/sum" - top: "stage5_4/sum" -} -layer { - name: "stage6_1/conv4" - type: "Convolution" - bottom: "stage5_4/sum" - top: "stage6_1/conv4" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage6_1/conv1" - type: "Convolution" - bottom: "stage5_4/sum" - top: "stage6_1/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage6_1/conv1/relu" - type: "ReLU" - bottom: "stage6_1/conv1" - top: "stage6_1/conv1" -} -layer { - name: "stage6_1/conv2" - type: "Convolution" - bottom: "stage6_1/conv1" - top: "stage6_1/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 2 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 2 - } -} -layer { - name: "stage6_1/conv3" - type: "Convolution" - bottom: "stage6_1/conv2" - top: "stage6_1/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: 
"stage6_1/sum" - type: "Eltwise" - bottom: "stage6_1/conv4" - bottom: "stage6_1/conv3" - top: "stage6_1/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage6_1/relu" - type: "ReLU" - bottom: "stage6_1/sum" - top: "stage6_1/sum" -} -layer { - name: "stage6_2/conv1" - type: "Convolution" - bottom: "stage6_1/sum" - top: "stage6_2/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage6_2/conv1/relu" - type: "ReLU" - bottom: "stage6_2/conv1" - top: "stage6_2/conv1" -} -layer { - name: "stage6_2/conv2" - type: "Convolution" - bottom: "stage6_2/conv1" - top: "stage6_2/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 2 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 2 - } -} -layer { - name: "stage6_2/conv3" - type: "Convolution" - bottom: "stage6_2/conv2" - top: "stage6_2/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage6_2/sum" - type: "Eltwise" - bottom: "stage6_1/sum" - bottom: "stage6_2/conv3" - top: "stage6_2/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage6_2/relu" - type: "ReLU" - bottom: "stage6_2/sum" - top: "stage6_2/sum" -} -layer { - name: "stage7_1/conv4" - type: "Convolution" - bottom: "stage6_2/sum" - top: "stage7_1/conv4" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage7_1/conv1" - type: "Convolution" - bottom: "stage6_2/sum" - top: "stage7_1/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - 
num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage7_1/conv1/relu" - type: "ReLU" - bottom: "stage7_1/conv1" - top: "stage7_1/conv1" -} -layer { - name: "stage7_1/conv2" - type: "Convolution" - bottom: "stage7_1/conv1" - top: "stage7_1/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 2 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 2 - } -} -layer { - name: "stage7_1/conv3" - type: "Convolution" - bottom: "stage7_1/conv2" - top: "stage7_1/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage7_1/sum" - type: "Eltwise" - bottom: "stage7_1/conv4" - bottom: "stage7_1/conv3" - top: "stage7_1/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage7_1/relu" - type: "ReLU" - bottom: "stage7_1/sum" - top: "stage7_1/sum" -} -layer { - name: "stage7_2/conv1" - type: "Convolution" - bottom: "stage7_1/sum" - top: "stage7_2/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage7_2/conv1/relu" - type: "ReLU" - bottom: "stage7_2/conv1" - top: "stage7_2/conv1" -} -layer { - name: "stage7_2/conv2" - type: "Convolution" - bottom: "stage7_2/conv1" - top: "stage7_2/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 2 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 2 - } -} -layer { - name: "stage7_2/conv3" - type: "Convolution" - bottom: "stage7_2/conv2" - top: "stage7_2/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 
- pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage7_2/sum" - type: "Eltwise" - bottom: "stage7_1/sum" - bottom: "stage7_2/conv3" - top: "stage7_2/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage7_2/relu" - type: "ReLU" - bottom: "stage7_2/sum" - top: "stage7_2/sum" -} -layer { - name: "stage8_1/conv4" - type: "Convolution" - bottom: "stage7_2/sum" - top: "stage8_1/conv4" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage8_1/conv1" - type: "Convolution" - bottom: "stage7_2/sum" - top: "stage8_1/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage8_1/conv1/relu" - type: "ReLU" - bottom: "stage8_1/conv1" - top: "stage8_1/conv1" -} -layer { - name: "stage8_1/conv2" - type: "Convolution" - bottom: "stage8_1/conv1" - top: "stage8_1/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 2 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 2 - } -} -layer { - name: "stage8_1/conv3" - type: "Convolution" - bottom: "stage8_1/conv2" - top: "stage8_1/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage8_1/sum" - type: "Eltwise" - bottom: "stage8_1/conv4" - bottom: "stage8_1/conv3" - top: "stage8_1/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage8_1/relu" - type: "ReLU" - bottom: "stage8_1/sum" - top: "stage8_1/sum" -} -layer { - name: "stage8_2/conv1" - type: "Convolution" 
- bottom: "stage8_1/sum" - top: "stage8_2/conv1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage8_2/conv1/relu" - type: "ReLU" - bottom: "stage8_2/conv1" - top: "stage8_2/conv1" -} -layer { - name: "stage8_2/conv2" - type: "Convolution" - bottom: "stage8_2/conv1" - top: "stage8_2/conv2" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 32 - pad: 2 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 2 - } -} -layer { - name: "stage8_2/conv3" - type: "Convolution" - bottom: "stage8_2/conv2" - top: "stage8_2/conv3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - convolution_param { - num_output: 128 - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "stage8_2/sum" - type: "Eltwise" - bottom: "stage8_1/sum" - bottom: "stage8_2/conv3" - top: "stage8_2/sum" - eltwise_param { - operation: SUM - } -} -layer { - name: "stage8_2/relu" - type: "ReLU" - bottom: "stage8_2/sum" - top: "stage8_2/sum" -} -layer { - name: "cls1/conv" - type: "Convolution" - bottom: "stage4_8/sum" - top: "cls1/conv" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 12 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "cls1/permute" - type: "Permute" - bottom: "cls1/conv" - top: "cls1/permute" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "cls1/flatten" - type: "Flatten" - bottom: "cls1/permute" - top: "cls1/flatten" - flatten_param { - axis: 1 - } -} -layer { - name: "loc1/conv" - type: "Convolution" - bottom: "stage4_8/sum" - top: "loc1/conv" - param { - lr_mult: 1.0 - decay_mult: 
1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 24 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "loc1/permute" - type: "Permute" - bottom: "loc1/conv" - top: "loc1/permute" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "loc1/flatten" - type: "Flatten" - bottom: "loc1/permute" - top: "loc1/flatten" - flatten_param { - axis: 1 - } -} -layer { - name: "stage4_8/sum/prior_box" - type: "PriorBox" - bottom: "stage4_8/sum" - bottom: "data" - top: "stage4_8/sum/prior_box" - prior_box_param { - min_size: 50.0 - max_size: 100.0 - aspect_ratio: 2.0 - aspect_ratio: 0.5 - aspect_ratio: 3.0 - aspect_ratio: 0.3333333432674408 - flip: false - clip: false - variance: 0.10000000149011612 - variance: 0.10000000149011612 - variance: 0.20000000298023224 - variance: 0.20000000298023224 - step: 16.0 - } -} -layer { - name: "cls2/conv" - type: "Convolution" - bottom: "stage5_4/sum" - top: "cls2/conv" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 12 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "cls2/permute" - type: "Permute" - bottom: "cls2/conv" - top: "cls2/permute" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "cls2/flatten" - type: "Flatten" - bottom: "cls2/permute" - top: "cls2/flatten" - flatten_param { - axis: 1 - } -} -layer { - name: "loc2/conv" - type: "Convolution" - bottom: "stage5_4/sum" - top: "loc2/conv" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 24 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: 
"loc2/permute" - type: "Permute" - bottom: "loc2/conv" - top: "loc2/permute" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "loc2/flatten" - type: "Flatten" - bottom: "loc2/permute" - top: "loc2/flatten" - flatten_param { - axis: 1 - } -} -layer { - name: "stage5_4/sum/prior_box" - type: "PriorBox" - bottom: "stage5_4/sum" - bottom: "data" - top: "stage5_4/sum/prior_box" - prior_box_param { - min_size: 100.0 - max_size: 150.0 - aspect_ratio: 2.0 - aspect_ratio: 0.5 - aspect_ratio: 3.0 - aspect_ratio: 0.3333333432674408 - flip: false - clip: false - variance: 0.10000000149011612 - variance: 0.10000000149011612 - variance: 0.20000000298023224 - variance: 0.20000000298023224 - step: 32.0 - } -} -layer { - name: "cls3/conv" - type: "Convolution" - bottom: "stage6_2/sum" - top: "cls3/conv" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 12 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "cls3/permute" - type: "Permute" - bottom: "cls3/conv" - top: "cls3/permute" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "cls3/flatten" - type: "Flatten" - bottom: "cls3/permute" - top: "cls3/flatten" - flatten_param { - axis: 1 - } -} -layer { - name: "loc3/conv" - type: "Convolution" - bottom: "stage6_2/sum" - top: "loc3/conv" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 24 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "loc3/permute" - type: "Permute" - bottom: "loc3/conv" - top: "loc3/permute" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "loc3/flatten" - type: "Flatten" - bottom: "loc3/permute" - top: 
"loc3/flatten" - flatten_param { - axis: 1 - } -} -layer { - name: "stage6_2/sum/prior_box" - type: "PriorBox" - bottom: "stage6_2/sum" - bottom: "data" - top: "stage6_2/sum/prior_box" - prior_box_param { - min_size: 150.0 - max_size: 200.0 - aspect_ratio: 2.0 - aspect_ratio: 0.5 - aspect_ratio: 3.0 - aspect_ratio: 0.3333333432674408 - flip: false - clip: false - variance: 0.10000000149011612 - variance: 0.10000000149011612 - variance: 0.20000000298023224 - variance: 0.20000000298023224 - step: 32.0 - } -} -layer { - name: "cls4/conv" - type: "Convolution" - bottom: "stage7_2/sum" - top: "cls4/conv" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 12 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "cls4/permute" - type: "Permute" - bottom: "cls4/conv" - top: "cls4/permute" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "cls4/flatten" - type: "Flatten" - bottom: "cls4/permute" - top: "cls4/flatten" - flatten_param { - axis: 1 - } -} -layer { - name: "loc4/conv" - type: "Convolution" - bottom: "stage7_2/sum" - top: "loc4/conv" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 24 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "loc4/permute" - type: "Permute" - bottom: "loc4/conv" - top: "loc4/permute" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "loc4/flatten" - type: "Flatten" - bottom: "loc4/permute" - top: "loc4/flatten" - flatten_param { - axis: 1 - } -} -layer { - name: "stage7_2/sum/prior_box" - type: "PriorBox" - bottom: "stage7_2/sum" - bottom: "data" - top: "stage7_2/sum/prior_box" - prior_box_param { - min_size: 200.0 - max_size: 300.0 
- aspect_ratio: 2.0 - aspect_ratio: 0.5 - aspect_ratio: 3.0 - aspect_ratio: 0.3333333432674408 - flip: false - clip: false - variance: 0.10000000149011612 - variance: 0.10000000149011612 - variance: 0.20000000298023224 - variance: 0.20000000298023224 - step: 32.0 - } -} -layer { - name: "cls5/conv" - type: "Convolution" - bottom: "stage8_2/sum" - top: "cls5/conv" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 12 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "cls5/permute" - type: "Permute" - bottom: "cls5/conv" - top: "cls5/permute" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "cls5/flatten" - type: "Flatten" - bottom: "cls5/permute" - top: "cls5/flatten" - flatten_param { - axis: 1 - } -} -layer { - name: "loc5/conv" - type: "Convolution" - bottom: "stage8_2/sum" - top: "loc5/conv" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 24 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - dilation: 1 - } -} -layer { - name: "loc5/permute" - type: "Permute" - bottom: "loc5/conv" - top: "loc5/permute" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "loc5/flatten" - type: "Flatten" - bottom: "loc5/permute" - top: "loc5/flatten" - flatten_param { - axis: 1 - } -} -layer { - name: "stage8_2/sum/prior_box" - type: "PriorBox" - bottom: "stage8_2/sum" - bottom: "data" - top: "stage8_2/sum/prior_box" - prior_box_param { - min_size: 300.0 - max_size: 400.0 - aspect_ratio: 2.0 - aspect_ratio: 0.5 - aspect_ratio: 3.0 - aspect_ratio: 0.3333333432674408 - flip: false - clip: false - variance: 0.10000000149011612 - variance: 0.10000000149011612 - variance: 0.20000000298023224 - variance: 
0.20000000298023224 - step: 32.0 - } -} -layer { - name: "mbox_conf" - type: "Concat" - bottom: "cls1/flatten" - bottom: "cls2/flatten" - bottom: "cls3/flatten" - bottom: "cls4/flatten" - bottom: "cls5/flatten" - top: "mbox_conf" - concat_param { - axis: 1 - } -} -layer { - name: "mbox_loc" - type: "Concat" - bottom: "loc1/flatten" - bottom: "loc2/flatten" - bottom: "loc3/flatten" - bottom: "loc4/flatten" - bottom: "loc5/flatten" - top: "mbox_loc" - concat_param { - axis: 1 - } -} -layer { - name: "mbox_priorbox" - type: "Concat" - bottom: "stage4_8/sum/prior_box" - bottom: "stage5_4/sum/prior_box" - bottom: "stage6_2/sum/prior_box" - bottom: "stage7_2/sum/prior_box" - bottom: "stage8_2/sum/prior_box" - top: "mbox_priorbox" - concat_param { - axis: 2 - } -} -layer { - name: "mbox_conf_reshape" - type: "Reshape" - bottom: "mbox_conf" - top: "mbox_conf_reshape" - reshape_param { - shape { - dim: 0 - dim: -1 - dim: 2 - } - } -} -layer { - name: "mbox_conf_softmax" - type: "Softmax" - bottom: "mbox_conf_reshape" - top: "mbox_conf_softmax" - softmax_param { - axis: 2 - } -} -layer { - name: "mbox_conf_flatten" - type: "Flatten" - bottom: "mbox_conf_softmax" - top: "mbox_conf_flatten" - flatten_param { - axis: 1 - } -} -layer { - name: "detection_output" - type: "DetectionOutput" - bottom: "mbox_loc" - bottom: "mbox_conf_flatten" - bottom: "mbox_priorbox" - top: "detection_output" - detection_output_param { - num_classes: 2 - share_location: true - background_label_id: 0 - nms_param { - nms_threshold: 0.44999998807907104 - top_k: 100 - } - code_type: CENTER_SIZE - keep_top_k: 100 - confidence_threshold: 0.20000000298023224 - } -} diff --git a/models/qrcode_wechatqrcode/example_outputs/wechat_qrcode_demo.gif b/models/qrcode_wechatqrcode/example_outputs/wechat_qrcode_demo.gif deleted file mode 100644 index 1980e19b..00000000 --- a/models/qrcode_wechatqrcode/example_outputs/wechat_qrcode_demo.gif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:ef1aa6f9b78320b3e3d6032648261dcfe250db332a58455787c88a87711a8b58 -size 1785414 diff --git a/models/qrcode_wechatqrcode/sr_2021nov.caffemodel b/models/qrcode_wechatqrcode/sr_2021nov.caffemodel deleted file mode 100644 index ec2e9565..00000000 --- a/models/qrcode_wechatqrcode/sr_2021nov.caffemodel +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e5d36889d8e6ef2f1c1f515f807cec03979320ac81792cd8fb927c31fd658ae3 -size 23929 diff --git a/models/qrcode_wechatqrcode/sr_2021nov.prototxt b/models/qrcode_wechatqrcode/sr_2021nov.prototxt deleted file mode 100644 index e85caa17..00000000 --- a/models/qrcode_wechatqrcode/sr_2021nov.prototxt +++ /dev/null @@ -1,403 +0,0 @@ -layer { - name: "data" - type: "Input" - top: "data" - input_param { - shape { - dim: 1 - dim: 1 - dim: 224 - dim: 224 - } - } -} -layer { - name: "conv0" - type: "Convolution" - bottom: "data" - top: "conv0" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 32 - bias_term: true - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - } -} -layer { - name: "conv0/lrelu" - type: "ReLU" - bottom: "conv0" - top: "conv0" - relu_param { - negative_slope: 0.05000000074505806 - } -} -layer { - name: "db1/reduce" - type: "Convolution" - bottom: "conv0" - top: "db1/reduce" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 8 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - } -} -layer { - name: "db1/reduce/lrelu" - type: "ReLU" - bottom: "db1/reduce" - top: "db1/reduce" - relu_param { - negative_slope: 0.05000000074505806 - } -} -layer { - name: "db1/3x3" - type: "Convolution" - bottom: "db1/reduce" - top: "db1/3x3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - 
num_output: 8 - bias_term: true - pad: 1 - kernel_size: 3 - group: 8 - stride: 1 - weight_filler { - type: "msra" - } - } -} -layer { - name: "db1/3x3/lrelu" - type: "ReLU" - bottom: "db1/3x3" - top: "db1/3x3" - relu_param { - negative_slope: 0.05000000074505806 - } -} -layer { - name: "db1/1x1" - type: "Convolution" - bottom: "db1/3x3" - top: "db1/1x1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 32 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - } -} -layer { - name: "db1/1x1/lrelu" - type: "ReLU" - bottom: "db1/1x1" - top: "db1/1x1" - relu_param { - negative_slope: 0.05000000074505806 - } -} -layer { - name: "db1/concat" - type: "Concat" - bottom: "conv0" - bottom: "db1/1x1" - top: "db1/concat" - concat_param { - axis: 1 - } -} -layer { - name: "db2/reduce" - type: "Convolution" - bottom: "db1/concat" - top: "db2/reduce" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 8 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - } -} -layer { - name: "db2/reduce/lrelu" - type: "ReLU" - bottom: "db2/reduce" - top: "db2/reduce" - relu_param { - negative_slope: 0.05000000074505806 - } -} -layer { - name: "db2/3x3" - type: "Convolution" - bottom: "db2/reduce" - top: "db2/3x3" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 8 - bias_term: true - pad: 1 - kernel_size: 3 - group: 8 - stride: 1 - weight_filler { - type: "msra" - } - } -} -layer { - name: "db2/3x3/lrelu" - type: "ReLU" - bottom: "db2/3x3" - top: "db2/3x3" - relu_param { - negative_slope: 0.05000000074505806 - } -} -layer { - name: "db2/1x1" - type: "Convolution" - bottom: "db2/3x3" - top: "db2/1x1" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - 
lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 32 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - } -} -layer { - name: "db2/1x1/lrelu" - type: "ReLU" - bottom: "db2/1x1" - top: "db2/1x1" - relu_param { - negative_slope: 0.05000000074505806 - } -} -layer { - name: "db2/concat" - type: "Concat" - bottom: "db1/concat" - bottom: "db2/1x1" - top: "db2/concat" - concat_param { - axis: 1 - } -} -layer { - name: "upsample/reduce" - type: "Convolution" - bottom: "db2/concat" - top: "upsample/reduce" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 32 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - } -} -layer { - name: "upsample/reduce/lrelu" - type: "ReLU" - bottom: "upsample/reduce" - top: "upsample/reduce" - relu_param { - negative_slope: 0.05000000074505806 - } -} -layer { - name: "upsample/deconv" - type: "Deconvolution" - bottom: "upsample/reduce" - top: "upsample/deconv" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 32 - bias_term: true - pad: 1 - kernel_size: 3 - group: 32 - stride: 2 - weight_filler { - type: "msra" - } - } -} -layer { - name: "upsample/lrelu" - type: "ReLU" - bottom: "upsample/deconv" - top: "upsample/deconv" - relu_param { - negative_slope: 0.05000000074505806 - } -} -layer { - name: "upsample/rec" - type: "Convolution" - bottom: "upsample/deconv" - top: "upsample/rec" - param { - lr_mult: 1.0 - decay_mult: 1.0 - } - param { - lr_mult: 1.0 - decay_mult: 0.0 - } - convolution_param { - num_output: 1 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "msra" - } - } -} -layer { - name: "nearest" - type: "Deconvolution" - bottom: "data" - top: "nearest" - param { - lr_mult: 0.0 - decay_mult: 0.0 - } - 
convolution_param { - num_output: 1 - bias_term: false - pad: 0 - kernel_size: 2 - group: 1 - stride: 2 - weight_filler { - type: "constant" - value: 1.0 - } - } -} -layer { - name: "Crop1" - type: "Crop" - bottom: "nearest" - bottom: "upsample/rec" - top: "Crop1" -} -layer { - name: "fc" - type: "Eltwise" - bottom: "Crop1" - bottom: "upsample/rec" - top: "fc" - eltwise_param { - operation: SUM - } -} diff --git a/models/qrcode_wechatqrcode/wechatqrcode.py b/models/qrcode_wechatqrcode/wechatqrcode.py deleted file mode 100644 index 95c98be4..00000000 --- a/models/qrcode_wechatqrcode/wechatqrcode.py +++ /dev/null @@ -1,34 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. - -import numpy as np -import cv2 as cv # needs to have cv.wechat_qrcode_WeChatQRCode, which requires compile from source with opencv_contrib/modules/wechat_qrcode - -class WeChatQRCode: - def __init__(self, detect_prototxt_path, detect_model_path, sr_prototxt_path, sr_model_path, backendId=0, targetId=0): - self._model = cv.wechat_qrcode_WeChatQRCode( - detect_prototxt_path, - detect_model_path, - sr_prototxt_path, - sr_model_path - ) - if backendId != 0 and backendId != 3: - raise NotImplementedError("Backend {} is not supported by cv.wechat_qrcode_WeChatQRCode()".format(backendId)) - if targetId != 0: - raise NotImplementedError("Target {} is not supported by cv.wechat_qrcode_WeChatQRCode()") - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - if backendId != 0 and backendId != 3: - raise NotImplementedError("Backend {} is not supported by cv.wechat_qrcode_WeChatQRCode()".format(backendId)) - if targetId != 0: - raise NotImplementedError("Target {} is not 
supported by cv.wechat_qrcode_WeChatQRCode()") - - def infer(self, image): - return self._model.detectAndDecode(image) diff --git a/models/text_detection_ppocr/CMakeLists.txt b/models/text_detection_ppocr/CMakeLists.txt deleted file mode 100644 index 9f56acaa..00000000 --- a/models/text_detection_ppocr/CMakeLists.txt +++ /dev/null @@ -1,29 +0,0 @@ -cmake_minimum_required(VERSION 3.24) -set(project_name "opencv_zoo_text_detection_ppocr") - -PROJECT (${project_name}) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) -# Find OpenCV, you may need to set OpenCV_DIR variable -# to the absolute path to the directory containing OpenCVConfig.cmake file -# via the command line or GUI - -file(GLOB SourceFile - "demo.cpp") -# If the package has been found, several variables will -# be set, you can find the full list with descriptions -# in the OpenCVConfig.cmake file. -# Print some message showing some of them -message(STATUS "OpenCV library status:") -message(STATUS " config: ${OpenCV_DIR}") -message(STATUS " version: ${OpenCV_VERSION}") -message(STATUS " libraries: ${OpenCV_LIBS}") -message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}") - -# Declare the executable target built from your sources -add_executable(${project_name} ${SourceFile}) - -# Link your application with OpenCV libraries -target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS}) diff --git a/models/text_detection_ppocr/LICENSE b/models/text_detection_ppocr/LICENSE deleted file mode 100644 index 9696cafd..00000000 --- a/models/text_detection_ppocr/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ -Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/models/text_detection_ppocr/README.md b/models/text_detection_ppocr/README.md deleted file mode 100644 index 49a40034..00000000 --- a/models/text_detection_ppocr/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# PP-OCRv3 Text Detection - -PP-OCRv3: More Attempts for the Improvement of Ultra Lightweight OCR System. - -**Note**: - -- The int8 quantization model may produce unstable results due to some loss of accuracy. -- Original Paddle Models source of English: [here](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar). -- Original Paddle Models source of Chinese: [here](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar). 
-- `IC15` in the filename means the model is trained on [IC15 dataset](https://rrc.cvc.uab.es/?ch=4&com=introduction), which can detect English text instances only. -- `TD500` in the filename means the model is trained on [TD500 dataset](http://www.iapr-tc11.org/mediawiki/index.php/MSRA_Text_Detection_500_Database_(MSRA-TD500)), which can detect both English & Chinese instances. -- Visit https://docs.opencv.org/master/d4/d43/tutorial_dnn_text_spotting.html for more information. -- `text_detection_xx_ppocrv3_2023may_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. - -## Demo - -### Python - -Run the following command to try the demo: - -```shell -# detect on camera input -python demo.py -# detect on an image -python demo.py --input /path/to/image -v - -# get help regarding various parameters -python demo.py --help -``` - -### C++ - -Install latest OpenCV and CMake >= 3.24.0 to get started with: - -```shell -# A typical and default installation path of OpenCV is /usr/local -cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation . -cmake --build build -# detect on camera input -./build/opencv_zoo_text_detection_ppocr -m=/path/to/model -# detect on an image -./build/opencv_zoo_text_detection_ppocr -m=/path/to/model -i=/path/to/image -v -# get help messages -./build/opencv_zoo_text_detection_ppocr -h -``` - -### Example outputs - -![mask](./example_outputs/mask.jpg) - -![gsoc](./example_outputs/gsoc.jpg) - -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). 
- -## Reference - -- https://arxiv.org/abs/2206.03001 -- https://github.com/PaddlePaddle/PaddleOCR -- https://docs.opencv.org/master/d4/d43/tutorial_dnn_text_spotting.html diff --git a/models/text_detection_ppocr/demo.cpp b/models/text_detection_ppocr/demo.cpp deleted file mode 100644 index c1faa757..00000000 --- a/models/text_detection_ppocr/demo.cpp +++ /dev/null @@ -1,186 +0,0 @@ -#include - -#include -#include -#include - -using namespace std; -using namespace cv; -using namespace dnn; - -vector< pair > backendTargetPairs = { - std::make_pair(dnn::DNN_BACKEND_OPENCV, dnn::DNN_TARGET_CPU), - std::make_pair(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA), - std::make_pair(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA_FP16), - std::make_pair(dnn::DNN_BACKEND_TIMVX, dnn::DNN_TARGET_NPU), - std::make_pair(dnn::DNN_BACKEND_CANN, dnn::DNN_TARGET_NPU)}; - - -std::string keys = -"{ help h | | Print help message. }" -"{ model m | text_detection_cn_ppocrv3_2023may.onnx | Usage: Set model type, defaults to text_detection_ch_ppocrv3_2023may.onnx }" -"{ input i | | Usage: Path to input image or video file. Skip this argument to capture frames from a camera.}" -"{ width | 736 | Usage: Resize input image to certain width, default = 736. It should be multiple by 32.}" -"{ height | 736 | Usage: Resize input image to certain height, default = 736. It should be multiple by 32.}" -"{ binary_threshold | 0.3 | Usage: Threshold of the binary map, default = 0.3.}" -"{ polygon_threshold | 0.5 | Usage: Threshold of polygons, default = 0.5.}" -"{ max_candidates | 200 | Usage: Set maximum number of polygon candidates, default = 200.}" -"{ unclip_ratio | 2.0 | Usage: The unclip ratio of the detected text region, which determines the output size, default = 2.0.}" -"{ save s | true | Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.}" -"{ viz v | true | Usage: Specify to open a new window to show results. 
Invalid in case of camera input.}" -"{ backend bt | 0 | Choose one of computation backends: " -"0: (default) OpenCV implementation + CPU, " -"1: CUDA + GPU (CUDA), " -"2: CUDA + GPU (CUDA FP16), " -"3: TIM-VX + NPU, " -"4: CANN + NPU}"; - - -class PPOCRDet { -public: - - PPOCRDet(string modPath, Size inSize = Size(736, 736), float binThresh = 0.3, - float polyThresh = 0.5, int maxCand = 200, double unRatio = 2.0, - dnn::Backend bId = DNN_BACKEND_DEFAULT, dnn::Target tId = DNN_TARGET_CPU) : modelPath(modPath), inputSize(inSize), binaryThreshold(binThresh), - polygonThreshold(polyThresh), maxCandidates(maxCand), unclipRatio(unRatio), - backendId(bId), targetId(tId) - { - this->model = TextDetectionModel_DB(readNet(modelPath)); - this->model.setPreferableBackend(backendId); - this->model.setPreferableTarget(targetId); - - this->model.setBinaryThreshold(binaryThreshold); - this->model.setPolygonThreshold(polygonThreshold); - this->model.setUnclipRatio(unclipRatio); - this->model.setMaxCandidates(maxCandidates); - - this->model.setInputParams(1.0 / 255.0, inputSize, Scalar(122.67891434, 116.66876762, 104.00698793)); - } - pair< vector>, vector > infer(Mat image) { - CV_Assert(image.rows == this->inputSize.height && "height of input image != net input size "); - CV_Assert(image.cols == this->inputSize.width && "width of input image != net input size "); - vector> pt; - vector confidence; - this->model.detect(image, pt, confidence); - return make_pair< vector> &, vector< float > &>(pt, confidence); - } - -private: - string modelPath; - TextDetectionModel_DB model; - Size inputSize; - float binaryThreshold; - float polygonThreshold; - int maxCandidates; - double unclipRatio; - dnn::Backend backendId; - dnn::Target targetId; - -}; - -Mat visualize(Mat image, pair< vector>, vector >&results, double fps=-1, Scalar boxColor=Scalar(0, 255, 0), Scalar textColor=Scalar(0, 0, 255), bool isClosed=true, int thickness=2) -{ - Mat output; - image.copyTo(output); - if (fps > 0) - 
putText(output, format("FPS: %.2f", fps), Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, textColor); - polylines(output, results.first, isClosed, boxColor, thickness); - return output; -} - -int main(int argc, char** argv) -{ - CommandLineParser parser(argc, argv, keys); - - parser.about("Use this program to run Real-time Scene Text Detection with Differentiable Binarization in opencv Zoo using OpenCV."); - if (parser.has("help")) - { - parser.printMessage(); - return 0; - } - - int backendTargetid = parser.get("backend"); - String modelName = parser.get("model"); - - if (modelName.empty()) - { - CV_Error(Error::StsError, "Model file " + modelName + " not found"); - } - - Size inpSize(parser.get("width"), parser.get("height")); - float binThresh = parser.get("binary_threshold"); - float polyThresh = parser.get("polygon_threshold"); - int maxCand = parser.get("max_candidates"); - double unRatio = parser.get("unclip_ratio"); - bool save = parser.get("save"); - bool viz = parser.get("viz"); - - PPOCRDet model(modelName, inpSize, binThresh, polyThresh, maxCand, unRatio, backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second); - - //! 
[Open a video file or an image file or a camera stream] - VideoCapture cap; - if (parser.has("input")) - cap.open(parser.get("input")); - else - cap.open(0); - if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot open video or file"); - Mat originalImage; - static const std::string kWinName = modelName; - while (waitKey(1) < 0) - { - cap >> originalImage; - if (originalImage.empty()) - { - if (parser.has("input")) - { - cout << "Frame is empty" << endl; - break; - } - else - continue; - } - int originalW = originalImage.cols; - int originalH = originalImage.rows; - double scaleHeight = originalH / double(inpSize.height); - double scaleWidth = originalW / double(inpSize.width); - Mat image; - resize(originalImage, image, inpSize); - - // inference - TickMeter tm; - tm.start(); - pair< vector>, vector > results = model.infer(image); - tm.stop(); - auto x = results.first; - // Scale the results bounding box - for (auto &pts : results.first) - { - for (int i = 0; i < 4; i++) - { - pts[i].x = int(pts[i].x * scaleWidth); - pts[i].y = int(pts[i].y * scaleHeight); - } - } - originalImage = visualize(originalImage, results, tm.getFPS()); - tm.reset(); - if (parser.has("input")) - { - if (save) - { - cout << "Result image saved to result.jpg\n"; - imwrite("result.jpg", originalImage); - } - if (viz) - { - imshow(kWinName, originalImage); - waitKey(0); - } - } - else - imshow(kWinName, originalImage); - } - return 0; -} - - diff --git a/models/text_detection_ppocr/demo.py b/models/text_detection_ppocr/demo.py deleted file mode 100644 index 18a5efaa..00000000 --- a/models/text_detection_ppocr/demo.py +++ /dev/null @@ -1,155 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. 
- -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from ppocr_det import PPOCRDet - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser(description='PP-OCR Text Detection (https://arxiv.org/abs/2206.03001).') -parser.add_argument('--input', '-i', type=str, - help='Usage: Set path to the input image. Omit for using default camera.') -parser.add_argument('--model', '-m', type=str, default='./text_detection_en_ppocrv3_2023may.onnx', - help='Usage: Set model path, defaults to text_detection_en_ppocrv3_2023may.onnx.') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--width', type=int, default=736, - help='Usage: Resize input image to certain width, default = 736. It should be multiple by 32.') -parser.add_argument('--height', type=int, default=736, - help='Usage: Resize input image to certain height, default = 736. 
It should be multiple by 32.') -parser.add_argument('--binary_threshold', type=float, default=0.3, - help='Usage: Threshold of the binary map, default = 0.3.') -parser.add_argument('--polygon_threshold', type=float, default=0.5, - help='Usage: Threshold of polygons, default = 0.5.') -parser.add_argument('--max_candidates', type=int, default=200, - help='Usage: Set maximum number of polygon candidates, default = 200.') -parser.add_argument('--unclip_ratio', type=np.float64, default=2.0, - help=' Usage: The unclip ratio of the detected text region, which determines the output size, default = 2.0.') -parser.add_argument('--save', '-s', action='store_true', - help='Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.') -parser.add_argument('--vis', '-v', action='store_true', - help='Usage: Specify to open a new window to show results. Invalid in case of camera input.') -args = parser.parse_args() - -def visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), isClosed=True, thickness=2, fps=None): - output = image.copy() - - if fps is not None: - cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color) - - pts = np.array(results[0]) - output = cv.polylines(output, pts, isClosed, box_color, thickness) - - return output - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - - # Instantiate model - model = PPOCRDet(modelPath=args.model, - inputSize=[args.width, args.height], - binaryThreshold=args.binary_threshold, - polygonThreshold=args.polygon_threshold, - maxCandidates=args.max_candidates, - unclipRatio=args.unclip_ratio, - backendId=backend_id, - targetId=target_id) - - # If input is an image - if args.input is not None: - original_image = cv.imread(args.input) - original_w = original_image.shape[1] - original_h = original_image.shape[0] - scaleHeight = 
original_h / args.height - scaleWidth = original_w / args.width - image = cv.resize(original_image, [args.width, args.height]) - - # Inference - results = model.infer(image) - - # Scale the results bounding box - for i in range(len(results[0])): - for j in range(4): - box = results[0][i][j] - results[0][i][j][0] = box[0] * scaleWidth - results[0][i][j][1] = box[1] * scaleHeight - - # Print results - print('{} texts detected.'.format(len(results[0]))) - for idx, (bbox, score) in enumerate(zip(results[0], results[1])): - print('{}: {} {} {} {}, {:.2f}'.format(idx, bbox[0], bbox[1], bbox[2], bbox[3], score)) - - # Draw results on the input image - original_image = visualize(original_image, results) - - # Save results if save is true - if args.save: - print('Resutls saved to result.jpg\n') - cv.imwrite('result.jpg', original_image) - - # Visualize results in a new window - if args.vis: - cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) - cv.imshow(args.input, original_image) - cv.waitKey(0) - else: # Omit input to call default camera - deviceId = 0 - cap = cv.VideoCapture(deviceId) - - tm = cv.TickMeter() - while cv.waitKey(1) < 0: - hasFrame, original_image = cap.read() - if not hasFrame: - print('No frames grabbed!') - break - - original_w = original_image.shape[1] - original_h = original_image.shape[0] - scaleHeight = original_h / args.height - scaleWidth = original_w / args.width - frame = cv.resize(original_image, [args.width, args.height]) - # Inference - tm.start() - results = model.infer(frame) # results is a tuple - tm.stop() - - # Scale the results bounding box - for i in range(len(results[0])): - for j in range(4): - box = results[0][i][j] - results[0][i][j][0] = box[0] * scaleWidth - results[0][i][j][1] = box[1] * scaleHeight - - # Draw results on the input image - original_image = visualize(original_image, results, fps=tm.getFPS()) - - # Visualize results in a new Window - cv.imshow('{} Demo'.format(model.name), original_image) - - tm.reset() diff --git 
a/models/text_detection_ppocr/example_outputs/gsoc.jpg b/models/text_detection_ppocr/example_outputs/gsoc.jpg deleted file mode 100644 index 58d29c89..00000000 --- a/models/text_detection_ppocr/example_outputs/gsoc.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c6c852b20c3b187d3eefc7e0d4e89a89ec96637dfc544f8169bcfe4981ce8143 -size 314342 diff --git a/models/text_detection_ppocr/example_outputs/mask.jpg b/models/text_detection_ppocr/example_outputs/mask.jpg deleted file mode 100644 index 5f36556c..00000000 --- a/models/text_detection_ppocr/example_outputs/mask.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1d5b84065442652e94a78fbcf11f210668862f205dad52e7fbf1642a5371898d -size 121326 diff --git a/models/text_detection_ppocr/ppocr_det.py b/models/text_detection_ppocr/ppocr_det.py deleted file mode 100644 index fac01a2e..00000000 --- a/models/text_detection_ppocr/ppocr_det.py +++ /dev/null @@ -1,59 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. 
- -import numpy as np -import cv2 as cv - -class PPOCRDet: - def __init__(self, modelPath, inputSize=[736, 736], binaryThreshold=0.3, polygonThreshold=0.5, maxCandidates=200, unclipRatio=2.0, backendId=0, targetId=0): - self._modelPath = modelPath - self._model = cv.dnn_TextDetectionModel_DB( - cv.dnn.readNet(self._modelPath) - ) - - self._inputSize = tuple(inputSize) # (w, h) - self._inputHeight = inputSize[0] - self._inputWidth = inputSize[1] - self._binaryThreshold = binaryThreshold - self._polygonThreshold = polygonThreshold - self._maxCandidates = maxCandidates - self._unclipRatio = unclipRatio - self._backendId = backendId - self._targetId = targetId - - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - self._model.setBinaryThreshold(self._binaryThreshold) - self._model.setPolygonThreshold(self._polygonThreshold) - self._model.setUnclipRatio(self._unclipRatio) - self._model.setMaxCandidates(self._maxCandidates) - - self._model.setInputSize(self._inputSize) - self._model.setInputMean((123.675, 116.28, 103.53)) - self._model.setInputScale(1.0/255.0/np.array([0.229, 0.224, 0.225])) - - @property - def name(self): - return self.__class__.__name__ - - def setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - def setInputSize(self, input_size): - self._inputSize = tuple(input_size) - self._model.setInputSize(self._inputSize) - self._model.setInputMean((123.675, 116.28, 103.53)) - self._model.setInputScale(1.0/255.0/np.array([0.229, 0.224, 0.225])) - - def infer(self, image): - assert image.shape[0] == self._inputSize[1], '{} (height of input image) != {} (preset height)'.format(image.shape[0], self._inputSize[1]) - assert image.shape[1] == self._inputSize[0], '{} (width of input image) != {} (preset width)'.format(image.shape[1], self._inputSize[0]) - - 
return self._model.detect(image) diff --git a/models/text_detection_ppocr/text_detection_cn_ppocrv3_2023may.onnx b/models/text_detection_ppocr/text_detection_cn_ppocrv3_2023may.onnx deleted file mode 100644 index e55d8596..00000000 --- a/models/text_detection_ppocr/text_detection_cn_ppocrv3_2023may.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:03f550c6b406fda8bf54bd8327815f6c7e2edd98cea02348c93d879254366587 -size 2423490 diff --git a/models/text_detection_ppocr/text_detection_cn_ppocrv3_2023may_int8.onnx b/models/text_detection_ppocr/text_detection_cn_ppocrv3_2023may_int8.onnx deleted file mode 100644 index bf51a45d..00000000 --- a/models/text_detection_ppocr/text_detection_cn_ppocrv3_2023may_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d094e1bd27ed294acfb7bb608ac87e27d12860b67eebcb45c387288ea9ec4b36 -size 705007 diff --git a/models/text_detection_ppocr/text_detection_cn_ppocrv3_2023may_int8bq.onnx b/models/text_detection_ppocr/text_detection_cn_ppocrv3_2023may_int8bq.onnx deleted file mode 100644 index a3f95270..00000000 --- a/models/text_detection_ppocr/text_detection_cn_ppocrv3_2023may_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7f4638708dde26fc77b3cd84aed01019d281268276933ca0e13b0ade5220875f -size 855375 diff --git a/models/text_detection_ppocr/text_detection_en_ppocrv3_2023may.onnx b/models/text_detection_ppocr/text_detection_en_ppocrv3_2023may.onnx deleted file mode 100644 index e55d8596..00000000 --- a/models/text_detection_ppocr/text_detection_en_ppocrv3_2023may.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:03f550c6b406fda8bf54bd8327815f6c7e2edd98cea02348c93d879254366587 -size 2423490 diff --git a/models/text_detection_ppocr/text_detection_en_ppocrv3_2023may_int8.onnx b/models/text_detection_ppocr/text_detection_en_ppocrv3_2023may_int8.onnx deleted file mode 100644 
index 6ced759d..00000000 --- a/models/text_detection_ppocr/text_detection_en_ppocrv3_2023may_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5700c6d43bfc022b4bf2905cd0bac1a3d7dc41f4f954e9c171314ae9b4f0e41a -size 705007 diff --git a/models/text_detection_ppocr/text_detection_en_ppocrv3_2023may_int8bq.onnx b/models/text_detection_ppocr/text_detection_en_ppocrv3_2023may_int8bq.onnx deleted file mode 100644 index a3f95270..00000000 --- a/models/text_detection_ppocr/text_detection_en_ppocrv3_2023may_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7f4638708dde26fc77b3cd84aed01019d281268276933ca0e13b0ade5220875f -size 855375 diff --git a/models/text_recognition_crnn/CMakeLists.txt b/models/text_recognition_crnn/CMakeLists.txt deleted file mode 100644 index 15a73813..00000000 --- a/models/text_recognition_crnn/CMakeLists.txt +++ /dev/null @@ -1,29 +0,0 @@ -cmake_minimum_required(VERSION 3.24) -set(project_name "opencv_zoo_text_recognition_crnn") - -PROJECT (${project_name}) - -set(OPENCV_VERSION "4.10.0") -set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation") -find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH}) -# Find OpenCV, you may need to set OpenCV_DIR variable -# to the absolute path to the directory containing OpenCVConfig.cmake file -# via the command line or GUI - -file(GLOB SourceFile - "demo.cpp") -# If the package has been found, several variables will -# be set, you can find the full list with descriptions -# in the OpenCVConfig.cmake file. 
-# Print some message showing some of them -message(STATUS "OpenCV library status:") -message(STATUS " config: ${OpenCV_DIR}") -message(STATUS " version: ${OpenCV_VERSION}") -message(STATUS " libraries: ${OpenCV_LIBS}") -message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}") - -# Declare the executable target built from your sources -add_executable(${project_name} ${SourceFile}) - -# Link your application with OpenCV libraries -target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS}) diff --git a/models/text_recognition_crnn/LICENSE b/models/text_recognition_crnn/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/models/text_recognition_crnn/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/models/text_recognition_crnn/README.md b/models/text_recognition_crnn/README.md deleted file mode 100644 index 5f0a3f5a..00000000 --- a/models/text_recognition_crnn/README.md +++ /dev/null @@ -1,103 +0,0 @@ -# CRNN - -[An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition](https://arxiv.org/abs/1507.05717) - -Results of accuracy evaluation with [tools/eval](../../tools/eval) at different text recognition datasets. 
- -| Model name | ICDAR03(%) | IIIT5k(%) | CUTE80(%) | -| ------------ | ---------- | --------- | --------- | -| CRNN_EN | 81.66 | 74.33 | 52.78 | -| CRNN_EN_FP16 | 82.01 | 74.93 | 52.34 | -| CRNN_EN_INT8 | 81.75 | 75.33 | 52.43 | -| CRNN_CH | 71.28 | 80.90 | 67.36 | -| CRNN_CH_FP16 | 78.63 | 80.93 | 67.01 | -| CRNN_CH_INT8 | 78.11 | 81.20 | 67.01 | - -\*: 'FP16' or 'INT8' stands for 'model quantized into FP16' or 'model quantized into int8' - -**Note**: - -- Model source: - - `text_recognition_CRNN_EN_2021sep.onnx`: https://docs.opencv.org/4.5.2/d9/d1e/tutorial_dnn_OCR.html (CRNN_VGG_BiLSTM_CTC.onnx) - - `text_recognition_CRNN_CH_2021sep.onnx`: https://docs.opencv.org/4.x/d4/d43/tutorial_dnn_text_spotting.html (crnn_cs.onnx) - - `text_recognition_CRNN_CN_2021nov.onnx`: https://docs.opencv.org/4.5.2/d4/d43/tutorial_dnn_text_spotting.html (crnn_cs_CN.onnx) -- `text_recognition_CRNN_EN_2021sep.onnx` can detect digits (0\~9) and letters (return lowercase letters a\~z) (see `CHARSET_EN_36` for details in `crnn.py`). -- `text_recognition_CRNN_CH_2021sep.onnx` can detect digits (0\~9), upper/lower-case letters (a\~z and A\~Z), and some special characters (see `CHARSET_CH_94` for details in `crnn.py`). -- `text_recognition_CRNN_CN_2021nov.onnx` can detect digits (0\~9), upper/lower-case letters (a\~z and A\~Z), some Chinese characters and some special characters (see `CHARSET_CN_3944` for details in `crnn.py`). -- For details on training this model series, please visit https://github.com/zihaomu/deep-text-recognition-benchmark. -- `text_recognition_CRNN_XX_2021xxx_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`. - -## Demo - -***NOTE***: - -- This demo uses [text_detection_db](../text_detection_db) as text detector. 
- -### Python - -Run the demo detecting English: - -```shell -# detect on camera input -python demo.py -# detect on an image -python demo.py --input /path/to/image -v - -# get help regarding various parameters -python demo.py --help -``` - -Run the demo detecting Chinese: - -```shell -# detect on camera input -python demo.py --model text_recognition_CRNN_CN_2021nov.onnx -# detect on an image -python demo.py --input /path/to/image --model text_recognition_CRNN_CN_2021nov.onnx - -# get help regarding various parameters -python demo.py --help -``` -### C++ - -Install latest OpenCV and CMake >= 3.24.0 to get started with: - -```shell -# detect on camera input -./build/opencv_zoo_text_recognition_crnn -# detect on an image -./build/opencv_zoo_text_recognition_crnn --input /path/to/image -v - -# get help regarding various parameters -./build/opencv_zoo_text_recognition_crnn --help -``` - -Run the demo detecting Chinese: - -```shell -# detect on camera input -./build/opencv_zoo_text_recognition_crnn --model=text_recognition_CRNN_CN_2021nov.onnx --charset=charset_3944_CN.txt -# detect on an image -./build/opencv_zoo_text_recognition_crnn --input=/path/to/image --model=text_recognition_CRNN_CN_2021nov.onnx --charset=charset_3944_CN.txt - -# get help regarding various parameters -./build/opencv_zoo_text_recognition_crnn --help -``` - -### Examples - -![CRNNCTC](./example_outputs/CRNNCTC.gif) - -![demo](./example_outputs/demo.jpg) - -## License - -All files in this directory are licensed under [Apache 2.0 License](./LICENSE). 
- -## Reference - -- https://arxiv.org/abs/1507.05717 -- https://github.com/bgshih/crnn -- https://github.com/meijieru/crnn.pytorch -- https://github.com/zihaomu/deep-text-recognition-benchmark -- https://docs.opencv.org/4.5.2/d9/d1e/tutorial_dnn_OCR.html diff --git a/models/text_recognition_crnn/charset_32_94_3944.h b/models/text_recognition_crnn/charset_32_94_3944.h deleted file mode 100644 index 3e2d2419..00000000 --- a/models/text_recognition_crnn/charset_32_94_3944.h +++ /dev/null @@ -1,4092 +0,0 @@ -#include -#include - -std::vector loadCharset(std::string name){ - std::vector CHARSET_EN_36 = { - u"0", - u"1", - u"2", - u"3", - u"4", - u"5", - u"6", - u"7", - u"8", - u"9", - u"a", - u"b", - u"c", - u"d", - u"e", - u"f", - u"g", - u"h", - u"i", - u"j", - u"k", - u"l", - u"m", - u"n", - u"o", - u"p", - u"q", - u"r", - u"s", - u"t", - u"u", - u"v", - u"w", - u"x", - u"y", - u"z" }; - - std::vector CHARSET_CH_94 = { - u"0", - u"1", - u"2", - u"3", - u"4", - u"5", - u"6", - u"7", - u"8", - u"9", - u"a", - u"b", - u"c", - u"d", - u"e", - u"f", - u"g", - u"h", - u"i", - u"j", - u"k", - u"l", - u"m", - u"n", - u"o", - u"p", - u"q", - u"r", - u"s", - u"t", - u"u", - u"v", - u"w", - u"x", - u"y", - u"z", - u"A", - u"B", - u"C", - u"D", - u"E", - u"F", - u"G", - u"H", - u"I", - u"J", - u"K", - u"L", - u"M", - u"N", - u"O", - u"P", - u"Q", - u"R", - u"S", - u"T", - u"U", - u"V", - u"W", - u"X", - u"Y", - u"Z", - u"!", - u"\"", - u"#", - u"$", - u"%", - u"&", - u"'", - u"(", - u")", - u"*", - u"+", - u",", - u"-", - u".", - u"/", - u":", - u";", - u"<", - u"=", - u">", - u"?", - u"@", - u"[", - u"\\", - u"]", - u"^", - u"_", - u"`", - u"{", - u"|", - u"}" }; - - std::vector CHARSET_CN_3944 = { - u"H", - u"O", - u"K", - u"I", - u"T", - u"E", - u"A", - u"酱", - u"鸭", - u"传", - u"奇", - u"J", - u"N", - u"G", - u"Y", - u"C", - u"U", - u"Q", - u"蝦", - u"兵", - u"蟹", - u"煲", - u"这", - u"是", - u"可", - u"以", - u"先", - u"吃", - u"后", - u"涮", - u"的", - u"干", - u"锅", - u"菜", - u"加", - 
u"盟", - u"电", - u"话", - u":", - u"1", - u"7", - u"3", - u"9", - u"8", - u"郑", - u"州", - u"总", - u"店", - u"雪", - u"花", - u"勇", - u"闯", - u"天", - u"涯", - u"虾", - u",", - u"一", - u"送", - u"鱼", - u"锡", - u"纸", - u"蛤", - u"土", - u"豆", - u"粉", - u"砂", - u"米", - u"线", - u"牛", - u"筋", - u"面", - u"刀", - u"削", - u"水", - u"饺", - u"吧", - u"沙", - u"拉", - u"老", - u"饭", - u"盒", - u"教", - u"室", - u"主", - u"题", - u"餐", - u"厅", - u"仁", - u"馄", - u"饨", - u"重", - u"庆", - u"小", - u"便", - u"当", - u"全", - u"国", - u"连", - u"锁", - u"4", - u"0", - u"-", - u"6", - u"5", - u"2", - u"人", - u"快", - u"量", - u"贩", - u"蓬", - u"朗", - u"御", - u"茶", - u"川", - u"渝", - u"捞", - u"火", - u"古", - u"之", - u"匠", - u"今", - u"七", - u"西", - u"域", - u"羊", - u"城", - u"l", - u"i", - u"k", - u"n", - u"g", - u"c", - u"o", - u"f", - u"e", - u"w", - u"贵", - u"阳", - u"素", - u"有", - u"家", - u"会", - u"展", - u"口", - u"乐", - u"三", - u"惹", - u"烤", - u"肉", - u"h", - u"t", - u"子", - u"馆", - u"常", - u"盖", - u"浇", - u"兴", - u"业", - u"路", - u"书", - u"亦", - u"燒", - u"仙", - u"草", - u"L", - u":", - u"德", - u"啤", - u"工", - u"坊", - u"杏", - u"屋", - u"高", - u"桥", - u"号", - u"品", - u"麻", - u"辣", - u"烫", - u"检", - u"官", - u".", - u"千", - u"翼", - u"木", - u"兰", - u"画", - u"食", - u"上", - u"汤", - u"剁", - u"馅", - u"手", - u"煮", - u"时", - u"尚", - u"健", - u"康", - u"傲", - u"椒", - u"B", - u"啵", - u"条", - u"脾", - u"气", - u"!", - u"/", - u"月", - u"腾", - u"讯", - u"应", - u"用", - u"喵", - u"泡", - u"我", - u"鲜", - u"滚", - u"给", - u"你", - u"看", - u"客", - u"来", - u"香", - u"汉", - u"湘", - u"本", - u"地", - u"炒", - u"系", - u"列", - u"订", - u"仔", - u"肘", - u"蹄", - u"梅", - u"扣", - u"黄", - u"焖", - u"排", - u"骨", - u"炖", - u"鸡", - u"韓", - u"金", - u"利", - u"串", - u"舊", - u"街", - u"梨", - u"村", - u"座", - u"经", - u"济", - u"实", - u"惠", - u"绿", - u"色", - u"炭", - u"庐", - u"蛙", - u"忆", - u"蓉", - u"源", - u"真", - u"d", - u"D", - u"概", - u"念", - u"创", - u"意", - u"六", - u"熏", - u"各", - u"种", - u"精", - u"美", - u"y", - u"疯", - u"狂", - u"世", - u"界", - u"杯", - u"特", - u"价", - u"酒", - 
u"元", - u"瓶", - u"沸", - u"带", - u"F", - u"请", - u"二", - u"楼", - u"自", - u"动", - u"升", - u"降", - u"烏", - u"邦", - u"嗦", - u"味", - u"风", - u"货", - u"团", - u"外", - u"卖", - u"嘞", - u"个", - u"折", - u"辛", - u"束", - u"舌", - u"尖", - u"中", - u"包", - u"浆", - u"腐", - u"r", - u"P", - u"a", - u"u", - u"丸", - u"作", - u"福", - u"M", - u"漫", - u"蜜", - u"冰", - u"拌", - u"匆", - u"那", - u"年", - u"R", - u"S", - u"果", - u"光", - u"夹", - u"馍", - u"凉", - u"皮", - u"过", - u"祖", - u"南", - u"山", - u"風", - u"景", - u"堂", - u"烘", - u"培", - u"龍", - u"坎", - u"半", - u"婆", - u"建", - u"设", - u"富", - u"强", - u"丽", - u"菏", - u"泽", - u"省", - u"安", - u"港", - u"竹", - u"签", - u"撩", - u"只", - u"为", - u"好", - u"生", - u"活", - u"抓", - u"海", - u"最", - u"网", - u"红", - u"铁", - u"统", - u"®", - u"功", - u"夫", - u"鱿", - u"大", - u"闻", - u"就", - u"知", - u"遇", - u"见", - u"文", - u"合", - u"热", - u"森", - u"台", - u"湾", - u"卤", - u"然", - u"汁", - u"甄", - u"选", - u"材", - u"还", - u"原", - u"初", - u"衷", - u"*", - u"洪", - u"龙", - u"公", - u"酸", - u"巴", - u"乡", - u"焦", - u"烧", - u"淘", - u"成", - u"都", - u"眼", - u"镜", - u"优", - u"菓", - u"恋", - u"V", - u"化", - u"糖", - u"、", - u"粥", - u"田", - u"螺", - u"斓", - u"X", - u"爺", - u"W", - u"j", - u"院", - u"华", - u"Z", - u"蜊", - u"北", - u"京", - u"刷", - u"蝎", - u"腿", - u"梦", - u"幻", - u"奶", - u"式", - u"蛋", - u"鍋", - u"区", - u"·", - u"领", - u"航", - u"者", - u"四", - u"通", - u"往", - u"楚", - u"河", - u"停", - u"车", - u"场", - u"凌", - u"晨", - u"点", - u"杞", - u"缘", - u"王", - u"集", - u"唐", - u"菠", - u"萝", - u"泰", - u"板", - u"鳳", - u"凰", - u"樓", - u"名", - u"壹", - u"猪", - u"晴", - u"舍", - u"犟", - u"师", - u"傅", - u"飯", - u"致", - u"青", - u"春", - u"轰", - u"炸", - u"卡", - u"里", - u"身", - u"厨", - u"房", - u"x", - u"聚", - u"鑫", - u"阁", - u"岛", - u"纯", - u"聘", - u"专", - u"长", - u"庄", - u"鄉", - u"更", - u"珍", - u"固", - u"新", - u"岩", - u"v", - u"s", - u"m", - u"至", - u"尊", - u"比", - u"萨", - u"广", - u"披", - u"饮", - u"管", - u"理", - u"限", - u"司", - u"p", - u"幸", - u"东", - u"正", - u"挞", - u"少", - u"女", - u"克", - u"装", - u"童", - 
u"哒", - u"磨", - u"厂", - u"怼", - u"纤", - u"入", - u"户", - u"独", - u"溜", - u"共", - u"享", - u"滋", - u"江", - u"门", - u"九", - u"蒸", - u"胜", - u"盛", - u"&", - u"魔", - u"爪", - u"鹅", - u"皇", - u"(", - u")", - u"友", - u"甲", - u"魚", - u"首", - u"烹", - u"行", - u"员", - u"若", - u"资", - u"议", - u"联", - u"同", - u"急", - u"私", - u"燕", - u"儿", - u"巢", - u"鹏", - u"记", - u"腊", - u"营", - u"欢", - u"迎", - u"旗", - u"舰", - u"叫", - u"了", - u"做", - u"故", - u"铃", - u"煎", - u"饼", - u"哥", - u"力", - u"五", - u"谷", - u"野", - u"戈", - u"厠", - u"所", - u"超", - u"牌", - u"冒", - u"陳", - u"陈", - u"苕", - u"爽", - u"滑", - u"启", - u"秦", - u"择", - u"现", - u"进", - u"惊", - u"喜", - u"定", - u"于", - u"雅", - u"膳", - u"多", - u"推", - u"淇", - u"淋", - u"b", - u"思", - u"堡", - u"偶", - u"相", - u"伴", - u"呈", - u"湯", - u"绝", - u"浏", - u"\"", - u"刘", - u"态", - u"牧", - u"万", - u"达", - u"和", - u"番", - u"丼", - u"—", - u"机", - u"瘦", - u"绵", - u"柔", - u"厉", - u"蚝", - u"娘", - u"彩", - u"百", - u"事", - u"调", - u"韩", - u"爱", - u"喝", - u"玩", - u"放", - u"肆", - u"寿", - u"净", - u"配", - u"髓", - u"非", - u"道", - u"额", - u"吉", - u"招", - u"商", - u"杂", - u"粮", - u"筐", - u"运", - u"转", - u"服", - u"务", - u"缤", - u"灿", - u"腕", - u"楠", - u"彤", - u"学", - u"橋", - u"试", - u"浩", - u"减", - u"薪", - u"诚", - u"霸", - u"第", - u"间", - u"日", - u"极", - u"料", - u"開", - u"業", - u"霏", - u"星", - u"期", - u"分", - u"秒", - u"内", - u"咨", - u"询", - u"。", - u"樐", - u"头", - u"开", - u"氏", - u"渔", - u"约", - u"劳", - u"保", - u"礼", - u"宏", - u"武", - u"佘", - u"轻", - u"奢", - u"艺", - u"井", - u"隆", - u"鐵", - u"卷", - u"染", - u"焙", - u"钵", - u"马", - u"牟", - u"洋", - u"芋", - u"片", - u"流", - u"宽", - u"心", - u"位", - u"清", - u"潼", - u"关", - u"祥", - u"背", - u"凡", - u"哈", - u"尔", - u"滨", - u"珠", - u"派", - u"艾", - u"让", - u"变", - u"得", - u"样", - u"玖", - u"等", - u"综", - u"性", - u"涵", - u"粗", - u"冠", - u"記", - u"肠", - u"湖", - u"财", - u"贡", - u"桃", - u"杭", - u"平", - u"桂", - u"林", - u"煨", - u"档", - u"案", - u"造", - u"潮", - u"汕", - u"宗", - u"单", - u"县", - u"鲁", - u"舜", - u"脆", - u"酥", - u"糕", - u"仕", 
- u"十", - u"临", - u"簋", - u"宴", - u"字", - u"太", - u"灌", - u"薄", - u"尝", - u"址", - u"晗", - u"幢", - u"购", - u"梁", - u"醉", - u"皖", - u"庭", - u"白", - u"肥", - u"块", - u"石", - u"碗", - u"颜", - u"值", - u"張", - u"瘾", - u"跷", - u"脚", - u"而", - u"叁", - u"蜀", - u"橙", - u"市", - u"边", - u"早", - u"晚", - u"云", - u"吞", - u"目", - u"表", - u"赵", - u"烩", - u"擀", - u"蔬", - u"找", - u"回", - u"游", - u"刃", - u"余", - u"支", - u"洗", - u"吹", - u"休", - u"闲", - u"简", - u"撸", - u"根", - u"据", - u"鸽", - u"铜", - u"亲", - u"贝", - u"纪", - u"吕", - u"豚", - u"饅", - u"悦", - u"汇", - u"油", - u"无", - u"制", - u"在", - u"寻", - u"碳", - u"馋", - u"嘴", - u"架", - u"荣", - u"斋", - u"护", - u"角", - u"落", - u"铺", - u"臊", - u"丝", - u"围", - u"柳", - u"蛳", - u"蒲", - u"庙", - u"视", - u"荐", - u"缃", - u"想", - u"呀", - u"姜", - u"母", - u"起", - u"泉", - u"族", - u"群", - u"众", - u"其", - u"它", - u"血", - u"双", - u"补", - u"阴", - u"润", - u"不", - u"禽", - u"类", - u"款", - u"较", - u"候", - u"些", - u"畅", - u"脉", - u"痰", - u"疏", - u"肝", - u"帮", - u"助", - u"消", - u"增", - u"欲", - u"尤", - u"对", - u"胃", - u"畏", - u"寒", - u"很", - u"效", - u"秘", - u"黑", - u"嘿", - u"佳", - u"越", - u"脑", - u"桶", - u"项", - u"▪", - u"|", - u"榜", - u"许", - u"仿", - u"或", - u"酬", - u"宾", - u"指", - u"买", - u"赠", - u"笃", - u"鼎", - u"盆", - u"™", - u"咕", - u"咾", - u"肚", - u"识", - u"栖", - u"凤", - u"渡", - u"筒", - u"彬", - u"弟", - u"醋", - u"財", - u"師", - u"民", - u"博", - u"丁", - u"扒", - u"翅", - u"墨", - u"柠", - u"檬", - u"紫", - u"薯", - u"焗", - u"芝", - u"士", - u"胸", - u"图", - u"妮", - u"杀", - u"菌", - u"爹", - u"尽", - u"归", - u"宁", - u"粽", - u"瑞", - u"轩", - u"午", - u"陕", - u"出", - u"才", - u"盘", - u"植", - u"甜", - u"粒", - u"神", - u"舟", - u"玻", - u"璃", - u"医", - u"划", - u"药", - u"郡", - u"毛", - u"张", - u"姐", - u"留", - u"满", - u"下", - u"兄", - u"法", - u"鋪", - u"é", - u"[", - u"槑", - u"]", - u"言", - u"密", - u"帝", - u"場", - u"朴", - u"寨", - u"奉", - u"z", - u"什", - u"顺", - u"疆", - u"馕", - u"豫", - u"怀", - u"旧", - u"验", - u"昙", - u"搞", - u"圣", - u"格", - u"ǐ", - u"à", - u"隱", - u"燙", - u"状", - u"居", - u"饱", 
- u"底", - u"免", - u"费", - u"廣", - u"點", - u"專", - u"門", - u"语", - u"叉", - u"左", - u"岸", - u"发", - u"乌", - u"齐", - u"冷", - u"命", - u"●", - u"修", - u"闸", - u"飞", - u"空", - u"养", - u"笼", - u"興", - u"银", - u"套", - u"東", - u"吴", - u"麺", - u"館", - u"¥", - u"从", - u"前", - u"乙", - u"弘", - u"炝", - u"夏", - u"秋", - u"冬", - u"咖", - u"啡", - u"℃", - u"©", - u"莲", - u"塘", - u"哆", - u"梓", - u"依", - u"哎", - u"麦", - u"泗", - u"泾", - u"瓯", - u"胡", - u"∣", - u"歺", - u"八", - u"度", - u"深", - u"夜", - u"旋", - u"永", - u"远", - u"温", - u"又", - u"晶", - u"溏", - u"ä", - u"盔", - u"飘", - u"劲", - u"旺", - u"楸", - u"良", - u"譜", - u"餅", - u"苏", - u"莎", - u"足", - u"宵", - u"与", - u"楊", - u"國", - u"莱", - u"卜", - u"炊", - u"挑", - u"剔", - u"存", - u"错", - u"方", - u"程", - u"解", - u"能", - u"堆", - u"洲", - u"诗", - u"玛", - u"渴", - u"脖", - u"丛", - u"狼", - u"翁", - u"姓", - u"葫", - u"芦", - u"沾", - u"葵", - u"の", - u"咔", - u"粹", - u"弥", - u"乖", - u"悠", - u"茗", - u"别", - u"走", - u"柒", - u"榨", - u"咥", - u"虹", - u"沏", - u"桔", - u"叔", - u"贴", - u"办", - u"充", - u"崎", - u"鮮", - u"属", - u"彭", - u"浦", - u"町", - u"郎", - u"°", - u"悟", - u"惑", - u"科", - u"英", - u"育", - u"岁", - u"幼", - u"园", - u"慢", - u"摆", - u"_", - u"狐", - u"狸", - u"典", - u"暴", - u"帥", - u"尾", - u"琼", - u"見", - u"望", - u"烟", - u"坚", - u"鸳", - u"鸯", - u"直", - u"校", - u"饪", - u"承", - u"们", - u"么", - u"¥", - u"份", - u"宇", - u"炉", - u"峰", - u"乃", - u"趣", - u"代", - u"刨", - u"抖", - u"音", - u"占", - u"谜", - u"答", - u"熟", - u"控", - u"蕾", - u"节", - u"社", - u"您", - u"《", - u"羅", - u"茉", - u"瀞", - u"憨", - u"尼", - u"丰", - u"镇", - u"酿", - u"避", - u"抢", - u"突", - u"破", - u"杰", - u"姆", - u"波", - u"观", - u"澜", - u"庫", - u"舒", - u"谁", - u"短", - u"島", - u"爷", - u"码", - u"每", - u"欧", - u"注", - u"册", - u"标", - u"腸", - u"奈", - u"熊", - u"粵", - u"吳", - u"衢", - u"雄", - u"际", - u"葱", - u"柱", - u"压", - u"陪", - u"器", - u"厘", - u"柴", - u"席", - u"饿", - u"俏", - u"汽", - u"站", - u"霜", - u"荟", - u"禾", - u"咘", - u"臭", - u"夷", - u"肖", - u"微", - u"组", - u"刺", - u"拼", - u"打", - u"信", - u"步", - u"!", 
- u"说", - u"囍", - u"智", - u"藍", - u"鹿", - u"巷", - u"顾", - u"勃", - u"頭", - u"帕", - u"徐", - u"渣", - u"嗨", - u"鲍", - u"抽", - u"莊", - u"胗", - u"耳", - u"栈", - u"葑", - u"谊", - u"李", - u"够", - u"歪", - u"到", - u"杜", - u"绪", - u"始", - u"“", - u"”", - u"编", - u"感", - u"谢", - u"阿", - u"妹", - u"抄", - u"屿", - u"旁", - u"钟", - u"糰", - u"鷄", - u"觉", - u"队", - u"明", - u"没", - u"幺", - u"罗", - u"恭", - u"發", - u"溢", - u"圆", - u"筵", - u"鲩", - u"斤", - u"噜", - u"府", - u"雕", - u"牦", - u"津", - u"間", - u"粤", - u"义", - u"驾", - u"嫩", - u"眷", - u"苔", - u"怡", - u"逍", - u"遥", - u"即", - u"把", - u"季", - u"鹃", - u"妈", - u"烙", - u"淡", - u"嘟", - u"班", - u"散", - u"磐", - u"稣", - u"耍", - u"芽", - u"昌", - u"粿", - u"鼓", - u"姑", - u"央", - u"告", - u"翔", - u"迦", - u"缆", - u"怪", - u"俗", - u"菩", - u"宥", - u"酵", - u"男", - u"顿", - u"蚂", - u"蚁", - u"q", - u"緑", - u"瑩", - u"養", - u"滿", - u"接", - u"立", - u"勤", - u"封", - u"徽", - u"酷", - u"(", - u"慕", - u"曹", - u"吊", - u"咸", - u"矿", - u"黛", - u"刻", - u"呗", - u"布", - u"袋", - u"钝", - u"丘", - u"逗", - u"窗", - u"吾", - u"塔", - u"坡", - u"周", - u"雙", - u"朝", - u"末", - u"如", - u"杨", - u"淮", - u"摄", - u"影", - u"翻", - u"窝", - u"物", - u"椰", - u"荞", - u"搅", - u"陇", - u"收", - u"两", - u"倍", - u"狮", - u"伊", - u"後", - u"晖", - u"長", - u"箐", - u"豪", - u"耀", - u"漢", - u"釜", - u"宮", - u"次", - u"掌", - u"斯", - u"朋", - u"针", - u"菇", - u"蚬", - u"拍", - u"雒", - u"陽", - u"漿", - u"麵", - u"條", - u"部", - u"←", - u"柜", - u"驴", - u"证", - u"票", - u"账", - u"汗", - u"汆", - u"稍", - u"戏", - u"菋", - u"卫", - u"匹", - u"栋", - u"馨", - u"肯", - u"迪", - u"邢", - u"梯", - u"容", - u"嘉", - u"莞", - u"袁", - u"锦", - u"遮", - u"雨", - u"篷", - u"腰", - u"肺", - u"剡", - u"乾", - u",", - u"翰", - u"蔚", - u"刁", - u"藤", - u"帅", - u"傳", - u"维", - u"笔", - u"历", - u"史", - u"】", - u"适", - u"煌", - u"倾", - u"沧", - u"姬", - u"训", - u"邵", - u"诺", - u"敢", - u"质", - u"益", - u"佬", - u"兼", - u"职", - u"盅", - u"诊", - u"扬", - u"速", - u"宝", - u"褚", - u"糁", - u"钢", - u"松", - u"婚", - u"秀", - u"盐", - u"及", - u"個", - u"飲", - u"绍", - u"槿", - u"觅", - u"逼", 
- u"兽", - u"》", - u"吐", - u"右", - u"久", - u"闺", - u"祝", - u"贺", - u"啦", - u"瓦", - u"甏", - u"探", - u"辰", - u"碚", - u"芳", - u"灣", - u"泷", - u"饰", - u"隔", - u"帐", - u"飮", - u"搜", - u"時", - u"宫", - u"蘭", - u"再", - u"糊", - u"仓", - u"稻", - u"玉", - u"印", - u"象", - u"稀", - u"拴", - u"桩", - u"餃", - u"贾", - u"贱", - u"球", - u"萌", - u"撕", - u"脂", - u"肪", - u"层", - u"晋", - u"荷", - u"钱", - u"潍", - u"失", - u"孜", - u"提", - u"供", - u"具", - u"洛", - u"涂", - u"叠", - u"豊", - u"积", - u"媒", - u"级", - u"纷", - u"巧", - u"瓜", - u"苹", - u"琥", - u"珀", - u"蜂", - u"柚", - u"莉", - u"爆", - u"龄", - u"饸", - u"饹", - u"郞", - u"嫡", - u"億", - u"姚", - u"繁", - u"监", - u"督", - u"示", - u"佰", - u"汍", - u"%", - u"甘", - u"蔗", - u"喻", - u"骄", - u"基", - u"因", - u"匙", - u"评", - u"侠", - u"赢", - u"交", - u"歡", - u"待", - u"馒", - u"产", - u"倡", - u"导", - u"低", - u"茂", - u"沐", - u"熙", - u"延", - u"丧", - u"受", - u"确", - u"睡", - u"蓝", - u"未", - u"賣", - u"電", - u"話", - u"农", - u"札", - u"岗", - u"树", - u"赖", - u"琪", - u"驻", - u"辉", - u"软", - u"防", - u"盗", - u"隐", - u"形", - u"纱", - u"灶", - u"扎", - u"环", - u"禁", - u"止", - u"吸", - u"萬", - u"昆", - u"几", - u"跳", - u"媳", - u"婦", - u"坛", - u"<", - u">", - u"拿", - u"妖", - u"协", - u"朱", - u"住", - u"宿", - u"魅", - u"照", - u"碰", - u"滴", - u"何", - u"贤", - u"棒", - u"持", - u"啊", - u"赛", - u"版", - u"帆", - u"順", - u"狗", - u"情", - u"+", - u"洞", - u"奋", - u"斗", - u"亨", - u"叶", - u"涛", - u"铝", - u"范", - u"汀", - u"號", - u"律", - u"價", - u"鞭", - u"肩", - u"#", - u"愚", - u"奥", - u"脯", - u"沁", - u"奚", - u"魏", - u"批", - u"租", - u"宠", - u"炲", - u"横", - u"沥", - u"彪", - u"投", - u"诉", - u"犀", - u"去", - u"屠", - u"鲅", - u"~", - u"俱", - u"徒", - u"鴻", - u"劉", - u"迷", - u"荤", - u"威", - u"曜", - u"連", - u"鎖", - u"馳", - u"载", - u"添", - u"筑", - u"陵", - u"佐", - u"敦", - u">", - u"郭", - u"厢", - u"祛", - u"茄", - u"堰", - u"漂", - u"亮", - u"爅", - u"虎", - u"膀", - u"叼", - u"猫", - u"藏", - u"陶", - u"鲈", - u"栏", - u"…", - u"考", - u"冲", - u"胖", - u"裕", - u"沃", - u"挂", - u"报", - u"兔", - u"胶", - u"臨", - u"附", - u"处", - u"嫂", - u"萃", 
- u"幂", - u"吻", - u"聪", - u"糯", - u"糍", - u"棋", - u"烓", - u"脊", - u"衡", - u"亚", - u"副", - u"肤", - u"荆", - u"榴", - u"绚", - u"黔", - u"圈", - u"纳", - u"课", - u"逸", - u"宜", - u"=", - u"烊", - u"姨", - u"施", - u"救", - u"贸", - u"啥", - u"也", - u"贯", - u"雷", - u"呆", - u"棠", - u"伙", - u"岐", - u"宛", - u"媽", - u"寸", - u"澳", - u"已", - u"還", - u"兒", - u"Ⅱ", - u"凯", - u"株", - u"藕", - u"闽", - u"窖", - u"瀘", - u"售", - u"索", - u"体", - u"型", - u"樂", - u"琅", - u"琊", - u"夺", - u"扩", - u")", - u"诱", - u"滩", - u"浓", - u"要", - u"芹", - u"君", - u"反", - u"复", - u"羔", - u"追", - u"演", - u"唱", - u"過", - u"綫", - u"乳", - u"涩", - u"芒", - u"露", - u"蒙", - u"羯", - u"励", - u"志", - u"嵊", - u"閒", - u"罐", - u"佛", - u"墙", - u"頁", - u"坐", - u"眯", - u"预", - u"華", - u"廉", - u"释", - u"必", - u"随", - u"逐", - u"引", - u"究", - u"爸", - u"灵", - u"勺", - u"岂", - u"俵", - u"廷", - u"苗", - u"岭", - u"将", - u"來", - u"泛", - u"朵", - u"維", - u"園", - u"廳", - u"圳", - u"伦", - u"寶", - u"付", - u"仅", - u"減", - u"谦", - u"硕", - u"抚", - u"慶", - u"雞", - u"郝", - u"计", - u"熱", - u"杖", - u"亭", - u"喱", - u"惜", - u"莒", - u"另", - u"陆", - u"拾", - u"伍", - u"谈", - u"嚼", - u"娅", - u"翟", - u"別", - u"颈", - u"邮", - u"弄", - u"•", - u"扇", - u"哦", - u"吼", - u"耶", - u"宅", - u"帽", - u"魂", - u"搭", - u"笨", - u"映", - u"拨", - u"烂", - u"馈", - u"胎", - u"溶", - u"\\", - u"善", - u"销", - u"难", - u"忘", - u"斑", - u"噢", - u"錫", - u"娟", - u"語", - u"哨", - u"筷", - u"摊", - u"均", - u"椅", - u"改", - u"换", - u"跟", - u"帖", - u"勾", - u"缅", - u"孙", - u"啪", - u"栗", - u"着", - u"漁", - u"吓", - u"易", - u"漲", - u"靖", - u"枸", - u"馬", - u"昇", - u"當", - u"麥", - u"妆", - u"塑", - u"魯", - u"鎮", - u"吗", - u"魁", - u"丹", - u"杈", - u"技", - u"术", - u"泼", - u"零", - u"忙", - u"漾", - u"創", - u"攀", - u"郫", - u"抿", - u"稼", - u"假", - u"循", - u"泳", - u"池", - u"膨", - u"巨", - u"歧", - u"愛", - u"鵝", - u"悉", - u"灯", - u"激", - u"踪", - u"细", - u"會", - u"舔", - u"愿", - u"們", - u"衹", - u"令", - u"浔", - u"丨", - u"酉", - u"惦", - u"耕", - u"×", - u"闪", - u"經", - u"玺", - u"芯", - u"襄", - u"賦", - u"予", - u"學", - u"苑", - 
u"托", - u"丢", - u"赔", - u"ā", - u"聽", - u"濤", - u"浮", - u"伯", - u"兑", - u"币", - u"治", - u"愈", - u"盱", - u"眙", - u"漏", - u"夕", - u"搏", - u"由", - u"完", - u"切", - u"罕", - u"息", - u"燃", - u"叙", - u"萍", - u"碑", - u"腌", - u"衣", - u"害", - u"己", - u"患", - u"浙", - u"闫", - u"|", - u"芈", - u"谣", - u"戴", - u"錦", - u"謝", - u"恩", - u"芊", - u"拇", - u"矾", - u"政", - u"锣", - u"跃", - u"钥", - u"寺", - u"驼", - u"芙", - u"插", - u"恒", - u"咪", - u"禄", - u"摩", - u"轮", - u"譚", - u"鴨", - u"戊", - u"申", - u"丙", - u"邊", - u"唯", - u"登", - u"困", - u"貢", - u"誉", - u"賀", - u"认", - u"准", - u"妃", - u"潜", - u"旨", - u"死", - u"桌", - u"尧", - u"箱", - u"届", - u"获", - u"顶", - u"柿", - u"臂", - u"蓮", - u"凭", - u"慵", - u"懒", - u"醇", - u"籍", - u"静", - u"淌", - u"此", - u"甚", - u"绣", - u"渌", - u"呢", - u"问", - u"抹", - u"弹", - u"捷", - u"邱", - u"旦", - u"曉", - u"艳", - u"雲", - u"研", - u"守", - u"鼻", - u"¦", - u"揽", - u"含", - u"沂", - u"听", - u"帛", - u"端", - u"兆", - u"舆", - u"谐", - u"帘", - u"笑", - u"寅", - u"【", - u"車", - u"@", - u"&", - u"胪", - u"臻", - u"蘆", - u"衙", - u"餌", - u"①", - u"鉴", - u"敬", - u"枝", - u"沈", - u"衔", - u"蝉", - u"芜", - u"烈", - u"库", - u"椿", - u"稳", - u"’", - u"豌", - u"亿", - u"缙", - u"獨", - u"菊", - u"沤", - u"迟", - u"忧", - u"沫", - u"伟", - u"靠", - u"并", - u"互", - u"晓", - u"枫", - u"窑", - u"芭", - u"夯", - u"鸿", - u"無", - u"烦", - u"恼", - u"闖", - u"贞", - u"鳥", - u"厦", - u"抱", - u"歐", - u"藝", - u"廖", - u"振", - u"腦", - u"舖", - u"酪", - u"碎", - u"浪", - u"荔", - u"巫", - u"撈", - u"醬", - u"段", - u"昔", - u"潘", - u"Λ", - u"禧", - u"妻", - u"瓢", - u"柏", - u"郁", - u"暹", - u"兮", - u"娃", - u"敏", - u"進", - u"距", - u"离", - u"倪", - u"征", - u"咱", - u"继", - u"责", - u"任", - u"銅", - u"啖", - u"赞", - u"菲", - u"蛇", - u"焰", - u"娜", - u"芮", - u"坦", - u"磅", - u"薛", - u"緣", - u"乔", - u"拱", - u"骚", - u"扰", - u"約", - u"喷", - u"驢", - u"仨", - u"纬", - u"臘", - u"邳", - u"终", - u"喏", - u"扫", - u"除", - u"恶", - u"争", - u"率", - u"‘", - u"肃", - u"雀", - u"鈴", - u"贼", - u"绕", - u"笋", - u"钩", - u"勒", - u"翠", - u"黎", - u"董", - u"澄", - u"境", - u"采", - u"拳", - 
u"捆", - u"粄", - u"诸", - u"暨", - u"榧", - u"葛", - u"親", - u"戚", - u"访", - u"股", - u"融", - u"潤", - u"寄", - u"递", - u"藩", - u"滇", - u"湛", - u"他", - u"篓", - u"普", - u"撞", - u"莅", - u"但", - u"沟", - u"暑", - u"促", - u"玲", - u"腩", - u"碼", - u"偏", - u"楹", - u"嘎", - u"洒", - u"抛", - u"危", - u"险", - u"损", - u"负", - u"銘", - u"黃", - u"燜", - u"說", - u"杆", - u"称", - u"蹭", - u"聊", - u"妙", - u"滕", - u"曦", - u"肴", - u"萧", - u"颗", - u"剂", - u"義", - u"锋", - u"授", - u"权", - u"著", - u"茴", - u"蒝", - u"侬", - u"顏", - u"菁", - u"擦", - u"鞋", - u"庞", - u"毕", - u"谱", - u"樱", - u"→", - u"綦", - u"舞", - u"蹈", - u"躁", - u"渠", - u"俐", - u"涧", - u"馀", - u"潇", - u"邻", - u"须", - u"藻", - u"纺", - u"织", - u"军", - u"沅", - u"豐", - u"爐", - u"韭", - u"棚", - u"綿", - u"麯", - u"剑", - u"娱", - u"链", - u"锤", - u"炼", - u"献", - u"晟", - u"章", - u"謎", - u"数", - u"侯", - u"她", - u"疗", - u"途", - u"篇", - u"则", - u"邓", - u"赐", - u"閣", - u"對", - u"猩", - u"邑", - u"區", - u"鬼", - u"莫", - u"沪", - u"淼", - u"赤", - u"混", - u"沌", - u"需", - u"求", - u"痛", - u"绮", - u"琦", - u"荃", - u"熳", - u"佑", - u"Á", - u"ō", - u"現", - u"専", - u"卢", - u"譽", - u"缠", - u"曾", - u"鸣", - u"琴", - u"汊", - u"濮", - u"哇", - u"哩", - u"唝", - u"曲", - u"坂", - u"呼", - u"莴", - u"怕", - u"蒋", - u"伞", - u"炙", - u"燻", - u"瑧", - u"冈", - u"讲", - u"硬", - u"详", - u"鹵", - u"摇", - u"偃", - u"嵩", - u"严", - u"谨", - u"′", - u"剥", - u"穗", - u"榮", - u"禹", - u"颐", - u"局", - u"刚", - u"▕", - u"暖", - u"漠", - u"炎", - u"頤", - u"樟", - u"?", - u"储", - u"移", - u"缕", - u"艰", - u"袍", - u"瑪", - u"麗", - u"参", - u"䬺", - u"趁", - u"呦", - u"霖", - u"饵", - u"溪", - u"孔", - u"澤", - u"袜", - u"蔓", - u"熠", - u"显", - u"屏", - u"缇", - u"寇", - u"亞", - u"坑", - u"槟", - u"榔", - u"絳", - u"驿", - u"歹", - u"匾", - u"猴", - u"旭", - u"竞", - u"­", - u"唛", - u"介", - u"习", - u"涡", - u"寓", - u"掉", - u"蘸", - u"愉", - u"佼", - u"ǒ", - u"納", - u"∶", - u"革", - u"嚸", - u"募", - u"螃", - u"鲢", - u"俤", - u"扁", - u"寳", - u"辽", - u"∧", - u"厚", - u"裤", - u"扯", - u"屯", - u"废", - u"挪", - u"辘", - u"碉", - u"歇", - u"漓", - u"腻", - u"捣", - u"孩", - 
u"烁", - u"整", - u"按", - u"Ⓡ", - u"眉", - u"脸", - u"痣", - u"粑", - u"序", - u"穿", - u"樊", - u"玮", - u"★", - u"扑", - u"渊", - u"醴", - u"瑶", - u"農", - u"檔", - u"憩", - u"霊", - u"赫", - u"呜", - u"~", - u"备", - u"説", - u"莓", - u"钻", - u"播", - u"冻", - u"紅", - u"菽", - u"喪", - u"埔", - u"壽", - u"❤", - u"籽", - u"咻", - u"籣", - u"尹", - u"潭", - u"穆", - u"壮", - u"使", - u"霄", - u"蔵", - u"浒", - u"岳", - u"熘", - u"臺", - u"殷", - u"孤", - u"邂", - u"逅", - u"厕", - u"郸", - u"铭", - u"莆", - u"抻", - u"虽", - u"倦", - u"怠", - u"矣", - u"茵", - u"垂", - u"殿", - u"鄂", - u"嗑", - u"续", - u"钦", - u"党", - u"鲫", - u"蔡", - u"侧", - u"割", - u"彰", - u"凝", - u"熬", - u"叕", - u"純", - u"谛", - u"籠", - u"宋", - u"峡", - u"俩", - u"雜", - u"跑", - u"⑧", - u"焼", - u"-", - u"逢", - u"澧", - u"舵", - u"异", - u"冯", - u"战", - u"决", - u"棍", - u";", - u"﹣", - u"丑", - u"妇", - u"焉", - u"芷", - u"楂", - u"坞", - u"壳", - u"馐", - u"帜", - u"旅", - u"鳯", - u"簡", - u"凍", - u"秜", - u"结", - u"咩", - u"丫", - u"稠", - u"暗", - u"缔", - u"乎", - u"被", - u"狠", - u"皲", - u"豉", - u"崇", - u"渭", - u"担", - u"鹤", - u"製", - u"蛎", - u"笛", - u"奔", - u"赴", - u"盼", - u"鳌", - u"拜", - u"络", - u"灸", - u"膜", - u"刮", - u"痧", - u"毒", - u"萊", - u"陂", - u"濑", - u"唇", - u"抵", - u"押", - u"置", - u"馇", - u"泌", - u"尿", - u"傻", - u"像", - u"孃", - u"陣", - u"靓", - u"规", - u"企", - u"矮", - u"凳", - u"贰", - u"兎", - u"庵", - u"質", - u"阅", - u"读", - u"◆", - u"练", - u"墩", - u"曼", - u"呱", - u"泓", - u"耐", - u"磁", - u"枣", - u"罉", - u"浴", - u"氧", - u"洱", - u"鳅", - u"線", - u"炳", - u"顽", - u"符", - u"倌", - u"泥", - u"郊", - u"柯", - u"餘", - u"巍", - u"论", - u"沽", - u"荘", - u"奕", - u"啃", - u"髙", - u"○", - u"芬", - u"苟", - u"且", - u"阆", - u"確", - u"獅", - u"匣", - u"睫", - u"牙", - u"戒", - u"俊", - u"阜", - u"遵", - u"爵", - u"遗", - u"捧", - u"仑", - u"构", - u"豬", - u"挡", - u"弓", - u"蠔", - u"旬", - u"鱻", - u"镖", - u"燚", - u"歌", - u"壁", - u"啫", - u"饷", - u"仰", - u"韶", - u"勞", - u"軒", - u"菒", - u"炫", - u"廊", - u"塞", - u"脏", - u"堤", - u"浅", - u"辈", - u"靡", - u"裙", - u"尺", - u"廚", - u"向", - u"磊", - u"咬", - u"皓", - u"卿", - 
u"懂", - u"葉", - u"廿", - u"芸", - u"賴", - u"埠", - u"應", - u"碟", - u"溧", - u"訂", - u"選", - u"睦", - u"举", - u"钳", - u"哟", - u"霍", - u"扞", - u"侣", - u"營", - u"龟", - u"钜", - u"埭", - u"が", - u"搽", - u"螞", - u"蟻", - u"娚", - u"蒜", - u"厝", - u"垵", - u"☎", - u"捌", - u"倒", - u"骑", - u"Ξ", - u"谋", - u"黍", - u"侍", - u"赏", - u"扮", - u"忱", - u"蘑", - u"洁", - u"嘆", - u"闹", - u"谭", - u"鶏", - u"種", - u"φ", - u"坤", - u"麓", - u"麒", - u"麟", - u"喂", - u"琳", - u"Ⓑ", - u"趙", - u"總", - u"這", - u"奖", - u"取", - u"拔", - u"錯", - u"仉", - u"缸", - u"廟", - u"暢", - u"腔", - u"卓", - u"腱", - u"朙", - u"紹", - u"莹", - u"缺", - u"抺", - u"睿", - u"氣", - u"该", - u"貼", - u"妍", - u"拆", - u"穇", - u"箩", - u"希", - u"廰", - u"祗", - u"盲", - u"坝", - u"骆", - u"熄", - u"蛮", - u"賓", - u"馮", - u"尋", - u"泊", - u"孫", - u"槁", - u"亖", - u"俯", - u"浣", - u"婴", - u"锨", - u"馥", - u"闷", - u"梆", - u"▫", - u"姥", - u"哲", - u"录", - u"甫", - u"床", - u"嬌", - u"烎", - u"梵", - u"枪", - u"乍", - u"璜", - u"羌", - u"崂", - u"穷", - u"榕", - u"聲", - u"喚", - u"駕", - u"晕", - u"嬷", - u"箕", - u"婧", - u"盧", - u"楓", - u"柃", - u"差", - u"「", - u"」", - u"佶", - u"唔", - u"壕", - u"歆", - u"盏", - u"擂", - u"睇", - u"巾", - u"查", - u"淖", - u"哪", - u"沣", - u"赣", - u"優", - u"諾", - u"礁", - u"努", - u"畔", - u"疙", - u"瘩", - u"握", - u"叮", - u"栙", - u"甑", - u"嶺", - u"涌", - u"透", - u"钓", - u"斜", - u"搬", - u"迁", - u"妨", - u"借", - u"仍", - u"鳕", - u"瓷", - u"绘", - u"餠", - u"á", - u"ǎ", - u"祈", - u"邨", - u"醒", - u"闵", - u"砖", - u"锹", - u"咀", - u"綠", - u"幕", - u"忠", - u"雾", - u"覓", - u"靜", - u"擔", - u"篮", - u"杉", - u"势", - u"薇", - u"甬", - u"频", - u"般", - u"仲", - u"蘇", - u"鸟", - u"卞", - u"憾", - u"資", - u"駱", - u"蝶", - u"為", - u"仟", - u"耗", - u"莘", - u"涉", - u"昕", - u"盈", - u"熹", - u"觀", - u"瑭", - u"湃", - u"兢", - u"淞", - u"䒩", - u"結", - u"柗", - u"鲤", - u"糟", - u"粕", - u"塗", - u"簽", - u"怎", - u"桐", - u"皆", - u"羽", - u"盯", - u"氽", - u"晏", - u"液", - u"镀", - u"珂", - u"悸", - u"∙", - u"桑", - u"夢", - u"楽", - u"剩", - u"纵", - u"逝", - u"欺", - u"統", - u"飛", - u"姣", - u"俄", - u"揪", - u"薡", - u"幅", - 
u"蓋", - u"︳", - u"屉", - u"㕔", - u"а", - u"铸", - u"韦", - u"銀", - u"檀", - u"击", - u"伿", - u"隍", - u"『", - u"』", - u"芥", - u"☆", - u"声", - u"跆", - u"肋", - u"榭", - u"牵", - u"棧", - u"網", - u"愁", - u"嗏", - u"嵗", - u"巡", - u"稚", - u"貴", - u"買", - u"恰", - u"㸆", - u"捻", - u"玫", - u"瑰", - u"炕", - u"梧", - u"餡", - u"锌", - u"焱", - u"驰", - u"堽", - u"邯", - u"珑", - u"尕", - u"宰", - u"栓", - u"喃", - u"殊", - u"燊", - u"慈", - u"羴", - u"逃", - u"脱", - u"邹", - u"檐", - u"碌", - u"页", - u"荠", - u"券", - u"題", - u"龚", - u"肌", - u"蕉", - u"囬", - u"肫", - u"坪", - u"沉", - u"淀", - u"斌", - u"鳝", - u"核", - u"喳", - u"剃", - u"昭", - u"{", - u"}", - u"坏", - u"烜", - u"媛", - u"猛", - u"桓", - u"欣", - u"碁", - u"竭", - u"堇", - u"↑", - u"扛", - u"罄", - u"栾", - u"鲶", - u"鍕", - u"崔", - u"橘", - u"携", - u"丈", - u"射", - u"梗", - u"檸", - u"疼", - u"卑", - u"捉", - u"障", - u"裏", - u"遍", - u"蓓", - u"析", - u"許", - u"虫", - u"坨", - u"馔", - u"窄", - u"姫", - u"噤", - u"係", - u"湿", - u"汐", - u"鳜", - u"船", - u"崽", - u"+", - u"例", - u"灼", - u"祿", - u"腥", - u"峭", - u"酌", - u"喽", - u"件", - u"郏", - u"栀", - u"鲨", - u"寫", - u"與", - u"诈", - u"斥", - u"炮", - u"稿", - u"懿", - u"掂", - u"鹭", - u"乱", - u"恬", - u"婷", - u"苦", - u"埃", - u"珊", - u"禅", - u"裹", - u"圃", - u"鹌", - u"鹑", - u"û", - u"澡", - u"囧", - u"阡", - u"靑", - u"警", - u"牢", - u"嘱", - u"鳞", - u"浃", - u"贷", - u"慧", - u"翊", - u"讨", - u"碧", - u"剪", - u"陌", - u"冀", - u"砵", - u"迅", - u"鹰", - u"竟", - u"召", - u"敌", - u"鯡", - u"蒌", - u"蒿", - u"扶", - u"③", - u"誘", - u"嘻", - u"輪", - u"嬢", - u"瓮", - u"絲", - u"嚣", - u"荀", - u"莽", - u"鄧", - u"咋", - u"勿", - u"佈", - u"洽", - u"羹", - u"模", - u"貨", - u"粱", - u"凈", - u"腹", - u"鄭", - u"署", - u"儒", - u"隧", - u"鉢", - u"茫", - u"蔻", - u"í", - u"ó", - u"裴", - u"偉", - u"Θ", - u"祎", - u"褥", - u"殖", - u"湫", - u"瀚", - u"貓", - u"汪", - u"紙", - u"極", - u"伤", - u"灰", - u"團", - u"橄", - u"榄", - u"拽", - u"响", - u"貌", - u"傣", - u"舂", - u"斩", - u"飨", - u"执", - u"諸", - u"蒂", - u"嘣", - u"葡", - u"渤", - u"惺", - u"驛", - u"戰", - u"箬", - u"俭", - u"瀏", - u"嫦", - u"琵", - u"琶", - u"咿", - 
u"吖", - u"舱", - u"韵", - u"揭", - u"祁", - u"將", - u"軍", - u"吟", - u"彼", - u"岚", - u"绒", - u"煤", - u"淝", - u"歸", - u"锐", - u"嗯", - u"傾", - u"甩", - u"瞳", - u"睁", - u"鳗", - u"遜", - u"嗲", - u"虚", - u"娴", - u"碱", - u"呷", - u"{", - u"哚", - u"兜", - u"喇", - u"叭", - u"燦", - u"逻", - u"匪", - u"槐", - u"撒", - u"写", - u"踩", - u"踏", - u"霞", - u"喫", - u"返", - u"赚", - u"拓", - u"動", - u"觞", - u"鲽", - u"鐘", - u"闰", - u"扳", - u"沖", - u"賈", - u"璐", - u"煸", - u"棵", - u"峪", - u"π", - u"憶", - u"齋", - u"娇", - u"穎", - u"嫁", - u"玥", - u"胚", - u"喊", - u"阻", - u"餓", - u"截", - u"孵", - u"屎", - u"爾", - u"莳", - u"倔", - u"娄", - u"祸", - u"`", - u"姿", - u"稽", - u"戌", - u"缪", - u"ī", - u"糠", - u"痴", - u"猎", - u"嬉", - u"柑", - u"鞍", - u"兹", - u"凼", - u"舅", - u"褐", - u"醪", - u"仪", - u"氷", - u"單", - u"丞", - u"碛", - u"绽", - u"袂", - u"檢", - u"瀾", - u"饃", - u"孖", - u"雍", - u"ò", - u"螄", - u"涤", - u"茨", - u"寮", - u"近", - u"辜", - u"茅", - u"孟", - u"累", - u"宣", - u"樹", - u"鷹", - u"膝", - u"臉", - u"襪", - u"嘢", - u"嵐", - u"▲", - u"璇", - u"竺", - u"気", - u"迈", - u"糐", - u"挥", - u"瑜", - u"伽", - u"\"", - u"裳", - u"纹", - u"潯", - u"幾", - u"朔", - u"枊", - u"釀", - u"劝", - u"俺", - u"粢", - u"馓", - u"胥", - u"拥", - u"嘶", - u"達", - u"蝴", - u"昱", - u"ホ", - u"ル", - u"モ", - u"ニ", - u"颂", - u"噫", - u"否", - u"笙", - u"绎", - u"俞", - u"泵", - u"测", - u"耿", - u"揚", - u"犇", - u"锄", - u"卧", - u"炯", - u"烽", - u"橡", - u"操", - u"齊", - u"隴", - u"宀", - u"荥", - u"滙", - u"贪", - u"関", - u"垦", - u"↓", - u"麽", - u"暧", - u"匯", - u"恨", - u"叽", - u"断", - u"鮪", - u"椎", - u"病", - u"迹", - u"禺", - u"搓", - u"瀛", - u"唤", - u"埕", - u"愤", - u"怒", - u"拐", - u"狱", - u"垅", - u"绅", - u"設", - u"計", - u"書", - u"楷", - u"鮨", - u"邪", - u"郴", - u"盞", - u"榆", - u"恺", - u"樵", - u"煙", - u"舫", - u"翡", - u"砸", - u"叹", - u"縣", - u"璞", - u"禮", - u"獻", - u"似", - u"吆", - u"嘛", - u"灭", - u"擇", - u"夥", - u"ē", - u"曰", - u"蜗", - u"櫻", - u"▏", - u"鑪", - u"鯊", - u"視", - u"淄", - u"钰", - u"〝", - u"〞", - u"報", - u"退", - u"壶", - u"鳴", - u"拒", - u"旱", - u"鼠", - u"蕴", - u"峧", - u"赶", - u"咏", 
- u"寬", - u"渎", - u"靣", - u"卟", - u"宙", - u"趟", - u"負", - u"镫", - u"讷", - u"迭", - u"彝", - u"樣", - u"輕", - u"却", - u"覆", - u"庖", - u"扉", - u"聖", - u"喬", - u"瞻", - u"瞿", - u"箭", - u"胆", - u"ε", - u"韧", - u"誌", - u"既", - u"淳", - u"饞", - u"ě", - u"圍", - u"墟", - u"俚", - u"翕", - u"貂", - u"畜", - u"緹", - u"搄", - u"旮", - u"旯", - u"寂", - u"寞", - u"詹", - u"茜", - u"鉄", - u"絕", - u"泸", - u"嬤", - u"允", - u"炘", - u"骏", - u"侑", - u"晒", - u"玄", - u"粧", - u"糘", - u"毫", - u"幽", - u"攸", - u"愧", - u"侨", - u"衰", - u"ぉ", - u"に", - u"き", - u"ぃ", - u"炽", - u"倉", - u"斛", - u"領", - u"盾", - u"窜", - u"鲷", - u"瓏", - u"媚", - u"爲", - u"裸", - u"窦", - u"虞", - u"處", - u"魷", - u"}", - u"羡", - u"冕", - u"祺", - u"裁", - u"粶", - u"䬴", - u"嚟", - u"辆", - u"撮", - u"隋", - u"'", - u"勝", - u"梭", - u"茸", - u"咭", - u"崟", - u"滷", - u"緻", - u"沩", - u"颠", - u"诠", - u"珺", - u"拙", - u"察", - u"≡", - u"辅", - u"父", - u"雁", - u"裱", - u"瞄", - u"漖", - u"鯨", - u"略", - u"橱", - u"帼", - u"棉", - u"濠", - u"蕃", - u"ǔ", - u"崮", - u"阮", - u"勋", - u"苍", - u"喔", - u"猜", - u"箔", - u"è", - u"雏", - u"睐", - u"袭", - u"皋", - u"彻", - u"売", - u"垚", - u"咯", - u"凑", - u"汴", - u"纽", - u"巩", - u"宸", - u"墅", - u"茏", - u"裡", - u"昧", - u"飽", - u"坯", - u"濟", - u"└", - u"┐", - u"懷", - u"霾", - u"´", - u"閑", - u"茹", - u"闳", - u"湶", - u"鈣", - u"圓", - u"昊", - u"眞", - u"標", - u"凖", - u"皱", - u"箍", - u"筹", - u"孬", - u"唠", - u"輝", - u"输", - u"綺", - u"驭", - u"哼", - u"匡", - u"偵", - u"蝇", - u"運", - u"漟", - u"乘", - u"Ē", - u"卉", - u"邴", - u"謠", - u"怿", - u"亁", - u"棱", - u"呐", - u"湄", - u"莜", - u"阶", - u"堔", - u"炜", - u"邀", - u"笠", - u"遏", - u"犯", - u"罪", - u"栢", - u"餛", - u"亀", - u"苓", - u"膏", - u"伸", - u"?", - u"阪", - u"委", - u"妯", - u"娌", - u"仝", - u"咧", - u"鍚", - u"▼", - u"遠", - u"摑", - u"滘", - u"颁", - u"ʌ", - u"锈", - u"佤", - u"佗", - u"卌", - u"É", - u"↙", - u"蔺", - u"汰", - u"塍", - u"認", - u"鳟", - u"畿", - u"耦", - u"吨", - u"䒕", - u"茬", - u"枼", - u"饕", - u"涼", - u"烀", - u"汶", - u"齿", - u"貳", - u"沱", - u"楞", - u"屹", - u"掺", - u"挢", - u"荻", - u"偷", - u"辶", - u"饌", 
- u"泮", - u"喧", - u"某", - u"聂", - u"夾", - u"吁", - u"鎬", - u"谅", - u"鞘", - u"泪", - u"佩", - u"㎡", - u"鐡", - u"犊", - u"漳", - u"睢", - u"粘", - u"輔", - u"爬", - u"濃", - u"し", - u"ん", - u"い", - u"ち", - u"ょ", - u"く", - u"ど", - u"ぅ", - u"戍", - u"咚", - u"蒡", - u"惯", - u"隣", - u"沭", - u"撇", - u"妞", - u"筛", - u"昵", - u"赁", - u"震", - u"欠", - u"涞", - u"從", - u"靚", - u"绥", - u"俑", - u"熔", - u"曙", - u"侗", - u"√", - u"仗", - u"袖", - u"饶", - u"辫", - u"琉", - u"鴿", - u"裂", - u"缝", - u"灞", - u"崖", - u"炑", - u"昝", - u"┌", - u"┘", - u"邕", - u"趴", - u"踢", - u"迩", - u"浈", - u"挚", - u"聆", - u"犁", - u"陝", - u"滾", - u"彎", - u"問", - u"癮", - u"砚", - u"ú", - u"瀧", - u"吮", - u"毓", - u"劵", - u"槽", - u"黒", - u"忍", - u"畈", - u"姊", - u"沛", - u"忽", - u"摘", - u"燍", - u"♡", - u"汝", - u"贛", - u"叻", - u"甸", - u"乞", - u"丐", - u"践", - u"嗞", - u"㥁", - u"斐", - u"圖", - u"祯", - u"牤", - u"攻", - u"弯", - u"幹", - u"杠", - u"苞", - u"滤", - u"筆", - u"練", - u"鞑", - u"ˊ", - u"萤", - u"榶", - u"叨", - u"轨", - u"耒", - u"嚮", - u"┃", - u"漪", - u"剛", - u"键", - u"弋", - u"彦", - u"瘋", - u"词", - u"敖", - u"鸦", - u"秧", - u"囚", - u"绾", - u"镶", - u"濂", - u"↘", - u"豁", - u"煒", - u"萄", - u"珲", - u"緋", - u"昂", - u"瀨", - u"缓", - u"疲", - u"替", - u"汥", - u"殡", - u"葬", - u"靳", - u"揉", - u"闭", - u"睛", - u"偘", - u"佚", - u"$", - u";", - u"^"}; - - if (name == "CHARSET_EN_36") - return CHARSET_EN_36; - else if (name == "CHARSET_CH_94") - return CHARSET_CH_94; - else if (name == "CHARSET_CN_3944") - return CHARSET_CN_3944; - CV_Error(-1, "Charset not supported! Exiting ..."); -} - diff --git a/models/text_recognition_crnn/crnn.py b/models/text_recognition_crnn/crnn.py deleted file mode 100644 index 20860c5f..00000000 --- a/models/text_recognition_crnn/crnn.py +++ /dev/null @@ -1,4176 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. 
-# Third party copyrights are property of their respective owners. - -import numpy as np -import cv2 as cv - -class CRNN: - def __init__(self, modelPath, backendId=0, targetId=0): - self._model_path = modelPath - self._backendId = backendId - self._targetId = targetId - - self._model = cv.dnn.readNet(self._model_path) - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - # load charset by the name of model - if '_EN_' in self._model_path: - self._charset = self._load_charset(self.CHARSET_EN_36) - elif '_CH_' in self._model_path: - self._charset = self._load_charset(self.CHARSET_CH_94) - elif '_CN_' in self._model_path: - self._charset = self._load_charset(self.CHARSET_CN_3944) - else: - print('Charset not supported! Exiting ...') - exit() - - self._inputSize = [100, 32] # Fixed - self._targetVertices = np.array([ - [0, self._inputSize[1] - 1], - [0, 0], - [self._inputSize[0] - 1, 0], - [self._inputSize[0] - 1, self._inputSize[1] - 1] - ], dtype=np.float32) - - @property - def name(self): - return self.__class__.__name__ - - def _load_charset(self, charset): - return ''.join(charset.splitlines()) - - def setBackendAndTarget(self, backendId, targetId): - self._backendId = backendId - self._targetId = targetId - self._model.setPreferableBackend(self._backendId) - self._model.setPreferableTarget(self._targetId) - - def _preprocess(self, image, rbbox): - # Remove conf, reshape and ensure all is np.float32 - vertices = rbbox.reshape((4, 2)).astype(np.float32) - - rotationMatrix = cv.getPerspectiveTransform(vertices, self._targetVertices) - cropped = cv.warpPerspective(image, rotationMatrix, self._inputSize) - - # 'CN' can detect digits (0\~9), upper/lower-case letters (a\~z and A\~Z), and some special characters - # 'CH' can detect digits (0\~9), upper/lower-case letters (a\~z and A\~Z), some Chinese characters and some special characters - if 'CN' in self._model_path or 'CH' in self._model_path: - pass - else: - 
cropped = cv.cvtColor(cropped, cv.COLOR_BGR2GRAY) - - return cv.dnn.blobFromImage(cropped, size=self._inputSize, mean=127.5, scalefactor=1 / 127.5) - - def infer(self, image, rbbox): - # Preprocess - inputBlob = self._preprocess(image, rbbox) - - # Forward - self._model.setInput(inputBlob) - outputBlob = self._model.forward() - - # Postprocess - results = self._postprocess(outputBlob) - - return results - - def _postprocess(self, outputBlob): - '''Decode charaters from outputBlob - ''' - text = '' - for i in range(outputBlob.shape[0]): - c = np.argmax(outputBlob[i][0]) - if c != 0: - text += self._charset[c - 1] - else: - text += '-' - - # adjacent same letters as well as background text must be removed to get the final output - char_list = [] - for i in range(len(text)): - if text[i] != '-' and (not (i > 0 and text[i] == text[i - 1])): - char_list.append(text[i]) - return ''.join(char_list) - - CHARSET_EN_36 = '''0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -a -b -c -d -e -f -g -h -i -j -k -l -m -n -o -p -q -r -s -t -u -v -w -x -y -z''' - - CHARSET_CH_94 = ''' -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -a -b -c -d -e -f -g -h -i -j -k -l -m -n -o -p -q -r -s -t -u -v -w -x -y -z -A -B -C -D -E -F -G -H -I -J -K -L -M -N -O -P -Q -R -S -T -U -V -W -X -Y -Z -! -" -# -$ -% -& -' -( -) -* -+ -, -- -. -/ -: -; -< -= -> -? -@ -[ -\ -] -^ -_ -` -{ -| -} -~''' - - CHARSET_CN_3944 = ''' -H -O -K -I -T -E -A -酱 -鸭 -传 -奇 -J -N -G -Y -C -U -Q -蝦 -兵 -蟹 -煲 -这 -是 -可 -以 -先 -吃 -后 -涮 -的 -干 -锅 -菜 -加 -盟 -电 -话 -: -1 -7 -3 -9 -8 -郑 -州 -总 -店 -雪 -花 -勇 -闯 -天 -涯 -虾 -, -一 -送 -鱼 -锡 -纸 -蛤 -土 -豆 -粉 -砂 -米 -线 -牛 -筋 -面 -刀 -削 -水 -饺 -吧 -沙 -拉 -老 -饭 -盒 -教 -室 -主 -题 -餐 -厅 -仁 -馄 -饨 -重 -庆 -小 -便 -当 -全 -国 -连 -锁 -4 -0 -- -6 -5 -2 -人 -快 -量 -贩 -蓬 -朗 -御 -茶 -川 -渝 -捞 -火 -古 -之 -匠 -今 -七 -西 -域 -羊 -城 -l -i -k -n -g -c -o -f -e -w -贵 -阳 -素 -有 -家 -会 -展 -口 -乐 -三 -惹 -烤 -肉 -h -t -子 -馆 -常 -盖 -浇 -兴 -业 -路 -书 -亦 -燒 -仙 -草 -L -: -德 -啤 -工 -坊 -杏 -屋 -高 -桥 -号 -品 -麻 -辣 -烫 -检 -官 -. -千 -翼 -木 -兰 -画 -食 -上 -汤 -剁 -馅 -手 -煮 -时 -尚 -健 -康 -傲 -椒 -B -啵 -条 -脾 -气 -! 
-/ -月 -腾 -讯 -应 -用 -喵 -泡 -我 -鲜 -滚 -给 -你 -看 -客 -来 -香 -汉 -湘 -本 -地 -炒 -系 -列 -订 -仔 -肘 -蹄 -梅 -扣 -黄 -焖 -排 -骨 -炖 -鸡 -韓 -金 -利 -串 -舊 -街 -梨 -村 -座 -经 -济 -实 -惠 -绿 -色 -炭 -庐 -蛙 -忆 -蓉 -源 -真 -d -D -概 -念 -创 -意 -六 -熏 -各 -种 -精 -美 -y -疯 -狂 -世 -界 -杯 -特 -价 -酒 -元 -瓶 -沸 -带 -F -请 -二 -楼 -自 -动 -升 -降 -烏 -邦 -嗦 -味 -风 -货 -团 -外 -卖 -嘞 -个 -折 -辛 -束 -舌 -尖 -中 -包 -浆 -腐 -r -P -a -u -丸 -作 -福 -M -漫 -蜜 -冰 -拌 -匆 -那 -年 -R -S -果 -光 -夹 -馍 -凉 -皮 -过 -祖 -南 -山 -風 -景 -堂 -烘 -培 -龍 -坎 -半 -婆 -建 -设 -富 -强 -丽 -菏 -泽 -省 -安 -港 -竹 -签 -撩 -只 -为 -好 -生 -活 -抓 -海 -最 -网 -红 -铁 -统 -® -功 -夫 -鱿 -大 -闻 -就 -知 -遇 -见 -文 -合 -热 -森 -台 -湾 -卤 -然 -汁 -甄 -选 -材 -还 -原 -初 -衷 -* -洪 -龙 -公 -酸 -巴 -乡 -焦 -烧 -淘 -成 -都 -眼 -镜 -优 -菓 -恋 -V -化 -糖 -、 -粥 -田 -螺 -斓 -X -爺 -W -j -院 -华 -Z -蜊 -北 -京 -刷 -蝎 -腿 -梦 -幻 -奶 -式 -蛋 -鍋 -区 -· -领 -航 -者 -四 -通 -往 -楚 -河 -停 -车 -场 -凌 -晨 -点 -杞 -缘 -王 -集 -唐 -菠 -萝 -泰 -板 -鳳 -凰 -樓 -名 -壹 -猪 -晴 -舍 -犟 -师 -傅 -飯 -致 -青 -春 -轰 -炸 -卡 -里 -身 -厨 -房 -x -聚 -鑫 -阁 -岛 -纯 -聘 -专 -长 -庄 -鄉 -更 -珍 -固 -新 -岩 -v -s -m -至 -尊 -比 -萨 -广 -披 -饮 -管 -理 -限 -司 -p -幸 -东 -正 -挞 -少 -女 -克 -装 -童 -哒 -磨 -厂 -怼 -纤 -入 -户 -独 -溜 -共 -享 -滋 -江 -门 -九 -蒸 -胜 -盛 -& -魔 -爪 -鹅 -皇 -( -) -友 -甲 -魚 -首 -烹 -行 -员 -若 -资 -议 -联 -同 -急 -私 -燕 -儿 -巢 -鹏 -记 -腊 -营 -欢 -迎 -旗 -舰 -叫 -了 -做 -故 -铃 -煎 -饼 -哥 -力 -五 -谷 -野 -戈 -厠 -所 -超 -牌 -冒 -陳 -陈 -苕 -爽 -滑 -启 -秦 -择 -现 -进 -惊 -喜 -定 -于 -雅 -膳 -多 -推 -淇 -淋 -b -思 -堡 -偶 -相 -伴 -呈 -湯 -绝 -浏 -' -刘 -态 -牧 -万 -达 -和 -番 -丼 -— -机 -瘦 -绵 -柔 -厉 -蚝 -娘 -彩 -百 -事 -调 -韩 -爱 -喝 -玩 -放 -肆 -寿 -净 -配 -髓 -非 -道 -额 -吉 -招 -商 -杂 -粮 -筐 -运 -转 -服 -务 -缤 -灿 -腕 -楠 -彤 -学 -橋 -试 -浩 -减 -薪 -诚 -霸 -第 -间 -日 -极 -料 -開 -業 -霏 -星 -期 -分 -秒 -内 -咨 -询 -。 -樐 -头 -开 -氏 -渔 -约 -劳 -保 -礼 -宏 -武 -佘 -轻 -奢 -艺 -井 -隆 -鐵 -卷 -染 -焙 -钵 -马 -牟 -洋 -芋 -片 -流 -宽 -心 -位 -清 -潼 -关 -祥 -背 -凡 -哈 -尔 -滨 -珠 -派 -艾 -让 -变 -得 -样 -玖 -等 -综 -性 -涵 -粗 -冠 -記 -肠 -湖 -财 -贡 -桃 -杭 -平 -桂 -林 -煨 -档 -案 -造 -潮 -汕 -宗 -单 -县 -鲁 -舜 -脆 -酥 -糕 -仕 -十 -临 -簋 -宴 -字 -太 -灌 -薄 -尝 -址 -晗 -幢 -购 -梁 -醉 -皖 -庭 -白 -肥 -块 -石 -碗 -颜 -值 -張 -瘾 -跷 -脚 -而 -叁 -蜀 -橙 -市 -边 -早 -晚 -云 -吞 -目 -表 -赵 -烩 -擀 -蔬 -找 -回 -游 -刃 -余 -支 -洗 -吹 -休 -闲 -简 -撸 -根 -据 -鸽 -铜 -亲 -贝 -纪 -吕 -豚 -饅 -悦 -汇 -油 -无 -制 -在 -寻 -碳 -馋 -嘴 -架 -荣 -斋 -护 -角 -落 -铺 -臊 -丝 -围 -柳 
-蛳 -蒲 -庙 -视 -荐 -缃 -想 -呀 -姜 -母 -起 -泉 -族 -群 -众 -其 -它 -血 -双 -补 -阴 -润 -不 -禽 -类 -款 -较 -候 -些 -畅 -脉 -痰 -疏 -肝 -帮 -助 -消 -增 -欲 -尤 -对 -胃 -畏 -寒 -很 -效 -秘 -黑 -嘿 -佳 -越 -脑 -桶 -项 -▪ -| -榜 -许 -仿 -或 -酬 -宾 -指 -买 -赠 -笃 -鼎 -盆 -™ -咕 -咾 -肚 -识 -栖 -凤 -渡 -筒 -彬 -弟 -醋 -財 -師 -民 -博 -丁 -扒 -翅 -墨 -柠 -檬 -紫 -薯 -焗 -芝 -士 -胸 -图 -妮 -杀 -菌 -爹 -尽 -归 -宁 -粽 -瑞 -轩 -午 -陕 -出 -才 -盘 -植 -甜 -粒 -神 -舟 -玻 -璃 -医 -划 -药 -郡 -毛 -张 -姐 -留 -满 -下 -兄 -法 -鋪 -é -[ -槑 -] -言 -密 -帝 -場 -朴 -寨 -奉 -z -什 -顺 -疆 -馕 -豫 -怀 -旧 -验 -昙 -搞 -圣 -格 -ǐ -à -隱 -燙 -状 -居 -饱 -底 -免 -费 -廣 -點 -專 -門 -语 -叉 -左 -岸 -发 -乌 -齐 -冷 -命 -● -修 -闸 -飞 -空 -养 -笼 -興 -银 -套 -東 -吴 -麺 -館 -¥ -从 -前 -乙 -弘 -炝 -夏 -秋 -冬 -咖 -啡 -℃ -© -莲 -塘 -哆 -梓 -依 -哎 -麦 -泗 -泾 -瓯 -胡 -∣ -歺 -八 -度 -深 -夜 -旋 -永 -远 -温 -又 -晶 -溏 -ä -盔 -飘 -劲 -旺 -楸 -良 -譜 -餅 -苏 -莎 -足 -宵 -与 -楊 -國 -莱 -卜 -炊 -挑 -剔 -存 -错 -方 -程 -解 -能 -堆 -洲 -诗 -玛 -渴 -脖 -丛 -狼 -翁 -姓 -葫 -芦 -沾 -葵 -の -咔 -粹 -弥 -乖 -悠 -茗 -别 -走 -柒 -榨 -咥 -虹 -沏 -桔 -叔 -贴 -办 -充 -崎 -鮮 -属 -彭 -浦 -町 -郎 -° -悟 -惑 -科 -英 -育 -岁 -幼 -园 -慢 -摆 -_ -狐 -狸 -典 -暴 -帥 -尾 -琼 -見 -望 -烟 -坚 -鸳 -鸯 -直 -校 -饪 -承 -们 -么 -¥ -份 -宇 -炉 -峰 -乃 -趣 -代 -刨 -抖 -音 -占 -谜 -答 -熟 -控 -蕾 -节 -社 -您 -《 -羅 -茉 -瀞 -憨 -尼 -丰 -镇 -酿 -避 -抢 -突 -破 -杰 -姆 -波 -观 -澜 -庫 -舒 -谁 -短 -島 -爷 -码 -每 -欧 -注 -册 -标 -腸 -奈 -熊 -粵 -吳 -衢 -雄 -际 -葱 -柱 -压 -陪 -器 -厘 -柴 -席 -饿 -俏 -汽 -站 -霜 -荟 -禾 -咘 -臭 -夷 -肖 -微 -组 -刺 -拼 -打 -信 -步 -! 
-说 -囍 -智 -藍 -鹿 -巷 -顾 -勃 -頭 -帕 -徐 -渣 -嗨 -鲍 -抽 -莊 -胗 -耳 -栈 -葑 -谊 -李 -够 -歪 -到 -杜 -绪 -始 -“ -” -编 -感 -谢 -阿 -妹 -抄 -屿 -旁 -钟 -糰 -鷄 -觉 -队 -明 -没 -幺 -罗 -恭 -發 -溢 -圆 -筵 -鲩 -斤 -噜 -府 -雕 -牦 -津 -間 -粤 -义 -驾 -嫩 -眷 -苔 -怡 -逍 -遥 -即 -把 -季 -鹃 -妈 -烙 -淡 -嘟 -班 -散 -磐 -稣 -耍 -芽 -昌 -粿 -鼓 -姑 -央 -告 -翔 -迦 -缆 -怪 -俗 -菩 -宥 -酵 -男 -顿 -蚂 -蚁 -q -緑 -瑩 -養 -滿 -接 -立 -勤 -封 -徽 -酷 -( -慕 -曹 -吊 -咸 -矿 -黛 -刻 -呗 -布 -袋 -钝 -丘 -逗 -窗 -吾 -塔 -坡 -周 -雙 -朝 -末 -如 -杨 -淮 -摄 -影 -翻 -窝 -物 -椰 -荞 -搅 -陇 -收 -两 -倍 -狮 -伊 -後 -晖 -長 -箐 -豪 -耀 -漢 -釜 -宮 -次 -掌 -斯 -朋 -针 -菇 -蚬 -拍 -雒 -陽 -漿 -麵 -條 -部 -← -柜 -驴 -证 -票 -账 -汗 -汆 -稍 -戏 -菋 -卫 -匹 -栋 -馨 -肯 -迪 -邢 -梯 -容 -嘉 -莞 -袁 -锦 -遮 -雨 -篷 -腰 -肺 -剡 -乾 -, -翰 -蔚 -刁 -藤 -帅 -傳 -维 -笔 -历 -史 -】 -适 -煌 -倾 -沧 -姬 -训 -邵 -诺 -敢 -质 -益 -佬 -兼 -职 -盅 -诊 -扬 -速 -宝 -褚 -糁 -钢 -松 -婚 -秀 -盐 -及 -個 -飲 -绍 -槿 -觅 -逼 -兽 -》 -吐 -右 -久 -闺 -祝 -贺 -啦 -瓦 -甏 -探 -辰 -碚 -芳 -灣 -泷 -饰 -隔 -帐 -飮 -搜 -時 -宫 -蘭 -再 -糊 -仓 -稻 -玉 -印 -象 -稀 -拴 -桩 -餃 -贾 -贱 -球 -萌 -撕 -脂 -肪 -层 -晋 -荷 -钱 -潍 -失 -孜 -提 -供 -具 -洛 -涂 -叠 -豊 -积 -媒 -级 -纷 -巧 -瓜 -苹 -琥 -珀 -蜂 -柚 -莉 -爆 -龄 -饸 -饹 -郞 -嫡 -億 -姚 -繁 -监 -督 -示 -佰 -汍 -% -甘 -蔗 -喻 -骄 -基 -因 -匙 -评 -侠 -赢 -交 -歡 -待 -馒 -产 -倡 -导 -低 -茂 -沐 -熙 -延 -丧 -受 -确 -睡 -蓝 -未 -賣 -電 -話 -农 -札 -岗 -树 -赖 -琪 -驻 -辉 -软 -防 -盗 -隐 -形 -纱 -灶 -扎 -环 -禁 -止 -吸 -萬 -昆 -几 -跳 -媳 -婦 -坛 -< -> -拿 -妖 -协 -朱 -住 -宿 -魅 -照 -碰 -滴 -何 -贤 -棒 -持 -啊 -赛 -版 -帆 -順 -狗 -情 -+ -洞 -奋 -斗 -亨 -叶 -涛 -铝 -范 -汀 -號 -律 -價 -鞭 -肩 -# -愚 -奥 -脯 -沁 -奚 -魏 -批 -租 -宠 -炲 -横 -沥 -彪 -投 -诉 -犀 -去 -屠 -鲅 -~ -俱 -徒 -鴻 -劉 -迷 -荤 -威 -曜 -連 -鎖 -馳 -载 -添 -筑 -陵 -佐 -敦 -> -郭 -厢 -祛 -茄 -堰 -漂 -亮 -爅 -虎 -膀 -叼 -猫 -藏 -陶 -鲈 -栏 -… -考 -冲 -胖 -裕 -沃 -挂 -报 -兔 -胶 -臨 -附 -处 -嫂 -萃 -幂 -吻 -聪 -糯 -糍 -棋 -烓 -脊 -衡 -亚 -副 -肤 -荆 -榴 -绚 -黔 -圈 -纳 -课 -逸 -宜 -= -烊 -姨 -施 -救 -贸 -啥 -也 -贯 -雷 -呆 -棠 -伙 -岐 -宛 -媽 -寸 -澳 -已 -還 -兒 -Ⅱ -凯 -株 -藕 -闽 -窖 -瀘 -售 -索 -体 -型 -樂 -琅 -琊 -夺 -扩 -) -诱 -滩 -浓 -要 -芹 -君 -反 -复 -羔 -追 -演 -唱 -過 -綫 -乳 -涩 -芒 -露 -蒙 -羯 -励 -志 -嵊 -閒 -罐 -佛 -墙 -頁 -坐 -眯 -预 -華 -廉 -释 -必 -随 -逐 -引 -究 -爸 -灵 -勺 -岂 -俵 -廷 -苗 -岭 -将 -來 -泛 -朵 -維 -園 -廳 -圳 -伦 -寶 -付 -仅 -減 -谦 -硕 -抚 -慶 -雞 -郝 -计 -熱 -杖 -亭 -喱 -惜 -莒 -另 -陆 -拾 -伍 -谈 -嚼 -娅 -翟 -別 -颈 -邮 -弄 -• -扇 -哦 -吼 -耶 -宅 -帽 -魂 -搭 -笨 -映 -拨 -烂 -馈 -胎 -溶 -\ -善 -销 -难 -忘 -斑 
-噢 -錫 -娟 -語 -哨 -筷 -摊 -均 -椅 -改 -换 -跟 -帖 -勾 -缅 -孙 -啪 -栗 -着 -漁 -吓 -易 -漲 -靖 -枸 -馬 -昇 -當 -麥 -妆 -塑 -魯 -鎮 -吗 -魁 -丹 -杈 -技 -术 -泼 -零 -忙 -漾 -創 -攀 -郫 -抿 -稼 -假 -循 -泳 -池 -膨 -巨 -歧 -愛 -鵝 -悉 -灯 -激 -踪 -细 -會 -舔 -愿 -們 -衹 -令 -浔 -丨 -酉 -惦 -耕 -× -闪 -經 -玺 -芯 -襄 -賦 -予 -學 -苑 -托 -丢 -赔 -ā -聽 -濤 -浮 -伯 -兑 -币 -治 -愈 -盱 -眙 -漏 -夕 -搏 -由 -完 -切 -罕 -息 -燃 -叙 -萍 -碑 -腌 -衣 -害 -己 -患 -浙 -闫 -| -芈 -谣 -戴 -錦 -謝 -恩 -芊 -拇 -矾 -政 -锣 -跃 -钥 -寺 -驼 -芙 -插 -恒 -咪 -禄 -摩 -轮 -譚 -鴨 -戊 -申 -丙 -邊 -唯 -登 -困 -貢 -誉 -賀 -认 -准 -妃 -潜 -旨 -死 -桌 -尧 -箱 -届 -获 -顶 -柿 -臂 -蓮 -凭 -慵 -懒 -醇 -籍 -静 -淌 -此 -甚 -绣 -渌 -呢 -问 -抹 -弹 -捷 -邱 -旦 -曉 -艳 -雲 -研 -守 -鼻 -¦ -揽 -含 -沂 -听 -帛 -端 -兆 -舆 -谐 -帘 -笑 -寅 -【 -車 -@ -& -胪 -臻 -蘆 -衙 -餌 -① -鉴 -敬 -枝 -沈 -衔 -蝉 -芜 -烈 -库 -椿 -稳 -’ -豌 -亿 -缙 -獨 -菊 -沤 -迟 -忧 -沫 -伟 -靠 -并 -互 -晓 -枫 -窑 -芭 -夯 -鸿 -無 -烦 -恼 -闖 -贞 -鳥 -厦 -抱 -歐 -藝 -廖 -振 -腦 -舖 -酪 -碎 -浪 -荔 -巫 -撈 -醬 -段 -昔 -潘 -Λ -禧 -妻 -瓢 -柏 -郁 -暹 -兮 -娃 -敏 -進 -距 -离 -倪 -征 -咱 -继 -责 -任 -銅 -啖 -赞 -菲 -蛇 -焰 -娜 -芮 -坦 -磅 -薛 -緣 -乔 -拱 -骚 -扰 -約 -喷 -驢 -仨 -纬 -臘 -邳 -终 -喏 -扫 -除 -恶 -争 -率 -‘ -肃 -雀 -鈴 -贼 -绕 -笋 -钩 -勒 -翠 -黎 -董 -澄 -境 -采 -拳 -捆 -粄 -诸 -暨 -榧 -葛 -親 -戚 -访 -股 -融 -潤 -寄 -递 -藩 -滇 -湛 -他 -篓 -普 -撞 -莅 -但 -沟 -暑 -促 -玲 -腩 -碼 -偏 -楹 -嘎 -洒 -抛 -危 -险 -损 -负 -銘 -黃 -燜 -說 -杆 -称 -蹭 -聊 -妙 -滕 -曦 -肴 -萧 -颗 -剂 -義 -锋 -授 -权 -著 -茴 -蒝 -侬 -顏 -菁 -擦 -鞋 -庞 -毕 -谱 -樱 -→ -綦 -舞 -蹈 -躁 -渠 -俐 -涧 -馀 -潇 -邻 -须 -藻 -纺 -织 -军 -沅 -豐 -爐 -韭 -棚 -綿 -麯 -剑 -娱 -链 -锤 -炼 -献 -晟 -章 -謎 -数 -侯 -她 -疗 -途 -篇 -则 -邓 -赐 -閣 -對 -猩 -邑 -區 -鬼 -莫 -沪 -淼 -赤 -混 -沌 -需 -求 -痛 -绮 -琦 -荃 -熳 -佑 -Á -ō -現 -専 -卢 -譽 -缠 -曾 -鸣 -琴 -汊 -濮 -哇 -哩 -唝 -曲 -坂 -呼 -莴 -怕 -蒋 -伞 -炙 -燻 -瑧 -冈 -讲 -硬 -详 -鹵 -摇 -偃 -嵩 -严 -谨 -′ -剥 -穗 -榮 -禹 -颐 -局 -刚 -▕ -暖 -漠 -炎 -頤 -樟 -? 
-储 -移 -缕 -艰 -袍 -瑪 -麗 -参 -䬺 -趁 -呦 -霖 -饵 -溪 -孔 -澤 -袜 -蔓 -熠 -显 -屏 -缇 -寇 -亞 -坑 -槟 -榔 -絳 -驿 -歹 -匾 -猴 -旭 -竞 -­ -唛 -介 -习 -涡 -寓 -掉 -蘸 -愉 -佼 -ǒ -納 -∶ -革 -嚸 -募 -螃 -鲢 -俤 -扁 -寳 -辽 -∧ -厚 -裤 -扯 -屯 -废 -挪 -辘 -碉 -歇 -漓 -腻 -捣 -孩 -烁 -整 -按 -Ⓡ -眉 -脸 -痣 -粑 -序 -穿 -樊 -玮 -★ -扑 -渊 -醴 -瑶 -農 -檔 -憩 -霊 -赫 -呜 -~ -备 -説 -莓 -钻 -播 -冻 -紅 -菽 -喪 -埔 -壽 -❤ -籽 -咻 -籣 -尹 -潭 -穆 -壮 -使 -霄 -蔵 -浒 -岳 -熘 -臺 -殷 -孤 -邂 -逅 -厕 -郸 -铭 -莆 -抻 -虽 -倦 -怠 -矣 -茵 -垂 -殿 -鄂 -嗑 -续 -钦 -党 -鲫 -蔡 -侧 -割 -彰 -凝 -熬 -叕 -純 -谛 -籠 -宋 -峡 -俩 -雜 -跑 -⑧ -焼 -- -逢 -澧 -舵 -异 -冯 -战 -决 -棍 -; -﹣ -丑 -妇 -焉 -芷 -楂 -坞 -壳 -馐 -帜 -旅 -鳯 -簡 -凍 -秜 -结 -咩 -丫 -稠 -暗 -缔 -乎 -被 -狠 -皲 -豉 -崇 -渭 -担 -鹤 -製 -蛎 -笛 -奔 -赴 -盼 -鳌 -拜 -络 -灸 -膜 -刮 -痧 -毒 -萊 -陂 -濑 -唇 -抵 -押 -置 -馇 -泌 -尿 -傻 -像 -孃 -陣 -靓 -规 -企 -矮 -凳 -贰 -兎 -庵 -質 -阅 -读 -◆ -练 -墩 -曼 -呱 -泓 -耐 -磁 -枣 -罉 -浴 -氧 -洱 -鳅 -線 -炳 -顽 -符 -倌 -泥 -郊 -柯 -餘 -巍 -论 -沽 -荘 -奕 -啃 -髙 -○ -芬 -苟 -且 -阆 -確 -獅 -匣 -睫 -牙 -戒 -俊 -阜 -遵 -爵 -遗 -捧 -仑 -构 -豬 -挡 -弓 -蠔 -旬 -鱻 -镖 -燚 -歌 -壁 -啫 -饷 -仰 -韶 -勞 -軒 -菒 -炫 -廊 -塞 -脏 -堤 -浅 -辈 -靡 -裙 -尺 -廚 -向 -磊 -咬 -皓 -卿 -懂 -葉 -廿 -芸 -賴 -埠 -應 -碟 -溧 -訂 -選 -睦 -举 -钳 -哟 -霍 -扞 -侣 -營 -龟 -钜 -埭 -が -搽 -螞 -蟻 -娚 -蒜 -厝 -垵 -☎ -捌 -倒 -骑 -Ξ -谋 -黍 -侍 -赏 -扮 -忱 -蘑 -洁 -嘆 -闹 -谭 -鶏 -種 -φ -坤 -麓 -麒 -麟 -喂 -琳 -Ⓑ -趙 -總 -這 -奖 -取 -拔 -錯 -仉 -缸 -廟 -暢 -腔 -卓 -腱 -朙 -紹 -莹 -缺 -抺 -睿 -氣 -该 -貼 -妍 -拆 -穇 -箩 -希 -廰 -祗 -盲 -坝 -骆 -熄 -蛮 -賓 -馮 -尋 -泊 -孫 -槁 -亖 -俯 -浣 -婴 -锨 -馥 -闷 -梆 -▫ -姥 -哲 -录 -甫 -床 -嬌 -烎 -梵 -枪 -乍 -璜 -羌 -崂 -穷 -榕 -聲 -喚 -駕 -晕 -嬷 -箕 -婧 -盧 -楓 -柃 -差 -「 -」 -佶 -唔 -壕 -歆 -盏 -擂 -睇 -巾 -查 -淖 -哪 -沣 -赣 -優 -諾 -礁 -努 -畔 -疙 -瘩 -握 -叮 -栙 -甑 -嶺 -涌 -透 -钓 -斜 -搬 -迁 -妨 -借 -仍 -鳕 -瓷 -绘 -餠 -á -ǎ -祈 -邨 -醒 -闵 -砖 -锹 -咀 -綠 -幕 -忠 -雾 -覓 -靜 -擔 -篮 -杉 -势 -薇 -甬 -频 -般 -仲 -蘇 -鸟 -卞 -憾 -資 -駱 -蝶 -為 -仟 -耗 -莘 -涉 -昕 -盈 -熹 -觀 -瑭 -湃 -兢 -淞 -䒩 -結 -柗 -鲤 -糟 -粕 -塗 -簽 -怎 -桐 -皆 -羽 -盯 -氽 -晏 -液 -镀 -珂 -悸 -∙ -桑 -夢 -楽 -剩 -纵 -逝 -欺 -統 -飛 -姣 -俄 -揪 -薡 -幅 -蓋 -︳ -屉 -㕔 -а -铸 -韦 -銀 -檀 -击 -伿 -隍 -『 -』 -芥 -☆ -声 -跆 -肋 -榭 -牵 -棧 -網 -愁 -嗏 -嵗 -巡 -稚 -貴 -買 -恰 -㸆 -捻 -玫 -瑰 -炕 -梧 -餡 -锌 -焱 -驰 -堽 -邯 -珑 -尕 -宰 -栓 -喃 -殊 -燊 -慈 -羴 -逃 -脱 -邹 -檐 -碌 -页 -荠 -券 -題 -龚 -肌 -蕉 -囬 -肫 -坪 -沉 -淀 -斌 -鳝 -核 -喳 -剃 -昭 -{ -} -坏 -烜 -媛 -猛 -桓 -欣 -碁 -竭 -堇 -↑ -扛 -罄 -栾 -鲶 -鍕 -崔 -橘 -携 -丈 
-射 -梗 -檸 -疼 -卑 -捉 -障 -裏 -遍 -蓓 -析 -許 -虫 -坨 -馔 -窄 -姫 -噤 -係 -湿 -汐 -鳜 -船 -崽 -+ -例 -灼 -祿 -腥 -峭 -酌 -喽 -件 -郏 -栀 -鲨 -寫 -與 -诈 -斥 -炮 -稿 -懿 -掂 -鹭 -乱 -恬 -婷 -苦 -埃 -珊 -禅 -裹 -圃 -鹌 -鹑 -û -澡 -囧 -阡 -靑 -警 -牢 -嘱 -鳞 -浃 -贷 -慧 -翊 -讨 -碧 -剪 -陌 -冀 -砵 -迅 -鹰 -竟 -召 -敌 -鯡 -蒌 -蒿 -扶 -③ -誘 -嘻 -輪 -嬢 -瓮 -絲 -嚣 -荀 -莽 -鄧 -咋 -勿 -佈 -洽 -羹 -模 -貨 -粱 -凈 -腹 -鄭 -署 -儒 -隧 -鉢 -茫 -蔻 -í -ó -裴 -偉 -Θ -祎 -褥 -殖 -湫 -瀚 -貓 -汪 -紙 -極 -伤 -灰 -團 -橄 -榄 -拽 -响 -貌 -傣 -舂 -斩 -飨 -执 -諸 -蒂 -嘣 -葡 -渤 -惺 -驛 -戰 -箬 -俭 -瀏 -嫦 -琵 -琶 -咿 -吖 -舱 -韵 -揭 -祁 -將 -軍 -吟 -彼 -岚 -绒 -煤 -淝 -歸 -锐 -嗯 -傾 -甩 -瞳 -睁 -鳗 -遜 -嗲 -虚 -娴 -碱 -呷 -{ -哚 -兜 -喇 -叭 -燦 -逻 -匪 -槐 -撒 -写 -踩 -踏 -霞 -喫 -返 -赚 -拓 -動 -觞 -鲽 -鐘 -闰 -扳 -沖 -賈 -璐 -煸 -棵 -峪 -π -憶 -齋 -娇 -穎 -嫁 -玥 -胚 -喊 -阻 -餓 -截 -孵 -屎 -爾 -莳 -倔 -娄 -祸 -` -姿 -稽 -戌 -缪 -ī -糠 -痴 -猎 -嬉 -柑 -鞍 -兹 -凼 -舅 -褐 -醪 -仪 -氷 -單 -丞 -碛 -绽 -袂 -檢 -瀾 -饃 -孖 -雍 -ò -螄 -涤 -茨 -寮 -近 -辜 -茅 -孟 -累 -宣 -樹 -鷹 -膝 -臉 -襪 -嘢 -嵐 -▲ -璇 -竺 -気 -迈 -糐 -挥 -瑜 -伽 -" -裳 -纹 -潯 -幾 -朔 -枊 -釀 -劝 -俺 -粢 -馓 -胥 -拥 -嘶 -達 -蝴 -昱 -ホ -ル -モ -ニ -颂 -噫 -否 -笙 -绎 -俞 -泵 -测 -耿 -揚 -犇 -锄 -卧 -炯 -烽 -橡 -操 -齊 -隴 -宀 -荥 -滙 -贪 -関 -垦 -↓ -麽 -暧 -匯 -恨 -叽 -断 -鮪 -椎 -病 -迹 -禺 -搓 -瀛 -唤 -埕 -愤 -怒 -拐 -狱 -垅 -绅 -設 -計 -書 -楷 -鮨 -邪 -郴 -盞 -榆 -恺 -樵 -煙 -舫 -翡 -砸 -叹 -縣 -璞 -禮 -獻 -似 -吆 -嘛 -灭 -擇 -夥 -ē -曰 -蜗 -櫻 -▏ -鑪 -鯊 -視 -淄 -钰 -〝 -〞 -報 -退 -壶 -鳴 -拒 -旱 -鼠 -蕴 -峧 -赶 -咏 -寬 -渎 -靣 -卟 -宙 -趟 -負 -镫 -讷 -迭 -彝 -樣 -輕 -却 -覆 -庖 -扉 -聖 -喬 -瞻 -瞿 -箭 -胆 -ε -韧 -誌 -既 -淳 -饞 -ě -圍 -墟 -俚 -翕 -貂 -畜 -緹 -搄 -旮 -旯 -寂 -寞 -詹 -茜 -鉄 -絕 -泸 -嬤 -允 -炘 -骏 -侑 -晒 -玄 -粧 -糘 -毫 -幽 -攸 -愧 -侨 -衰 -ぉ -に -き -ぃ -炽 -倉 -斛 -領 -盾 -窜 -鲷 -瓏 -媚 -爲 -裸 -窦 -虞 -處 -魷 -} -羡 -冕 -祺 -裁 -粶 -䬴 -嚟 -辆 -撮 -隋 -' -勝 -梭 -茸 -咭 -崟 -滷 -緻 -沩 -颠 -诠 -珺 -拙 -察 -≡ -辅 -父 -雁 -裱 -瞄 -漖 -鯨 -略 -橱 -帼 -棉 -濠 -蕃 -ǔ -崮 -阮 -勋 -苍 -喔 -猜 -箔 -è -雏 -睐 -袭 -皋 -彻 -売 -垚 -咯 -凑 -汴 -纽 -巩 -宸 -墅 -茏 -裡 -昧 -飽 -坯 -濟 -└ -┐ -懷 -霾 -´ -閑 -茹 -闳 -湶 -鈣 -圓 -昊 -眞 -標 -凖 -皱 -箍 -筹 -孬 -唠 -輝 -输 -綺 -驭 -哼 -匡 -偵 -蝇 -運 -漟 -乘 -Ē -卉 -邴 -謠 -怿 -亁 -棱 -呐 -湄 -莜 -阶 -堔 -炜 -邀 -笠 -遏 -犯 -罪 -栢 -餛 -亀 -苓 -膏 -伸 -? 
-阪 -委 -妯 -娌 -仝 -咧 -鍚 -▼ -遠 -摑 -滘 -颁 -ʌ -锈 -佤 -佗 -卌 -É -↙ -蔺 -汰 -塍 -認 -鳟 -畿 -耦 -吨 -䒕 -茬 -枼 -饕 -涼 -烀 -汶 -齿 -貳 -沱 -楞 -屹 -掺 -挢 -荻 -偷 -辶 -饌 -泮 -喧 -某 -聂 -夾 -吁 -鎬 -谅 -鞘 -泪 -佩 -㎡ -鐡 -犊 -漳 -睢 -粘 -輔 -爬 -濃 -し -ん -い -ち -ょ -く -ど -ぅ -戍 -咚 -蒡 -惯 -隣 -沭 -撇 -妞 -筛 -昵 -赁 -震 -欠 -涞 -從 -靚 -绥 -俑 -熔 -曙 -侗 -√ -仗 -袖 -饶 -辫 -琉 -鴿 -裂 -缝 -灞 -崖 -炑 -昝 -┌ -┘ -邕 -趴 -踢 -迩 -浈 -挚 -聆 -犁 -陝 -滾 -彎 -問 -癮 -砚 -ú -瀧 -吮 -毓 -劵 -槽 -黒 -忍 -畈 -姊 -沛 -忽 -摘 -燍 -♡ -汝 -贛 -叻 -甸 -乞 -丐 -践 -嗞 -㥁 -斐 -圖 -祯 -牤 -攻 -弯 -幹 -杠 -苞 -滤 -筆 -練 -鞑 -ˊ -萤 -榶 -叨 -轨 -耒 -嚮 -┃ -漪 -剛 -键 -弋 -彦 -瘋 -词 -敖 -鸦 -秧 -囚 -绾 -镶 -濂 -↘ -豁 -煒 -萄 -珲 -緋 -昂 -瀨 -缓 -疲 -替 -汥 -殡 -葬 -靳 -揉 -闭 -睛 -偘 -佚 -$ -; -^''' diff --git a/models/text_recognition_crnn/demo.cpp b/models/text_recognition_crnn/demo.cpp deleted file mode 100644 index 0da944c9..00000000 --- a/models/text_recognition_crnn/demo.cpp +++ /dev/null @@ -1,294 +0,0 @@ -#include -#include - - -#include -#include -#include - -#include "charset_32_94_3944.h" - -using namespace std; -using namespace cv; -using namespace dnn; - -vector< pair > backendTargetPairs = { - std::make_pair(dnn::DNN_BACKEND_OPENCV, dnn::DNN_TARGET_CPU), - std::make_pair(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA), - std::make_pair(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA_FP16), - std::make_pair(dnn::DNN_BACKEND_TIMVX, dnn::DNN_TARGET_NPU), - std::make_pair(dnn::DNN_BACKEND_CANN, dnn::DNN_TARGET_NPU)}; - -vector loadCharset(string); - -std::string keys = -"{ help h | | Print help message. }" -"{ model m | text_recognition_CRNN_EN_2021sep.onnx | Usage: Set model type, defaults to text_recognition_CRNN_EN_2021sep.onnx }" -"{ input i | | Usage: Path to input image or video file. Skip this argument to capture frames from a camera.}" -"{ width | 736 | Usage: Resize input image to certain width, default = 736. It should be multiple by 32.}" -"{ height | 736 | Usage: Resize input image to certain height, default = 736. 
It should be multiple by 32.}" -"{ binary_threshold | 0.3 | Usage: Threshold of the binary map, default = 0.3.}" -"{ polygon_threshold | 0.5 | Usage: Threshold of polygons, default = 0.5.}" -"{ max_candidates | 200 | Usage: Set maximum number of polygon candidates, default = 200.}" -"{ unclip_ratio | 2.0 | Usage: The unclip ratio of the detected text region, which determines the output size, default = 2.0.}" -"{ save s | 1 | Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.}" -"{ viz v | 1 | Usage: Specify to open a new window to show results.}" -"{ backend bt | 0 | Choose one of computation backends: " -"0: (default) OpenCV implementation + CPU, " -"1: CUDA + GPU (CUDA), " -"2: CUDA + GPU (CUDA FP16), " -"3: TIM-VX + NPU, " -"4: CANN + NPU}"; - - -class PPOCRDet { -public: - - PPOCRDet(string modPath, Size inSize = Size(736, 736), float binThresh = 0.3, - float polyThresh = 0.5, int maxCand = 200, double unRatio = 2.0, - dnn::Backend bId = DNN_BACKEND_DEFAULT, dnn::Target tId = DNN_TARGET_CPU) : modelPath(modPath), inputSize(inSize), binaryThreshold(binThresh), - polygonThreshold(polyThresh), maxCandidates(maxCand), unclipRatio(unRatio), - backendId(bId), targetId(tId) - { - this->model = TextDetectionModel_DB(readNet(modelPath)); - this->model.setPreferableBackend(backendId); - this->model.setPreferableTarget(targetId); - - this->model.setBinaryThreshold(binaryThreshold); - this->model.setPolygonThreshold(polygonThreshold); - this->model.setUnclipRatio(unclipRatio); - this->model.setMaxCandidates(maxCandidates); - - this->model.setInputParams(1.0 / 255.0, inputSize, Scalar(122.67891434, 116.66876762, 104.00698793)); - } - pair< vector>, vector > infer(Mat image) { - CV_Assert(image.rows == this->inputSize.height && "height of input image != net input size "); - CV_Assert(image.cols == this->inputSize.width && "width of input image != net input size "); - vector> pt; - vector confidence; - 
this->model.detect(image, pt, confidence); - return make_pair< vector> &, vector< float > &>(pt, confidence); - } - -private: - string modelPath; - TextDetectionModel_DB model; - Size inputSize; - float binaryThreshold; - float polygonThreshold; - int maxCandidates; - double unclipRatio; - dnn::Backend backendId; - dnn::Target targetId; - -}; - - - -class CRNN { -private: - string modelPath; - dnn::Backend backendId; - dnn::Target targetId; - Net model; - vector charset; - Size inputSize; - Mat targetVertices; - -public: - CRNN(string modPath, dnn::Backend bId = DNN_BACKEND_DEFAULT, dnn::Target tId = DNN_TARGET_CPU) : modelPath(modPath), backendId(bId), targetId(tId) { - - this->model = readNet(this->modelPath); - this->model.setPreferableBackend(this->backendId); - this->model.setPreferableTarget(this->targetId); - // load charset by the name of model - if (this->modelPath.find("_EN_") != string::npos) - this->charset = loadCharset("CHARSET_EN_36"); - else if (this->modelPath.find("_CH_") != string::npos) - this->charset = loadCharset("CHARSET_CH_94"); - else if (this->modelPath.find("_CN_") != string::npos) - this->charset = loadCharset("CHARSET_CN_3944"); - else - CV_Error(-1, "Charset not supported! 
Exiting ..."); - - this->inputSize = Size(100, 32); // Fixed - this->targetVertices = Mat(4, 1, CV_32FC2); - this->targetVertices.row(0) = Vec2f(0, this->inputSize.height - 1); - this->targetVertices.row(1) = Vec2f(0, 0); - this->targetVertices.row(2) = Vec2f(this->inputSize.width - 1, 0); - this->targetVertices.row(3) = Vec2f(this->inputSize.width - 1, this->inputSize.height - 1); - } - - Mat preprocess(Mat image, Mat rbbox) - { - // Remove conf, reshape and ensure all is np.float32 - Mat vertices; - rbbox.reshape(2, 4).convertTo(vertices, CV_32FC2); - - Mat rotationMatrix = getPerspectiveTransform(vertices, this->targetVertices); - Mat cropped; - warpPerspective(image, cropped, rotationMatrix, this->inputSize); - - // 'CN' can detect digits (0\~9), upper/lower-case letters (a\~z and A\~Z), and some special characters - // 'CH' can detect digits (0\~9), upper/lower-case le6tters (a\~z and A\~Z), some Chinese characters and some special characters - if (this->modelPath.find("CN") == string::npos && this->modelPath.find("CH") == string::npos) - cvtColor(cropped, cropped, COLOR_BGR2GRAY); - Mat blob = blobFromImage(cropped, 1 / 127.5, this->inputSize, Scalar::all(127.5)); - return blob; - } - - u16string infer(Mat image, Mat rbbox) - { - // Preprocess - Mat inputBlob = this->preprocess(image, rbbox); - - // Forward - this->model.setInput(inputBlob); - Mat outputBlob = this->model.forward(); - - // Postprocess - u16string results = this->postprocess(outputBlob); - - return results; - } - - u16string postprocess(Mat outputBlob) - { - // Decode charaters from outputBlob - Mat character = outputBlob.reshape(1, outputBlob.size[0]); - u16string text(u""); - for (int i = 0; i < character.rows; i++) - { - double minVal, maxVal; - Point maxIdx; - minMaxLoc(character.row(i), &minVal, &maxVal, nullptr, &maxIdx); - if (maxIdx.x != 0) - text += charset[maxIdx.x - 1]; - else - text += u"-"; - } - // adjacent same letters as well as background text must be removed to get the final 
output - u16string textFilter(u""); - - for (int i = 0; i < text.size(); i++) - if (text[i] != u'-' && !(i > 0 && text[i] == text[i - 1])) - textFilter += text[i]; - return textFilter; - } -}; - - -Mat visualize(Mat image, pair< vector>, vector >&results, double fps=-1, Scalar boxColor=Scalar(0, 255, 0), Scalar textColor=Scalar(0, 0, 255), bool isClosed=true, int thickness=2) -{ - Mat output; - image.copyTo(output); - if (fps > 0) - putText(output, format("FPS: %.2f", fps), Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, textColor); - polylines(output, results.first, isClosed, boxColor, thickness); - return output; -} - -int main(int argc, char** argv) -{ - CommandLineParser parser(argc, argv, keys); - - parser.about("An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition (https://arxiv.org/abs/1507.05717)"); - if (parser.has("help")) - { - parser.printMessage(); - return 0; - } - - int backendTargetid = parser.get("backend"); - String modelPath = parser.get("model"); - - if (modelPath.empty()) - { - CV_Error(Error::StsError, "Model file " + modelPath + " not found"); - } - - Size inpSize(parser.get("width"), parser.get("height")); - float binThresh = parser.get("binary_threshold"); - float polyThresh = parser.get("polygon_threshold"); - int maxCand = parser.get("max_candidates"); - double unRatio = parser.get("unclip_ratio"); - bool save = parser.get("save"); - bool viz = parser.get("viz"); - - PPOCRDet detector("../text_detection_ppocr/text_detection_en_ppocrv3_2023may.onnx", inpSize, binThresh, polyThresh, maxCand, unRatio, backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second); - CRNN recognizer(modelPath, backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second); - //! 
[Open a video file or an image file or a camera stream] - VideoCapture cap; - if (parser.has("input")) - cap.open(parser.get("input")); - else - cap.open(0); - if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot open video or file"); - Mat originalImage; - static const std::string kWinName = modelPath; - while (waitKey(1) < 0) - { - cap >> originalImage; - if (originalImage.empty()) - { - if (parser.has("input")) - { - cout << "Frame is empty" << endl; - break; - } - else - continue; - } - int originalW = originalImage.cols; - int originalH = originalImage.rows; - double scaleHeight = originalH / double(inpSize.height); - double scaleWidth = originalW / double(inpSize.width); - Mat image; - resize(originalImage, image, inpSize); - - // inference of text detector - TickMeter tm; - tm.start(); - pair< vector>, vector > results = detector.infer(image); - tm.stop(); - if (results.first.size() > 0 && results.second.size() > 0) - { - u16string texts; - auto score=results.second.begin(); - for (auto box : results.first) - { - Mat result = Mat(box).reshape(2, 4); - texts = texts + u"'" + recognizer.infer(image, result) + u"'"; - } - std::wstring_convert, char16_t> converter; - std::cout << converter.to_bytes(texts) << std::endl; - } - auto x = results.first; - // Scale the results bounding box - for (auto &pts : results.first) - { - for (int i = 0; i < 4; i++) - { - pts[i].x = int(pts[i].x * scaleWidth); - pts[i].y = int(pts[i].y * scaleHeight); - } - } - originalImage = visualize(originalImage, results, tm.getFPS()); - tm.reset(); - if (parser.has("input")) - { - if (save) - { - cout << "Result image saved to result.jpg\n"; - imwrite("result.jpg", originalImage); - } - if (viz) - imshow(kWinName, originalImage); - } - else - imshow(kWinName, originalImage); - - } - return 0; -} diff --git a/models/text_recognition_crnn/demo.py b/models/text_recognition_crnn/demo.py deleted file mode 100644 index 02ba036f..00000000 --- a/models/text_recognition_crnn/demo.py +++ 
/dev/null @@ -1,169 +0,0 @@ -# This file is part of OpenCV Zoo project. -# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. - -import sys -import argparse - -import numpy as np -import cv2 as cv - -# Check OpenCV version -opencv_python_version = lambda str_version: tuple(map(int, (str_version.split(".")))) -assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \ - "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python" - -from crnn import CRNN - -sys.path.append('../text_detection_ppocr') -from ppocr_det import PPOCRDet - -# Valid combinations of backends and targets -backend_target_pairs = [ - [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA], - [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16], - [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU], - [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU] -] - -parser = argparse.ArgumentParser( - description="An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition (https://arxiv.org/abs/1507.05717)") -parser.add_argument('--input', '-i', type=str, - help='Usage: Set path to the input image. 
Omit for using default camera.') -parser.add_argument('--model', '-m', type=str, default='text_recognition_CRNN_EN_2021sep.onnx', - help='Usage: Set model path, defaults to text_recognition_CRNN_EN_2021sep.onnx.') -parser.add_argument('--backend_target', '-bt', type=int, default=0, - help='''Choose one of the backend-target pair to run this demo: - {:d}: (default) OpenCV implementation + CPU, - {:d}: CUDA + GPU (CUDA), - {:d}: CUDA + GPU (CUDA FP16), - {:d}: TIM-VX + NPU, - {:d}: CANN + NPU - '''.format(*[x for x in range(len(backend_target_pairs))])) -parser.add_argument('--width', type=int, default=736, - help='Preprocess input image by resizing to a specific width. It should be multiple by 32.') -parser.add_argument('--height', type=int, default=736, - help='Preprocess input image by resizing to a specific height. It should be multiple by 32.') -parser.add_argument('--save', '-s', action='store_true', - help='Usage: Specify to save a file with results. Invalid in case of camera input.') -parser.add_argument('--vis', '-v', action='store_true', - help='Usage: Specify to open a new window to show results. 
Invalid in case of camera input.') -args = parser.parse_args() - -def visualize(image, boxes, texts, color=(0, 255, 0), isClosed=True, thickness=2): - output = image.copy() - - pts = np.array(boxes[0]) - output = cv.polylines(output, pts, isClosed, color, thickness) - for box, text in zip(boxes[0], texts): - cv.putText(output, text, (box[1].astype(np.int32)), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) - return output - -if __name__ == '__main__': - backend_id = backend_target_pairs[args.backend_target][0] - target_id = backend_target_pairs[args.backend_target][1] - - # Instantiate PPOCRDet for text detection - detector = PPOCRDet(modelPath='../text_detection_ppocr/text_detection_en_ppocrv3_2023may.onnx', - inputSize=[args.width, args.height], - binaryThreshold=0.3, - polygonThreshold=0.5, - maxCandidates=200, - unclipRatio=2.0, - backendId=backend_id, - targetId=target_id) - # Instantiate CRNN for text recognition - recognizer = CRNN(modelPath=args.model, backendId=backend_id, targetId=target_id) - - # If input is an image - if args.input is not None: - original_image = cv.imread(args.input) - original_w = original_image.shape[1] - original_h = original_image.shape[0] - scaleHeight = original_h / args.height - scaleWidth = original_w / args.width - image = cv.resize(original_image, [args.width, args.height]) - - # Inference - results = detector.infer(image) - texts = [] - for box, score in zip(results[0], results[1]): - texts.append( - recognizer.infer(image, box.reshape(8)) - ) - - # Scale the results bounding box - for i in range(len(results[0])): - for j in range(4): - box = results[0][i][j] - results[0][i][j][0] = box[0] * scaleWidth - results[0][i][j][1] = box[1] * scaleHeight - - # Draw results on the input image - original_image = visualize(original_image, results, texts) - - # Save results if save is true - if args.save: - print('Results saved to result.jpg\n') - cv.imwrite('result.jpg', original_image) - - # Visualize results in a new window - if args.vis: 
- cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) - cv.imshow(args.input, original_image) - cv.waitKey(0) - else: # Omit input to call default camera - deviceId = 0 - cap = cv.VideoCapture(deviceId) - - tm = cv.TickMeter() - while cv.waitKey(1) < 0: - hasFrame, original_image = cap.read() - if not hasFrame: - print('No frames grabbed!') - break - - original_w = original_image.shape[1] - original_h = original_image.shape[0] - scaleHeight = original_h / args.height - scaleWidth = original_w / args.width - - frame = cv.resize(original_image, [args.width, args.height]) - # Inference of text detector - tm.start() - results = detector.infer(frame) - tm.stop() - cv.putText(frame, 'Latency - {}: {:.2f}'.format(detector.name, tm.getFPS()), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) - tm.reset() - - # Inference of text recognizer - if len(results[0]) and len(results[1]): - texts = [] - tm.start() - for box, score in zip(results[0], results[1]): - result = np.hstack( - (box.reshape(8), score) - ) - texts.append( - recognizer.infer(frame, box.reshape(8)) - ) - tm.stop() - cv.putText(frame, 'Latency - {}: {:.2f}'.format(recognizer.name, tm.getFPS()), (0, 30), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) - tm.reset() - - # Scale the results bounding box - for i in range(len(results[0])): - for j in range(4): - box = results[0][i][j] - results[0][i][j][0] = box[0] * scaleWidth - results[0][i][j][1] = box[1] * scaleHeight - - # Draw results on the input image - original_image = visualize(original_image, results, texts) - print(texts) - - # Visualize results in a new Window - cv.imshow('{} Demo'.format(recognizer.name), original_image) diff --git a/models/text_recognition_crnn/example_outputs/CRNNCTC.gif b/models/text_recognition_crnn/example_outputs/CRNNCTC.gif deleted file mode 100644 index 09689aaf..00000000 --- a/models/text_recognition_crnn/example_outputs/CRNNCTC.gif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:ad60d87b58f365d168ae4d444dc27306e6d379ed16dbe82b44f443a43f4e65db -size 5249246 diff --git a/models/text_recognition_crnn/example_outputs/demo.jpg b/models/text_recognition_crnn/example_outputs/demo.jpg deleted file mode 100644 index 35ae4184..00000000 --- a/models/text_recognition_crnn/example_outputs/demo.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:93b5838416d9d131f7a0fe3f00addfce0ed984052c15f69a8904d553066aa0aa -size 39430 diff --git a/models/text_recognition_crnn/text_recognition_CRNN_CH_2021sep.onnx b/models/text_recognition_crnn/text_recognition_CRNN_CH_2021sep.onnx deleted file mode 100644 index 443f5dec..00000000 --- a/models/text_recognition_crnn/text_recognition_CRNN_CH_2021sep.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2dc566fd01ac2118b25c6960508ebd758b64c421a2bfa78dc05401ada6737e0b -size 64906971 diff --git a/models/text_recognition_crnn/text_recognition_CRNN_CH_2021sep_int8bq.onnx b/models/text_recognition_crnn/text_recognition_CRNN_CH_2021sep_int8bq.onnx deleted file mode 100644 index b737148f..00000000 --- a/models/text_recognition_crnn/text_recognition_CRNN_CH_2021sep_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c205e1a1a3bc5a1d585ea380b55d2801ba655c473bd5d41730fc14d6341f2e16 -size 26887550 diff --git a/models/text_recognition_crnn/text_recognition_CRNN_CH_2022oct_int8.onnx b/models/text_recognition_crnn/text_recognition_CRNN_CH_2022oct_int8.onnx deleted file mode 100644 index 089d9ba5..00000000 --- a/models/text_recognition_crnn/text_recognition_CRNN_CH_2022oct_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c2bc75af1998c0b608f86ab875cbdcd109d18b27ff0d9872e7b7429fd1945f3a -size 25783320 diff --git a/models/text_recognition_crnn/text_recognition_CRNN_CH_2023feb_fp16.onnx b/models/text_recognition_crnn/text_recognition_CRNN_CH_2023feb_fp16.onnx deleted 
file mode 100644 index c619d4da..00000000 --- a/models/text_recognition_crnn/text_recognition_CRNN_CH_2023feb_fp16.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cfef028889b3a21771e687d501ac38ccab6d37d199e94f244d60cc21f743526b -size 32472394 diff --git a/models/text_recognition_crnn/text_recognition_CRNN_CN_2021nov.onnx b/models/text_recognition_crnn/text_recognition_CRNN_CN_2021nov.onnx deleted file mode 100644 index e48fe7fe..00000000 --- a/models/text_recognition_crnn/text_recognition_CRNN_CN_2021nov.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c760bf82d684b87dfabb288e6c0f92d41a8cd6c1780661ca2c3cd10c2065a9ba -size 72807160 diff --git a/models/text_recognition_crnn/text_recognition_CRNN_CN_2021nov_int8.onnx b/models/text_recognition_crnn/text_recognition_CRNN_CN_2021nov_int8.onnx deleted file mode 100644 index a6a987f5..00000000 --- a/models/text_recognition_crnn/text_recognition_CRNN_CN_2021nov_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:63b37da9f35d1861fb1af40ab82313794291ad49c950374dc4ed232b56e1b656 -size 27710536 diff --git a/models/text_recognition_crnn/text_recognition_CRNN_CN_2021nov_int8bq.onnx b/models/text_recognition_crnn/text_recognition_CRNN_CN_2021nov_int8bq.onnx deleted file mode 100644 index 7be6a406..00000000 --- a/models/text_recognition_crnn/text_recognition_CRNN_CN_2021nov_int8bq.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a5a5aac86dbb06be8853ec07d1f5e8bac810a6cc4233390a3c020487b9a7881c -size 29026387 diff --git a/models/text_recognition_crnn/text_recognition_CRNN_EN_2021sep.onnx b/models/text_recognition_crnn/text_recognition_CRNN_EN_2021sep.onnx deleted file mode 100644 index ff13f288..00000000 --- a/models/text_recognition_crnn/text_recognition_CRNN_EN_2021sep.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:a84b1f6e11a65c2d733cb0cc1f014aae3f99051e3f11447dc282faa678eee544 -size 33823087 diff --git a/models/text_recognition_crnn/text_recognition_CRNN_EN_2022oct_int8.onnx b/models/text_recognition_crnn/text_recognition_CRNN_EN_2022oct_int8.onnx deleted file mode 100644 index d9d2a04d..00000000 --- a/models/text_recognition_crnn/text_recognition_CRNN_EN_2022oct_int8.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:94117b4c2652337b3f1aef81b2ec15a74e97973b1c58f743e86380b95b95ffa2 -size 16378030 diff --git a/models/text_recognition_crnn/text_recognition_CRNN_EN_2023feb_fp16.onnx b/models/text_recognition_crnn/text_recognition_CRNN_EN_2023feb_fp16.onnx deleted file mode 100644 index 8017c197..00000000 --- a/models/text_recognition_crnn/text_recognition_CRNN_EN_2023feb_fp16.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e785f79aeb817e1600b18fad5e740bddc281a9eb053d648f163af335a32d59d0 -size 16916177 diff --git a/reports/2023-4.9.0/assets/benchmark_table_4.9.0.png b/reports/2023-4.9.0/assets/benchmark_table_4.9.0.png deleted file mode 100644 index d0cd6fa1..00000000 --- a/reports/2023-4.9.0/assets/benchmark_table_4.9.0.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:978f537c9a967810915ba049ffb667fb41c2b9b22be90c0c334a350e1ac37505 -size 311950 diff --git a/reports/2023-4.9.0/opencv_zoo_report-cn-2023-4.9.0.md b/reports/2023-4.9.0/opencv_zoo_report-cn-2023-4.9.0.md deleted file mode 100644 index 85c8d7bf..00000000 --- a/reports/2023-4.9.0/opencv_zoo_report-cn-2023-4.9.0.md +++ /dev/null @@ -1,53 +0,0 @@ -# OpenCV Model Zoo报告 - 模型、板卡和性能基准结果分析 - -[![benchmark_table](assets/benchmark_table_4.9.0.png)](benchmark_table) - -[OpenCV Model Zoo](https://github.com/opencv/opencv_zoo)项目于2021年9月启动。从那时起,我们已收集了43个模型权重,涵盖19个任务,并添加了13种硬件设置,涵盖不同的CPU架构(x86-64、ARM和RISC-V)以及不同的计算单元(CPU、GPU和NPU)。所有这些模型和硬件都经过我们的全面测试,并保证与OpenCV的最新版本(目前是4.9.0)兼容,如我们的基准表所示。 - -## Models - 
-截至此版本发布,我们在opencv_zoo中共有43个模型权重,涵盖了总共19个任务。这些模型是考虑到许可证的,这意味着基本上您可以为任何目的使用opencv_zoo中的所有模型,甚至用于商业用途。它们主要来自以下4个来源: - -- OpenCV中国团队。YuNet人脸检测模型由我们团队的一名成员开发和维护。 -- OpenCV Area Chair。这是由OpenCV基金会启动的一个项目,详情可以在[这里](https://opencv.org/opencv-area-chairs/)找到。人脸识别的SFace模型和面部表情识别的FER模型是由Area Chair邓教授贡献的。 -- 与OpenCV的合作。人体分割模型来自百度PaddlePaddle,修改后的YuNet用于车牌检测来自[watrix.ai](watrix.ai)。 -- OpenCV社区。从2022年开始,我们在Google Summer of Code(GSoC)计划中有关于模型贡献的项目想法。GSoC学生已成功贡献了6个模型,涵盖了目标检测、目标跟踪和光流估计等任务。 - -我们欢迎您的贡献! - -此外,我们为每个模型提供了在最新版本的OpenCV中可立即使用的Python和C++演示。我们还提供了[可视化样例](https://github.com/opencv/opencv_zoo?tab=readme-ov-file#some-examples),以便开发者们更好地了解任务和输出的类型。 - -## Boards - -opencv_zoo中有13种硬件设置,其中之一是搭载Intel i7-12700K的 PC,其他都是单板计算机(SBC)。它们按CPU架构分类如下: - - - -x86-64: - -- Intel Core i7-12700K:8 P核(3.60GHz,4.90GHz turbo),4 E核(2.70GHz,3.80GHz turbo),20线程。 - -ARM: - -| 板卡 | SoC 型号 | CPU 型号 | GPU 型号 | NPU 性能(Int8) | -| ----- | --- | --- | --- | --- | -| Khadas VIM3 | Amlogic A311D | 2.20GHz 四核 Cortex-A73 + 1.80GHz 双核 Cortex-A53 | ARM G52 | 5 TOPS | -| Khadas VIM4 | Amlogic A311D2 | 2.2GHz 四核 ARM Cortex-A73 + 2.0GHz 四核 Cortex-A53 | Mali G52MP8(8EE) 800Mhz | 3.2 TOPS | -| Khadas Edge 2 | Rockchip RK3588S | 2.25GHz 四核 Cortex-A76 + 1.80GHz 四核 Cortex-A55 | 1GHz ARM Mali-G610 | 6 TOPS | -| Raspberry Pi 4B | Broadcom BCM2711 | 1.5GHz 四核 Cortex-A72 | *未知* | *无* | -| Horizon Sunrise X3 PI | Sunrise X3 | 1.2GHz 四核 Cortex-A53 | *未知* | 5 TOPS,双核伯努利架构| -| MAIX-III AXera-Pi | AXera AX620A | 四核 Cortex-A7 | *未知* | 3.6 TOPS | -| Toybrick RV1126 | Rockchip RV1126 | 四核 Cortex-A7 | *未知* | 2.0 TOPS | -| NVIDIA Jetson Nano B01 | *未知* | 1.43GHz 四核 Cortex-A57 | 128 核 NVIDIA Maxwell | *无* | -| NVIDIA Jetson Nano Orin | *未知* | 6 核 Cortex®-A78AE | 1024 核 NVIDIA Ampere | *无* | -| Atlas 200 DK | *未知* | *未知* | *未知* | 22 TOPS,Ascend 310 | -| Atlas 200I DK A2 | *未知* | 1.0GHz 四核 | *未知* | 8 TOPS,Ascend 310B | - -RISC-V: - -| 板卡 | SoC 型号 | CPU 型号 | GPU 型号 | -| ----- | --------- | --------- | --------- | -| StarFive VisionFive 
2 | StarFive JH7110 | 1.5GHz 四核 RISC-V 64 位 | 600MHz IMG BXE-4-32 MC1 | -| Allwinner Nezha D1 | Allwinner D1 | 1.0GHz 单核 RISC-V 64 位,RVV-0.7.1 | *未知* | - -我们的目标是在边缘设备上进行高效计算!在过去的几年中,我们(OpenCV)中国团队,已经在dnn模块针对ARM架构优化的方面付出了大量努力,特别关注卷积神经网络的卷积内核优化和Vision Transformers的GEMM内核优化。更值得一提的是,我们为dnn模块引入了NPU支持,支持Khadas VIM3、Atlas 200 DK 和Atlas 200I DK A2上的 NPU。在 NPU 上运行模型可以帮助将计算负载从CPU分配到NPU,甚至可以达到更快的推理速度(例如,在 Atlas 200 DK 上 Ascend 310 的测试结果)。 diff --git a/reports/2023-4.9.0/opencv_zoo_report-en-2023-4.9.0.md b/reports/2023-4.9.0/opencv_zoo_report-en-2023-4.9.0.md deleted file mode 100644 index 122bb136..00000000 --- a/reports/2023-4.9.0/opencv_zoo_report-en-2023-4.9.0.md +++ /dev/null @@ -1,56 +0,0 @@ -# OpenCV Model Zoo Report - Models, Boards and Benchmark Result Analysis - - -[![benchmark_table](assets/benchmark_table_4.9.0.png)](benchmark_table) - -[OpenCV Model Zoo](https://github.com/opencv/opencv_zoo) was started back in September, 2021. Since then, we have collected 43 model weights covering 19 tasks and added 13 hardware setups covering different CPU architectures (x86-64, ARM and RISC-V) and different computing units (CPU, GPU and NPU). All these models and hardware are fully tested by us and guaranteed to work with latest release of OpenCV (currently 4.9.0) as our benchmark table shown. - -## Models - -As of this release, we have 43 model weights covering 19 tasks in total in the zoo. These models are collected with licenses in mind, meaning you can bascially use all the models in the zoo for whatever purposes you want, even for commercial purpose. They are collected from mainly 4 sources: - -- OpenCV China team. The YuNet model for face detection is developed and maintained by one of our team members. -- OpenCV Area Chair. This is a program started by OpenCV Foundation, details can be found [here](https://opencv.org/opencv-area-chairs/). 
The SFace model for face recognition and FER model for facial expression recognition are contributed from one of the Area Chairs Prof. Deng. -- Cooperation with OpenCV. The HumanSeg model for human segmentation is from Baidu PaddlePaddle, and the modified YuNet for license plate detection is from [watrix.ai](watrix.ai). -- Community. Started from 2022, we have project ideas for model contribution in the Google Summer of Code (GSoC) program. GSoC students have successfully contributed 6 models covering tasks such as object detection, object tracking and optical flow estimation. - -We welcome your contribution! - -Besides, demos in Python and C++, which work out-of-the-box with latest OpenCV, are also provided for each model. We also provide [visual examples](https://github.com/opencv/opencv_zoo?tab=readme-ov-file#some-examples) so that people can better understand what the task is and what kind of the output is. - -## Boards - -There are 13 hardware setups in the zoo, one of them is a PC with Intel i7-12700K, and the others are single board computers (SBC). They are categorized by CPU architecture as follows: - - - -x86-64: - -- Intel Core i7-12700K: 8 P-core (3.60GHz, 4.90GHz turbo), 4 E-core (2.70GHz, 3.80GHz turbo), 20 threads. 
- - -ARM: - -| Board | SoC model | CPU model | GPU model | NPU Performance (Int8) | -| ----- | --- | --- | --- | --- | -| Khadas VIM3 | Amlogic A311D | 2.20GHz Quad-core Cortex-A73 + 1.80GHz Dual-core Cortex-A53 | ARM G52 | 5 TOPS | -| Khadas VIM4 | Amlogic A311D2 | 2.2GHz Quad-core ARM Cortex-A73 + 2.0GHz Quad-core Cortex-A53 | Mali G52MP8(8EE) 800Mhz | 3.2 TOPS | -| Khadas Edge 2 | Rockchip RK3588S | 2.25GHz Quad-core Cortex-A76 + 1.80GHz Quad-core Cortex-A55 | 1GHz ARM Mali-G610 | 6 TOPS | -| Raspberry Pi 4B | Broadcom BCM2711 | 1.5GHz Quad-core Cortex-A72 | *Unknown* | *No* | -| Horizon Sunrise X3 PI | Sunrise X3 | 1.2GHz Quad-core Cortex-A53 | *Unkown* | 5 TOPS, Dual-core Bernoulli Arch| -| MAIX-III AXera-Pi | AXera AX620A | Quad-core Cortex-A7 | *Unknown* | 3.6 TOPS | -| Toybrick RV1126 | Rockchip RV1126 | Quad-core Cortex-A7 | *Unknown* | 2.0 TOPS | -| NVIDIA Jetson Nano B01 | *Unknown* | 1.43GHz Quad-core Cortex-A57 | 128-core NVIDIA Maxwell | *No* | -| NVIDIA Jetson Nano Orin | *Unknown* | 6-core Cortex®-A78AE | 1024-core NVIDIA Ampere | *No* | -| Atlas 200 DK | *Unknown* | *Unknown* | *Unknown* | 22 TOPS, Ascend 310 | -| Atlas 200I DK A2 | *Unknown* | 1.0GHz Quad-core | *Unknown* | 8 TOPS, Ascend 310B | - - -RISC-V: - -| Board | SoC model | CPU model | GPU model | -| ----- | --------- | --------- | --------- | -| StarFive VisionFive 2 | StarFive JH7110 | 1.5GHz Quad-core RISC-V 64-bit | 600MHz IMG BXE-4-32 MC1 | -| Allwinner Nezha D1 | Allwinner D1 | 1.0GHz single-core RISC-V 64-bit, RVV-0.7.1 | *Unknown* | - -We are targetting on efficient computing on edge devices! In the past few years, we, the OpenCV China team, have spent most of our effort in optimizing dnn module for ARM architecture, focusing especially on convolution kernel optimization for ConvNets and GEMM kernel optimization for Vision Transformers. 
What's even more worth mentioning is that we introduce NPU support for the dnn module, supporing the NPU in Khadas VIM3, Atlas 200 DK and Atlas 200I DK A2. Running the model on NPU can help distribute computing loads from CPU to NPU and even reaching a faster inference speed (see benchmark results on Ascend 310 on Atlas 200 DK for example). diff --git a/reports/README.md b/reports/README.md deleted file mode 100644 index 623e0d9f..00000000 --- a/reports/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Reports on models and boards - -Here we present reports on models and boards in the zoo per major release of OpenCV since 4.9.0. diff --git a/tools/eval/README.md b/tools/eval/README.md deleted file mode 100644 index d30829e3..00000000 --- a/tools/eval/README.md +++ /dev/null @@ -1,228 +0,0 @@ -# Accuracy evaluation of models in OpenCV Zoo - -Make sure you have the following packages installed: - -```shell -pip install tqdm -pip install scikit-learn -pip install scipy==1.8.1 -``` - -Generally speaking, evaluation can be done with the following command: - -```shell -python eval.py -m model_name -d dataset_name -dr dataset_root_dir -``` - -Supported datasets: - -- [ImageNet](#imagenet) -- [WIDERFace](#widerface) -- [LFW](#lfw) -- [ICDAR](#icdar2003) -- [IIIT5K](#iiit5k) -- [Mini Supervisely](#mini-supervisely) - -## ImageNet - -### Prepare data - -Please visit https://image-net.org/ to download the ImageNet dataset (only need images in `ILSVRC/Data/CLS-LOC/val`) and [the labels from caffe](http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz). Organize files as follow: - -```shell -$ tree -L 2 /path/to/imagenet -. 
-├── caffe_ilsvrc12 -│   ├── det_synset_words.txt -│   ├── imagenet.bet.pickle -│   ├── imagenet_mean.binaryproto -│   ├── synsets.txt -│   ├── synset_words.txt -│   ├── test.txt -│   ├── train.txt -│   └── val.txt -├── caffe_ilsvrc12.tar.gz -├── ILSVRC -│   ├── Annotations -│   ├── Data -│   └── ImageSets -├── imagenet_object_localization_patched2019.tar.gz -├── LOC_sample_submission.csv -├── LOC_synset_mapping.txt -├── LOC_train_solution.csv -└── LOC_val_solution.csv -``` - -### Evaluation - -Run evaluation with the following command: - -```shell -python eval.py -m mobilenet -d imagenet -dr /path/to/imagenet -``` - -## WIDERFace - -The script is modified based on [WiderFace-Evaluation](https://github.com/wondervictor/WiderFace-Evaluation). - -### Prepare data - -Please visit http://shuoyang1213.me/WIDERFACE to download the WIDERFace dataset [Validation Images](https://huggingface.co/datasets/wider_face/resolve/main/data/WIDER_val.zip), [Face annotations](http://shuoyang1213.me/WIDERFACE/support/bbx_annotation/wider_face_split.zip) and [eval_tools](http://shuoyang1213.me/WIDERFACE/support/eval_script/eval_tools.zip). Organize files as follow: - -```shell -$ tree -L 2 /path/to/widerface -. -├── eval_tools -│   ├── boxoverlap.m -│   ├── evaluation.m -│   ├── ground_truth -│   ├── nms.m -│   ├── norm_score.m -│   ├── plot -│   ├── read_pred.m -│   └── wider_eval.m -├── wider_face_split -│   ├── readme.txt -│   ├── wider_face_test_filelist.txt -│   ├── wider_face_test.mat -│   ├── wider_face_train_bbx_gt.txt -│   ├── wider_face_train.mat -│   ├── wider_face_val_bbx_gt.txt -│   └── wider_face_val.mat -└── WIDER_val - └── images -``` - -### Evaluation - -Run evaluation with the following command: - -```shell -python eval.py -m yunet -d widerface -dr /path/to/widerface -``` - -## LFW - -The script is modified based on [evaluation of InsightFace](https://github.com/deepinsight/insightface/blob/f92bf1e48470fdd567e003f196f8ff70461f7a20/src/eval/lfw.py). 
- -This evaluation uses [YuNet](../../models/face_detection_yunet) as face detector. The structure of the face bounding boxes saved in [lfw_face_bboxes.npy](../eval/datasets/lfw_face_bboxes.npy) is shown below. -Each row represents the bounding box of the main face that will be used in each image. - -```shell -[ - [x, y, w, h, x_re, y_re, x_le, y_le, x_nt, y_nt, x_rcm, y_rcm, x_lcm, y_lcm], - ... - [x, y, w, h, x_re, y_re, x_le, y_le, x_nt, y_nt, x_rcm, y_rcm, x_lcm, y_lcm] -] -``` - -`x1, y1, w, h` are the top-left coordinates, width and height of the face bounding box, `{x, y}_{re, le, nt, rcm, lcm}` stands for the coordinates of right eye, left eye, nose tip, the right corner and left corner of the mouth respectively. Data type of this numpy array is `np.float32`. - - -### Prepare data - -Please visit http://vis-www.cs.umass.edu/lfw to download the LFW [all images](http://vis-www.cs.umass.edu/lfw/lfw.tgz)(needs to be decompressed) and [pairs.txt](http://vis-www.cs.umass.edu/lfw/pairs.txt)(needs to be placed in the `view2` folder). Organize files as follow: - -```shell -$ tree -L 2 /path/to/lfw -. -├── lfw -│   ├── Aaron_Eckhart -│   ├── ... -│   └── Zydrunas_Ilgauskas -└── view2 -    └── pairs.txt -``` - -### Evaluation - -Run evaluation with the following command: - -```shell -python eval.py -m sface -d lfw -dr /path/to/lfw -``` - -## ICDAR2003 - -### Prepare data - -Please visit http://iapr-tc11.org/mediawiki/index.php/ICDAR_2003_Robust_Reading_Competitions to download the ICDAR2003 dataset and the labels. You have to download the Robust Word Recognition [TrialTrain Set](http://www.iapr-tc11.org/dataset/ICDAR2003_RobustReading/TrialTrain/word.zip) only. - -```shell -$ tree -L 2 /path/to/icdar -. -├── word -│   ├── 1 -│ │ ├── self -│ │ ├── ... -│ │ └── willcooks -│   ├── ... 
-│   └── 12 -└── word.xml -    -``` - -### Evaluation - -Run evaluation with the following command: - -```shell -python eval.py -m crnn -d icdar -dr /path/to/icdar -``` - -### Example - -```shell -download zip file from http://www.iapr-tc11.org/dataset/ICDAR2003_RobustReading/TrialTrain/word.zip -upzip file to /path/to/icdar -python eval.py -m crnn -d icdar -dr /path/to/icdar -``` - -## IIIT5K - -### Prepare data - -Please visit https://github.com/cv-small-snails/Text-Recognition-Material to download the IIIT5K dataset and the labels. - -### Evaluation - -All the datasets in the format of lmdb can be evaluated by this script.
-Run evaluation with the following command: - -```shell -python eval.py -m crnn -d iiit5k -dr /path/to/iiit5k -``` - - -## Mini Supervisely - -### Prepare data -Please download the mini_supervisely data from [here](https://paddleseg.bj.bcebos.com/humanseg/data/mini_supervisely.zip) which includes the validation dataset and unzip it. - -```shell -$ tree -L 2 /path/to/mini_supervisely -. -├── Annotations -│   ├── ache-adult-depression-expression-41253.png -│   ├── ... -├── Images -│   ├── ache-adult-depression-expression-41253.jpg -│   ├── ... -├── test.txt -├── train.txt -└── val.txt -``` - -### Evaluation - -Run evaluation with the following command : - -```shell -python eval.py -m pphumanseg -d mini_supervisely -dr /path/to/pphumanseg -``` - -Run evaluation on quantized model with the following command : - -```shell -python eval.py -m pphumanseg_q -d mini_supervisely -dr /path/to/pphumanseg -``` \ No newline at end of file diff --git a/tools/eval/datasets/__init__.py b/tools/eval/datasets/__init__.py deleted file mode 100644 index 5ed59faa..00000000 --- a/tools/eval/datasets/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .imagenet import ImageNet -from .widerface import WIDERFace -from .lfw import LFW -from .icdar import ICDAR -from .iiit5k import IIIT5K -from .minisupervisely import MiniSupervisely - -class Registery: - def __init__(self, name): - self._name = name - self._dict = dict() - - def get(self, key): - return self._dict[key] - - def register(self, item): - self._dict[item.__name__] = item - -DATASETS = Registery("Datasets") -DATASETS.register(ImageNet) -DATASETS.register(WIDERFace) -DATASETS.register(LFW) -DATASETS.register(ICDAR) -DATASETS.register(IIIT5K) -DATASETS.register(MiniSupervisely) diff --git a/tools/eval/datasets/icdar.py b/tools/eval/datasets/icdar.py deleted file mode 100644 index 80b9eb42..00000000 --- a/tools/eval/datasets/icdar.py +++ /dev/null @@ -1,54 +0,0 @@ -import os -import numpy as np -import cv2 as cv -import xml.dom.minidom 
as minidom -from tqdm import tqdm - -class ICDAR: - def __init__(self, root): - self.root = root - self.acc = -1 - self.inputSize = [100, 32] - self.val_label_file = os.path.join(root, "word.xml") - self.val_label = self.load_label(self.val_label_file) - - @property - def name(self): - return self.__class__.__name__ - - def load_label(self, label_file): - label = list() - dom = minidom.getDOMImplementation().createDocument(None, 'Root', None) - root = dom.documentElement - dom = minidom.parse(self.val_label_file) - root = dom.documentElement - names = root.getElementsByTagName('image') - for name in names: - key = os.path.join(self.root, name.getAttribute('file')) - value = name.getAttribute('tag').lower() - label.append([key, value]) - - return label - - def eval(self, model): - right_num = 0 - pbar = tqdm(self.val_label) - pbar.set_description("Evaluating {} with {} val set".format(model.name, self.name)) - - for fn, label in pbar: - - img = cv.imread(fn) - - rbbox = np.array([0, img.shape[0], 0, 0, img.shape[1], 0, img.shape[1], img.shape[0]]) - pred = model.infer(img, rbbox) - if label.lower() == pred.lower(): - right_num += 1 - - self.acc = right_num/(len(self.val_label) * 1.0) - - - def get_result(self): - return self.acc - - def print_result(self): - print("Accuracy: {:.2f}%".format(self.acc*100)) \ No newline at end of file diff --git a/tools/eval/datasets/iiit5k.py b/tools/eval/datasets/iiit5k.py deleted file mode 100644 index 82b08047..00000000 --- a/tools/eval/datasets/iiit5k.py +++ /dev/null @@ -1,56 +0,0 @@ -import lmdb -import os -import numpy as np -import cv2 as cv -from tqdm import tqdm - -class IIIT5K: - def __init__(self, root): - self.root = root - self.acc = -1 - self.inputSize = [100, 32] - - self.val_label = self.load_label(self.root) - - @property - def name(self): - return self.__class__.__name__ - - def load_label(self, root): - lmdb_file = root - lmdb_env = lmdb.open(lmdb_file) - lmdb_txn = lmdb_env.begin() - lmdb_cursor = 
lmdb_txn.cursor() - label = list() - for key, value in lmdb_cursor: - image_index = key.decode() - if image_index.split('-')[0] == 'image': - img = cv.imdecode(np.fromstring(value, np.uint8), 3) - label_index = 'label-' + image_index.split('-')[1] - value = lmdb_txn.get(label_index.encode()).decode().lower() - label.append([img, value]) - else: - break - return label - - def eval(self, model): - right_num = 0 - pbar = tqdm(self.val_label) - pbar.set_description("Evaluating {} with {} val set".format(model.name, self.name)) - - for img, value in pbar: - - - rbbox = np.array([0, img.shape[0], 0, 0, img.shape[1], 0, img.shape[1], img.shape[0]]) - pred = model.infer(img, rbbox).lower() - if value == pred: - right_num += 1 - - self.acc = right_num/(len(self.val_label) * 1.0) - - - def get_result(self): - return self.acc - - def print_result(self): - print("Accuracy: {:.2f}%".format(self.acc*100)) \ No newline at end of file diff --git a/tools/eval/datasets/imagenet.py b/tools/eval/datasets/imagenet.py deleted file mode 100644 index 571a89e9..00000000 --- a/tools/eval/datasets/imagenet.py +++ /dev/null @@ -1,65 +0,0 @@ -import os - -import numpy as np -import cv2 as cv - -from tqdm import tqdm - -class ImageNet: - def __init__(self, root, size=224): - self.root = root - self.size = size - self.top1_acc = -1 - self.top5_acc = -1 - - self.root_val = os.path.join(self.root, "ILSVRC", "Data", "CLS-LOC", "val") - self.val_label_file = os.path.join(self.root, "caffe_ilsvrc12", "val.txt") - - self.val_label = self.load_label(self.val_label_file) - - @property - def name(self): - return self.__class__.__name__ - - def load_label(self, label_file): - label = list() - with open(label_file, "r") as f: - for line in f: - line = line.strip() - key, value = line.split() - - key = os.path.join(self.root_val, key) - value = int(value) - - label.append([key, value]) - - return label - - def eval(self, model): - top_1_hits = 0 - top_5_hits = 0 - pbar = tqdm(self.val_label) - 
pbar.set_description("Evaluating {} with {} val set".format(model.name, self.name)) - - for fn, label in pbar: - - img = cv.imread(fn) - img = cv.cvtColor(img, cv.COLOR_BGR2RGB) - img = cv.resize(img, dsize=(256, 256)) - img = img[16:240, 16:240, :] - - pred = model.infer(img) - if label == pred[0][0]: - top_1_hits += 1 - if label in pred[0]: - top_5_hits += 1 - - self.top1_acc = top_1_hits/(len(self.val_label) * 1.0) - self.top5_acc = top_5_hits/(len(self.val_label) * 1.0) - - def get_result(self): - return self.top1_acc, self.top5_acc - - def print_result(self): - print("Top-1 Accuracy: {:.2f}%; Top-5 Accuracy: {:.2f}%".format(self.top1_acc*100, self.top5_acc*100)) - diff --git a/tools/eval/datasets/lfw.py b/tools/eval/datasets/lfw.py deleted file mode 100644 index c001b3f9..00000000 --- a/tools/eval/datasets/lfw.py +++ /dev/null @@ -1,239 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import numpy as np - -from sklearn.model_selection import KFold -from scipy import interpolate -import sklearn -from sklearn.decomposition import PCA - -import cv2 as cv -from tqdm import tqdm - - -def calculate_roc(thresholds, - embeddings1, - embeddings2, - actual_issame, - nrof_folds=10, - pca=0): - assert (embeddings1.shape[0] == embeddings2.shape[0]) - assert (embeddings1.shape[1] == embeddings2.shape[1]) - nrof_pairs = min(len(actual_issame), embeddings1.shape[0]) - nrof_thresholds = len(thresholds) - k_fold = KFold(n_splits=nrof_folds, shuffle=False) - - tprs = np.zeros((nrof_folds, nrof_thresholds)) - fprs = np.zeros((nrof_folds, nrof_thresholds)) - accuracy = np.zeros((nrof_folds)) - indices = np.arange(nrof_pairs) - # print('pca', pca) - - if pca == 0: - diff = np.subtract(embeddings1, embeddings2) - dist = np.sum(np.square(diff), 1) - - for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)): - # print('train_set', train_set) - # print('test_set', test_set) - if pca > 
0: - print('doing pca on', fold_idx) - embed1_train = embeddings1[train_set] - embed2_train = embeddings2[train_set] - _embed_train = np.concatenate((embed1_train, embed2_train), axis=0) - # print(_embed_train.shape) - pca_model = PCA(n_components=pca) - pca_model.fit(_embed_train) - embed1 = pca_model.transform(embeddings1) - embed2 = pca_model.transform(embeddings2) - embed1 = sklearn.preprocessing.normalize(embed1) - embed2 = sklearn.preprocessing.normalize(embed2) - # print(embed1.shape, embed2.shape) - diff = np.subtract(embed1, embed2) - dist = np.sum(np.square(diff), 1) - - # Find the best threshold for the fold - acc_train = np.zeros((nrof_thresholds)) - for threshold_idx, threshold in enumerate(thresholds): - _, _, acc_train[threshold_idx] = calculate_accuracy( - threshold, dist[train_set], actual_issame[train_set]) - best_threshold_index = np.argmax(acc_train) - for threshold_idx, threshold in enumerate(thresholds): - tprs[fold_idx, - threshold_idx], fprs[fold_idx, - threshold_idx], _ = calculate_accuracy( - threshold, dist[test_set], - actual_issame[test_set]) - _, _, accuracy[fold_idx] = calculate_accuracy( - thresholds[best_threshold_index], dist[test_set], - actual_issame[test_set]) - - tpr = np.mean(tprs, 0) - fpr = np.mean(fprs, 0) - return tpr, fpr, accuracy - - -def calculate_accuracy(threshold, dist, actual_issame): - predict_issame = np.less(dist, threshold) - tp = np.sum(np.logical_and(predict_issame, actual_issame)) - fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame))) - tn = np.sum( - np.logical_and(np.logical_not(predict_issame), - np.logical_not(actual_issame))) - fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame)) - - tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn) - fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn) - acc = float(tp + tn) / dist.size - return tpr, fpr, acc - - -def calculate_val(thresholds, - embeddings1, - embeddings2, - actual_issame, - far_target, - 
nrof_folds=10): - assert (embeddings1.shape[0] == embeddings2.shape[0]) - assert (embeddings1.shape[1] == embeddings2.shape[1]) - nrof_pairs = min(len(actual_issame), embeddings1.shape[0]) - nrof_thresholds = len(thresholds) - k_fold = KFold(n_splits=nrof_folds, shuffle=False) - - val = np.zeros(nrof_folds) - far = np.zeros(nrof_folds) - - diff = np.subtract(embeddings1, embeddings2) - dist = np.sum(np.square(diff), 1) - indices = np.arange(nrof_pairs) - - for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)): - - # Find the threshold that gives FAR = far_target - far_train = np.zeros(nrof_thresholds) - for threshold_idx, threshold in enumerate(thresholds): - _, far_train[threshold_idx] = calculate_val_far( - threshold, dist[train_set], actual_issame[train_set]) - if np.max(far_train) >= far_target: - f = interpolate.interp1d(far_train, thresholds, kind='slinear') - threshold = f(far_target) - else: - threshold = 0.0 - - val[fold_idx], far[fold_idx] = calculate_val_far( - threshold, dist[test_set], actual_issame[test_set]) - - val_mean = np.mean(val) - far_mean = np.mean(far) - val_std = np.std(val) - return val_mean, val_std, far_mean - - -def calculate_val_far(threshold, dist, actual_issame): - predict_issame = np.less(dist, threshold) - true_accept = np.sum(np.logical_and(predict_issame, actual_issame)) - false_accept = np.sum( - np.logical_and(predict_issame, np.logical_not(actual_issame))) - n_same = np.sum(actual_issame) - n_diff = np.sum(np.logical_not(actual_issame)) - val = float(true_accept) / float(n_same) - far = float(false_accept) / float(n_diff) - return val, far - - -def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0): - # Calculate evaluation metrics - thresholds = np.arange(0, 4, 0.01) - embeddings1 = embeddings[0::2] - embeddings2 = embeddings[1::2] - tpr, fpr, accuracy = calculate_roc(thresholds, - embeddings1, - embeddings2, - np.asarray(actual_issame), - nrof_folds=nrof_folds, - pca=pca) - thresholds = np.arange(0, 
4, 0.001) - val, val_std, far = calculate_val(thresholds, - embeddings1, - embeddings2, - np.asarray(actual_issame), - 1e-3, - nrof_folds=nrof_folds) - return tpr, fpr, accuracy, val, val_std, far - - -class LFW: - def __init__(self, root, target_size=250): - self.LFW_IMAGE_SIZE = 250 - - self.lfw_root = root - self.target_size = target_size - - self.lfw_pairs_path = os.path.join(self.lfw_root, 'view2/pairs.txt') - self.image_path_pattern = os.path.join(self.lfw_root, 'lfw', '{person_name}', '{image_name}') - - self.lfw_image_paths, self.id_list = self.load_pairs() - - @property - def name(self): - return 'LFW' - - def __len__(self): - return len(self.lfw_image_paths) - - @property - def ids(self): - return self.id_list - - def load_pairs(self): - image_paths = [] - id_list = [] - with open(self.lfw_pairs_path, 'r') as f: - for line in f.readlines()[1:]: - line = line.strip().split() - if len(line) == 3: - person_name = line[0] - image1_name = '{}_{:04d}.jpg'.format(person_name, int(line[1])) - image2_name = '{}_{:04d}.jpg'.format(person_name, int(line[2])) - image_paths += [ - self.image_path_pattern.format(person_name=person_name, image_name=image1_name), - self.image_path_pattern.format(person_name=person_name, image_name=image2_name) - ] - id_list.append(True) - elif len(line) == 4: - person1_name = line[0] - image1_name = '{}_{:04d}.jpg'.format(person1_name, int(line[1])) - person2_name = line[2] - image2_name = '{}_{:04d}.jpg'.format(person2_name, int(line[3])) - image_paths += [ - self.image_path_pattern.format(person_name=person1_name, image_name=image1_name), - self.image_path_pattern.format(person_name=person2_name, image_name=image2_name) - ] - id_list.append(False) - return image_paths, id_list - - def __getitem__(self, key): - img = cv.imread(self.lfw_image_paths[key]) - if self.target_size != self.LFW_IMAGE_SIZE: - img = cv.resize(img, (self.target_size, self.target_size)) - return img - - def eval(self, model): - ids = self.ids - embeddings = 
np.zeros(shape=(len(self), 128)) - face_bboxes = np.load("./datasets/lfw_face_bboxes.npy") - for idx, img in tqdm(enumerate(self), desc="Evaluating {} with {} val set".format(model.name, self.name)): - embedding = model.infer(img, face_bboxes[idx]) - embeddings[idx] = embedding - - embeddings = sklearn.preprocessing.normalize(embeddings) - self.tpr, self.fpr, self.acc, self.val, self.std, self.far = evaluate(embeddings, ids, nrof_folds=10) - self.acc, self.std = np.mean(self.acc), np.std(self.acc) - - def print_result(self): - print("==================== Results ====================") - print("Average Accuracy: {:.4f}".format(self.acc)) - print("=================================================") diff --git a/tools/eval/datasets/lfw_face_bboxes.npy b/tools/eval/datasets/lfw_face_bboxes.npy deleted file mode 100644 index d3988c31..00000000 Binary files a/tools/eval/datasets/lfw_face_bboxes.npy and /dev/null differ diff --git a/tools/eval/datasets/minisupervisely.py b/tools/eval/datasets/minisupervisely.py deleted file mode 100644 index 63008dab..00000000 --- a/tools/eval/datasets/minisupervisely.py +++ /dev/null @@ -1,202 +0,0 @@ -import os -import cv2 as cv -import numpy as np -from tqdm import tqdm - - -class MiniSupervisely : - - ''' - Refer to https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.7/paddleseg/core/val.py - for official evaluation implementation. 
- ''' - - def __init__(self, root) : - self.root = root - self.val_path = os.path.join(root, 'val.txt') - self.image_set = self.load_data(self.val_path) - self.num_classes = 2 - self.miou = -1 - self.class_miou = -1 - self.acc = -1 - self.class_acc = -1 - - - @property - def name(self): - return self.__class__.__name__ - - - def load_data(self, val_path) : - """ - Load validation image set from val.txt file - Args : - val_path (str) : path to val.txt file - Returns : - image_set (list) : list of image path of input and expected image - """ - - image_set = [] - with open(val_path, 'r') as f : - for line in f.readlines() : - image_set.append(line.strip().split()) - - return image_set - - - def eval(self, model) : - """ - Evaluate model on validation set - Args : - model (object) : PP_HumanSeg model object - """ - - intersect_area_all = np.zeros([1], dtype=np.int64) - pred_area_all = np.zeros([1], dtype=np.int64) - label_area_all = np.zeros([1], dtype=np.int64) - - pbar = tqdm(self.image_set) - - pbar.set_description( - "Evaluating {} with {} val set".format(model.name, self.name)) - - for input_image, expected_image in pbar : - - input_image = cv.imread(os.path.join(self.root, input_image)).astype('float32') - - expected_image = cv.imread(os.path.join(self.root, expected_image), cv.IMREAD_GRAYSCALE)[np.newaxis, :, :] - - output_image = model.infer(input_image) - - intersect_area, pred_area, label_area = self.calculate_area( - output_image.astype('uint32'), - expected_image.astype('uint32'), - self.num_classes) - - intersect_area_all = intersect_area_all + intersect_area - pred_area_all = pred_area_all + pred_area - label_area_all = label_area_all + label_area - - self.class_iou, self.miou = self.mean_iou(intersect_area_all, pred_area_all, - label_area_all) - self.class_acc, self.acc = self.accuracy(intersect_area_all, pred_area_all) - - - def get_results(self) : - """ - Get evaluation results - Returns : - miou (float) : mean iou - class_miou (list) : iou on all 
classes - acc (float) : mean accuracy - class_acc (list) : accuracy on all classes - """ - return self.miou, self.class_miou, self.acc, self.class_acc - - - def print_result(self) : - """ - Print evaluation results - """ - print("Mean IoU : ", self.miou) - print("Mean Accuracy : ", self.acc) - print("Class IoU : ", self.class_iou) - print("Class Accuracy : ", self.class_acc) - - - def calculate_area(self,pred, label, num_classes, ignore_index=255): - """ - Calculate intersect, prediction and label area - Args: - pred (Tensor): The prediction by model. - label (Tensor): The ground truth of image. - num_classes (int): The unique number of target classes. - ignore_index (int): Specifies a target value that is ignored. Default: 255. - Returns: - Tensor: The intersection area of prediction and the ground on all class. - Tensor: The prediction area on all class. - Tensor: The ground truth area on all class - """ - - - if len(pred.shape) == 4: - pred = np.squeeze(pred, axis=1) - if len(label.shape) == 4: - label = np.squeeze(label, axis=1) - if not pred.shape == label.shape: - raise ValueError('Shape of `pred` and `label should be equal, ' - 'but there are {} and {}.'.format(pred.shape, - label.shape)) - - mask = label != ignore_index - pred_area = [] - label_area = [] - intersect_area = [] - - #iterate over all classes and calculate their respective areas - for i in range(num_classes): - pred_i = np.logical_and(pred == i, mask) - label_i = label == i - intersect_i = np.logical_and(pred_i, label_i) - pred_area.append(np.sum(pred_i.astype('int32'))) - label_area.append(np.sum(label_i.astype('int32'))) - intersect_area.append(np.sum(intersect_i.astype('int32'))) - - return intersect_area, pred_area, label_area - - - def mean_iou(self,intersect_area, pred_area, label_area): - """ - Calculate iou. - Args: - intersect_area (Tensor): The intersection area of prediction and ground truth on all classes. - pred_area (Tensor): The prediction area on all classes. 
- label_area (Tensor): The ground truth area on all classes. - Returns: - np.ndarray: iou on all classes. - float: mean iou of all classes. - """ - intersect_area = np.array(intersect_area) - pred_area = np.array(pred_area) - label_area = np.array(label_area) - - union = pred_area + label_area - intersect_area - - class_iou = [] - for i in range(len(intersect_area)): - if union[i] == 0: - iou = 0 - else: - iou = intersect_area[i] / union[i] - class_iou.append(iou) - - miou = np.mean(class_iou) - - return np.array(class_iou), miou - - - def accuracy(self,intersect_area, pred_area): - """ - Calculate accuracy - Args: - intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.. - pred_area (Tensor): The prediction area on all classes. - Returns: - np.ndarray: accuracy on all classes. - float: mean accuracy. - """ - - intersect_area = np.array(intersect_area) - pred_area = np.array(pred_area) - - class_acc = [] - for i in range(len(intersect_area)): - if pred_area[i] == 0: - acc = 0 - else: - acc = intersect_area[i] / pred_area[i] - class_acc.append(acc) - - macc = np.sum(intersect_area) / np.sum(pred_area) - - return np.array(class_acc), macc diff --git a/tools/eval/datasets/widerface.py b/tools/eval/datasets/widerface.py deleted file mode 100644 index 50237c2e..00000000 --- a/tools/eval/datasets/widerface.py +++ /dev/null @@ -1,315 +0,0 @@ -import os -import tqdm -import pickle -import numpy as np -from scipy.io import loadmat -import cv2 as cv - - -def get_gt_boxes(gt_dir): - """ gt dir: (wider_face_val.mat, wider_easy_val.mat, wider_medium_val.mat, wider_hard_val.mat)""" - - gt_mat = loadmat(os.path.join(gt_dir, 'wider_face_val.mat')) - hard_mat = loadmat(os.path.join(gt_dir, 'wider_hard_val.mat')) - medium_mat = loadmat(os.path.join(gt_dir, 'wider_medium_val.mat')) - easy_mat = loadmat(os.path.join(gt_dir, 'wider_easy_val.mat')) - - facebox_list = gt_mat['face_bbx_list'] - event_list = gt_mat['event_list'] - file_list = 
gt_mat['file_list'] - - hard_gt_list = hard_mat['gt_list'] - medium_gt_list = medium_mat['gt_list'] - easy_gt_list = easy_mat['gt_list'] - - return facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list - - -def get_gt_boxes_from_txt(gt_path, cache_dir): - cache_file = os.path.join(cache_dir, 'gt_cache.pkl') - if os.path.exists(cache_file): - f = open(cache_file, 'rb') - boxes = pickle.load(f) - f.close() - return boxes - - f = open(gt_path, 'r') - state = 0 - lines = f.readlines() - lines = list(map(lambda x: x.rstrip('\r\n'), lines)) - boxes = {} - print(len(lines)) - f.close() - current_boxes = [] - current_name = None - for line in lines: - if state == 0 and '--' in line: - state = 1 - current_name = line - continue - if state == 1: - state = 2 - continue - - if state == 2 and '--' in line: - state = 1 - boxes[current_name] = np.array(current_boxes).astype('float32') - current_name = line - current_boxes = [] - continue - - if state == 2: - box = [float(x) for x in line.split(' ')[:4]] - current_boxes.append(box) - continue - - f = open(cache_file, 'wb') - pickle.dump(boxes, f) - f.close() - return boxes - - -def norm_score(pred): - """ norm score - pred {key: [[x1,y1,x2,y2,s]]} - """ - - max_score = 0 - min_score = 1 - - for _, k in pred.items(): - for _, v in k.items(): - if len(v) == 0: - continue - _min = np.min(v[:, -1]) - _max = np.max(v[:, -1]) - max_score = max(_max, max_score) - min_score = min(_min, min_score) - - diff = max_score - min_score - for _, k in pred.items(): - for _, v in k.items(): - if len(v) == 0: - continue - v[:, -1] = (v[:, -1] - min_score) / diff - - -def bbox_overlaps(a, b): - """ - return iou of a and b, numpy version for data augenmentation - """ - lt = np.maximum(a[:, np.newaxis, 0:2], b[:, 0:2]) - rb = np.minimum(a[:, np.newaxis, 2:4], b[:, 2:4]) - - area_i = np.prod(rb - lt + 1, axis=2) * (lt < rb).all(axis=2) - area_a = np.prod(a[:, 2:4] - a[:, 0:2] + 1, axis=1) - area_b = np.prod(b[:, 2:4] - b[:, 
0:2] + 1, axis=1) - return area_i / (area_a[:, np.newaxis] + area_b - area_i) - - -def image_eval(pred, gt, ignore, iou_thresh): - """ single image evaluation - pred: Nx5 - gt: Nx4 - ignore: - """ - - _pred = pred.copy() - _gt = gt.copy() - pred_recall = np.zeros(_pred.shape[0]) - recall_list = np.zeros(_gt.shape[0]) - proposal_list = np.ones(_pred.shape[0]) - - _pred[:, 2] = _pred[:, 2] + _pred[:, 0] - _pred[:, 3] = _pred[:, 3] + _pred[:, 1] - _gt[:, 2] = _gt[:, 2] + _gt[:, 0] - _gt[:, 3] = _gt[:, 3] + _gt[:, 1] - - overlaps = bbox_overlaps(_pred[:, :4], _gt) - - for h in range(_pred.shape[0]): - - gt_overlap = overlaps[h] - max_overlap, max_idx = gt_overlap.max(), gt_overlap.argmax() - if max_overlap >= iou_thresh: - if ignore[max_idx] == 0: - recall_list[max_idx] = -1 - proposal_list[h] = -1 - elif recall_list[max_idx] == 0: - recall_list[max_idx] = 1 - - r_keep_index = np.where(recall_list == 1)[0] - pred_recall[h] = len(r_keep_index) - return pred_recall, proposal_list - - -def img_pr_info(thresh_num, pred_info, proposal_list, pred_recall): - pr_info = np.zeros((thresh_num, 2)).astype('float') - for t in range(thresh_num): - - thresh = 1 - (t + 1) / thresh_num - r_index = np.where(pred_info[:, 4] >= thresh)[0] - if len(r_index) == 0: - pr_info[t, 0] = 0 - pr_info[t, 1] = 0 - else: - r_index = r_index[-1] - p_index = np.where(proposal_list[:r_index + 1] == 1)[0] - pr_info[t, 0] = len(p_index) - pr_info[t, 1] = pred_recall[r_index] - return pr_info - - -def dataset_pr_info(thresh_num, pr_curve, count_face): - _pr_curve = np.zeros((thresh_num, 2)) - for i in range(thresh_num): - _pr_curve[i, 0] = pr_curve[i, 1] / pr_curve[i, 0] - _pr_curve[i, 1] = pr_curve[i, 1] / count_face - return _pr_curve - - -def voc_ap(rec, prec): - # correct AP calculation - # first append sentinel values at the end - mrec = np.concatenate(([0.], rec, [1.])) - mpre = np.concatenate(([0.], prec, [0.])) - - # compute the precision envelope - for i in range(mpre.size - 1, 0, -1): - mpre[i - 
1] = np.maximum(mpre[i - 1], mpre[i]) - - # to calculate area under PR curve, look for points - # where X axis (recall) changes value - i = np.where(mrec[1:] != mrec[:-1])[0] - - # and sum (\Delta recall) * prec - ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) - return ap - - -def evaluation(pred, gt_path, iou_thresh=0.5): - norm_score(pred) - facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list = get_gt_boxes(gt_path) - event_num = len(event_list) - thresh_num = 1000 - settings = ['easy', 'medium', 'hard'] - setting_gts = [easy_gt_list, medium_gt_list, hard_gt_list] - aps = [] - for setting_id in range(3): - # different setting - gt_list = setting_gts[setting_id] - count_face = 0 - pr_curve = np.zeros((thresh_num, 2)).astype('float') - # [hard, medium, easy] - pbar = tqdm.tqdm(range(event_num)) - for i in pbar: - pbar.set_description('Processing {}'.format(settings[setting_id])) - event_name = str(event_list[i][0][0]) - img_list = file_list[i][0] - pred_list = pred[event_name] - sub_gt_list = gt_list[i][0] - # img_pr_info_list = np.zeros((len(img_list), thresh_num, 2)) - gt_bbx_list = facebox_list[i][0] - - for j in range(len(img_list)): - pred_info = pred_list[str(img_list[j][0][0])] - - gt_boxes = gt_bbx_list[j][0].astype('float') - keep_index = sub_gt_list[j][0] - count_face += len(keep_index) - - if len(gt_boxes) == 0 or len(pred_info) == 0: - continue - ignore = np.zeros(gt_boxes.shape[0]) - if len(keep_index) != 0: - ignore[keep_index - 1] = 1 - pred_recall, proposal_list = image_eval(pred_info, gt_boxes, ignore, iou_thresh) - - _img_pr_info = img_pr_info(thresh_num, pred_info, proposal_list, pred_recall) - - pr_curve += _img_pr_info - pr_curve = dataset_pr_info(thresh_num, pr_curve, count_face) - - propose = pr_curve[:, 0] - recall = pr_curve[:, 1] - - ap = voc_ap(recall, propose) - aps.append(ap) - return aps - - -class WIDERFace: - def __init__(self, root, split='val'): - self.aps = [] - self.widerface_root = root - 
self._split = split - - self.widerface_img_paths = { - 'val': os.path.join(self.widerface_root, 'WIDER_val', 'images'), - 'test': os.path.join(self.widerface_root, 'WIDER_test', 'images') - } - - self.widerface_split_fpaths = { - 'val': os.path.join(self.widerface_root, 'wider_face_split', 'wider_face_val.mat'), - 'test': os.path.join(self.widerface_root, 'wider_face_split', 'wider_face_test.mat') - } - self.img_list, self.num_img = self.load_list() - - @property - def name(self): - return self.__class__.__name__ - - def load_list(self): - n_imgs = 0 - flist = [] - - split_fpath = self.widerface_split_fpaths[self._split] - img_path = self.widerface_img_paths[self._split] - - anno_data = loadmat(split_fpath) - event_list = anno_data.get('event_list') - file_list = anno_data.get('file_list') - - for event_idx, event in enumerate(event_list): - event_name = event[0][0] - for f_idx, f in enumerate(file_list[event_idx][0]): - f_name = f[0][0] - f_path = os.path.join(img_path, event_name, f_name + '.jpg') - flist.append(f_path) - n_imgs += 1 - - return flist, n_imgs - - def __getitem__(self, index): - img = cv.imread(self.img_list[index]) - event, name = self.img_list[index].split(os.sep)[-2:] - return event, name, img - - def eval(self, model): - results_list = dict() - pbar = tqdm.tqdm(self) - pbar.set_description_str("Evaluating {} with {} val set".format(model.name, self.name)) - # forward - for event_name, img_name, img in pbar: - img_shape = [img.shape[1], img.shape[0]] - model.setInputSize(img_shape) - det = model.infer(img) - - if not results_list.get(event_name): - results_list[event_name] = dict() - - if det is None: - det = np.array([[10, 10, 20, 20, 0.002]]) - else: - det = np.append(np.around(det[:, :4], 1), np.around(det[:, -1], 3).reshape(-1, 1), axis=1) - - results_list[event_name][img_name.rstrip('.jpg')] = det - - self.aps = evaluation(results_list, os.path.join(self.widerface_root, 'eval_tools', 'ground_truth')) - - def print_result(self): - 
print("==================== Results ====================") - print("Easy Val AP: {}".format(self.aps[0])) - print("Medium Val AP: {}".format(self.aps[1])) - print("Hard Val AP: {}".format(self.aps[2])) - print("=================================================") diff --git a/tools/eval/eval.py b/tools/eval/eval.py deleted file mode 100644 index a046d5bd..00000000 --- a/tools/eval/eval.py +++ /dev/null @@ -1,182 +0,0 @@ -import os -import sys -import argparse - -import numpy as np -import cv2 as cv - -from datasets import DATASETS - -if "PYTHONPATH" in os.environ: - root_dir = os.environ["PYTHONPATH"] -else: - root_dir = os.path.join("..", "..") -sys.path.append(root_dir) -from models import MODELS - -parser = argparse.ArgumentParser("Evaluation with OpenCV on different models in the zoo.") -parser.add_argument("--model", "-m", type=str, required=True, help="model name") -parser.add_argument("--dataset", "-d", type=str, required=True, help="Dataset name") -parser.add_argument("--dataset_root", "-dr", type=str, required=True, help="Root directory of given dataset") -args = parser.parse_args() - -models = dict( - mobilenetv1=dict( - name="MobileNet", - topic="image_classification", - modelPath=os.path.join(root_dir, "models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr.onnx"), - topK=5, - loadLabel=False), - mobilenetv1_q=dict( - name="MobileNet", - topic="image_classification", - modelPath=os.path.join(root_dir, "models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr_int8.onnx"), - topK=5, - loadLabel=False), - mobilenetv1_bq=dict( - name="MobileNet", - topic="image_classification", - modelPath=os.path.join(root_dir, "models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr_int8bq.onnx"), - topK=5, - loadLabel=False), - mobilenetv2=dict( - name="MobileNet", - topic="image_classification", - modelPath=os.path.join(root_dir, 
"models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr.onnx"), - topK=5, - loadLabel=False), - mobilenetv2_q=dict( - name="MobileNet", - topic="image_classification", - modelPath=os.path.join(root_dir, "models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr_int8.onnx"), - topK=5, - loadLabel=False), - mobilenetv2_bq=dict( - name="MobileNet", - topic="image_classification", - modelPath=os.path.join(root_dir, "models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr_int8bq.onnx"), - topK=5, - loadLabel=False), - ppresnet=dict( - name="PPResNet", - topic="image_classification", - modelPath=os.path.join(root_dir, "models/image_classification_ppresnet/image_classification_ppresnet50_2022jan.onnx"), - topK=5, - loadLabel=False), - ppresnet_q=dict( - name="PPResNet", - topic="image_classification", - modelPath=os.path.join(root_dir, "models/image_classification_ppresnet/image_classification_ppresnet50_2022jan_int8.onnx"), - topK=5, - loadLabel=False), - ppresnet_bq=dict( - name="PPResNet", - topic="image_classification", - modelPath=os.path.join(root_dir, "models/image_classification_ppresnet/image_classification_ppresnet50_2022jan_int8bq.onnx"), - topK=5, - loadLabel=False), - yunet=dict( - name="YuNet", - topic="face_detection", - modelPath=os.path.join(root_dir, "models/face_detection_yunet/face_detection_yunet_2023mar.onnx"), - topK=5000, - confThreshold=0.3, - nmsThreshold=0.45), - yunet_q=dict( - name="YuNet", - topic="face_detection", - modelPath=os.path.join(root_dir, "models/face_detection_yunet/face_detection_yunet_2023mar_int8.onnx"), - topK=5000, - confThreshold=0.3, - nmsThreshold=0.45), - yunet_bq=dict( - name="YuNet", - topic="face_detection", - modelPath=os.path.join(root_dir, "models/face_detection_yunet/face_detection_yunet_2023mar_int8bq.onnx"), - topK=5000, - confThreshold=0.3, - nmsThreshold=0.45), - sface=dict( - name="SFace", - topic="face_recognition", - 
modelPath=os.path.join(root_dir, "models/face_recognition_sface/face_recognition_sface_2021dec.onnx")), - sface_q=dict( - name="SFace", - topic="face_recognition", - modelPath=os.path.join(root_dir, "models/face_recognition_sface/face_recognition_sface_2021dec_int8.onnx")), - sface_bq=dict( - name="SFace", - topic="face_recognition", - modelPath=os.path.join(root_dir, "models/face_recognition_sface/face_recognition_sface_2021dec_int8bq.onnx")), - crnn_en=dict( - name="CRNN", - topic="text_recognition", - modelPath=os.path.join(root_dir, "models/text_recognition_crnn/text_recognition_CRNN_EN_2021sep.onnx")), - crnn_en_q=dict( - name="CRNN", - topic="text_recognition", - modelPath=os.path.join(root_dir, "models/text_recognition_crnn/text_recognition_CRNN_EN_2022oct_int8.onnx")), - pphumanseg=dict( - name="PPHumanSeg", - topic="human_segmentation", - modelPath=os.path.join(root_dir, "models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2023mar.onnx")), - pphumanseg_q=dict( - name="PPHumanSeg", - topic="human_segmentation", - modelPath=os.path.join(root_dir, "models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2023mar_int8.onnx")), - pphumanseg_bq=dict( - name="PPHumanSeg", - topic="human_segmentation", - modelPath=os.path.join(root_dir, "models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2023mar_int8bq.onnx")), -) - -datasets = dict( - imagenet=dict( - name="ImageNet", - topic="image_classification", - size=224), - widerface=dict( - name="WIDERFace", - topic="face_detection"), - lfw=dict( - name="LFW", - topic="face_recognition", - target_size=112), - icdar=dict( - name="ICDAR", - topic="text_recognition"), - iiit5k=dict( - name="IIIT5K", - topic="text_recognition"), - mini_supervisely=dict( - name="MiniSupervisely", - topic="human_segmentation"), -) - -def main(args): - # Instantiate model - model_key = args.model.lower() - assert model_key in models - - model_name = models[model_key].pop("name") - model_topic = 
models[model_key].pop("topic") - model_handler, _ = MODELS.get(model_name) - model = model_handler(**models[model_key]) - - # Instantiate dataset - dataset_key = args.dataset.lower() - assert dataset_key in datasets - - dataset_name = datasets[dataset_key].pop("name") - dataset_topic = datasets[dataset_key].pop("topic") - dataset = DATASETS.get(dataset_name)(root=args.dataset_root, **datasets[dataset_key]) - - # Check if model_topic matches dataset_topic - assert model_topic == dataset_topic - - # Run evaluation - dataset.eval(model) - dataset.print_result() - -if __name__ == "__main__": - main(args) diff --git a/tools/quantize/README.md b/tools/quantize/README.md deleted file mode 100644 index 2ef80180..00000000 --- a/tools/quantize/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Quantization with ONNXRUNTIME and Neural Compressor - -[ONNXRUNTIME](https://github.com/microsoft/onnxruntime) and [Neural Compressor](https://github.com/intel/neural-compressor) are used for quantization in the Zoo. - -Install dependencies before trying quantization: -```shell -pip install -r requirements.txt -``` - -## Quantization Usage - -Quantize all models in the Zoo: -```shell -python quantize-ort.py -python quantize-inc.py -``` - -Quantize one of the models in the Zoo: -```shell -# python quantize.py -python quantize-ort.py yunet -python quantize-inc.py mobilenetv1 -``` - -Customizing quantization configs: -```python -# Quantize with ONNXRUNTIME -# 1. add your model into `models` dict in quantize-ort.py -models = dict( - # ... - model1=Quantize(model_path='/path/to/model1.onnx', - calibration_image_dir='/path/to/images', - transforms=Compose([''' transforms ''']), # transforms can be found in transforms.py - per_channel=False, # set False to quantize in per-tensor style - act_type='int8', # available types: 'int8', 'uint8' - wt_type='int8' # available types: 'int8', 'uint8' - ) -) -# 2. 
quantize your model -python quantize-ort.py model1 - - -# Quantize with Intel Neural Compressor -# 1. add your model into `models` dict in quantize-inc.py -models = dict( - # ... - model1=Quantize(model_path='/path/to/model1.onnx', - config_path='/path/to/model1.yaml'), -) -# 2. prepare your YAML config model1.yaml (see configs in ./inc_configs) -# 3. quantize your model -python quantize-inc.py model1 -``` - -## Blockwise quantization usage - -Block-quantized models under each model directory are generated with `--block_size=64` - -`block_quantize.py` requires Python>=3.7 - -To perform weight-only blockwise quantization: - -```shell -python block_quantize.py --input_model INPUT_MODEL.onnx --output_model OUTPUT_MODEL.onnx --block_size {block size} --bits {8,16} -``` - -## Dataset -Some models are quantized with extra datasets. -- [MP-PalmDet](../../models/palm_detection_mediapipe) and [MP-HandPose](../../models/handpose_estimation_mediapipe) are quantized with evaluation set of [FreiHAND](https://lmb.informatik.uni-freiburg.de/resources/datasets/FreihandDataset.en.html). Download the dataset from [this link](https://lmb.informatik.uni-freiburg.de/data/freihand/FreiHAND_pub_v2_eval.zip). Unpack it and replace `path/to/dataset` with the path to `FreiHAND_pub_v2_eval/evaluation/rgb`. 
diff --git a/tools/quantize/block_quantize.py b/tools/quantize/block_quantize.py deleted file mode 100644 index 4eb3d638..00000000 --- a/tools/quantize/block_quantize.py +++ /dev/null @@ -1,513 +0,0 @@ -import sys - -MIN_PYTHON_VERSION = (3, 7) - -if sys.version_info < MIN_PYTHON_VERSION: - raise ImportError("This script requires Python 3.7 or higher!") - -import argparse -import os -from dataclasses import dataclass, field -from typing import Dict, Tuple -from enum import Enum, auto - -import numpy as np -import onnx -from onnx import helper - -BITS_TO_NUMPY_TYPE = {8: np.int8, 16: np.int16} - - -SUPPORTED_OPS = {"Conv", "Gemm", "MatMul"} - -ONNX_OPSET = 21 - - -class WeightCategory(Enum): - INITIALIZER = auto() - CONSTANT = auto() - NONE = auto() - - -@dataclass -class BlockQuantizeConfig: - input_model_path: str - output_model_path: str - block_size: int - bits: int - verbose: bool - - -@dataclass -class BlockQuantizeResult: - quantized_weights: np.ndarray = field(default_factory=lambda: np.array([])) - scales: np.ndarray = field(default_factory=lambda: np.array([])) - zero_point: np.ndarray = field(default_factory=lambda: np.array([])) - block_size: int = 1 - axis: int = 1 - original_shape: Tuple = field(default_factory=tuple) - quantization_error: np.ndarray = field(default_factory=lambda: np.array([])) - - -def closest_divisor(number: int, divisor: int) -> int: - for d in range(divisor, 0, -1): - if number % d == 0: - return d - return 1 - - -def block_dequantize_tensor( - x: np.ndarray, block_axis: int, scale: np.ndarray, zero_point: np.ndarray -) -> np.ndarray: - repeats = x.shape[block_axis] // scale.shape[block_axis] - - x_scale_elementwise = np.repeat(scale, repeats=repeats, axis=block_axis) - x_zero_point_elementwise = np.repeat(zero_point, repeats=repeats, axis=block_axis) - - y = ( - x.astype(np.float32) - x_zero_point_elementwise.astype(np.float32) - ) * x_scale_elementwise - - return y - - -def block_quantize_tensor( - x: np.ndarray, - block_axis: 
int, - scale: np.ndarray, - zero_point: np.ndarray, - n_bits: int, -) -> np.ndarray: - repeats = x.shape[block_axis] // scale.shape[block_axis] - - y_scale_elementwise = np.repeat(scale, repeats=repeats, axis=block_axis) - y_zero_point_elementwise = np.repeat(zero_point, repeats=repeats, axis=block_axis) - - type_info = np.iinfo(BITS_TO_NUMPY_TYPE[n_bits]) - min_value = type_info.min - max_value = type_info.max - - y = np.rint(x / y_scale_elementwise + y_zero_point_elementwise) - y = np.clip(y, min_value, max_value) - y = y.astype(BITS_TO_NUMPY_TYPE[n_bits]) - - return y - - -def create_dequantize_node( - node_name, - quantized_weights, - scales, - zero_point, - dequantized_weights, - block_size, - axis, -) -> onnx.NodeProto: - block_size_attr = helper.make_attribute("block_size", block_size) - axis_attr = helper.make_attribute("axis", axis) - - n = helper.make_node( - "DequantizeLinear", - inputs=[quantized_weights, scales, zero_point], - outputs=[dequantized_weights], - name=node_name, - ) - n.attribute.extend([block_size_attr, axis_attr]) - return n - - -def create_reshape_node( - node_name, dequantized_weights, shape_tensor, reshaped_weights_name -) -> onnx.NodeProto: - return helper.make_node( - "Reshape", - inputs=[dequantized_weights, shape_tensor], - outputs=[reshaped_weights_name], - name=node_name, - ) - - -class BlockQuantizer: - def __init__(self, conf: BlockQuantizeConfig) -> None: - self.conf = conf - self.validate_conf() - - self.model = onnx.load(conf.input_model_path) - - if self.model.opset_import[0].version != ONNX_OPSET: - self.model = onnx.version_converter.convert_version(self.model, ONNX_OPSET) - - self.graph = self.model.graph - self.initializers_map = { - init.name: init for init in self.model.graph.initializer - } - self.costants_map = { - node.output[0]: next( - attr.t for attr in node.attribute if attr.name == "value" - ) - for node in self.model.graph.node - if node.op_type == "Constant" - } - - def validate_conf(self): - if not 
os.path.isfile(self.conf.input_model_path): - raise ValueError( - f"Input model path '{self.conf.input_model_path}' does not exist or is not a file." - ) - - if not self.conf.input_model_path.lower().endswith(".onnx"): - raise ValueError( - f"Input model path '{self.conf.input_model_path}' must have a .onnx extension." - ) - - if not self.conf.output_model_path.lower().endswith(".onnx"): - raise ValueError( - f"Output model path '{self.conf.output_model_path}' must have a .onnx extension." - ) - - if self.conf.block_size <= 0: - raise ValueError("Block size must be a positive integer.") - - if self.conf.bits not in BITS_TO_NUMPY_TYPE: - allowed_values = ", ".join([str(k) for k in BITS_TO_NUMPY_TYPE.keys()]) - raise ValueError( - f"Bits must be one of the following values: [{allowed_values}]." - ) - - def get_weight_category(self, name: str) -> WeightCategory: - if name in self.initializers_map: - return WeightCategory.INITIALIZER - if name in self.costants_map: - return WeightCategory.CONSTANT - else: - return WeightCategory.NONE - - def get_weight_tensor(self, name: str, category: WeightCategory) -> np.ndarray: - if category == WeightCategory.INITIALIZER: - return onnx.numpy_helper.to_array(self.initializers_map[name]) - elif category == WeightCategory.CONSTANT: - return onnx.numpy_helper.to_array(self.costants_map[name]) - else: - raise AssertionError("Invalid weight category") - - def remove_fp32_weights(self, name: str, category: WeightCategory): - if category == WeightCategory.INITIALIZER: - self.graph.initializer.remove( - next(init for init in self.graph.initializer if init.name == name) - ) - elif category == WeightCategory.CONSTANT: - self.graph.node.remove( - next( - node - for node in self.graph.node - if node.op_type == "Constant" and node.output[0] == name - ) - ) - else: - raise AssertionError("Invalid weight category") - - def compute_scale_zeropoint( - self, b_min: np.ndarray, b_max: np.ndarray - ) -> Tuple[np.ndarray, np.ndarray]: - assert ( - 
b_min <= b_max - ).all(), "minimum must not be greater than maximum when computing scale and zero point" - - # zero must be present in the range, this enforces qmin <= zero_point <= qmax - b_min = np.minimum(b_min, np.zeros_like(b_min, dtype=b_min.dtype)) - b_max = np.maximum(b_max, np.zeros_like(b_max, dtype=b_max.dtype)) - - type_info = np.iinfo(BITS_TO_NUMPY_TYPE[self.conf.bits]) - qmin = type_info.min - qmax = type_info.max - - dq = qmax - qmin - - scales = np.where(b_max != b_min, (b_max - b_min) / dq, 1.0) - - zeropoints = np.where(b_max != b_min, np.rint(qmin - b_min / scales), 0.0) - zeropoints = zeropoints.astype(BITS_TO_NUMPY_TYPE[self.conf.bits]) - - return (scales, zeropoints) - - def block_quantize(self, weight: np.ndarray) -> BlockQuantizeResult: - original_shape = weight.shape - - if weight.ndim > 1: - weight = weight.reshape((weight.shape[0], -1)) - quantization_axis = 1 - else: - quantization_axis = 0 - - block_size = closest_divisor( - weight.shape[quantization_axis], self.conf.block_size - ) - - assert ( - weight.shape[quantization_axis] % block_size == 0 - ), f"weight shape ({weight.shape[quantization_axis]}) must be divisible by block size ({block_size})" - - # Flattening the tensor after the quantization axis - new_shape = list(weight.shape[: quantization_axis + 1]) + [-1] - new_shape[quantization_axis] = new_shape[quantization_axis] // block_size - - blocked_weight = weight.reshape(new_shape) - - blocked_max = np.max(blocked_weight, -1) - blocked_min = np.min(blocked_weight, -1) - - scales, zeropoints = self.compute_scale_zeropoint(blocked_min, blocked_max) - - quantized_weight = block_quantize_tensor( - weight, quantization_axis, scales, zeropoints, self.conf.bits - ) - reconstructed_mat = block_dequantize_tensor( - quantized_weight, quantization_axis, scales, zeropoints - ) - - # Relative Norm - qerror = np.linalg.norm(reconstructed_mat - weight) / (np.linalg.norm(weight) + 1e-10) - - res = BlockQuantizeResult( - quantized_weight, - scales, 
- zeropoints, - block_size, - quantization_axis, - original_shape, - qerror, - ) - - return res - - def get_model_size(self, model_path: str) -> float: - size_bytes = os.path.getsize(model_path) - size_mb = size_bytes / 1024 - - return size_mb - - def display_summary(self, sqe: Dict[str, int]): - sqe_v = list(sqe.values()) - if len(sqe_v) == 0: - mse = 0 - print( - "Warning: No weights have been quantized, likely due to unsupported layers." - ) - else: - mse = sum(sqe_v) / len(sqe_v) - original_model_size = self.get_model_size(self.conf.input_model_path) - quantized_model_size = self.get_model_size(self.conf.output_model_path) - - if self.conf.verbose: - sorted_sqe = sorted(sqe.items(), key=lambda item: item[1], reverse=True) - longest_key_len = max(len(key) for key in sqe.keys()) - - print("Quantization error (Relative Norm) sorted in ascending order:") - - for key, value in sorted_sqe: - print(f"{key:<{longest_key_len}} : {value}") - - print("Done! Results saved in", self.conf.output_model_path) - print("\nSummary of Results:\n") - print(f"{'Metric':<30} {'Value':<10}") - print(f"{'-'*40}") - print(f"{'Relative Norm Error':<31} {mse:.6f}") - print(f"{'Original Model Size (KB)':<31} {original_model_size:,.2f}") - print(f"{'Block-Quantized Model Size (KB)':<30} {quantized_model_size:,.2f}") - - def run(self): - print("Quantizing the model...") - - quantized_inputs = [] - sqe = {} - - node_idx = 0 - - while node_idx < len(self.model.graph.node): - node = self.model.graph.node[node_idx] - - if node.op_type in SUPPORTED_OPS: - for input_idx, input_name in enumerate(node.input): - weightCategory = self.get_weight_category(input_name) - - # Skip quantization if weights are taken as external input - if weightCategory == WeightCategory.NONE: - continue - - weight = self.get_weight_tensor(input_name, weightCategory) - - quantized_weights_name = f"{input_name}_quantized" - quantized_node_name = f"{input_name}_quantized_node" - dequantized_weights_name = 
f"{input_name}_dequantized" - scales_name = f"{input_name}_scales" - zero_point_name = f"{input_name}_zero_point" - - shape_node_name = f"{input_name}_shape_node" - shape_name = f"{input_name}_shape" - reshaped_weights_name = f"{input_name}_reshaped" - - # Skip quantization if weights don't contain enough elements to create at least 1 block - if weight.size < self.conf.block_size: - continue - - reshape_needed = weight.ndim > 2 - - # In case of parameter sharing - if input_name in quantized_inputs: - node.input[input_idx] = ( - reshaped_weights_name - if reshape_needed - else dequantized_weights_name - ) - continue - - - block_quantize_res = self.block_quantize(weight) - - # Skip quantization if it wouldn't reduce the model size - if block_quantize_res.block_size == 1: - continue - - quantized_inputs.append(input_name) - - dequantize_node = create_dequantize_node( - quantized_node_name, - quantized_weights_name, - scales_name, - zero_point_name, - dequantized_weights_name, - block_quantize_res.block_size, - block_quantize_res.axis, - ) - - if reshape_needed: - reshape_node = create_reshape_node( - shape_node_name, - dequantized_weights_name, - shape_name, - reshaped_weights_name, - ) - - shape_tensor = onnx.numpy_helper.from_array( - np.array(block_quantize_res.original_shape), name=shape_name - ) - scale_initializer = onnx.numpy_helper.from_array( - block_quantize_res.scales, name=scales_name - ) - zero_point_initializer = onnx.numpy_helper.from_array( - block_quantize_res.zero_point, name=zero_point_name - ) - quantized_weights_initializer = onnx.numpy_helper.from_array( - block_quantize_res.quantized_weights, - name=quantized_weights_name, - ) - - dequantized_weights_info = helper.make_tensor_value_info( - dequantized_weights_name, - onnx.TensorProto.FLOAT, - block_quantize_res.quantized_weights.shape, - ) - - if reshape_needed: - shape_info = helper.make_tensor_value_info( - reshaped_weights_name, - onnx.TensorProto.FLOAT, - block_quantize_res.original_shape, - 
) - - self.graph.initializer.extend( - [ - scale_initializer, - zero_point_initializer, - shape_tensor, - quantized_weights_initializer, - ] - ) - - self.remove_fp32_weights(input_name, weightCategory) - - node.input[input_idx] = ( - reshaped_weights_name - if reshape_needed - else dequantized_weights_name - ) - - # Preserving graph nodes topological order - if reshape_needed: - self.graph.node.insert(0, reshape_node) - node_idx += 1 - - self.graph.node.insert(0, dequantize_node) - node_idx += 1 - if reshape_needed: - self.graph.value_info.insert(0, shape_info) - self.graph.value_info.insert(0, dequantized_weights_info) - - sqe[input_name] = block_quantize_res.quantization_error - - node_idx += 1 - - onnx.checker.check_model(self.model, full_check=True) - onnx.save(self.model, self.conf.output_model_path) - - self.display_summary(sqe) - - -def setup_args() -> argparse.Namespace: - parser = argparse.ArgumentParser(description="Blockwise quantization tool") - - parser.add_argument( - "-i", - "--input_model", - type=str, - help="The path of onnx model to quantize", - required=True, - ) - parser.add_argument( - "-bs", - "--block_size", - type=int, - help="The maximum size of quantization block", - required=True, - ) - parser.add_argument( - "-b", - "--bits", - type=int, - help="Quantization bits", - choices=[8, 16], - default=8, - required=False, - ) - parser.add_argument( - "-o", - "--output_model", - type=str, - help="The output model path", - default="block_quantized_model.onnx", - required=False, - ) - parser.add_argument( - "-v", - "--verbose", - action="store_true", - help="Enable verbose output", - required=False, - ) - - return parser.parse_args() - - -if __name__ == "__main__": - args = setup_args() - - quantization_config = BlockQuantizeConfig( - input_model_path=args.input_model, - output_model_path=args.output_model, - block_size=args.block_size, - bits=args.bits, - verbose=args.verbose - ) - - quantizer = BlockQuantizer(quantization_config) - 
quantizer.run() diff --git a/tools/quantize/inc_configs/fer.yaml b/tools/quantize/inc_configs/fer.yaml deleted file mode 100644 index 69380842..00000000 --- a/tools/quantize/inc_configs/fer.yaml +++ /dev/null @@ -1,38 +0,0 @@ -version: 1.0 - -model: # mandatory. used to specify model specific information. - name: fer - framework: onnxrt_qlinearops # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - approach: post_training_static_quant # optional. default value is post_training_static_quant. - calibration: - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [1, 3, 112, 112] - low: -1.0 - high: 1.0 - dtype: float32 - label: True - - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - weight: - granularity: per_tensor - scheme: asym - dtype: int8 - algorithm: minmax - activation: - granularity: per_tensor - scheme: asym - dtype: int8 - algorithm: minmax - -tuning: - accuracy_criterion: - relative: 0.02 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - max_trials: 50 # optional. max tune times. default value is 100. combine with timeout field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. 
diff --git a/tools/quantize/inc_configs/lpd_yunet.yaml b/tools/quantize/inc_configs/lpd_yunet.yaml deleted file mode 100644 index 5e700c60..00000000 --- a/tools/quantize/inc_configs/lpd_yunet.yaml +++ /dev/null @@ -1,52 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: 1.0 - -model: # mandatory. used to specify model specific information. - name: lpd_yunet - framework: onnxrt_qlinearops # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - approach: post_training_static_quant # optional. default value is post_training_static_quant. - calibration: - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [1, 3, 240, 320] - low: 0.0 - high: 127.0 - dtype: float32 - label: True - - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - weight: - granularity: per_tensor - scheme: asym - dtype: int8 - algorithm: minmax - activation: - granularity: per_tensor - scheme: asym - dtype: int8 - algorithm: minmax - -tuning: - accuracy_criterion: - relative: 0.02 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. 
combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/tools/quantize/inc_configs/mobilenet.yaml b/tools/quantize/inc_configs/mobilenet.yaml deleted file mode 100644 index cedf006e..00000000 --- a/tools/quantize/inc_configs/mobilenet.yaml +++ /dev/null @@ -1,98 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: 1.0 - -model: # mandatory. used to specify model specific information. - name: mobilenetv2 - framework: onnxrt_qlinearops # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - approach: post_training_static_quant # optional. default value is post_training_static_quant. - calibration: - dataloader: - batch_size: 1 - dataset: - ImagenetRaw: - data_path: /path/to/imagenet/val - image_list: /path/to/imagenet/val.txt # download from http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz - transform: - Rescale: {} - Resize: - size: 256 - CenterCrop: - size: 224 - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - Transpose: - perm: [2, 0, 1] - Cast: - dtype: float32 -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. 
required if user doesn't provide eval_func in lpot.Quantization. - metric: - topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. - dataloader: - batch_size: 1 - dataset: - ImagenetRaw: - data_path: /path/to/imagenet/val - image_list: /path/to/imagenet/val.txt # download from http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz - transform: - Rescale: {} - Resize: - size: 256 - CenterCrop: - size: 224 - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - Transpose: - perm: [2, 0, 1] - Cast: - dtype: float32 - performance: # optional. used to benchmark performance of passing model. - warmup: 10 - iteration: 1000 - configs: - cores_per_instance: 4 - num_of_instance: 1 - dataloader: - batch_size: 1 - dataset: - ImagenetRaw: - data_path: /path/to/imagenet/val - image_list: /path/to/imagenet/val.txt # download from http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz - transform: - Rescale: {} - Resize: - size: 256 - CenterCrop: - size: 224 - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - Transpose: - perm: [2, 0, 1] - Cast: - dtype: float32 - -tuning: - accuracy_criterion: - relative: 0.02 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/tools/quantize/inc_configs/mp_handpose.yaml b/tools/quantize/inc_configs/mp_handpose.yaml deleted file mode 100644 index 1ef66a27..00000000 --- a/tools/quantize/inc_configs/mp_handpose.yaml +++ /dev/null @@ -1,52 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: 1.0 - -model: # mandatory. used to specify model specific information. - name: mp_handpose - framework: onnxrt_qlinearops # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - approach: post_training_static_quant # optional. default value is post_training_static_quant. - calibration: - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [1, 256, 256, 3] - low: -1.0 - high: 1.0 - dtype: float32 - label: True - - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - weight: - granularity: per_tensor - scheme: asym - dtype: int8 - algorithm: minmax - activation: - granularity: per_tensor - scheme: asym - dtype: int8 - algorithm: minmax - -tuning: - accuracy_criterion: - relative: 0.02 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. 
diff --git a/tools/quantize/quantize-inc.py b/tools/quantize/quantize-inc.py deleted file mode 100644 index f72f5c92..00000000 --- a/tools/quantize/quantize-inc.py +++ /dev/null @@ -1,150 +0,0 @@ -import os -import sys -import numpy as np -import cv2 as cv - -import onnx -from neural_compressor.experimental import Quantization, common -from neural_compressor.experimental.metric import BaseMetric - - -class Accuracy(BaseMetric): - def __init__(self, *args): - self.pred_list = [] - self.label_list = [] - self.samples = 0 - - def update(self, predict, label): - predict = np.array(predict) - label = np.array(label) - self.pred_list.append(np.argmax(predict[0])) - self.label_list.append(label[0][0]) - self.samples += 1 - - def reset(self): - self.pred_list = [] - self.label_list = [] - self.samples = 0 - - def result(self): - correct_num = np.sum(np.array(self.pred_list) == np.array(self.label_list)) - return correct_num / self.samples - - -class Quantize: - def __init__(self, model_path, config_path, custom_dataset=None, eval_dataset=None, metric=None): - self.model_path = model_path - self.config_path = config_path - self.custom_dataset = custom_dataset - self.eval_dataset = eval_dataset - self.metric = metric - - def run(self): - print('Quantizing (int8) with Intel\'s Neural Compressor:') - print('\tModel: {}'.format(self.model_path)) - print('\tConfig: {}'.format(self.config_path)) - - output_name = '{}-int8-quantized.onnx'.format(self.model_path[:-5]) - - model = onnx.load(self.model_path) - quantizer = Quantization(self.config_path) - quantizer.model = common.Model(model) - if self.custom_dataset is not None: - quantizer.calib_dataloader = common.DataLoader(self.custom_dataset) - if self.eval_dataset is not None: - quantizer.eval_dataloader = common.DataLoader(self.eval_dataset) - if self.metric is not None: - quantizer.metric = common.Metric(metric_cls=self.metric, name='metric') - q_model = quantizer() - q_model.save(output_name) - - -class Dataset: - def 
__init__(self, root, size=None, dim='chw', scale=1.0, mean=0.0, std=1.0, swapRB=False, toFP32=False): - self.root = root - self.size = size - self.dim = dim - self.scale = scale - self.mean = mean - self.std = std - self.swapRB = swapRB - self.toFP32 = toFP32 - - self.image_list, self.label_list = self.load_image_list(self.root) - - def load_image_list(self, path): - image_list = [] - label_list = [] - for f in os.listdir(path): - if not f.endswith('.jpg'): - continue - image_list.append(os.path.join(path, f)) - label_list.append(1) - return image_list, label_list - - def __getitem__(self, idx): - img = cv.imread(self.image_list[idx]) - - if self.swapRB: - img = cv.cvtColor(img, cv.COLOR_BGR2RGB) - - if self.size: - img = cv.resize(img, dsize=self.size) - - if self.toFP32: - img = img.astype(np.float32) - - img = img * self.scale - img = img - self.mean - img = img / self.std - - if self.dim == 'chw': - img = img.transpose(2, 0, 1) # hwc -> chw - - return img, self.label_list[idx] - - def __len__(self): - return len(self.image_list) - - -class FerDataset(Dataset): - def __init__(self, root, size=None, dim='chw', scale=1.0, mean=0.0, std=1.0, swapRB=False, toFP32=False): - super(FerDataset, self).__init__(root, size, dim, scale, mean, std, swapRB, toFP32) - - def load_image_list(self, path): - image_list = [] - label_list = [] - for f in os.listdir(path): - if not f.endswith('.jpg'): - continue - image_list.append(os.path.join(path, f)) - label_list.append(int(f.split("_")[2])) - return image_list, label_list - - -models = dict( - mobilenetv1=Quantize(model_path='../../models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr.onnx', - config_path='./inc_configs/mobilenet.yaml'), - mobilenetv2=Quantize(model_path='../../models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr.onnx', - config_path='./inc_configs/mobilenet.yaml'), - 
mp_handpose=Quantize(model_path='../../models/handpose_estimation_mediapipe/handpose_estimation_mediapipe_2022may.onnx', - config_path='./inc_configs/mp_handpose.yaml', - custom_dataset=Dataset(root='../../benchmark/data/palm_detection', dim='hwc', swapRB=True, mean=127.5, std=127.5, toFP32=True)), - fer=Quantize(model_path='../../models/facial_expression_recognition/facial_expression_recognition_mobilefacenet_2022july.onnx', - config_path='./inc_configs/fer.yaml', - custom_dataset=FerDataset(root='../../benchmark/data/facial_expression_recognition/fer_calibration', size=(112, 112), toFP32=True, swapRB=True, scale=1./255, mean=0.5, std=0.5), - eval_dataset=FerDataset(root='../../benchmark/data/facial_expression_recognition/fer_evaluation', size=(112, 112), toFP32=True, swapRB=True, scale=1./255, mean=0.5, std=0.5), - metric=Accuracy), -) - -if __name__ == '__main__': - selected_models = [] - for i in range(1, len(sys.argv)): - selected_models.append(sys.argv[i]) - if not selected_models: - selected_models = list(models.keys()) - print('Models to be quantized: {}'.format(str(selected_models))) - - for selected_model_name in selected_models: - q = models[selected_model_name] - q.run() diff --git a/tools/quantize/quantize-ort.py b/tools/quantize/quantize-ort.py index aba57f71..df3ccc20 100644 --- a/tools/quantize/quantize-ort.py +++ b/tools/quantize/quantize-ort.py @@ -46,7 +46,7 @@ def get_calibration_data(self, image_dir): return blobs class Quantize: - def __init__(self, model_path, calibration_image_dir, transforms=Compose(), per_channel=False, act_type='int8', wt_type='int8', data_dim='chw', nodes_to_exclude=[]): + def __init__(self, model_path, calibration_image_dir, transforms=Compose(), per_channel=False, act_type='int8', wt_type='int8', data_dim='chw', nodes_to_exclude=[], quant_format='qdq'): self.type_dict = {"uint8" : QuantType.QUInt8, "int8" : QuantType.QInt8} self.model_path = model_path @@ -56,6 +56,11 @@ def __init__(self, model_path, 
calibration_image_dir, transforms=Compose(), per_ self.act_type = act_type self.wt_type = wt_type self.nodes_to_exclude = nodes_to_exclude + + # FIX: The legacy QOperator format breaks TensorRT, CUDA, and NPU execution providers + # because hardware accelerators cannot fuse QLinear Conv/MatMul ops natively. + # QDQ (QuantizeLinear/DequantizeLinear) is the industry standard required for deep hardware acceleration. + self.quant_format = QuantFormat.QDQ if quant_format == 'qdq' else QuantFormat.QOperator # data reader self.dr = DataReader(self.model_path, self.calibration_image_dir, self.transforms, data_dim) @@ -77,9 +82,9 @@ def run(self): print('Quantizing {}: act_type {}, wt_type {}'.format(self.model_path, self.act_type, self.wt_type)) new_model_path = self.check_opset() quant_pre_process(new_model_path, new_model_path) - output_name = '{}_{}.onnx'.format(self.model_path[:-5], self.wt_type) + output_name = '{}_{}_{}.onnx'.format(self.model_path[:-5], self.wt_type, self.quant_format.name.lower()) quantize_static(new_model_path, output_name, self.dr, - quant_format=QuantFormat.QOperator, # start from onnxruntime==1.11.0, quant_format is set to QuantFormat.QDQ by default, which performs fake quantization + quant_format=self.quant_format, # Replaced hardcoded QOperator with dynamically fusable QDQ format per_channel=self.per_channel, weight_type=self.type_dict[self.wt_type], activation_type=self.type_dict[self.act_type], diff --git a/tools/quantize/requirements.txt b/tools/quantize/requirements.txt deleted file mode 100644 index d8519a95..00000000 --- a/tools/quantize/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -opencv-python>=4.10.0 -numpy -onnx -onnxruntime -onnxruntime-extensions -neural-compressor diff --git a/tools/quantize/transform.py b/tools/quantize/transform.py deleted file mode 100644 index 10d97521..00000000 --- a/tools/quantize/transform.py +++ /dev/null @@ -1,129 +0,0 @@ -# This file is part of OpenCV Zoo project. 
-# It is subject to the license terms in the LICENSE file found in the same directory. -# -# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. -# Third party copyrights are property of their respective owners. - -import collections -import numpy as np -import cv2 as cv -import sys - -class Compose: - def __init__(self, transforms=[]): - self.transforms = transforms - - def __call__(self, img): - for t in self.transforms: - img = t(img) - if img is None: - break - return img - -class Resize: - def __init__(self, size, interpolation=cv.INTER_LINEAR): - self.size = size - self.interpolation = interpolation - - def __call__(self, img): - return cv.resize(img, self.size) - -class CenterCrop: - def __init__(self, size): - self.size = size # w, h - - def __call__(self, img): - h, w, _ = img.shape - ws = int(w / 2 - self.size[0] / 2) - hs = int(h / 2 - self.size[1] / 2) - return img[hs:hs+self.size[1], ws:ws+self.size[0], :] - -class Normalize: - def __init__(self, mean=None, std=None): - self.mean = mean - self.std = std - - def __call__(self, img): - img = img.astype("float32") - if self.mean is not None: - img[:, :, 0] = img[:, :, 0] - self.mean[0] - img[:, :, 1] = img[:, :, 1] - self.mean[1] - img[:, :, 2] = img[:, :, 2] - self.mean[2] - if self.std is not None: - img[:, :, 0] = img[:, :, 0] / self.std[0] - img[:, :, 1] = img[:, :, 1] / self.std[1] - img[:, :, 2] = img[:, :, 2] / self.std[2] - return img - -class ColorConvert: - def __init__(self, ctype): - self.ctype = ctype - - def __call__(self, img): - return cv.cvtColor(img, self.ctype) - -class HandAlign: - def __init__(self, model): - self.model = model - sys.path.append('../../models/palm_detection_mediapipe') - from mp_palmdet import MPPalmDet - self.palm_detector = MPPalmDet(modelPath='../../models/palm_detection_mediapipe/palm_detection_mediapipe_2023feb.onnx', nmsThreshold=0.3, scoreThreshold=0.9) - - def __call__(self, img): - return 
self.mp_handpose_align(img) - - def mp_handpose_align(self, img): - palms = self.palm_detector.infer(img) - if len(palms) == 0: - return None - palm = palms[0] - palm_bbox = palm[0:4].reshape(2, 2) - palm_landmarks = palm[4:18].reshape(7, 2) - p1 = palm_landmarks[0] - p2 = palm_landmarks[2] - radians = np.pi / 2 - np.arctan2(-(p2[1] - p1[1]), p2[0] - p1[0]) - radians = radians - 2 * np.pi * np.floor((radians + np.pi) / (2 * np.pi)) - angle = np.rad2deg(radians) - # get bbox center - center_palm_bbox = np.sum(palm_bbox, axis=0) / 2 - # get rotation matrix - rotation_matrix = cv.getRotationMatrix2D(center_palm_bbox, angle, 1.0) - # get rotated image - rotated_image = cv.warpAffine(img, rotation_matrix, (img.shape[1], img.shape[0])) - # get bounding boxes from rotated palm landmarks - homogeneous_coord = np.c_[palm_landmarks, np.ones(palm_landmarks.shape[0])] - rotated_palm_landmarks = np.array([ - np.dot(homogeneous_coord, rotation_matrix[0]), - np.dot(homogeneous_coord, rotation_matrix[1])]) - # get landmark bounding box - rotated_palm_bbox = np.array([ - np.amin(rotated_palm_landmarks, axis=1), - np.amax(rotated_palm_landmarks, axis=1)]) # [top-left, bottom-right] - - # shift bounding box - wh_rotated_palm_bbox = rotated_palm_bbox[1] - rotated_palm_bbox[0] - shift_vector = [0, -0.1] * wh_rotated_palm_bbox - rotated_palm_bbox = rotated_palm_bbox + shift_vector - # squarify bounding boxx - center_rotated_plam_bbox = np.sum(rotated_palm_bbox, axis=0) / 2 - wh_rotated_palm_bbox = rotated_palm_bbox[1] - rotated_palm_bbox[0] - new_half_size = np.amax(wh_rotated_palm_bbox) / 2 - rotated_palm_bbox = np.array([ - center_rotated_plam_bbox - new_half_size, - center_rotated_plam_bbox + new_half_size]) - - # enlarge bounding box - center_rotated_plam_bbox = np.sum(rotated_palm_bbox, axis=0) / 2 - wh_rotated_palm_bbox = rotated_palm_bbox[1] - rotated_palm_bbox[0] - new_half_size = wh_rotated_palm_bbox * 1.5 - rotated_palm_bbox = np.array([ - center_rotated_plam_bbox - 
new_half_size, - center_rotated_plam_bbox + new_half_size]) - - # Crop the rotated image by the bounding box - [[x1, y1], [x2, y2]] = rotated_palm_bbox.astype(np.int32) - diff = np.maximum([-x1, -y1, x2 - rotated_image.shape[1], y2 - rotated_image.shape[0]], 0) - [x1, y1, x2, y2] = [x1, y1, x2, y2] + diff - crop = rotated_image[y1:y2, x1:x2, :] - crop = cv.copyMakeBorder(crop, diff[1], diff[3], diff[0], diff[2], cv.BORDER_CONSTANT, value=(0, 0, 0)) - return crop