From dbc3b3411f3b5e9d186afb627b2e0254e8056626 Mon Sep 17 00:00:00 2001 From: Joe Evans Date: Mon, 28 Nov 2022 13:51:10 -0800 Subject: [PATCH 1/5] [v1.9.x] TLP Updates (#21148) (#21149) * Update repo URLs and website to remove Incubating references. * Update repo to remove references to Apache Incubator, update website, remove DISCLAIMER. * Update license check configuration. * Update license check configuration. * Update license check configuration. * Update license check configuration. * Update license check configuration. * Add Apache 2.0 license to files without it. * Add Apache 2.0 license to files without it. * Remove references to DISCLAIMER in build scripts / configs. * Rearrange dependencies for ubuntu_tutorials to prevent pip hangs. * Change node type for Cpp: MKLDNN+GPU builds. * Update node type * Add missing node assign for G4 node type. --- .licenserc.yaml | 7 + CMakeLists.txt | 17 ++ CONTRIBUTORS.md | 6 +- DISCLAIMER | 10 - LICENSE | 4 +- NEWS.md | 96 ++++---- NOTICE | 4 +- README.md | 47 ++-- benchmark/opperf/README.md | 6 +- .../opperf/nd_operations/misc_operators.py | 2 +- benchmark/opperf/utils/benchmark_utils.py | 4 +- benchmark/opperf/utils/op_registry_utils.py | 4 +- cd/README.md | 2 +- cd/python/pypi/README.md | 2 +- cd/python/pypi/pypi_package.sh | 2 +- cd/utils/artifact_repository.md | 2 +- cd/utils/requirements.txt | 16 ++ ci/docker/runtime_functions.sh | 5 +- ci/jenkins/Jenkinsfile_centos_gpu | 2 +- ci/jenkins/Jenkinsfile_unix_cpu | 2 +- ci/publish/website/deploy.sh | 12 +- ci/requirements.txt | 16 ++ cpp-package/README.md | 10 +- cpp-package/example/README.md | 30 +-- cpp-package/example/inference/README.md | 18 +- .../multi_threaded_inference.cc | 2 +- cpp-package/include/mxnet-cpp/contrib.h | 6 +- cpp-package/include/mxnet-cpp/symbol.hpp | 2 +- cpp-package/tests/ci_test.sh | 2 +- doap.rdf | 6 +- docker/docker-python/README.md | 4 +- docs/README.md | 10 +- docs/python_docs/python/scripts/conf.py | 2 +- 
.../gluon_from_experiment_to_deployment.md | 4 +- .../gluon/training/fit_api_tutorial.md | 2 +- .../legacy/ndarray/gotchas_numpy_in_mxnet.md | 4 +- .../performance/backend/dnnl/dnnl_readme.md | 20 +- .../tutorials/performance/backend/profiler.md | 4 +- .../python/tutorials/performance/index.rst | 2 +- .../themes/mx-theme/mxtheme/footer.html | 6 +- .../themes/mx-theme/mxtheme/header_top.html | 2 +- docs/static_site/src/_config.yml | 2 +- docs/static_site/src/_config_beta.yml | 2 +- docs/static_site/src/_config_prod.yml | 2 +- docs/static_site/src/_includes/footer.html | 12 +- .../src/_includes/get_started/cloud/cpu.md | 2 +- .../src/_includes/get_started/cloud/gpu.md | 2 +- .../get_started/linux/python/cpu/docker.md | 2 +- .../get_started/linux/python/cpu/pip.md | 2 +- .../get_started/linux/python/gpu/docker.md | 2 +- .../get_started/linux/python/gpu/pip.md | 2 +- docs/static_site/src/_includes/header.html | 3 +- docs/static_site/src/assets/img/asf_logo.svg | 210 ++++++++++++++++++ docs/static_site/src/index.html | 2 +- .../api/architecture/exception_handling.md | 2 +- .../src/pages/api/architecture/note_engine.md | 2 +- .../pages/api/architecture/program_model.md | 2 +- .../tutorials/multi_threaded_inference.md | 14 +- .../tutorials/mxnet_cpp_inference_tutorial.md | 22 +- docs/static_site/src/pages/api/cpp/index.md | 10 +- ...b_contribution_and_PR_verification_tips.md | 6 +- ...xception_handing_and_custom_error_types.md | 2 +- .../src/pages/api/faq/add_op_in_backend.md | 8 +- .../src/pages/api/faq/distributed_training.md | 8 +- docs/static_site/src/pages/api/faq/env_var.md | 4 +- docs/static_site/src/pages/api/faq/float16.md | 2 +- .../src/pages/api/faq/gradient_compression.md | 2 +- .../src/pages/api/faq/large_tensor_support.md | 6 +- docs/static_site/src/pages/api/faq/perf.md | 2 +- .../api/java/docs/tutorials/ssd_inference.md | 6 +- .../src/pages/api/r/docs/tutorials/symbol.md | 2 +- .../pages/api/scala/docs/tutorials/infer.md | 6 +- 
.../src/pages/api/scala/docs/tutorials/io.md | 6 +- docs/static_site/src/pages/api/scala/index.md | 4 +- docs/static_site/src/pages/ecosystem.html | 2 +- .../pages/get_started/build_from_source.md | 8 +- .../src/pages/get_started/download.md | 2 +- .../src/pages/get_started/index.html | 2 +- .../src/pages/get_started/jetson_setup.md | 4 +- .../src/pages/get_started/validate_mxnet.md | 2 +- example/README.md | 4 +- example/distributed_training/README.md | 2 +- example/quantization/README.md | 14 +- python/mxnet/error.py | 2 +- python/mxnet/gluon/block.py | 4 +- python/mxnet/onnx/mx2onnx/_export_model.py | 2 +- python/mxnet/onnx/setup.py | 2 +- python/setup.py | 2 +- rat-excludes | 1 - src/imperative/cached_op.cc | 2 +- src/operator/linalg_impl.h | 2 +- src/operator/nn/fully_connected-inl.h | 2 +- tests/CMakeLists.txt | 16 ++ tests/cpp/operator/batchnorm_test.cc | 2 +- tests/nightly/test_large_array.py | 20 +- tests/nightly/test_large_vector.py | 8 +- tests/python/dnnl/test_dnnl.py | 2 +- tests/python/gpu/test_operator_gpu.py | 2 +- .../python/quantization/test_quantization.py | 4 +- tests/python/unittest/test_executor.py | 2 +- tests/python/unittest/test_gluon.py | 40 ++-- tests/python/unittest/test_gluon_utils.py | 2 +- tests/python/unittest/test_ndarray.py | 12 +- tests/python/unittest/test_numpy_ndarray.py | 2 +- tests/python/unittest/test_operator.py | 40 ++-- tests/python/unittest/test_profiler.py | 10 +- tests/python/unittest/test_random.py | 6 +- tests/python/unittest/test_sparse_operator.py | 2 +- tests/python/unittest/test_symbol.py | 2 +- tests/python/unittest/test_test_utils.py | 2 +- tests/tutorials/test_tutorials.py | 6 +- tools/create_source_archive.sh | 2 +- tools/dependencies/README.md | 18 +- tools/diagnose.py | 2 +- tools/pip/MANIFEST.in | 1 - tools/pip/doc/PYPI_README.md | 2 +- tools/pip/setup.py | 2 +- tools/staticbuild/build.sh | 1 - tools/windowsbuild/README.md | 2 +- 119 files changed, 647 insertions(+), 387 deletions(-) delete mode 100644 
DISCLAIMER create mode 100644 docs/static_site/src/assets/img/asf_logo.svg diff --git a/.licenserc.yaml b/.licenserc.yaml index 080411bfcd95..0b54ed187a83 100644 --- a/.licenserc.yaml +++ b/.licenserc.yaml @@ -22,6 +22,12 @@ header: - '.gitmodules' - '.licenserc.yaml' - '.asf.yaml' + - 'CODEOWNERS' + - 'python/mxnet/_cy3/README.md' + - 'tools/dependencies/LICENSE.binary.dependencies' + # files not distributed in source archive (listed in tools/source-exclude-artifacts.txt) + - 'docs' + - 'R-package' # files licensed under apache-2.0 license but do not include full license headers recognized by skywalking-eyes - '**/*.ipynb' - 'src/operator/deformable_convolution-inl.h' @@ -68,6 +74,7 @@ header: - 'include/dmlc' # symlink to 3rdparty/dmlc-core/include/dmlc - 'include/mshadow' # symlink to 3rdparty/mshadow/mshadow - 'include/onednn' # symlinks to 3rdparty/onednn + - 'include/nnvm' # symlinks to 3rdparty/tvm/nnvm/include/nnvm # test/build data - 'tests/python/dnnl/data/test_dnnl_test_dnnl_model_model1.json' diff --git a/CMakeLists.txt b/CMakeLists.txt index 5343c324a5a3..53a6978f4505 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,3 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ cmake_minimum_required(VERSION 3.13) # workaround to store CMAKE_CROSSCOMPILING because is getting reset by the project command diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 6fe0e3397171..4a2371d16f49 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -18,7 +18,7 @@ ~ --> -Contributors of Apache MXNet (incubating) +Contributors of Apache MXNet ========================================= Apache MXNet adopts the Apache way and governs by merit. We believe that it is important to create an inclusive community where everyone can use, contribute to, and influence the direction of @@ -143,7 +143,7 @@ healthy project. The PPMC actively seeks to appoint new committers from the list List of Contributors -------------------- -* [Top-100 Contributors](https://github.com/apache/incubator-mxnet/graphs/contributors) +* [Top-100 Contributors](https://github.com/apache/mxnet/graphs/contributors) - To contributors: please add your name to the list when you submit a patch to the project:) * [Aditya Trivedi](https://github.com/iadi7ya) * [Feng Wang](https://github.com/happynear) @@ -314,5 +314,5 @@ Label Bot - @mxnet-label-bot update [specify comma separated labels here] (i.e. @mxnet-label-bot update [Bug, Python]) - - Available label names which are supported: [Labels](https://github.com/apache/incubator-mxnet/labels) + - Available label names which are supported: [Labels](https://github.com/apache/mxnet/labels) - For further details: [My Wiki Page](https://cwiki.apache.org/confluence/display/MXNET/Machine+Learning+Based+GitHub+Bot) diff --git a/DISCLAIMER b/DISCLAIMER deleted file mode 100644 index eacaa1b85bc3..000000000000 --- a/DISCLAIMER +++ /dev/null @@ -1,10 +0,0 @@ -Apache MXNet is an effort undergoing incubation at -The Apache Software Foundation (ASF), sponsored by the name of Apache Incubator PMC. 
- -Incubation is required of all newly accepted projects until a further review -indicates that the infrastructure, communications, and decision making process -have stabilized in a manner consistent with other successful ASF projects. - -While incubation status is not necessarily a reflection of the completeness -or stability of the code, it does indicate that the project has yet to be fully -endorsed by the ASF. diff --git a/LICENSE b/LICENSE index b0c90d10cdab..689515a3ab64 100644 --- a/LICENSE +++ b/LICENSE @@ -202,9 +202,9 @@ limitations under the License. ====================================================================================== - Apache MXNET (incubating) Subcomponents: + Apache MXNET Subcomponents: - The Apache MXNET (incubating) project contains subcomponents with separate + The Apache MXNET project contains subcomponents with separate copyright notices and license terms. Your use of the source code for the these subcomponents is subject to the terms and conditions of the following licenses. See licenses/ for text of these licenses. diff --git a/NEWS.md b/NEWS.md index d1d8e5ddeef3..f05f7b07440d 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1553,10 +1553,10 @@ Apache MXNet (incubating) 1.5.1 is a maintenance release incorporating important #### Automatic Mixed Precision(experimental) Training Deep Learning networks is a very computationally intensive task. Novel model architectures tend to have increasing numbers of layers and parameters, which slow down training. Fortunately, software optimizations and new generations of training hardware make it a feasible task. However, most of the hardware and software optimization opportunities exist in exploiting lower precision (e.g. FP16) to, for example, utilize Tensor Cores available on new Volta and Turing GPUs. 
While training in FP16 showed great success in image classification tasks, other more complicated neural networks typically stayed in FP32 due to difficulties in applying the FP16 training guidelines. -That is where AMP (Automatic Mixed Precision) comes into play. It automatically applies the guidelines of FP16 training, using FP16 precision where it provides the most benefit, while conservatively keeping in full FP32 precision operations unsafe to do in FP16. To learn more about AMP, check out this [tutorial](https://github.com/apache/incubator-mxnet/blob/master/docs/tutorials/amp/amp_tutorial.md). +That is where AMP (Automatic Mixed Precision) comes into play. It automatically applies the guidelines of FP16 training, using FP16 precision where it provides the most benefit, while conservatively keeping in full FP32 precision operations unsafe to do in FP16. To learn more about AMP, check out this [tutorial](https://github.com/apache/mxnet/blob/master/docs/tutorials/amp/amp_tutorial.md). #### MKL-DNN Reduced precision inference and RNN API support -Two advanced features, fused computation and reduced-precision kernels, are introduced by MKL-DNN in the recent version. These features can significantly speed up the inference performance on CPU for a broad range of deep learning topologies. MXNet MKL-DNN backend provides optimized implementations for various operators covering a broad range of applications including image classification, object detection, and natural language processing. Refer to the [MKL-DNN operator documentation](https://github.com/apache/incubator-mxnet/blob/v1.5.x/docs/tutorials/mkldnn/operator_list.md) for more information. +Two advanced features, fused computation and reduced-precision kernels, are introduced by MKL-DNN in the recent version. These features can significantly speed up the inference performance on CPU for a broad range of deep learning topologies. 
MXNet MKL-DNN backend provides optimized implementations for various operators covering a broad range of applications including image classification, object detection, and natural language processing. Refer to the [MKL-DNN operator documentation](https://github.com/apache/mxnet/blob/v1.5.x/docs/tutorials/mkldnn/operator_list.md) for more information. #### Dynamic Shape(experimental) MXNet now supports Dynamic Shape in both imperative and symbolic mode. MXNet used to require that operators statically infer the output shapes from the input shapes. However, there exist some operators that don't meet this requirement. Examples are: @@ -2096,7 +2096,7 @@ Note: this feature is still experimental, for more details, refer to [design doc * Fixes installation nightly test by filtering out the git commands (#14144) * fix nightly test on tutorials (#14036) * Fix MXNet R package build (#13952) -* re-enable test after issue fixed https://github.com/apache/incubator-mxnet/issues/10973 (#14032) +* re-enable test after issue fixed https://github.com/apache/mxnet/issues/10973 (#14032) * Add back R tests and fix typo around R and perl tests (#13940) * Fix document build (#13927) * Temporarily disables windows pipeline to unblock PRs (#14261) @@ -2109,7 +2109,7 @@ Note: this feature is still experimental, for more details, refer to [design doc * Rearrange tests written only for update_on_kvstore = True (#13514) * add batch norm test (#13625) * Adadelta optimizer test (#13443) -* Skip flaky test https://github.com/apache/incubator-mxnet/issues/13446 (#13480) +* Skip flaky test https://github.com/apache/mxnet/issues/13446 (#13480) * Comment out test_unix_python3_tensorrt_gpu step (#14642) * Enable bulking test on windows (#14392) * rewrote the concat test to avoid flaky failures (#14049) @@ -2547,11 +2547,11 @@ For distributed training, the `Reduce` communication patterns used by NCCL and M * multiple trees (bandwidth-optimal for large messages) to handle `Reduce` on large messages More 
details can be found here: [Topology-aware AllReduce](https://cwiki.apache.org/confluence/display/MXNET/Single+machine+All+Reduce+Topology-aware+Communication) -Note: This is an experimental feature and has known problems - see [13341](https://github.com/apache/incubator-mxnet/issues/13341). Please help to contribute to improve the robustness of the feature. +Note: This is an experimental feature and has known problems - see [13341](https://github.com/apache/mxnet/issues/13341). Please help to contribute to improve the robustness of the feature. #### MKLDNN backend: Graph optimization and Quantization (experimental) -Two advanced features, graph optimization (operator fusion) and reduced-precision (INT8) computation, are introduced to MKLDNN backend in this release ([#12530](https://github.com/apache/incubator-mxnet/pull/12530), [#13297](https://github.com/apache/incubator-mxnet/pull/13297), [#13260](https://github.com/apache/incubator-mxnet/pull/13260)). +Two advanced features, graph optimization (operator fusion) and reduced-precision (INT8) computation, are introduced to MKLDNN backend in this release ([#12530](https://github.com/apache/mxnet/pull/12530), [#13297](https://github.com/apache/mxnet/pull/13297), [#13260](https://github.com/apache/mxnet/pull/13260)). These features significantly boost the inference performance on CPU (up to 4X) for a broad range of deep learning topologies. Currently, this feature is only available for inference on platforms with [supported Intel CPUs](https://github.com/intel/mkl-dnn#system-requirements). ##### Graph Optimization @@ -2560,7 +2560,7 @@ MKLDNN backend takes advantage of MXNet subgraph to implement the most of possib ##### Quantization Performance of reduced-precision (INT8) computation is also dramatically improved after the graph optimization feature is applied on CPU Platforms. 
Various models are supported and can benefit from reduced-precision computation, including symbolic models, Gluon models and even custom models. Users can run most of the pre-trained models with only a few lines of commands and a new quantization script imagenet_gen_qsym_mkldnn.py. The observed accuracy loss is less than 0.5% for popular CNN networks, like ResNet-50, Inception-BN, MobileNet, etc. -Please find detailed information and performance/accuracy numbers here: [MKLDNN README](https://mxnet.apache.org/api/python/docs/tutorials/performance/backend/mkldnn/mkldnn_readme.html), [quantization README](https://github.com/apache/incubator-mxnet/tree/master/example/quantization#1) and [design proposal](https://cwiki.apache.org/confluence/display/MXNET/MXNet+Graph+Optimization+and+Quantization+based+on+subgraph+and+MKL-DNN) +Please find detailed information and performance/accuracy numbers here: [MKLDNN README](https://mxnet.apache.org/api/python/docs/tutorials/performance/backend/mkldnn/mkldnn_readme.html), [quantization README](https://github.com/apache/mxnet/tree/master/example/quantization#1) and [design proposal](https://cwiki.apache.org/confluence/display/MXNET/MXNet+Graph+Optimization+and+Quantization+based+on+subgraph+and+MKL-DNN) ### New Operators @@ -2683,7 +2683,7 @@ Please find detailed information and performance/accuracy numbers here: [MKLDNN * [MXNET-1026] [Perl] Sync with recent changes in Python's API (#12739) #### Julia -* Import Julia binding (#10149), how to use is available at https://github.com/apache/incubator-mxnet/tree/master/julia +* Import Julia binding (#10149), how to use is available at https://github.com/apache/mxnet/tree/master/julia ### Performance benchmarks and improvements * Update mshadow for omp acceleration when nvcc is not present (#12674) @@ -2991,15 +2991,15 @@ Submodule@commit ID::Last updated by MXNet:: Last update in submodule ### Bug fixes -* [MXNET-953] Fix oob memory read (v1.3.x) / 
[#13118](https://github.com/apache/incubator-mxnet/pull/13118) +* [MXNET-953] Fix oob memory read (v1.3.x) / [#13118](https://github.com/apache/mxnet/pull/13118) Simple bugfix addressing an out-of-bounds memory read. -* [MXNET-969] Fix buffer overflow in RNNOp (v1.3.x) / [#13119](https://github.com/apache/incubator-mxnet/pull/13119) +* [MXNET-969] Fix buffer overflow in RNNOp (v1.3.x) / [#13119](https://github.com/apache/mxnet/pull/13119) This fixes an buffer overflow detected by ASAN. -* CudnnFind() usage improvements (v1.3.x) / [#13123](https://github.com/apache/incubator-mxnet/pull/13123) +* CudnnFind() usage improvements (v1.3.x) / [#13123](https://github.com/apache/mxnet/pull/13123) This PR improves the MXNet's use of cudnnFind() to address a few issues: 1. With the gluon imperative style, cudnnFind() is called during forward(), and so might have its timings perturbed by other GPU activity (including potentially other cudnnFind() calls). 2. With some cuda drivers versions, care is needed to ensure that the large I/O and workspace cudaMallocs() performed by cudnnFind() are immediately released and available to MXNet. @@ -3012,24 +3012,24 @@ This fixes an buffer overflow detected by ASAN. 4. Increased training performance based on being able to consistently run with models that approach the GPU's full global memory footprint. 5. Adds a unittest for and solves issue #12662. -* [MXNET-922] Fix memleak in profiler (v1.3.x) / [#13120](https://github.com/apache/incubator-mxnet/pull/13120) +* [MXNET-922] Fix memleak in profiler (v1.3.x) / [#13120](https://github.com/apache/mxnet/pull/13120) Fix a memleak reported locally by ASAN during a normal inference test. 
-* Fix lazy record io when used with dataloader and multi_worker > 0 (v1.3.x) / [#13124](https://github.com/apache/incubator-mxnet/pull/13124) +* Fix lazy record io when used with dataloader and multi_worker > 0 (v1.3.x) / [#13124](https://github.com/apache/mxnet/pull/13124) Fixes multi_worker data loader when record file is used. The MXRecordIO instance needs to require a new file handler after fork to be safely manipulated simultaneously. This fix also safely voids the previous temporary fixes #12093 #11370. -* fixed symbols naming in RNNCell, LSTMCell, GRUCell (v1.3.x) / [#13158](https://github.com/apache/incubator-mxnet/pull/13158) +* fixed symbols naming in RNNCell, LSTMCell, GRUCell (v1.3.x) / [#13158](https://github.com/apache/mxnet/pull/13158) This fixes #12783, by assigning all nodes in hybrid_forward a unique name. Some operations were in fact performed without attaching the appropriate (time) prefix to the name, which makes serialized graphs non-deserializable. -* Fixed `__setattr__` method of `_MXClassPropertyMetaClass` (v1.3.x) / [#13157](https://github.com/apache/incubator-mxnet/pull/13157) +* Fixed `__setattr__` method of `_MXClassPropertyMetaClass` (v1.3.x) / [#13157](https://github.com/apache/mxnet/pull/13157) Fixed `__setattr__` method -* allow foreach on input with 0 length (v1.3.x) / [#13151](https://github.com/apache/incubator-mxnet/pull/13151) +* allow foreach on input with 0 length (v1.3.x) / [#13151](https://github.com/apache/mxnet/pull/13151) Fix #12470. With this change, outs shape can be inferred correctly. -* Infer dtype in SymbolBlock import from input symbol (v1.3.x) / [#13117](https://github.com/apache/incubator-mxnet/pull/13117) +* Infer dtype in SymbolBlock import from input symbol (v1.3.x) / [#13117](https://github.com/apache/mxnet/pull/13117) Fix for the issue - #11849 Currently, Gluon symbol block cannot import any symbol with type other than fp32. 
All the parameters are created as FP32 leading to failure in importing the params when it is of type fp16, fp64 etc, In this PR, we infer the type of the symbol being imported and create the Symbol Block Parameters with that inferred type. @@ -3037,14 +3037,14 @@ This fixes an buffer overflow detected by ASAN. ### Documentation fixes -* Document the newly added env variable (v1.3.x) / [#13156](https://github.com/apache/incubator-mxnet/pull/13156) - Document the env variable: MXNET_ENFORCE_DETERMINISM added in PR: [#12992](https://github.com/apache/incubator-mxnet/pull/12992) +* Document the newly added env variable (v1.3.x) / [#13156](https://github.com/apache/mxnet/pull/13156) + Document the env variable: MXNET_ENFORCE_DETERMINISM added in PR: [#12992](https://github.com/apache/mxnet/pull/12992) -* fix broken links (v1.3.x) / [#13155](https://github.com/apache/incubator-mxnet/pull/13155) +* fix broken links (v1.3.x) / [#13155](https://github.com/apache/mxnet/pull/13155) This PR fixes broken links on the website. -* fix broken Python IO API docs (v1.3.x) / [#13154](https://github.com/apache/incubator-mxnet/pull/13154) - Fixes [#12854: Data Iterators documentation is broken](https://github.com/apache/incubator-mxnet/issues/12854) +* fix broken Python IO API docs (v1.3.x) / [#13154](https://github.com/apache/mxnet/pull/13154) + Fixes [#12854: Data Iterators documentation is broken](https://github.com/apache/mxnet/issues/12854) This PR manually specifies members of the IO module so that the docs will render as expected. This is workaround in the docs to deal with a bug introduced in the Python code/structure since v1.3.0. See the comments for more info. @@ -3052,7 +3052,7 @@ This fixes an buffer overflow detected by ASAN. This is important for any future modules - that they recognize this issue and make efforts to map the params and other elements. 
-* add/update infer_range docs (v1.3.x) / [#13153](https://github.com/apache/incubator-mxnet/pull/13153) +* add/update infer_range docs (v1.3.x) / [#13153](https://github.com/apache/mxnet/pull/13153) This PR adds or updates the docs for the infer_range feature. Clarifies the param in the C op docs @@ -3063,20 +3063,20 @@ This fixes an buffer overflow detected by ASAN. ### Other Improvements -* [MXNET-1179] Enforce deterministic algorithms in convolution layers (v1.3.x) / [#13152](https://github.com/apache/incubator-mxnet/pull/13152) +* [MXNET-1179] Enforce deterministic algorithms in convolution layers (v1.3.x) / [#13152](https://github.com/apache/mxnet/pull/13152) Some of the CUDNN convolution algorithms are non-deterministic (see issue #11341). This PR adds an env variable to enforce determinism in the convolution operators. If set to true, only deterministic CUDNN algorithms will be used. If no deterministic algorithm is available, MXNet will error out. ### Submodule updates -* update mshadow (v1.3.x) / [#13122](https://github.com/apache/incubator-mxnet/pull/13122) +* update mshadow (v1.3.x) / [#13122](https://github.com/apache/mxnet/pull/13122) Update mshadow for omp acceleration when nvcc is not present ### Known issues The test test_operator.test_dropout has issues and has been disabled on the branch: -* Disable flaky test test_operator.test_dropout (v1.3.x) / [#13200](https://github.com/apache/incubator-mxnet/pull/13200) +* Disable flaky test test_operator.test_dropout (v1.3.x) / [#13200](https://github.com/apache/mxnet/pull/13200) @@ -3086,14 +3086,14 @@ For more information and examples, see [full release notes](https://cwiki.apache ## 1.3.0 ### New Features - Gluon RNN layers are now HybridBlocks -- In this release, Gluon RNN layers such as `gluon.rnn.RNN`, `gluon.rnn.LSTM`, `gluon.rnn.GRU` becomes `HybridBlock`s as part of [gluon.rnn improvements project](https://github.com/apache/incubator-mxnet/projects/11) (#11482). 
-- This is the result of newly available fused RNN operators added for CPU: LSTM([#10104](https://github.com/apache/incubator-mxnet/pull/10104)), vanilla RNN([#11399](https://github.com/apache/incubator-mxnet/pull/11399)), GRU([#10311](https://github.com/apache/incubator-mxnet/pull/10311)) +- In this release, Gluon RNN layers such as `gluon.rnn.RNN`, `gluon.rnn.LSTM`, `gluon.rnn.GRU` becomes `HybridBlock`s as part of [gluon.rnn improvements project](https://github.com/apache/mxnet/projects/11) (#11482). +- This is the result of newly available fused RNN operators added for CPU: LSTM([#10104](https://github.com/apache/mxnet/pull/10104)), vanilla RNN([#11399](https://github.com/apache/mxnet/pull/11399)), GRU([#10311](https://github.com/apache/mxnet/pull/10311)) - Now many dynamic networks that are based on Gluon RNN layers can now be completely hybridized, exported, and used in the inference APIs in other language bindings such as R, Scala, etc. ### MKL-DNN improvements - Introducing more functionality support for MKL-DNN as follows: - - Added support for more activation functions like, "sigmoid", "tanh", "softrelu". ([#10336](https://github.com/apache/incubator-mxnet/pull/10336)) - - Added Debugging functionality: Result check ([#12069](https://github.com/apache/incubator-mxnet/pull/12069)) and Backend switch ([#12058](https://github.com/apache/incubator-mxnet/pull/12058)). + - Added support for more activation functions like, "sigmoid", "tanh", "softrelu". ([#10336](https://github.com/apache/mxnet/pull/10336)) + - Added Debugging functionality: Result check ([#12069](https://github.com/apache/mxnet/pull/12069)) and Backend switch ([#12058](https://github.com/apache/mxnet/pull/12058)). 
### New Features - Gluon Model Zoo Pre-trained Models - Gluon Vision Model Zoo now provides MobileNetV2 pre-trained models (#10879) in addition to @@ -3102,7 +3102,7 @@ For more information and examples, see [full release notes](https://cwiki.apache - Updated pre-trained models provide state-of-the-art performance on all resnetv1, resnetv2, and vgg16, vgg19, vgg16_bn, vgg19_bn models (#11327 #11860 #11830). ### New Features - Clojure package (experimental) -- MXNet now supports the Clojure programming language. The MXNet Clojure package brings flexible and efficient GPU computing and state-of-art deep learning to Clojure. It enables you to write seamless tensor/matrix computation with multiple GPUs in Clojure. It also lets you construct and customize the state-of-art deep learning models in Clojure, and apply them to tasks, such as image classification and data science challenges.([#11205](https://github.com/apache/incubator-mxnet/pull/11205)) +- MXNet now supports the Clojure programming language. The MXNet Clojure package brings flexible and efficient GPU computing and state-of-art deep learning to Clojure. It enables you to write seamless tensor/matrix computation with multiple GPUs in Clojure. It also lets you construct and customize the state-of-art deep learning models in Clojure, and apply them to tasks, such as image classification and data science challenges.([#11205](https://github.com/apache/mxnet/pull/11205)) - Checkout examples and API documentation [here](https://mxnet.apache.org/api/clojure/index.html). ### New Features - Synchronized Cross-GPU Batch Norm (experimental) @@ -3110,16 +3110,16 @@ For more information and examples, see [full release notes](https://cwiki.apache - This enables stable training on large-scale networks with high memory consumption such as FCN for image segmentation. ### New Features - Sparse Tensor Support for Gluon (experimental) -- Sparse gradient support is added to `gluon.nn.Embedding`. 
Set `sparse_grad=True` to enable when constructing the Embedding block. ([#10924](https://github.com/apache/incubator-mxnet/pull/10924)) -- Gluon Parameter now supports "row_sparse" storage type, which reduces communication cost and memory consumption for multi-GPU training for large models. `gluon.contrib.nn.SparseEmbedding` is an example empowered by this. ([#11001](https://github.com/apache/incubator-mxnet/pull/11001), [#11429](https://github.com/apache/incubator-mxnet/pull/11429)) -- Gluon HybridBlock now supports hybridization with sparse operators ([#11306](https://github.com/apache/incubator-mxnet/pull/11306)). +- Sparse gradient support is added to `gluon.nn.Embedding`. Set `sparse_grad=True` to enable when constructing the Embedding block. ([#10924](https://github.com/apache/mxnet/pull/10924)) +- Gluon Parameter now supports "row_sparse" storage type, which reduces communication cost and memory consumption for multi-GPU training for large models. `gluon.contrib.nn.SparseEmbedding` is an example empowered by this. ([#11001](https://github.com/apache/mxnet/pull/11001), [#11429](https://github.com/apache/mxnet/pull/11429)) +- Gluon HybridBlock now supports hybridization with sparse operators ([#11306](https://github.com/apache/mxnet/pull/11306)). ### New Features - Control flow operators (experimental) - This is the first step towards optimizing dynamic neural networks with variable computation graphs, by adding symbolic and imperative control flow operators. [Proposal](https://cwiki.apache.org/confluence/display/MXNET/Optimize+dynamic+neural+network+models+with+control+flow+operators). -- New operators introduced: foreach([#11531](https://github.com/apache/incubator-mxnet/pull/11531)), while_loop([#11566](https://github.com/apache/incubator-mxnet/pull/11566)), cond([#11760](https://github.com/apache/incubator-mxnet/pull/11760)). 
+- New operators introduced: foreach([#11531](https://github.com/apache/mxnet/pull/11531)), while_loop([#11566](https://github.com/apache/mxnet/pull/11566)), cond([#11760](https://github.com/apache/mxnet/pull/11760)). ### New Features - Scala API Improvements (experimental) -- Improvements to MXNet Scala API usability([#10660](https://github.com/apache/incubator-mxnet/pull/10660), [#10787](https://github.com/apache/incubator-mxnet/pull/10787), [#10991](https://github.com/apache/incubator-mxnet/pull/10991)) +- Improvements to MXNet Scala API usability([#10660](https://github.com/apache/mxnet/pull/10660), [#10787](https://github.com/apache/mxnet/pull/10787), [#10991](https://github.com/apache/mxnet/pull/10991)) - Symbol.api and NDArray.api would bring new set of functions that have complete definition for all arguments. - Please see this [Type safe API design document](https://cwiki.apache.org/confluence/display/MXNET/Scala+Type-safe+API+Design+Doc) for more details. @@ -3128,21 +3128,21 @@ For more information and examples, see [full release notes](https://cwiki.apache - Unlike the default memory pool requires exact size match to reuse released memory chunks, this new memory pool uses exponential-linear rounding so that similar sized memory chunks can all be reused, which is more suitable for all the workloads with dynamic-shape inputs and outputs. Set environment variable `MXNET_GPU_MEM_POOL_TYPE=Round` to enable. ### New Features - Topology-aware AllReduce (experimental) -- This features uses trees to perform the Reduce and Broadcast. It uses the idea of minimum spanning trees to do a binary tree Reduce communication pattern to improve it. This topology aware approach reduces the existing limitations for single machine communication shown by mehods like parameter server and NCCL ring reduction. It is an experimental feature ([#11591](https://github.com/apache/incubator-mxnet/pull/11591)). +- This features uses trees to perform the Reduce and Broadcast. 
It uses the idea of minimum spanning trees to do a binary tree Reduce communication pattern to improve it. This topology aware approach reduces the existing limitations for single machine communication shown by methods like parameter server and NCCL ring reduction. It is an experimental feature ([#11591](https://github.com/apache/mxnet/pull/11591)). - Paper followed for implementation: [Optimal message scheduling for aggregation](https://www.sysml.cc/doc/178.pdf). - Set environment variable `MXNET_KVSTORE_USETREE=1` to enable. ### New Features - Export MXNet models to ONNX format (experimental) -- With this feature, now MXNet models can be exported to ONNX format([#11213](https://github.com/apache/incubator-mxnet/pull/11213)). Currently, MXNet supports ONNX v1.2.1. [API documentation](https://mxnet.apache.org/api/python/contrib/onnx.html). +- With this feature, now MXNet models can be exported to ONNX format([#11213](https://github.com/apache/mxnet/pull/11213)). Currently, MXNet supports ONNX v1.2.1. [API documentation](https://mxnet.apache.org/api/python/contrib/onnx.html). - Checkout this [tutorial](https://mxnet.apache.org/tutorials/onnx/export_mxnet_to_onnx.html) which shows how to use MXNet to ONNX exporter APIs. ONNX protobuf so that those models can be imported in other frameworks for inference. ### New Features - TensorRT Runtime Integration (experimental) - [TensorRT](https://developer.nvidia.com/tensorrt) provides significant acceleration of model inference on NVIDIA GPUs compared to running the full graph in MxNet using unfused GPU operators. In addition to faster fp32 inference, TensorRT optimizes fp16 inference, and is capable of int8 inference (provided the quantization steps are performed). Besides increasing throughput, TensorRT significantly reduces inference latency, especially for small batches. 
-- This feature in MXNet now introduces runtime integration of TensorRT into MXNet, in order to accelerate inference.([#11325](https://github.com/apache/incubator-mxnet/pull/11325)) +- This feature in MXNet now introduces runtime integration of TensorRT into MXNet, in order to accelerate inference.([#11325](https://github.com/apache/mxnet/pull/11325)) - Currently, its in contrib package. ### New Examples - Scala -- Refurnished Scala Examples with improved API, documentation and CI test coverage. ([#11753](https://github.com/apache/incubator-mxnet/pull/11753), [#11621](https://github.com/apache/incubator-mxnet/pull/11621) ) +- Refurbished Scala Examples with improved API, documentation and CI test coverage. ([#11753](https://github.com/apache/mxnet/pull/11753), [#11621](https://github.com/apache/mxnet/pull/11621) ) - Now all Scala examples have: - No bugs block in the middle - Good Readme to start with
- This tool will help in detecting issues earlier in the development cycle which break backwards compatibility on MXNet and would contribute towards ensuring a healthy and stable release of MXNet. ### Maintenance - Integrated testing for "the Straight Dope" @@ -3195,7 +3195,7 @@ For more information and examples, see [full release notes](https://cwiki.apache - Improve performance of broadcast ops backward pass (#11252) - Improved numerical stability as a result of using stable L2 norm (#11573) - Accelerate the performance of topk for GPU and CPU side (#12085 #10997 ; This changes the behavior of topk when nan values occur in the input) -- Support for dot(dns, csr) = dns and dot(dns, csr.T) = dns on CPU ([#11113](https://github.com/apache/incubator-mxnet/pull/11113)) +- Support for dot(dns, csr) = dns and dot(dns, csr.T) = dns on CPU ([#11113](https://github.com/apache/mxnet/pull/11113)) - Performance improvement for Batch Dot on CPU from mshadow ([mshadow PR#342](https://github.com/dmlc/mshadow/pull/342)) ### API Changes @@ -3279,10 +3279,10 @@ For more information and examples, see [full release notes](https://cwiki.apache - Implemented new [Scala Inference APIs](https://cwiki.apache.org/confluence/display/MXNET/MXNetScalaInferenceAPI) which offer an easy-to-use, Scala Idiomatic and thread-safe high level APIs for performing predictions with deep learning models trained with MXNet (#9678). Implemented a new ImageClassifier class which provides APIs for classification tasks on a Java BufferedImage using a pre-trained model you provide (#10054). Implemented a new ObjectDetector class which provides APIs for object and boundary detections on a Java BufferedImage using a pre-trained model you provide (#10229). ### New Features - Added a Module to Import ONNX models into MXNet -- Implemented a new ONNX module in MXNet which offers an easy to use API to import ONNX models into MXNet's symbolic interface (#9963). 
Checkout the [example](https://github.com/apache/incubator-mxnet/blob/master/example/onnx/super_resolution.py) on how you could use this [API](https://cwiki.apache.org/confluence/display/MXNET/ONNX-MXNet+API+Design) to import ONNX models and perform inference on MXNet. Currently, the ONNX-MXNet Import module is still experimental. Please use it with caution. +- Implemented a new ONNX module in MXNet which offers an easy to use API to import ONNX models into MXNet's symbolic interface (#9963). Checkout the [example](https://github.com/apache/mxnet/blob/master/example/onnx/super_resolution.py) on how you could use this [API](https://cwiki.apache.org/confluence/display/MXNET/ONNX-MXNet+API+Design) to import ONNX models and perform inference on MXNet. Currently, the ONNX-MXNet Import module is still experimental. Please use it with caution. ### New Features - Added Support for Model Quantization with Calibration -- Implemented model quantization by adopting the [TensorFlow approach](https://www.tensorflow.org/performance/quantization) with calibration by borrowing the idea from Nvidia's [TensorRT](http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf). The focus of this work is on keeping quantized models (ConvNets for now) inference accuracy loss under control when compared to their corresponding FP32 models. Please see the [example](https://github.com/apache/incubator-mxnet/tree/master/example/quantization) on how to quantize a FP32 model with or without calibration (#9552). Currently, the Quantization support is still experimental. Please use it with caution. +- Implemented model quantization by adopting the [TensorFlow approach](https://www.tensorflow.org/performance/quantization) with calibration by borrowing the idea from Nvidia's [TensorRT](http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf). 
The focus of this work is on keeping quantized models (ConvNets for now) inference accuracy loss under control when compared to their corresponding FP32 models. Please see the [example](https://github.com/apache/mxnet/tree/master/example/quantization) on how to quantize a FP32 model with or without calibration (#9552). Currently, the Quantization support is still experimental. Please use it with caution. ### New Features - MKL-DNN Integration - MXNet now integrates with Intel MKL-DNN to accelerate neural network operators: Convolution, Deconvolution, FullyConnected, Pooling, Batch Normalization, Activation, LRN, Softmax, as well as some common operators: sum and concat (#9677). This integration allows NDArray to contain data with MKL-DNN layouts and reduces data layout conversion to get the maximal performance from MKL-DNN. Currently, the MKL-DNN integration is still experimental. Please use it with caution. @@ -3301,7 +3301,7 @@ For more information and examples, see [full release notes](https://cwiki.apache - Changed API for the Pooling operator from `mxnet.symbol.Pooling(data=None, global_pool=_Null, cudnn_off=_Null, kernel=_Null, pool_type=_Null, pooling_convention=_Null, stride=_Null, pad=_Null, name=None, attr=None, out=None, **kwargs)` to `mxnet.symbol.Pooling(data=None, kernel=_Null, pool_type=_Null, global_pool=_Null, cudnn_off=_Null, pooling_convention=_Null, stride=_Null, pad=_Null, name=None, attr=None, out=None, **kwargs)`. This is a breaking change when kwargs are not provided since the new api expects the arguments starting from `global_pool` at the fourth position instead of the second position. (#10000). ### Bug Fixes -- Fixed tests - Flakiness/Bugs - (#9598, #9951, #10259, #10197, #10136, #10422). Please see: [Tests Improvement Project](https://github.com/apache/incubator-mxnet/projects/9) +- Fixed tests - Flakiness/Bugs - (#9598, #9951, #10259, #10197, #10136, #10422). 
Please see: [Tests Improvement Project](https://github.com/apache/mxnet/projects/9) - Fixed `cudnn_conv` and `cudnn_deconv` deadlock (#10392). - Fixed a race condition in `io.LibSVMIter` when batch size is large (#10124). - Fixed a race condition in converting data layouts in MKL-DNN (#9862). @@ -3398,7 +3398,7 @@ For more information and examples, see [full release notes](https://cwiki.apache - [DevGuide.md](https://github.com/google/googletest/blob/ec44c6c1675c25b9827aacd08c02433cccde7780/googlemock/docs/DevGuide.md) in the 3rdparty submodule googletest licensed under CC-BY-2.5. - Incompatibility in the behavior of MXNet Convolution operator for certain unsupported use cases: Raises an exception when MKLDNN is enabled, fails silently when it is not. - MXNet convolution generates wrong results for 1-element strides (#10689). -- [Tutorial on fine-tuning an ONNX model](https://github.com/apache/incubator-mxnet/blob/v1.2.0/docs/tutorials/onnx/fine_tuning_gluon.md) fails when using cpu context. +- [Tutorial on fine-tuning an ONNX model](https://github.com/apache/mxnet/blob/v1.2.0/docs/tutorials/onnx/fine_tuning_gluon.md) fails when using cpu context. - CMake build ignores the `USE_MKLDNN` flag and doesn't build with MKLDNN support even with `-DUSE_MKLDNN=1`. To workaround the issue please see: #10801. - Linking the dmlc-core library fails with CMake build when building with `USE_OPENMP=OFF`. To workaround the issue, please use the updated CMakeLists in dmlc-core unit tests directory: https://github.com/dmlc/dmlc-core/pull/396. You can also workaround the issue by using make instead of cmake when building with `USE_OPENMP=OFF`. @@ -3471,7 +3471,7 @@ For more information and examples, see [full release notes](https://cwiki.apache - Added Lambda block for wrapping a user defined function as a block. - Generalized `gluon.data.ArrayDataset` to support arbitrary number of arrays. 
### New Features - ARM / Raspberry Pi support [Experimental] - - MXNet now compiles and runs on ARMv6, ARMv7, ARMv64 including Raspberry Pi devices. See https://github.com/apache/incubator-mxnet/tree/master/docker_multiarch for more information. + - MXNet now compiles and runs on ARMv6, ARMv7, ARMv64 including Raspberry Pi devices. See https://github.com/apache/mxnet/tree/master/docker_multiarch for more information. ### New Features - NVIDIA Jetson support [Experimental] - MXNet now compiles and runs on NVIDIA Jetson TX2 boards with GPU acceleration. - You can install the python MXNet package on a Jetson board by running - `$ pip install mxnet-jetson-tx2`. diff --git a/NOTICE b/NOTICE index 0f6594c1728a..4d544719337c 100644 --- a/NOTICE +++ b/NOTICE @@ -1,5 +1,5 @@ - Apache MXNET (incubating) - Copyright 2017-2021 The Apache Software Foundation + Apache MXNET + Copyright 2017-2023 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
diff --git a/README.md b/README.md index b757f7e29f0e..be91fc0696e9 100644 --- a/README.md +++ b/README.md @@ -24,9 +24,9 @@ [![banner](https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/image/banner.png)](https://mxnet.apache.org) -Apache MXNet (incubating) for Deep Learning +Apache MXNet for Deep Learning =========================================== -[![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/apache/incubator-mxnet)](https://github.com/apache/incubator-mxnet/releases) [![GitHub stars](https://img.shields.io/github/stars/apache/incubator-mxnet)](https://github.com/apache/incubator-mxnet/stargazers) [![GitHub forks](https://img.shields.io/github/forks/apache/incubator-mxnet)](https://github.com/apache/incubator-mxnet/network) [![GitHub contributors](https://img.shields.io/github/contributors-anon/apache/incubator-mxnet)](https://github.com/apache/incubator-mxnet/graphs/contributors) [![GitHub issues](https://img.shields.io/github/issues/apache/incubator-mxnet)](https://github.com/apache/incubator-mxnet/issues) [![good first issue](https://img.shields.io/github/issues/apache/incubator-mxnet/good%20first%20issue)](https://github.com/apache/incubator-mxnet/labels/good%20first%20issue) [![GitHub pull requests by-label](https://img.shields.io/github/issues-pr/apache/mxnet/pr-awaiting-review)](https://github.com/apache/mxnet/labels/pr-awaiting-review) [![GitHub license](https://img.shields.io/github/license/apache/incubator-mxnet)](https://github.com/apache/incubator-mxnet/blob/master/LICENSE) [![Twitter](https://img.shields.io/twitter/url?style=social&url=https%3A%2F%2Fgithub.com%2Fapache%2Fincubator-mxnet)](https://twitter.com/intent/tweet?text=Wow:%20https%3A%2F%2Fgithub.com%2Fapache%2Fincubator-mxnet%20@ApacheMXNet) [![Twitter Follow](https://img.shields.io/twitter/follow/ApacheMXNet?style=social)](https://twitter.com/ApacheMXNet) +[![GitHub release (latest 
SemVer)](https://img.shields.io/github/v/release/apache/mxnet)](https://github.com/apache/mxnet/releases) [![GitHub stars](https://img.shields.io/github/stars/apache/mxnet)](https://github.com/apache/mxnet/stargazers) [![GitHub forks](https://img.shields.io/github/forks/apache/mxnet)](https://github.com/apache/mxnet/network) [![GitHub contributors](https://img.shields.io/github/contributors-anon/apache/mxnet)](https://github.com/apache/mxnet/graphs/contributors) [![GitHub issues](https://img.shields.io/github/issues/apache/mxnet)](https://github.com/apache/mxnet/issues) [![good first issue](https://img.shields.io/github/issues/apache/mxnet/good%20first%20issue)](https://github.com/apache/mxnet/labels/good%20first%20issue) [![GitHub pull requests by-label](https://img.shields.io/github/issues-pr/apache/mxnet/pr-awaiting-review)](https://github.com/apache/mxnet/labels/pr-awaiting-review) [![GitHub license](https://img.shields.io/github/license/apache/mxnet)](https://github.com/apache/mxnet/blob/master/LICENSE) [![Twitter](https://img.shields.io/twitter/url?style=social&url=https%3A%2F%2Fgithub.com%2Fapache%2Fmxnet)](https://twitter.com/intent/tweet?text=Wow:%20https%3A%2F%2Fgithub.com%2Fapache%2Fmxnet%20@ApacheMXNet) [![Twitter Follow](https://img.shields.io/twitter/follow/ApacheMXNet?style=social)](https://twitter.com/ApacheMXNet) Apache MXNet is a deep learning framework designed for both *efficiency* and *flexibility*. It allows you to ***mix*** [symbolic and imperative programming](https://mxnet.apache.org/api/architecture/program_model) @@ -39,12 +39,12 @@ Apache MXNet is more than a deep learning project. It is a [community](https://m on a mission of democratizing AI. It is a collection of [blue prints and guidelines](https://mxnet.apache.org/api/architecture/overview) for building deep learning systems, and interesting insights of DL systems for hackers. -Licensed under an [Apache-2.0](https://github.com/apache/incubator-mxnet/blob/master/LICENSE) license. 
+Licensed under an [Apache-2.0](https://github.com/apache/mxnet/blob/master/LICENSE) license. | Branch | Build Status | |:-------:|:-------------:| -| [master](https://github.com/apache/incubator-mxnet/tree/master) | [![CentOS CPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-cpu/job/master/badge/icon?subject=build%20centos%20cpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-cpu/job/master/) [![CentOS GPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-gpu/job/master/badge/icon?subject=build%20centos%20gpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-gpu/job/master/) [![Clang Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/clang/job/master/badge/icon?subject=build%20clang)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/clang/job/master/)
[![Edge Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/edge/job/master/badge/icon?subject=build%20edge)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/edge/job/master/) [![Miscellaneous Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/miscellaneous/job/master/badge/icon?subject=build%20miscellaneous)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/miscellaneous/job/master/) [![Sanity Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/sanity/job/master/badge/icon?subject=build%20sanity)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/sanity/job/master/)
[![Unix CPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-cpu/job/master/badge/icon?subject=build%20unix%20cpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-cpu/job/master/) [![Unix GPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-gpu/job/master/badge/icon?subject=build%20unix%20gpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-gpu/job/master/) [![Website Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/website/job/master/badge/icon?subject=build%20website)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/website/job/master/)
[![Windows CPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-cpu/job/master/badge/icon?subject=build%20windows%20cpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-cpu/job/master/) [![Windows GPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-gpu/job/master/badge/icon?subject=build%20windows%20gpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-gpu/job/master/) [![Documentation Status](http://jenkins.mxnet-ci.com/job/restricted-website-build/badge/icon)](https://mxnet.apache.org/) | -| [v1.x](https://github.com/apache/incubator-mxnet/tree/v1.x) | [![CentOS CPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-cpu/job/v1.x/badge/icon?subject=build%20centos%20cpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-cpu/job/v1.x/) [![CentOS GPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-gpu/job/v1.x/badge/icon?subject=build%20centos%20gpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-gpu/job/v1.x/) [![Clang Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/clang/job/v1.x/badge/icon?subject=build%20clang)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/clang/job/v1.x/)
[![Edge Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/edge/job/v1.x/badge/icon?subject=build%20edge)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/edge/job/v1.x/) [![Miscellaneous Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/miscellaneous/job/v1.x/badge/icon?subject=build%20miscellaneous)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/miscellaneous/job/v1.x/) [![Sanity Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/sanity/job/v1.x/badge/icon?subject=build%20sanity)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/sanity/job/v1.x/)
[![Unix CPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-cpu/job/v1.x/badge/icon?subject=build%20unix%20cpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-cpu/job/v1.x/) [![Unix GPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-gpu/job/v1.x/badge/icon?subject=build%20unix%20gpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-gpu/job/v1.x/) [![Website Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/website/job/v1.x/badge/icon?subject=build%20website)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/website/job/v1.x/)
[![Windows CPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-cpu/job/v1.x/badge/icon?subject=build%20windows%20cpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-cpu/job/v1.x/) [![Windows GPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-gpu/job/v1.x/badge/icon?subject=build%20windows%20gpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-gpu/job/v1.x/) [![Documentation Status](http://jenkins.mxnet-ci.com/job/restricted-website-build/badge/icon)](https://mxnet.apache.org/) | +| [master](https://github.com/apache/mxnet/tree/master) | [![CentOS CPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-cpu/job/master/badge/icon?subject=build%20centos%20cpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-cpu/job/master/) [![CentOS GPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-gpu/job/master/badge/icon?subject=build%20centos%20gpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-gpu/job/master/) [![Clang Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/clang/job/master/badge/icon?subject=build%20clang)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/clang/job/master/)
[![Edge Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/edge/job/master/badge/icon?subject=build%20edge)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/edge/job/master/) [![Miscellaneous Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/miscellaneous/job/master/badge/icon?subject=build%20miscellaneous)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/miscellaneous/job/master/) [![Sanity Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/sanity/job/master/badge/icon?subject=build%20sanity)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/sanity/job/master/)
[![Unix CPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-cpu/job/master/badge/icon?subject=build%20unix%20cpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-cpu/job/master/) [![Unix GPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-gpu/job/master/badge/icon?subject=build%20unix%20gpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-gpu/job/master/) [![Website Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/website/job/master/badge/icon?subject=build%20website)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/website/job/master/)
[![Windows CPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-cpu/job/master/badge/icon?subject=build%20windows%20cpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-cpu/job/master/) [![Windows GPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-gpu/job/master/badge/icon?subject=build%20windows%20gpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-gpu/job/master/) [![Documentation Status](http://jenkins.mxnet-ci.com/job/restricted-website-build/badge/icon)](https://mxnet.apache.org/) | +| [v1.x](https://github.com/apache/mxnet/tree/v1.x) | [![CentOS CPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-cpu/job/v1.x/badge/icon?subject=build%20centos%20cpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-cpu/job/v1.x/) [![CentOS GPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-gpu/job/v1.x/badge/icon?subject=build%20centos%20gpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/centos-gpu/job/v1.x/) [![Clang Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/clang/job/v1.x/badge/icon?subject=build%20clang)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/clang/job/v1.x/)
[![Edge Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/edge/job/v1.x/badge/icon?subject=build%20edge)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/edge/job/v1.x/) [![Miscellaneous Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/miscellaneous/job/v1.x/badge/icon?subject=build%20miscellaneous)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/miscellaneous/job/v1.x/) [![Sanity Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/sanity/job/v1.x/badge/icon?subject=build%20sanity)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/sanity/job/v1.x/)
[![Unix CPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-cpu/job/v1.x/badge/icon?subject=build%20unix%20cpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-cpu/job/v1.x/) [![Unix GPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-gpu/job/v1.x/badge/icon?subject=build%20unix%20gpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/unix-gpu/job/v1.x/) [![Website Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/website/job/v1.x/badge/icon?subject=build%20website)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/website/job/v1.x/)
[![Windows CPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-cpu/job/v1.x/badge/icon?subject=build%20windows%20cpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-cpu/job/v1.x/) [![Windows GPU Build Status](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-gpu/job/v1.x/badge/icon?subject=build%20windows%20gpu)](http://jenkins.mxnet-ci.com/job/mxnet-validation/job/windows-gpu/job/v1.x/) [![Documentation Status](http://jenkins.mxnet-ci.com/job/restricted-website-build/badge/icon)](https://mxnet.apache.org/) | Features -------- @@ -62,29 +62,28 @@ Contents * [Tutorials](https://mxnet.apache.org/api/python/docs/tutorials/) * [Ecosystem](https://mxnet.apache.org/ecosystem) * [API Documentation](https://mxnet.apache.org/api) -* [Examples](https://github.com/apache/incubator-mxnet-examples) +* [Examples](https://github.com/apache/mxnet-examples) * [Stay Connected](#stay-connected) * [Social Media](#social-media) What's New ---------- -* [1.9.1 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.9.1) - MXNet 1.9.1 Release. -* [1.9.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.9.0) - MXNet 1.9.0 Release. -* [1.8.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.8.0) - MXNet 1.8.0 Release. -* [1.7.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.7.0) - MXNet 1.7.0 Release. -* [1.6.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.6.0) - MXNet 1.6.0 Release. -* [1.5.1 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.5.1) - MXNet 1.5.1 Patch Release. -* [1.5.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.5.0) - MXNet 1.5.0 Release. -* [1.4.1 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.4.1) - MXNet 1.4.1 Patch Release. -* [1.4.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.4.0) - MXNet 1.4.0 Release. 
-* [1.3.1 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.3.1) - MXNet 1.3.1 Patch Release. -* [1.3.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.3.0) - MXNet 1.3.0 Release. -* [1.2.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.2.0) - MXNet 1.2.0 Release. -* [1.1.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.1.0) - MXNet 1.1.0 Release. -* [1.0.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/1.0.0) - MXNet 1.0.0 Release. -* [0.12.1 Release](https://github.com/apache/incubator-mxnet/releases/tag/0.12.1) - MXNet 0.12.1 Patch Release. -* [0.12.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/0.12.0) - MXNet 0.12.0 Release. -* [0.11.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/0.11.0) - MXNet 0.11.0 Release. +* [1.9.1 Release](https://github.com/apache/mxnet/releases/tag/1.9.1) - MXNet 1.9.1 Release. +* [1.8.0 Release](https://github.com/apache/mxnet/releases/tag/1.8.0) - MXNet 1.8.0 Release. +* [1.7.0 Release](https://github.com/apache/mxnet/releases/tag/1.7.0) - MXNet 1.7.0 Release. +* [1.6.0 Release](https://github.com/apache/mxnet/releases/tag/1.6.0) - MXNet 1.6.0 Release. +* [1.5.1 Release](https://github.com/apache/mxnet/releases/tag/1.5.1) - MXNet 1.5.1 Patch Release. +* [1.5.0 Release](https://github.com/apache/mxnet/releases/tag/1.5.0) - MXNet 1.5.0 Release. +* [1.4.1 Release](https://github.com/apache/mxnet/releases/tag/1.4.1) - MXNet 1.4.1 Patch Release. +* [1.4.0 Release](https://github.com/apache/mxnet/releases/tag/1.4.0) - MXNet 1.4.0 Release. +* [1.3.1 Release](https://github.com/apache/mxnet/releases/tag/1.3.1) - MXNet 1.3.1 Patch Release. +* [1.3.0 Release](https://github.com/apache/mxnet/releases/tag/1.3.0) - MXNet 1.3.0 Release. +* [1.2.0 Release](https://github.com/apache/mxnet/releases/tag/1.2.0) - MXNet 1.2.0 Release. +* [1.1.0 Release](https://github.com/apache/mxnet/releases/tag/1.1.0) - MXNet 1.1.0 Release. 
+* [1.0.0 Release](https://github.com/apache/mxnet/releases/tag/1.0.0) - MXNet 1.0.0 Release. +* [0.12.1 Release](https://github.com/apache/mxnet/releases/tag/0.12.1) - MXNet 0.12.1 Patch Release. +* [0.12.0 Release](https://github.com/apache/mxnet/releases/tag/0.12.0) - MXNet 0.12.0 Release. +* [0.11.0 Release](https://github.com/apache/mxnet/releases/tag/0.11.0) - MXNet 0.11.0 Release. * [Apache Incubator](http://incubator.apache.org/projects/mxnet.html) - We are now an Apache Incubator project. * [0.10.0 Release](https://github.com/apache/mxnet/releases/tag/v0.10.0) - MXNet 0.10.0 Release. * [0.9.3 Release](./docs/architecture/release_note_0_9.md) - First 0.9 official release. @@ -105,7 +104,7 @@ Stay Connected | Channel | Purpose | |---|---| -| [Follow MXNet Development on Github](https://github.com/apache/incubator-mxnet/issues) | See what's going on in the MXNet project. | +| [Follow MXNet Development on Github](https://github.com/apache/mxnet/issues) | See what's going on in the MXNet project. | | [MXNet Confluence Wiki for Developers](https://cwiki.apache.org/confluence/display/MXNET/Apache+MXNet+Home) | MXNet developer wiki for information related to project development, maintained by contributors and developers. To request write access, send an email to [send request to the dev list](mailto:dev@mxnet.apache.org?subject=Requesting%20CWiki%20write%20access) . | | [dev@mxnet.apache.org mailing list](https://lists.apache.org/list.html?dev@mxnet.apache.org) | The "dev list". Discussions about the development of MXNet. To subscribe, send an email to [dev-subscribe@mxnet.apache.org](mailto:dev-subscribe@mxnet.apache.org) . | | [discuss.mxnet.io](https://discuss.mxnet.io) | Asking & answering MXNet usage questions. | diff --git a/benchmark/opperf/README.md b/benchmark/opperf/README.md index 1a6657582a3b..ea9721aeaf3d 100644 --- a/benchmark/opperf/README.md +++ b/benchmark/opperf/README.md @@ -57,7 +57,7 @@ Note: 2. 
To install MXNet, refer [Installing MXNet page](https://mxnet.apache.org/versions/master/install/index.html) ``` -export PYTHONPATH=$PYTHONPATH:/path/to/incubator-mxnet/ +export PYTHONPATH=$PYTHONPATH:/path/to/mxnet/ ``` ## Usecase 1 - Run benchmarks for all the operators @@ -65,7 +65,7 @@ export PYTHONPATH=$PYTHONPATH:/path/to/incubator-mxnet/ Below command runs all the MXNet operators (NDArray) benchmarks with default inputs and saves the final result as JSON in the given file. ``` -python incubator-mxnet/benchmark/opperf/opperf.py --output-format json --output-file mxnet_operator_benchmark_results.json +python mxnet/benchmark/opperf/opperf.py --output-format json --output-file mxnet_operator_benchmark_results.json ``` **Other Supported Options:** @@ -260,7 +260,7 @@ See `utils/op_registry_utils.py` for more details. Optionally, you could use the python time package as the profiler engine to caliberate runtime in each operator. To use python timer for all operators, use the argument --profiler 'python': ``` -python incubator-mxnet/benchmark/opperf/opperf.py --profiler='python' +python mxnet/benchmark/opperf/opperf.py --profiler='python' ``` To use python timer for a specific operator, pass the argument profiler to the run_performance_test method: diff --git a/benchmark/opperf/nd_operations/misc_operators.py b/benchmark/opperf/nd_operations/misc_operators.py index fb8535a959a0..8593aa0bcda3 100644 --- a/benchmark/opperf/nd_operations/misc_operators.py +++ b/benchmark/opperf/nd_operations/misc_operators.py @@ -123,7 +123,7 @@ def run_mx_misc_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='na warmup=warmup, runs=runs) # There are currently issus with UpSampling with bilinear interpolation. 
- # track issue here: https://github.com/apache/incubator-mxnet/issues/9138 + # track issue here: https://github.com/apache/mxnet/issues/9138 upsampling_benchmark = run_performance_test([getattr(MX_OP_MODULE, "UpSampling")], run_backward=True, dtype=dtype, diff --git a/benchmark/opperf/utils/benchmark_utils.py b/benchmark/opperf/utils/benchmark_utils.py index 99d6bc0cc9ec..3363ac7b5dc7 100644 --- a/benchmark/opperf/utils/benchmark_utils.py +++ b/benchmark/opperf/utils/benchmark_utils.py @@ -239,7 +239,7 @@ def run_performance_test(ops, inputs, run_backward=True, List of dictionary of benchmark results. key -> name of the operator, Value is benchmark results. Note: when run_performance_test is called on the nd.Embedding operator with run_backward=True, an error will - be thrown. Track issue here: https://github.com/apache/incubator-mxnet/issues/11314 + be thrown. Track issue here: https://github.com/apache/mxnet/issues/11314 """ if not isinstance(ops, list): ops = [ops] @@ -282,7 +282,7 @@ def run_benchmark_operator(name, size = (128,128), additional_inputs = {}, def run_op_benchmarks(ops, dtype, ctx, profiler, int64_tensor, warmup, runs): # Running im2col either forwards or backwards on GPU results in errors - # track issue here: https://github.com/apache/incubator-mxnet/issues/17493 + # track issue here: https://github.com/apache/mxnet/issues/17493 gpu_disabled_ops = ['im2col'] # For each operator, run benchmarks diff --git a/benchmark/opperf/utils/op_registry_utils.py b/benchmark/opperf/utils/op_registry_utils.py index 6b9efc84227a..80e40d4ea4a8 100644 --- a/benchmark/opperf/utils/op_registry_utils.py +++ b/benchmark/opperf/utils/op_registry_utils.py @@ -487,9 +487,9 @@ def get_all_indexing_routines(): """Gets all indexing routines registered with MXNet. 
# @ChaiBapchya unravel_index errors out on certain inputs - # tracked here https://github.com/apache/incubator-mxnet/issues/16771 + # tracked here https://github.com/apache/mxnet/issues/16771 # @ChaiBapchya scatter_nd errors with core dump - # tracked here https://github.com/apache/incubator-mxnet/issues/17480 + # tracked here https://github.com/apache/mxnet/issues/17480 Returns ------- diff --git a/cd/README.md b/cd/README.md index 24ee1c03dd86..2276c94f5b40 100644 --- a/cd/README.md +++ b/cd/README.md @@ -39,7 +39,7 @@ Currently, below variants are supported. All of these variants except native hav * *cu110*: CUDA 11.0 * *cu112*: CUDA 11.2 -*For more on variants, see [here](https://github.com/apache/incubator-mxnet/issues/8671)* +*For more on variants, see [here](https://github.com/apache/mxnet/issues/8671)* ## Framework Components diff --git a/cd/python/pypi/README.md b/cd/python/pypi/README.md index 1c3665038af7..0909b9768cc3 100644 --- a/cd/python/pypi/README.md +++ b/cd/python/pypi/README.md @@ -24,7 +24,7 @@ The Jenkins pipelines for continuous delivery of the PyPI MXNet packages. The pipelines for each variant are run, and fail, independently. Each depends on a successful build of the statically linked libmxet library. -The pipeline relies on the scripts and resources located in [tools/pip](https://github.com/apache/incubator-mxnet/tree/master/tools/pip) +The pipeline relies on the scripts and resources located in [tools/pip](https://github.com/apache/mxnet/tree/master/tools/pip) to build the PyPI packages. ## Credentials diff --git a/cd/python/pypi/pypi_package.sh b/cd/python/pypi/pypi_package.sh index 26626ef422e2..fbc83b613e3f 100755 --- a/cd/python/pypi/pypi_package.sh +++ b/cd/python/pypi/pypi_package.sh @@ -21,7 +21,7 @@ set -ex # variant = cpu, native, cu101, cu102, etc. 
export mxnet_variant=${1:?"Please specify the mxnet variant"} -# Due to this PR: https://github.com/apache/incubator-mxnet/pull/14899 +# Due to this PR: https://github.com/apache/mxnet/pull/14899 # The setup.py expects that dnnl_version.h be present in # mxnet-build/3rdparty/onednn/build/install/include # The artifact repository stores this file in the dependencies diff --git a/cd/utils/artifact_repository.md b/cd/utils/artifact_repository.md index e1c70cfd2441..2b998998a631 100644 --- a/cd/utils/artifact_repository.md +++ b/cd/utils/artifact_repository.md @@ -26,7 +26,7 @@ An MXNet artifact is defined as the following set of files: * The compiled libmxnet.so * License files for dependencies that required their licenses to be shipped with the binary -* Dependencies that should be shipped together with the binary. For instance, for packaging the python wheel files, some dependencies that cannot be statically linked to the library need to also be included, see here (https://github.com/apache/incubator-mxnet/blob/master/tools/pip/setup.py#L142). +* Dependencies that should be shipped together with the binary. For instance, for packaging the python wheel files, some dependencies that cannot be statically linked to the library need to also be included, see here (https://github.com/apache/mxnet/blob/master/tools/pip/setup.py#L142). The artifact_repository.py script automates the upload and download of the specified files with the appropriate S3 object keys by taking explicitly set, or automatically derived, values for the different characteristics of the artifact. diff --git a/cd/utils/requirements.txt b/cd/utils/requirements.txt index 4ecbff9c00cb..0aaf101ca477 100644 --- a/cd/utils/requirements.txt +++ b/cd/utils/requirements.txt @@ -1,2 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. boto3==1.9.114 PyYAML==5.1 diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index 4fe85898b640..2a19f361553d 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -107,7 +107,6 @@ gather_licenses() { cp tools/dependencies/LICENSE.binary.dependencies licenses/ cp NOTICE licenses/ cp LICENSE licenses/ - cp DISCLAIMER licenses/ } # Compiles the dynamic mxnet library @@ -818,7 +817,7 @@ cd_unittest_ubuntu() { OMP_NUM_THREADS=$(expr $(nproc) / 4) pytest -m 'not serial' -n 4 --durations=50 --verbose tests/python/unittest pytest -m 'serial' --durations=50 --verbose tests/python/unittest - # https://github.com/apache/incubator-mxnet/issues/11801 + # https://github.com/apache/mxnet/issues/11801 # if [[ ${mxnet_variant} = "cpu" ]] || [[ ${mxnet_variant} = "mkl" ]]; then # integrationtest_ubuntu_cpu_dist_kvstore # fi @@ -1394,7 +1393,7 @@ create_repo() { git clone $mxnet_url $repo_folder --recursive echo "Adding MXNet upstream repo..." cd $repo_folder - git remote add upstream https://github.com/apache/incubator-mxnet + git remote add upstream https://github.com/apache/mxnet cd .. 
} diff --git a/ci/jenkins/Jenkinsfile_centos_gpu b/ci/jenkins/Jenkinsfile_centos_gpu index 1eff794d5a0d..3cdb52e34c7b 100644 --- a/ci/jenkins/Jenkinsfile_centos_gpu +++ b/ci/jenkins/Jenkinsfile_centos_gpu @@ -29,7 +29,7 @@ node('utility') { utils = load('ci/Jenkinsfile_utils.groovy') custom_steps = load('ci/jenkins/Jenkins_steps.groovy') } -utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu', linux_gpu: 'mxnetlinux-gpu', linux_gpu_p3: 'mxnetlinux-gpu-p3') +utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu', linux_gpu: 'mxnetlinux-gpu', linux_gpu_p3: 'mxnetlinux-gpu-p3', linux_gpu_g4: 'mxnetlinux-gpu-g4') utils.main_wrapper( core_logic: { diff --git a/ci/jenkins/Jenkinsfile_unix_cpu b/ci/jenkins/Jenkinsfile_unix_cpu index 22fc536592c2..261ada05b517 100644 --- a/ci/jenkins/Jenkinsfile_unix_cpu +++ b/ci/jenkins/Jenkinsfile_unix_cpu @@ -55,7 +55,7 @@ core_logic: { custom_steps.test_unix_onnx_cpu('cpu'), */ /* Disabled due to master build failure: - * http://jenkins.mxnet-ci.amazon-ml.com/blue/organizations/jenkins/incubator-mxnet/detail/master/1221/pipeline/ + * http://jenkins.mxnet-ci.com/blue/organizations/jenkins/incubator-mxnet/detail/master/1221/pipeline/ * https://github.com/apache/incubator-mxnet/issues/11801 custom_steps.test_unix_distributed_kvstore_cpu('cpu') */ diff --git a/ci/publish/website/deploy.sh b/ci/publish/website/deploy.sh index 8b89415e001d..c5b0eb3fc884 100644 --- a/ci/publish/website/deploy.sh +++ b/ci/publish/website/deploy.sh @@ -39,11 +39,11 @@ jekyll_fork=ThomasDelteil setup_mxnet_site_repo() { fork=$1 - if [ ! -d "incubator-mxnet-site" ]; then - git clone https://$APACHE_USERNAME:$APACHE_PASSWORD@github.com/aaronmarkham/incubator-mxnet-site.git + if [ ! 
-d "mxnet-site" ]; then + git clone https://$APACHE_USERNAME:$APACHE_PASSWORD@github.com/aaronmarkham/mxnet-site.git fi - cd incubator-mxnet-site + cd mxnet-site git checkout asf-site rm -rf * git rm -r * @@ -66,14 +66,14 @@ setup_jekyll_repo() $jekyll_fork # Copy in the main jekyll website artifacts web_artifacts=mxnet.io-v2/release -web_dir=incubator-mxnet-site +web_dir=mxnet-site cp -a $web_artifacts/* $web_dir fetch_artifacts() { api=$1 artifacts=https://mxnet-public.s3.us-east-2.amazonaws.com/docs/$version/$api-artifacts.tgz - dir=incubator-mxnet-site/api/ + dir=mxnet-site/api/ wget -q $artifacts mkdir -p $dir tar xf $api-artifacts.tgz -C $dir @@ -86,7 +86,7 @@ do done # Commit the updates -cd incubator-mxnet-site +cd mxnet-site pwd git branch git add . diff --git a/ci/requirements.txt b/ci/requirements.txt index 8f21ead27f7c..7adc32fd7a16 100644 --- a/ci/requirements.txt +++ b/ci/requirements.txt @@ -1 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. docker==3.5.0 diff --git a/cpp-package/README.md b/cpp-package/README.md index fc38832ea814..5cedb8c37014 100644 --- a/cpp-package/README.md +++ b/cpp-package/README.md @@ -32,17 +32,17 @@ The cpp-package directory contains the implementation of C++ API. Users are requ 1. 
Building the MXNet C++ package requires building MXNet from source. 2. Clone the MXNet GitHub repository **recursively** to ensure the code in submodules is available for building MXNet. ``` - git clone --recursive https://github.com/apache/incubator-mxnet mxnet + git clone --recursive https://github.com/apache/mxnet mxnet ``` 3. Install the [recommended dependencies](https://mxnet.apache.org/versions/master/get_started/build_from_source.html#installing-mxnet's-recommended-dependencies) and [optional dependencies](https://mxnet.apache.org/versions/master/get_started/build_from_source.html#overview-of-optional-dependencies-and-optional-features) for building MXNet from source. -4. There is a configuration file for cmake, [config/*.cmake]() that contains all the compilation options. You can edit this file and set the appropriate options prior to running the **cmake** command. -5. Please refer to [cmake configuration files](https://github.com/apache/incubator-mxnet/blob/970a2cfbe77d09ee610fdd70afca1a93247cf4fb/config/linux_gpu.cmake#L18-L37) for more details on how to configure and compile MXNet. +4. There is a configuration file for cmake, [config/*.cmake]() that contains all the compilation options. You can edit this file and set the appropriate options prior to running the **cmake** command. +5. Please refer to [cmake configuration files](https://github.com/apache/mxnet/blob/970a2cfbe77d09ee610fdd70afca1a93247cf4fb/config/linux_gpu.cmake#L18-L37) for more details on how to configure and compile MXNet. 6. For enabling the build of C++ Package, set the **-DUSE\_CPP\_PACKAGE = 1** in cmake options. ### Cross-Compilation steps: 1. Build the C++ package for the **host** platform to generate op.h file. -2. Remove the following line in [CMakeLists.txt](). +2. Remove the following line in [CMakeLists.txt](). ``` COMMAND python OpWrapperGenerator.py $ ``` @@ -53,7 +53,7 @@ The cpp-package directory contains the implementation of C++ API. 
Users are requ In order to consume the C++ API please follow the steps below. 1. Ensure that the MXNet shared library is built from source with the **USE\_CPP\_PACKAGE = 1**. -2. Include the [MxNetCpp.h]() in the program that is going to consume MXNet C++ API. +2. Include the [MxNetCpp.h]() in the program that is going to consume MXNet C++ API. ```c++ #include ``` diff --git a/cpp-package/example/README.md b/cpp-package/example/README.md index ace50bda563a..e508a7f17f1a 100644 --- a/cpp-package/example/README.md +++ b/cpp-package/example/README.md @@ -22,17 +22,17 @@ ## Building C++ examples -The examples in this folder demonstrate the **training** workflow. The **inference workflow** related examples can be found in [inference]() folder. -Please build the MXNet C++ Package as explained in the [README]() File. -The examples in this folder are built while building the MXNet library and cpp-package from source. You can get the executable files by just copying them from ```incubator-mxnet/build/cpp-package/example``` +The examples in this folder demonstrate the **training** workflow. The **inference workflow** related examples can be found in [inference]() folder. +Please build the MXNet C++ Package as explained in the [README]() File. +The examples in this folder are built while building the MXNet library and cpp-package from source. You can get the executable files by just copying them from ```mxnet/build/cpp-package/example``` The examples that are built to be run on GPU may not work on the non-GPU machines. ## Examples demonstrating training workflow -This directory contains following examples. In order to run the examples, ensure that the path to the MXNet shared library is added to the OS specific environment variable viz. **LD\_LIBRARY\_PATH** for Linux, Mac and Ubuntu OS and **PATH** for Windows OS. For example `export LD_LIBRARY_PATH=/usr/local/cuda/lib64:/home/ubuntu/incubator-mxnet/build` on ubuntu using gpu. +This directory contains following examples. 
In order to run the examples, ensure that the path to the MXNet shared library is added to the OS specific environment variable viz. **LD\_LIBRARY\_PATH** for Linux, Mac and Ubuntu OS and **PATH** for Windows OS. For example `export LD_LIBRARY_PATH=/usr/local/cuda/lib64:/home/ubuntu/mxnet/build` on ubuntu using gpu. -### [alexnet.cpp]() +### [alexnet.cpp]() The example implements the C++ version of AlexNet. The networks trains on MNIST data. The number of epochs can be specified as a command line argument. For example to train with 10 epochs use the following: @@ -40,7 +40,7 @@ The example implements the C++ version of AlexNet. The networks trains on MNIST build/alexnet 10 ``` -### [googlenet.cpp]() +### [googlenet.cpp]() The code implements a GoogLeNet/Inception network using the C++ API. The example uses MNIST data to train the network. By default, the example trains the model for 100 epochs. The number of epochs can also be specified in the command line. For example, to train the model for 10 epochs use the following: @@ -48,7 +48,7 @@ The code implements a GoogLeNet/Inception network using the C++ API. The example build/googlenet 10 ``` -### [mlp.cpp]() +### [mlp.cpp]() The code implements a multilayer perceptron from scratch. The example creates its own dummy data to train the model. The example does not require command line parameters. It trains the model for 20,000 epochs. To run the example use the following command: @@ -57,7 +57,7 @@ To run the example use the following command: build/mlp ``` -### [mlp_cpu.cpp]() +### [mlp_cpu.cpp]() The code implements a multilayer perceptron to train the MNIST data. The code demonstrates the use of "SimpleBind" C++ API and MNISTIter. The example is designed to work on CPU. The example does not require command line parameters. 
To run the example use the following command: @@ -66,7 +66,7 @@ To run the example use the following command: build/mlp_cpu ``` -### [mlp_gpu.cpp]() +### [mlp_gpu.cpp]() The code implements a multilayer perceptron to train the MNIST data. The code demonstrates the use of the "SimpleBind" C++ API and MNISTIter. The example is designed to work on GPU. The example does not require command line arguments. To run the example execute following command: @@ -74,7 +74,7 @@ The code implements a multilayer perceptron to train the MNIST data. The code de build/mlp_gpu ``` -### [mlp_csv.cpp]() +### [mlp_csv.cpp]() The code implements a multilayer perceptron to train the MNIST data. The code demonstrates the use of the "SimpleBind" C++ API and CSVIter. The CSVIter can iterate data that is in CSV format. The example can be run on CPU or GPU. The example usage is as follows: @@ -83,12 +83,12 @@ build/mlp_csv --train data/mnist_data/mnist_train.csv --test data/mnist_data/mni ``` * To get the `mnist_training_set.csv` and `mnist_test_set.csv` please run the following command: ```python -# in incubator-mxnet/cpp-package/example directory +# in mxnet/cpp-package/example directory python mnist_to_csv.py ./data/mnist_data/train-images-idx3-ubyte ./data/mnist_data/train-labels-idx1-ubyte ./data/mnist_data/mnist_train.csv 60000 python mnist_to_csv.py ./data/mnist_data/t10k-images-idx3-ubyte ./data/mnist_data/t10k-labels-idx1-ubyte ./data/mnist_data/mnist_test.csv 10000 ``` -### [resnet.cpp]() +### [resnet.cpp]() The code implements a resnet model using the C++ API. The model is used to train MNIST data. The number of epochs for training the model can be specified on the command line. By default, model is trained for 100 epochs. For example, to train with 10 epochs use the following command: @@ -96,14 +96,14 @@ The code implements a resnet model using the C++ API. 
The model is used to train build/resnet 10 ``` -### [lenet.cpp]() +### [lenet.cpp]() The code implements a lenet model using the C++ API. It uses MNIST training data in CSV format to train the network. The example does not use built-in CSVIter to read the data from CSV file. The number of epochs can be specified on the command line. By default, the mode is trained for 100,000 epochs. For example, to train with 10 epochs use the following command: ``` build/lenet 10 ``` -### [lenet\_with\_mxdataiter.cpp]() +### [lenet\_with\_mxdataiter.cpp]() The code implements a lenet model using the C++ API. It uses MNIST training data to train the network. The example uses built-in MNISTIter to read the data. The number of epochs can be specified on the command line. By default, the mode is trained for 100 epochs. For example, to train with 10 epochs use the following command: @@ -113,7 +113,7 @@ build/lenet_with_mxdataiter 10 In addition, there is `run_lenet_with_mxdataiter.sh` that downloads the mnist data and run `lenet_with_mxdataiter` example. -### [inception_bn.cpp]() +### [inception_bn.cpp]() The code implements an Inception network using the C++ API with batch normalization. The example uses MNIST data to train the network. The model trains for 100 epochs. The example can be run by executing the following command: diff --git a/cpp-package/example/inference/README.md b/cpp-package/example/inference/README.md index fc81dea45b0b..37061cec0d70 100644 --- a/cpp-package/example/inference/README.md +++ b/cpp-package/example/inference/README.md @@ -22,17 +22,17 @@ ## Building C++ Inference examples -The examples in this folder demonstrate the **inference** workflow. Please build the MXNet C++ Package as explained in the [README]() File. You can get the executable files by just copying them from ```incubator-mxnet/build/cpp-package/example``` +The examples in this folder demonstrate the **inference** workflow. 
Please build the MXNet C++ Package as explained in the [README]() File. You can get the executable files by just copying them from ```mxnet/build/cpp-package/example``` ## Examples demonstrating inference workflow This directory contains following examples. In order to run the examples, ensure that the path to the MXNet shared library is added to the OS specific environment variable viz. **LD\_LIBRARY\_PATH** for Linux, Mac and Ubuntu OS and **PATH** for Windows OS. -## [imagenet_inference.cpp]() +## [imagenet_inference.cpp]() -This example demonstrates image classification workflow with pre-trained models using MXNet C++ API. Now this script also supports inference with quantized CNN models generated by oneDNN (see this [quantization flow](https://github.com/apache/incubator-mxnet/blob/master/example/quantization/README.md)). By using C++ API, the latency of most models will be reduced to some extent compared with current Python implementation. +This example demonstrates image classification workflow with pre-trained models using MXNet C++ API. Now this script also supports inference with quantized CNN models generated by oneDNN (see this [quantization flow](https://github.com/apache/mxnet/blob/master/example/quantization/README.md)). By using C++ API, the latency of most models will be reduced to some extent compared with current Python implementation. -Most of CNN models have been tested on Linux systems. And 50000 images are used to collect accuracy numbers. Please refer to this [README](https://github.com/apache/incubator-mxnet/blob/master/example/quantization/README.md) for more details about accuracy. +Most of CNN models have been tested on Linux systems. And 50000 images are used to collect accuracy numbers. Please refer to this [README](https://github.com/apache/mxnet/blob/master/example/quantization/README.md) for more details about accuracy. The following performance numbers are collected via using C++ inference API on AWS EC2 C5.12xlarge. 
The environment variables are set like below: @@ -79,10 +79,10 @@ imagenet_inference --symbol_file Follow the below steps to do inference with more models. - Download the pre-trained FP32 models into ```./model``` directory. -- Refer this [README](https://github.com/apache/incubator-mxnet/blob/master/example/quantization/README.md) to generate the corresponding quantized models and also put them into ```./model``` directory. +- Refer this [README](https://github.com/apache/mxnet/blob/master/example/quantization/README.md) to generate the corresponding quantized models and also put them into ```./model``` directory. - Prepare [validation dataset](http://data.mxnet.io/data/val_256_q90.rec) and put it into ```./data``` directory. -The below command lines show how to run inference with FP32/INT8 resnet50_v1 model. Because the C++ inference script provides the almost same command line as this [Python script](https://github.com/apache/incubator-mxnet/blob/master/example/quantization/imagenet_inference.py) and then users can easily go from Python to C++. +The below command lines show how to run inference with FP32/INT8 resnet50_v1 model. Because the C++ inference script provides the almost same command line as this [Python script](https://github.com/apache/mxnet/blob/master/example/quantization/imagenet_inference.py) and then users can easily go from Python to C++. ``` # FP32 inference @@ -98,7 +98,7 @@ The below command lines show how to run inference with FP32/INT8 resnet50_v1 mod ./imagenet_inference --symbol_file "./model/resnet50_v1-quantized-5batches-naive-symbol.json" --batch_size 64 --num_inference_batches 500 --benchmark ``` -For a quick inference test, users can directly run [unit_test_imagenet_inference.sh]() by using the below command. This script will automatically download the pre-trained **Inception-Bn** and **resnet50_v1_int8** model and **validation dataset** which are required for inference. 
+For a quick inference test, users can directly run [unit_test_imagenet_inference.sh]() by using the below command. This script will automatically download the pre-trained **Inception-Bn** and **resnet50_v1_int8** model and **validation dataset** which are required for inference. ``` ./unit_test_imagenet_inference.sh @@ -144,7 +144,7 @@ imagenet_inference.cpp:439: benchmark completed! imagenet_inference.cpp:440: batch size: 16 num batch: 500 throughput: 6284.78 imgs/s latency:0.159115 ms ``` -## [sentiment_analysis_rnn.cpp]() +## [sentiment_analysis_rnn.cpp]() This example demonstrates how you can load a pre-trained RNN model and use it to predict the sentiment expressed in the given movie review with the MXNet C++ API. The example is capable of processing variable legnth inputs. It performs the following tasks - Loads the pre-trained RNN model. - Loads the dictionary file containing the word to index mapping. @@ -208,4 +208,4 @@ Input Line : [ The direction is awesome] Score : 0.968855 The sentiment score between 0 and 1, (1 being positive)=0.966677 ``` -Alternatively, you can run the [unit_test_sentiment_analysis_rnn.sh]() script. +Alternatively, you can run the [unit_test_sentiment_analysis_rnn.sh]() script. 
diff --git a/cpp-package/example/inference/multi_threaded_inference/multi_threaded_inference.cc b/cpp-package/example/inference/multi_threaded_inference/multi_threaded_inference.cc index 9b279e9c4315..74e7acd064e4 100644 --- a/cpp-package/example/inference/multi_threaded_inference/multi_threaded_inference.cc +++ b/cpp-package/example/inference/multi_threaded_inference/multi_threaded_inference.cc @@ -38,7 +38,7 @@ const float DEFAULT_MEAN = 117.0; // Code to load image, PrintOutput results, helper functions for the same obtained from: -// https://github.com/apache/incubator-mxnet/blob/master/example/image-classification/predict-cpp/ +// https://github.com/apache/mxnet/blob/master/example/image-classification/predict-cpp/ static std::string trim(const std::string& input) { auto not_space = [](int ch) { return !std::isspace(ch); }; diff --git a/cpp-package/include/mxnet-cpp/contrib.h b/cpp-package/include/mxnet-cpp/contrib.h index b754ab5e5725..5feefd32efdf 100644 --- a/cpp-package/include/mxnet-cpp/contrib.h +++ b/cpp-package/include/mxnet-cpp/contrib.h @@ -58,14 +58,14 @@ inline std::vector split(const std::string& str, const std::string& namespace contrib { // needs to be same with -// https://github.com/apache/incubator-mxnet/blob/1c874cfc807cee755c38f6486e8e0f4d94416cd8/src/operator/subgraph/tensorrt/tensorrt-inl.h#L190 +// https://github.com/apache/mxnet/blob/1c874cfc807cee755c38f6486e8e0f4d94416cd8/src/operator/subgraph/tensorrt/tensorrt-inl.h#L190 static const std::string TENSORRT_SUBGRAPH_PARAM_IDENTIFIER = "subgraph_params_names"; // NOLINT // needs to be same with -// https://github.com/apache/incubator-mxnet/blob/master/src/operator/subgraph/tensorrt/tensorrt.cc#L244 +// https://github.com/apache/mxnet/blob/master/src/operator/subgraph/tensorrt/tensorrt.cc#L244 static const std::string TENSORRT_SUBGRAPH_PARAM_PREFIX = "subgraph_param_"; // NOLINT /*! 
* this is a mimic to - * https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/contrib/tensorrt.py#L37 + * https://github.com/apache/mxnet/blob/master/python/mxnet/contrib/tensorrt.py#L37 * @param symbol symbol that already called subgraph api * @param argParams original arg params, params needed by tensorrt will be removed after calling * this function diff --git a/cpp-package/include/mxnet-cpp/symbol.hpp b/cpp-package/include/mxnet-cpp/symbol.hpp index 187dad842862..340b29923ea5 100644 --- a/cpp-package/include/mxnet-cpp/symbol.hpp +++ b/cpp-package/include/mxnet-cpp/symbol.hpp @@ -191,7 +191,7 @@ inline std::map Symbol::ListAttributes() const { std::map attributes; for (mx_uint i = 0; i < size; ++i) { // pairs is 2 * size with key, value pairs according to - // https://github.com/apache/incubator-mxnet/blob/master/include/mxnet/c_api.h#L1428 + // https://github.com/apache/mxnet/blob/master/include/mxnet/c_api.h#L1428 attributes[pairs[2 * i]] = pairs[2 * i + 1]; } return attributes; diff --git a/cpp-package/tests/ci_test.sh b/cpp-package/tests/ci_test.sh index 75805b04ef75..2b6205278539 100755 --- a/cpp-package/tests/ci_test.sh +++ b/cpp-package/tests/ci_test.sh @@ -60,7 +60,7 @@ cp /work/build/cpp-package/example/test_score . cp /work/build/cpp-package/example/test_ndarray_copy . ./test_ndarray_copy -# skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/20011 +# skippping temporarily, tracked by https://github.com/apache/mxnet/issues/20011 cp /work/build/cpp-package/example/test_regress_label . ./test_regress_label diff --git a/doap.rdf b/doap.rdf index 10ff7139b440..c8b88bfa6867 100644 --- a/doap.rdf +++ b/doap.rdf @@ -29,7 +29,7 @@ Apache MXNet is a deep learning framework designed for both efficiency and flexibility. Apache MXNet is a deep learning framework designed for both efficiency and flexibility. 
It's lightweight, Portable, Flexible Distributed/Mobile Deep Learning with dynamic, mutation-aware data-flow dependency scheduler; for Python, R, Julia, Scala, Go, Javascript and more - + C++ @@ -43,8 +43,8 @@ - - + + diff --git a/docker/docker-python/README.md b/docker/docker-python/README.md index a5dd0e3fb5f7..67930910b76c 100644 --- a/docker/docker-python/README.md +++ b/docker/docker-python/README.md @@ -44,10 +44,10 @@ Refer: https://pypi.org/project/mxnet/ `./build_python_dockerfile.sh ` For example: -`./build_python_dockerfile.sh 1.3.0 1.3.0.post0 ~/build-docker/incubator-mxnet` +`./build_python_dockerfile.sh 1.3.0 1.3.0.post0 ~/build-docker/mxnet` ### Tests run -* [test_mxnet.py](https://github.com/apache/incubator-mxnet/blob/master/docker/docker-python/test_mxnet.py): This script is used to make sure that the docker image builds the expected mxnet version. That is, the version picked by pip is the same as as the version passed as a parameter. +* [test_mxnet.py](https://github.com/apache/mxnet/blob/master/docker/docker-python/test_mxnet.py): This script is used to make sure that the docker image builds the expected mxnet version. That is, the version picked by pip is the same as as the version passed as a parameter. ### Dockerhub Credentials Dockerhub credentials will be required to push images at the end of this script. diff --git a/docs/README.md b/docs/README.md index 45624562f9a0..6f326134a321 100644 --- a/docs/README.md +++ b/docs/README.md @@ -38,7 +38,7 @@ If you plan to contribute changes to the documentation or website, please submit MXNet's Python documentation is built with [Sphinx](https://www.sphinx-doc.org) and a variety of plugins including [pandoc](https://pandoc.org/), and [recommonmark](https://github.com/rtfd/recommonmark). -More information on the dependencies can be found in the [CI folder's installation scripts](https://github.com/apache/incubator-mxnet/tree/master/ci/docker/install/ubuntu_docs.sh). 
+More information on the dependencies can be found in the [CI folder's installation scripts](https://github.com/apache/mxnet/tree/master/ci/docker/install/ubuntu_docs.sh). You can run just the Python docs by following the instructions in the Python API guide. @@ -56,7 +56,7 @@ If you only need to make changes to tutorials or other pages that are not genera ### Ubuntu Setup -As this is maintained for CI, Ubuntu is recommended. Refer to [ubuntu_doc.sh](https://github.com/apache/incubator-mxnet/tree/master/ci/docker/install/ubuntu_docs.sh) for the latest install script. +As this is maintained for CI, Ubuntu is recommended. Refer to [ubuntu_doc.sh](https://github.com/apache/mxnet/tree/master/ci/docker/install/ubuntu_docs.sh) for the latest install script. ### Caveat for Rendering Outputs @@ -158,11 +158,11 @@ The `-W` Sphinx option enforces "warnings as errors". This will help you debug y ## Production Website Deployment Process -[Apache Jenkins MXNet website building job](https://builds.apache.org/job/incubator-mxnet-build-site/) is used to build MXNet website. +[Apache Jenkins MXNet website building job](https://builds.apache.org/job/mxnet-build-site/) is used to build MXNet website. -The Jenkins docs build job will fetch MXNet repository, build MXNet website and push all static files to [host repository](https://github.com/apache/incubator-mxnet-site.git). +The Jenkins docs build job will fetch MXNet repository, build MXNet website and push all static files to [host repository](https://github.com/apache/mxnet-site.git). -The host repo is hooked with [Apache gitbox](https://gitbox.apache.org/repos/asf?p=incubator-mxnet-site.git;a=summary) to host website. +The host repo is hooked with [Apache gitbox](https://gitbox.apache.org/repos/asf?p=mxnet-site.git;a=summary) to host website. 
### Processes for Running the Docs Build Jobs diff --git a/docs/python_docs/python/scripts/conf.py b/docs/python_docs/python/scripts/conf.py index 190065d6ee49..0de935840149 100644 --- a/docs/python_docs/python/scripts/conf.py +++ b/docs/python_docs/python/scripts/conf.py @@ -32,7 +32,7 @@ project = 'Apache MXNet' author = f'{project} developers' copyright = f'2015-2020, {author}' -github_doc_root = 'https://github.com/apache/incubator-mxnet/tree/master/docs/' +github_doc_root = 'https://github.com/apache/mxnet/tree/master/docs/' doc_root = 'https://mxnet.apache.org/' # add markdown parser diff --git a/docs/python_docs/python/tutorials/getting-started/gluon_from_experiment_to_deployment.md b/docs/python_docs/python/tutorials/getting-started/gluon_from_experiment_to_deployment.md index a04e028f107e..3a1c7c55fc91 100644 --- a/docs/python_docs/python/tutorials/getting-started/gluon_from_experiment_to_deployment.md +++ b/docs/python_docs/python/tutorials/getting-started/gluon_from_experiment_to_deployment.md @@ -46,7 +46,7 @@ We have prepared a utility file to help you download and organize your data into ```{.python .input} import mxnet as mx data_util_file = "oxford_102_flower_dataset.py" -base_url = "https://raw.githubusercontent.com/apache/incubator-mxnet/master/docs/tutorial_utils/data/{}?raw=true" +base_url = "https://raw.githubusercontent.com/apache/mxnet/master/docs/tutorial_utils/data/{}?raw=true" mx.test_utils.download(base_url.format(data_util_file), fname=data_util_file) import oxford_102_flower_dataset @@ -271,4 +271,4 @@ You can find more ways to run inference and deploy your models here: 2. [Gluon book on fine-tuning](https://www.d2l.ai/chapter_computer-vision/fine-tuning.html) 3. [Gluon CV transfer learning tutorial](https://cv.gluon.ai/build/examples_classification/transfer_learning_minc.html) 4. [Gluon crash course](https://gluon-crash-course.mxnet.io/) -5. 
[Gluon CPP inference example](https://github.com/apache/incubator-mxnet/blob/master/cpp-package/example/inference/) +5. [Gluon CPP inference example](https://github.com/apache/mxnet/blob/master/cpp-package/example/inference/) diff --git a/docs/python_docs/python/tutorials/packages/gluon/training/fit_api_tutorial.md b/docs/python_docs/python/tutorials/packages/gluon/training/fit_api_tutorial.md index fba454ca4074..5c43b6c5bb49 100644 --- a/docs/python_docs/python/tutorials/packages/gluon/training/fit_api_tutorial.md +++ b/docs/python_docs/python/tutorials/packages/gluon/training/fit_api_tutorial.md @@ -163,7 +163,7 @@ There are also some default utility handlers that will be added to your estimato `ValidationHandler` is used to validate your model on test data at each epoch's end and then calculate validation metrics. You can create these utility handlers with different configurations and pass to estimator. This will override the default handler configuration. You can create a custom handler by inheriting one or multiple -[base event handlers](https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/gluon/contrib/estimator/event_handler.py#L32) +[base event handlers](https://github.com/apache/mxnet/blob/master/python/mxnet/gluon/contrib/estimator/event_handler.py#L32) including: `TrainBegin`, `TrainEnd`, `EpochBegin`, `EpochEnd`, `BatchBegin`, `BatchEnd`. 
diff --git a/docs/python_docs/python/tutorials/packages/legacy/ndarray/gotchas_numpy_in_mxnet.md b/docs/python_docs/python/tutorials/packages/legacy/ndarray/gotchas_numpy_in_mxnet.md index 8f4f15309aa1..e0c0fc57ee1b 100644 --- a/docs/python_docs/python/tutorials/packages/legacy/ndarray/gotchas_numpy_in_mxnet.md +++ b/docs/python_docs/python/tutorials/packages/legacy/ndarray/gotchas_numpy_in_mxnet.md @@ -104,9 +104,9 @@ pad_array(nd.array([1, 2, 3]), max_length=10) `` -### Search for an operator on [Github](https://github.com/apache/incubator-mxnet/labels/Operator) +### Search for an operator on [Github](https://github.com/apache/mxnet/labels/Operator) -Apache MXNet community is responsive to requests, and everyone is welcomed to contribute new operators. Have in mind, that there is always a lag between new operators being merged into the codebase and release of a next stable version. For example, [nd.diag()](https://github.com/apache/incubator-mxnet/pull/11643) operator was recently introduced to Apache MXNet, but on the moment of writing this tutorial, it is not in any stable release. You can always get all latest implementations by installing the [master version](https://mxnet.apache.org/get_started?version=master&platform=linux&language=python&environ=pip&processor=cpu#) of Apache MXNet. +Apache MXNet community is responsive to requests, and everyone is welcome to contribute new operators. Have in mind, that there is always a lag between new operators being merged into the codebase and release of a next stable version. For example, [nd.diag()](https://github.com/apache/mxnet/pull/11643) operator was recently introduced to Apache MXNet, but at the moment of writing this tutorial, it is not in any stable release. You can always get all latest implementations by installing the [master version](https://mxnet.apache.org/get_started?version=master&platform=linux&language=python&environ=pip&processor=cpu#) of Apache MXNet. 
## How to minimize the impact of blocking calls diff --git a/docs/python_docs/python/tutorials/performance/backend/dnnl/dnnl_readme.md b/docs/python_docs/python/tutorials/performance/backend/dnnl/dnnl_readme.md index a75e09293bf1..db725baf19b0 100644 --- a/docs/python_docs/python/tutorials/performance/backend/dnnl/dnnl_readme.md +++ b/docs/python_docs/python/tutorials/performance/backend/dnnl/dnnl_readme.md @@ -49,8 +49,8 @@ sudo apt-get install -y graphviz ### Clone MXNet sources ``` -git clone --recursive https://github.com/apache/incubator-mxnet.git -cd incubator-mxnet +git clone --recursive https://github.com/apache/mxnet.git +cd mxnet ``` ### Build MXNet with oneDNN @@ -105,8 +105,8 @@ brew install llvm ### Clone MXNet sources ``` -git clone --recursive https://github.com/apache/incubator-mxnet.git -cd incubator-mxnet +git clone --recursive https://github.com/apache/mxnet.git +cd mxnet ``` ### Build MXNet with oneDNN @@ -133,10 +133,10 @@ To build and install MXNet yourself, you need the following dependencies. Instal After you have installed all of the required dependencies, build the MXNet source code: -1. Start a Visual Studio command prompt by click windows Start menu>>Visual Studio 2015>>VS2015 X64 Native Tools Command Prompt, and download the MXNet source code from [GitHub](https://github.com/apache/incubator-mxnet) by the command: +1. Start a Visual Studio command prompt by click windows Start menu>>Visual Studio 2015>>VS2015 X64 Native Tools Command Prompt, and download the MXNet source code from [GitHub](https://github.com/apache/mxnet) by the command: ``` -git clone --recursive https://github.com/apache/incubator-mxnet.git -cd C:\incubator-mxent +git clone --recursive https://github.com/apache/mxnet.git +cd C:\mxnet ``` 2. Enable oneDNN by -DUSE_ONEDNN=1. Use [CMake 3](https://cmake.org/) to create a Visual Studio solution in ```./build```. 
Make sure to specify the architecture in the command: @@ -172,7 +172,7 @@ User can follow the same steps of Visual Studio 2015 to build MXNET with oneDNN, Preinstall python and some dependent modules: ``` pip install numpy graphviz -set PYTHONPATH=[workdir]\incubator-mxnet\python +set PYTHONPATH=[workdir]\mxnet\python ``` or install mxnet ``` @@ -322,7 +322,7 @@ After optimization of Convolution + ReLU oneDNN executes both operations within MXNet built with oneDNN brings outstanding performance improvement on quantization and inference with INT8 Intel CPU Platform on Intel Xeon Scalable Platform. -- [CNN Quantization Examples](https://github.com/apache/incubator-mxnet/tree/master/example/quantization). +- [CNN Quantization Examples](https://github.com/apache/mxnet/tree/master/example/quantization). - [Model Quantization for Production-Level Neural Network Inference](https://cwiki.apache.org/confluence/display/MXNET/MXNet+Graph+Optimization+and+Quantization+based+on+subgraph+and+MKL-DNN). @@ -332,4 +332,4 @@ MXNet built with oneDNN brings outstanding performance improvement on quantizati - For questions or support specific to oneDNN, visit the [oneDNN](https://github.com/oneapi-src/oneDNN) website. -- If you find bugs, please open an issue on GitHub for [MXNet with MKL](https://github.com/apache/incubator-mxnet/labels/MKL) or [MXNet with oneDNN](https://github.com/apache/incubator-mxnet/labels/MKLDNN). +- If you find bugs, please open an issue on GitHub for [MXNet with MKL](https://github.com/apache/mxnet/labels/MKL) or [MXNet with oneDNN](https://github.com/apache/mxnet/labels/MKLDNN). 
diff --git a/docs/python_docs/python/tutorials/performance/backend/profiler.md b/docs/python_docs/python/tutorials/performance/backend/profiler.md index 216722ac9c1b..bc9c632424a5 100644 --- a/docs/python_docs/python/tutorials/performance/backend/profiler.md +++ b/docs/python_docs/python/tutorials/performance/backend/profiler.md @@ -326,7 +326,7 @@ You can initiate the profiling directly from inside Visual Profiler or from the `==11588== NVPROF is profiling process 11588, command: python my_profiler_script.py` -`==11588== Generated result file: /home/user/Development/incubator-mxnet/ci/my_profile.nvvp` +`==11588== Generated result file: /home/user/Development/mxnet/ci/my_profile.nvvp` We specified an output file called `my_profile.nvvp` and this will be annotated with NVTX ranges (for MXNet operations) that will be displayed alongside the standard NVProf timeline. This can be very useful when you're trying to find patterns between operators run by MXNet, and their associated CUDA kernel calls. @@ -352,7 +352,7 @@ Nsight Compute is available in CUDA 10 toolkit, but can be used to profile code ## Further reading -- [Examples using MXNet profiler.](https://github.com/apache/incubator-mxnet/tree/master/example/profiler) +- [Examples using MXNet profiler.](https://github.com/apache/mxnet/tree/master/example/profiler) - [Some tips for improving MXNet performance.](https://mxnet.apache.org/api/faq/perf) diff --git a/docs/python_docs/python/tutorials/performance/index.rst b/docs/python_docs/python/tutorials/performance/index.rst index f4491dba5af8..825c746ff1aa 100644 --- a/docs/python_docs/python/tutorials/performance/index.rst +++ b/docs/python_docs/python/tutorials/performance/index.rst @@ -114,7 +114,7 @@ Distributed Training .. 
card:: :title: MXNet with Horovod - :link: https://github.com/apache/incubator-mxnet/tree/master/example/distributed_training-horovod + :link: https://github.com/apache/mxnet/tree/master/example/distributed_training-horovod A set of example scripts demonstrating MNIST and ImageNet training with Horovod as the distributed training backend. diff --git a/docs/python_docs/themes/mx-theme/mxtheme/footer.html b/docs/python_docs/themes/mx-theme/mxtheme/footer.html index 42b15d4b0c24..ae9e62b62402 100644 --- a/docs/python_docs/themes/mx-theme/mxtheme/footer.html +++ b/docs/python_docs/themes/mx-theme/mxtheme/footer.html @@ -6,14 +6,14 @@ -
+